r5765 - trunk/cumin/python/cumin/gridhadoop
by croberts@fedoraproject.org
Author: croberts
Date: 2013-06-21 14:35:16 +0000 (Fri, 21 Jun 2013)
New Revision: 5765
Modified:
trunk/cumin/python/cumin/gridhadoop/datanode.py
trunk/cumin/python/cumin/gridhadoop/hadoop.py
trunk/cumin/python/cumin/gridhadoop/hadoopobjectframe.py
trunk/cumin/python/cumin/gridhadoop/jobtracker.py
trunk/cumin/python/cumin/gridhadoop/namenode.py
trunk/cumin/python/cumin/gridhadoop/tasktracker.py
Log:
Support fetching hadoop objects by id or ipc, whichever we have.
Modified: trunk/cumin/python/cumin/gridhadoop/datanode.py
===================================================================
--- trunk/cumin/python/cumin/gridhadoop/datanode.py 2013-06-20 17:56:10 UTC (rev 5764)
+++ trunk/cumin/python/cumin/gridhadoop/datanode.py 2013-06-21 14:35:16 UTC (rev 5765)
@@ -225,7 +225,7 @@
retval = "Data node:'%s' (%s)" % (xml_escape(self.object.get(session).Name), xml_escape(self.object.get(session).Id))
return retval
- def get_object(self, session, id):
+ def get_object(self, session, id, ipc):
host = self.frame.host.get(session)
response = self.app.remote.get_data_node(self.frame.host.get(session), [id])
obj = HadoopObject(response, self.app.model.com_redhat_cumin_grid_hadoop.DataNode)
Modified: trunk/cumin/python/cumin/gridhadoop/hadoop.py
===================================================================
--- trunk/cumin/python/cumin/gridhadoop/hadoop.py 2013-06-20 17:56:10 UTC (rev 5764)
+++ trunk/cumin/python/cumin/gridhadoop/hadoop.py 2013-06-21 14:35:16 UTC (rev 5765)
@@ -239,7 +239,7 @@
def render_item_value(self, session, item):
value = None
- if item.ipc:
+ if item.ipc and item.ipc != "N/A":
value = item.ipc
else:
value = item.id
@@ -280,7 +280,7 @@
properties = [prop for prop in cls._properties]
for prop in properties:
try:
- if prop.name.lower() in ["id", "ipc"]:
+ if prop.name.lower() in ["id", "ipc", "http"]:
setattr(self, prop.name, aviary_response[1][0]["ref"][prop.name.lower()])
elif prop.name.lower() in ["parent_id", "parent_ipc"]:
setattr(self, prop.name, aviary_response[1][0]["parent"][prop.name.lower().split("_")[1]])
Modified: trunk/cumin/python/cumin/gridhadoop/hadoopobjectframe.py
===================================================================
--- trunk/cumin/python/cumin/gridhadoop/hadoopobjectframe.py 2013-06-20 17:56:10 UTC (rev 5764)
+++ trunk/cumin/python/cumin/gridhadoop/hadoopobjectframe.py 2013-06-21 14:35:16 UTC (rev 5765)
@@ -50,8 +50,9 @@
def do_process(self, session):
id = self.id.get(session)
+ ipc = self.ipc.get(session)
assert id
- obj = self.get_object(session, id)
+ obj = self.get_object(session, id, ipc)
self.object.set(session, obj)
super(HadoopObjectFrame, self).do_process(session)
Modified: trunk/cumin/python/cumin/gridhadoop/jobtracker.py
===================================================================
--- trunk/cumin/python/cumin/gridhadoop/jobtracker.py 2013-06-20 17:56:10 UTC (rev 5764)
+++ trunk/cumin/python/cumin/gridhadoop/jobtracker.py 2013-06-21 14:35:16 UTC (rev 5765)
@@ -260,9 +260,12 @@
pass
return retval
- def get_object(self, session, id):
+ def get_object(self, session, id, ipc):
host = self.app.remote.get_hosts("SCHEDULER", "HADOOP")[0]
- response = self.app.remote.get_job_tracker(host, [id])
+ ref = id
+ if not id or id == "N/A":
+ ref = ipc
+ response = self.app.remote.get_job_tracker(host, [ref])
obj = HadoopObject(response, self.app.model.com_redhat_cumin_grid_hadoop.JobTracker)
return obj
Modified: trunk/cumin/python/cumin/gridhadoop/namenode.py
===================================================================
--- trunk/cumin/python/cumin/gridhadoop/namenode.py 2013-06-20 17:56:10 UTC (rev 5764)
+++ trunk/cumin/python/cumin/gridhadoop/namenode.py 2013-06-21 14:35:16 UTC (rev 5765)
@@ -243,10 +243,13 @@
retval = "Name node:'%s' (%s)" % (xml_escape(self.object.get(session).Name), xml_escape(self.object.get(session).Id))
return retval
- def get_object(self, session, id):
+ def get_object(self, session, id, ipc):
hadoop_schedulers = self.app.remote.get_hosts("SCHEDULER", "HADOOP")
host = hadoop_schedulers[0]
- response = self.app.remote.get_name_node(host, [id])
+ ref = id
+ if not id or id == "N/A":
+ ref = ipc
+ response = self.app.remote.get_name_node(host, [ref])
obj = HadoopObject(response, self.app.model.com_redhat_cumin_grid_hadoop.NameNode)
return obj
Modified: trunk/cumin/python/cumin/gridhadoop/tasktracker.py
===================================================================
--- trunk/cumin/python/cumin/gridhadoop/tasktracker.py 2013-06-20 17:56:10 UTC (rev 5764)
+++ trunk/cumin/python/cumin/gridhadoop/tasktracker.py 2013-06-21 14:35:16 UTC (rev 5765)
@@ -294,7 +294,7 @@
pass
return retval
- def get_object(self, session, id):
+ def get_object(self, session, id, ipc):
host = self.frame.host.get(session)
response = self.app.remote.get_task_tracker(self.frame.host.get(session), [id])
obj = HadoopObject(response, self.app.model.com_redhat_cumin_grid_hadoop.TaskTracker)
10 years, 10 months
r5764 - trunk/cumin/python/cumin/gridhadoop
by croberts@fedoraproject.org
Author: croberts
Date: 2013-06-20 17:56:10 +0000 (Thu, 20 Jun 2013)
New Revision: 5764
Modified:
trunk/cumin/python/cumin/gridhadoop/hadoop.py
Log:
Now gracefully handling the potential for the uptime column to be empty or contain non-numeric values. In those cases, we forego the parsing and just display what we are given (probably something like N/A).
Modified: trunk/cumin/python/cumin/gridhadoop/hadoop.py
===================================================================
--- trunk/cumin/python/cumin/gridhadoop/hadoop.py 2013-06-20 16:43:02 UTC (rev 5763)
+++ trunk/cumin/python/cumin/gridhadoop/hadoop.py 2013-06-20 17:56:10 UTC (rev 5764)
@@ -263,10 +263,15 @@
class UptimeColumn(ObjectTableColumn):
def render_cell_content(self, session, record):
value = self.field.get_content(session, record)
- days = value / 86400
- hours = (value / 3600) - (days * 24)
- minutes = (value / 60) - (days * 1440) - (hours * 60)
- return '%02d:%02d:%02d' % (days, hours, minutes)
+ content = value
+ try:
+ days = value / 86400
+ hours = (value / 3600) - (days * 24)
+ minutes = (value / 60) - (days * 1440) - (hours * 60)
+ content = '%02d:%02d:%02d' % (days, hours, minutes)
+ except:
+ pass
+ return content
class HadoopObject(RosemaryObject):
def __init__(self, aviary_response, cls):
10 years, 10 months
r5763 - trunk/sage/python/sage/aviary
by tmckay@fedoraproject.org
Author: tmckay
Date: 2013-06-20 16:43:02 +0000 (Thu, 20 Jun 2013)
New Revision: 5763
Modified:
trunk/sage/python/sage/aviary/aviaryoperations.py
Log:
Change empty strings to None in add_external_xx
Modified: trunk/sage/python/sage/aviary/aviaryoperations.py
===================================================================
--- trunk/sage/python/sage/aviary/aviaryoperations.py 2013-06-20 16:29:48 UTC (rev 5762)
+++ trunk/sage/python/sage/aviary/aviaryoperations.py 2013-06-20 16:43:02 UTC (rev 5763)
@@ -1148,12 +1148,12 @@
callback(*result_tuple(result, host))
ref = client.factory.create("ns1:HadoopID")
- ref.id = ""
+ ref.id = None
ref.ipc = ipc
ref.http = url
t = CallThread(self.call_client_retry, my_callback,
- client, "startNameNode", "", "", "", ref)
+ client, "startNameNode", None, None, None, ref)
t.start()
def add_external_job_tracker(self, host, ipc, url, callback):
@@ -1184,7 +1184,7 @@
callback(*result_tuple(result, host))
ref = client.factory.create("ns1:HadoopID")
- ref.id = ""
+ ref.id = None
ref.ipc = ipc
ref.http = url
10 years, 10 months
r5762 - trunk/cumin/python/cumin/gridhadoop
by croberts@fedoraproject.org
Author: croberts
Date: 2013-06-20 16:29:48 +0000 (Thu, 20 Jun 2013)
New Revision: 5762
Modified:
trunk/cumin/python/cumin/gridhadoop/jobtracker.py
trunk/cumin/python/cumin/gridhadoop/namenode.py
Log:
Now passing host to add_external calls.
Modified: trunk/cumin/python/cumin/gridhadoop/jobtracker.py
===================================================================
--- trunk/cumin/python/cumin/gridhadoop/jobtracker.py 2013-06-20 15:20:44 UTC (rev 5761)
+++ trunk/cumin/python/cumin/gridhadoop/jobtracker.py 2013-06-20 16:29:48 UTC (rev 5762)
@@ -101,7 +101,8 @@
def do_invoke(self, session, object, invoc, args):
self.invoc = invoc
(ipc, http) = args
- self.app.remote.add_external_job_tracker(ipc, http, invoc.make_callback())
+ host = self.app.remote.get_hosts("SCHEDULER", "HADOOP")[0]
+ self.app.remote.add_external_job_tracker(host, ipc, http, invoc.make_callback())
def get_title(self, session, x):
return "Add an infrastructure job tracker"
Modified: trunk/cumin/python/cumin/gridhadoop/namenode.py
===================================================================
--- trunk/cumin/python/cumin/gridhadoop/namenode.py 2013-06-20 15:20:44 UTC (rev 5761)
+++ trunk/cumin/python/cumin/gridhadoop/namenode.py 2013-06-20 16:29:48 UTC (rev 5762)
@@ -100,7 +100,8 @@
def do_invoke(self, session, object, invoc, args):
self.invoc = invoc
(ipc, http) = args
- self.app.remote.add_external_name_node(ipc, http, invoc.make_callback())
+ host = self.app.remote.get_hosts("SCHEDULER", "HADOOP")[0]
+ self.app.remote.add_external_name_node(host, ipc, http, invoc.make_callback())
def get_title(self, session, x):
return "Add an infrastructure name node"
10 years, 10 months
r5761 - trunk/sage/rpc-defs/aviary
by tmckay@fedoraproject.org
Author: tmckay
Date: 2013-06-20 15:20:44 +0000 (Thu, 20 Jun 2013)
New Revision: 5761
Modified:
trunk/sage/rpc-defs/aviary/aviary-hadoop.xsd
Log:
Update the bundled hadoop xsd to match Aviary
Modified: trunk/sage/rpc-defs/aviary/aviary-hadoop.xsd
===================================================================
--- trunk/sage/rpc-defs/aviary/aviary-hadoop.xsd 2013-06-20 14:56:58 UTC (rev 5760)
+++ trunk/sage/rpc-defs/aviary/aviary-hadoop.xsd 2013-06-20 15:20:44 UTC (rev 5761)
@@ -17,119 +17,117 @@
*/
-->
<xs:schema xmlns:xs="http://www.w3.org/2001/XMLSchema" xmlns:hdp="http://hadoop.aviary.grid.redhat.com" xmlns:mrg="http://common.aviary.grid.redhat.com" targetNamespace="http://hadoop.aviary.grid.redhat.com">
- <xs:import namespace="http://common.aviary.grid.redhat.com" schemaLocation="aviary-common.xsd"/>
- <!-- declare message-level elements using anonymous complex types for simpler generated types -->
- <xs:simpleType name="HadoopStateType">
- <xs:restriction base="xs:string">
- <xs:enumeration value="PENDING"/>
- <xs:enumeration value="RUNNING"/>
- <xs:enumeration value="EXITING"/>
- </xs:restriction>
- </xs:simpleType>
- <xs:complexType name="HadoopID">
- <xs:sequence>
- <xs:element name="id" type="xs:string" minOccurs="0"/>
- <xs:element name="ipc" type="xs:string" minOccurs="0"/>
- </xs:sequence>
- </xs:complexType>
- <xs:complexType name="HadoopNameNodeStart">
- <xs:sequence>
- <!-- path to a versioned Hadoop tar/zip binary dist file -->
- <xs:element name="bin_file" type="xs:string" minOccurs="0"/>
- <xs:element name="owner" type="xs:string" minOccurs="0"/>
- <xs:element name="description" type="xs:string" minOccurs="0"/>
- </xs:sequence>
- </xs:complexType>
- <xs:complexType name="HadoopStart">
- <xs:sequence>
- <!-- a dependent reference to another Hadoop entity in the cluster -->
- <xs:element name="ref" type="hdp:HadoopID"/>
- <!-- path to a versioned Hadoop tar/zip binary dist file -->
- <xs:element name="bin_file" type="xs:string" minOccurs="0"/>
- <xs:element name="description" type="xs:string" minOccurs="0"/>
- <xs:element name="owner" type="xs:string" minOccurs="0"/>
- <xs:element name="count" type="xs:int" minOccurs="0" default="1"/>
- </xs:sequence>
- </xs:complexType>
- <xs:complexType name="HadoopStartResponse">
- <xs:sequence>
- <xs:element name="ref" type="hdp:HadoopID" minOccurs="0"/>
- <xs:element name="status" type="mrg:Status"/>
- </xs:sequence>
- </xs:complexType>
- <xs:complexType name="HadoopStop">
- <xs:sequence>
- <xs:element name="refs" type="hdp:HadoopID" minOccurs="0" maxOccurs="unbounded"/>
- </xs:sequence>
- </xs:complexType>
- <xs:complexType name="HadoopStopResult">
- <xs:sequence>
- <xs:element name="ref" type="hdp:HadoopID" minOccurs="0"/>
- <xs:element name="status" type="mrg:Status"/>
- </xs:sequence>
- </xs:complexType>
- <xs:complexType name="HadoopStopResponse">
- <xs:sequence>
- <xs:element name="results" type="hdp:HadoopStopResult" minOccurs="0" maxOccurs="unbounded"/>
- <xs:element name="status" type="mrg:Status"/>
- </xs:sequence>
- </xs:complexType>
- <xs:complexType name="HadoopQuery">
- <xs:sequence>
- <xs:element name="refs" type="hdp:HadoopID" minOccurs="0" maxOccurs="unbounded"/>
- </xs:sequence>
- </xs:complexType>
- <xs:complexType name="HadoopQueryResult">
- <xs:sequence>
- <xs:element name="ref" type="hdp:HadoopID"/>
- <xs:element name="parent" type="hdp:HadoopID"/>
- <xs:element name="owner" type="xs:string"/>
- <xs:element name="description" type="xs:string"/>
- <xs:element name="submitted" type="xs:int"/>
- <xs:element name="uptime" type="xs:int"/>
- <xs:element name="state" type="hdp:HadoopStateType"/>
- <xs:element name="status" type="mrg:Status"/>
- <xs:element name="bin_file" type="xs:string"/>
- <xs:element name="http" type="xs:anyURI"/>
- </xs:sequence>
- </xs:complexType>
- <xs:complexType name="HadoopQueryResponse">
- <xs:sequence>
- <xs:element name="results" type="hdp:HadoopQueryResult" minOccurs="0" maxOccurs="unbounded"/>
- <xs:element name="status" type="mrg:Status"/>
- </xs:sequence>
- </xs:complexType>
- <!-- NameNode -->
- <xs:element name="StartNameNode" type="hdp:HadoopNameNodeStart"/>
- <xs:element name="StartNameNodeResponse" type="hdp:HadoopStartResponse"/>
- <xs:element name="StopNameNode" type="hdp:HadoopStop"/>
- <xs:element name="StopNameNodeResponse" type="hdp:HadoopStopResponse"/>
- <xs:element name="GetNameNode" type="hdp:HadoopQuery"/>
- <xs:element name="GetNameNodeResponse" type="hdp:HadoopQueryResponse"/>
- <xs:element name="GetAssociatedDataNodes" type="hdp:HadoopQuery"/>
- <xs:element name="GetAssociatedDataNodesResponse" type="hdp:HadoopQueryResponse"/>
- <!-- these all follow the same type pattern so reuse -->
- <!-- DataNode -->
- <xs:element name="StartDataNode" type="hdp:HadoopStart"/>
- <xs:element name="StartDataNodeResponse" type="hdp:HadoopStartResponse"/>
- <xs:element name="StopDataNode" type="hdp:HadoopStop"/>
- <xs:element name="StopDataNodeResponse" type="hdp:HadoopStopResponse"/>
- <xs:element name="GetDataNode" type="hdp:HadoopQuery"/>
- <xs:element name="GetDataNodeResponse" type="hdp:HadoopQueryResponse"/>
- <!-- JobTracker -->
- <xs:element name="StartJobTracker" type="hdp:HadoopStart"/>
- <xs:element name="StartJobTrackerResponse" type="hdp:HadoopStartResponse"/>
- <xs:element name="StopJobTracker" type="hdp:HadoopStop"/>
- <xs:element name="StopJobTrackerResponse" type="hdp:HadoopStopResponse"/>
- <xs:element name="GetJobTracker" type="hdp:HadoopQuery"/>
- <xs:element name="GetJobTrackerResponse" type="hdp:HadoopQueryResponse"/>
- <xs:element name="GetAssociatedTaskTrackers" type="hdp:HadoopQuery"/>
- <xs:element name="GetAssociatedTaskTrackersResponse" type="hdp:HadoopQueryResponse"/>
- <!-- TaskTracker -->
- <xs:element name="StartTaskTracker" type="hdp:HadoopStart"/>
- <xs:element name="StartTaskTrackerResponse" type="hdp:HadoopStartResponse"/>
- <xs:element name="StopTaskTracker" type="hdp:HadoopStop"/>
- <xs:element name="StopTaskTrackerResponse" type="hdp:HadoopStopResponse"/>
- <xs:element name="GetTaskTracker" type="hdp:HadoopQuery"/>
- <xs:element name="GetTaskTrackerResponse" type="hdp:HadoopQueryResponse"/>
+ <xs:import namespace="http://common.aviary.grid.redhat.com" schemaLocation="aviary-common.xsd"/>
+ <!-- declare message-level elements using anonymous complex types for simpler generated types -->
+ <xs:simpleType name="HadoopStateType">
+ <xs:restriction base="xs:string">
+ <xs:enumeration value="PENDING"/>
+ <xs:enumeration value="RUNNING"/>
+ <xs:enumeration value="EXITING"/>
+ </xs:restriction>
+ </xs:simpleType>
+ <xs:complexType name="HadoopID">
+ <xs:sequence>
+ <xs:element name="id" type="xs:string" minOccurs="0"/>
+ <xs:element name="ipc" type="xs:string" minOccurs="0"/>
+ <xs:element name="http" type="xs:string" minOccurs="0"/>
+ </xs:sequence>
+ </xs:complexType>
+ <xs:complexType name="HadoopNameNodeStart">
+ <xs:sequence>
+ <!-- path to a versioned Hadoop tar/zip binary dist file -->
+ <xs:element name="bin_file" type="xs:string" minOccurs="0"/>
+ <xs:element name="owner" type="xs:string" minOccurs="0"/>
+ <xs:element name="description" type="xs:string" minOccurs="0"/>
+ <!--reference to an unmanaged NameNode -->
+ <xs:element name="unmanaged" type="hdp:HadoopID" minOccurs="0"/>
+ </xs:sequence>
+ </xs:complexType>
+ <xs:complexType name="HadoopStart">
+ <xs:sequence>
+ <!-- a dependent reference to another Hadoop entity in the cluster -->
+ <xs:element name="ref" type="hdp:HadoopID"/>
+ <!-- path to a versioned Hadoop tar/zip binary dist file -->
+ <xs:element name="bin_file" type="xs:string" minOccurs="0"/>
+ <xs:element name="owner" type="xs:string" minOccurs="0"/>
+ <xs:element name="description" type="xs:string" minOccurs="0"/>
+ <xs:element name="count" type="xs:int" minOccurs="0" default="1"/>
+ </xs:sequence>
+ </xs:complexType>
+ <xs:complexType name="HadoopStartResponse">
+ <xs:sequence>
+ <xs:element name="ref" type="hdp:HadoopID" minOccurs="0"/>
+ <xs:element name="status" type="mrg:Status"/>
+ </xs:sequence>
+ </xs:complexType>
+ <xs:complexType name="HadoopStop">
+ <xs:sequence>
+ <xs:element name="refs" type="hdp:HadoopID" minOccurs="0" maxOccurs="unbounded"/>
+ </xs:sequence>
+ </xs:complexType>
+ <xs:complexType name="HadoopStopResult">
+ <xs:sequence>
+ <xs:element name="ref" type="hdp:HadoopID" minOccurs="0"/>
+ <xs:element name="status" type="mrg:Status"/>
+ </xs:sequence>
+ </xs:complexType>
+ <xs:complexType name="HadoopStopResponse">
+ <xs:sequence>
+ <xs:element name="results" type="hdp:HadoopStopResult" minOccurs="0" maxOccurs="unbounded"/>
+ <xs:element name="status" type="mrg:Status"/>
+ </xs:sequence>
+ </xs:complexType>
+ <xs:complexType name="HadoopQuery">
+ <xs:sequence>
+ <xs:element name="refs" type="hdp:HadoopID" minOccurs="0" maxOccurs="unbounded"/>
+ </xs:sequence>
+ </xs:complexType>
+ <xs:complexType name="HadoopQueryResult">
+ <xs:sequence>
+ <xs:element name="ref" type="hdp:HadoopID"/>
+ <xs:element name="parent" type="hdp:HadoopID"/>
+ <xs:element name="owner" type="xs:string"/>
+ <xs:element name="description" type="xs:string"/>
+ <xs:element name="submitted" type="xs:int"/>
+ <xs:element name="uptime" type="xs:int"/>
+ <xs:element name="state" type="hdp:HadoopStateType"/>
+ <xs:element name="status" type="mrg:Status"/>
+ <xs:element name="bin_file" type="xs:string"/>
+ </xs:sequence>
+ </xs:complexType>
+ <xs:complexType name="HadoopQueryResponse">
+ <xs:sequence>
+ <xs:element name="results" type="hdp:HadoopQueryResult" minOccurs="0" maxOccurs="unbounded"/>
+ <xs:element name="status" type="mrg:Status"/>
+ </xs:sequence>
+ </xs:complexType>
+ <!-- NameNode -->
+ <xs:element name="StartNameNode" type="hdp:HadoopNameNodeStart"/>
+ <xs:element name="StartNameNodeResponse" type="hdp:HadoopStartResponse"/>
+ <xs:element name="StopNameNode" type="hdp:HadoopStop"/>
+ <xs:element name="StopNameNodeResponse" type="hdp:HadoopStopResponse"/>
+ <xs:element name="GetNameNode" type="hdp:HadoopQuery"/>
+ <xs:element name="GetNameNodeResponse" type="hdp:HadoopQueryResponse"/>
+ <!-- these all follow the same type pattern so reuse -->
+ <!-- DataNode -->
+ <xs:element name="StartDataNode" type="hdp:HadoopStart"/>
+ <xs:element name="StartDataNodeResponse" type="hdp:HadoopStartResponse"/>
+ <xs:element name="StopDataNode" type="hdp:HadoopStop"/>
+ <xs:element name="StopDataNodeResponse" type="hdp:HadoopStopResponse"/>
+ <xs:element name="GetDataNode" type="hdp:HadoopQuery"/>
+ <xs:element name="GetDataNodeResponse" type="hdp:HadoopQueryResponse"/>
+ <!-- JobTracker -->
+ <xs:element name="StartJobTracker" type="hdp:HadoopStart"/>
+ <xs:element name="StartJobTrackerResponse" type="hdp:HadoopStartResponse"/>
+ <xs:element name="StopJobTracker" type="hdp:HadoopStop"/>
+ <xs:element name="StopJobTrackerResponse" type="hdp:HadoopStopResponse"/>
+ <xs:element name="GetJobTracker" type="hdp:HadoopQuery"/>
+ <xs:element name="GetJobTrackerResponse" type="hdp:HadoopQueryResponse"/>
+ <!-- TaskTracker -->
+ <xs:element name="StartTaskTracker" type="hdp:HadoopStart"/>
+ <xs:element name="StartTaskTrackerResponse" type="hdp:HadoopStartResponse"/>
+ <xs:element name="StopTaskTracker" type="hdp:HadoopStop"/>
+ <xs:element name="StopTaskTrackerResponse" type="hdp:HadoopStopResponse"/>
+ <xs:element name="GetTaskTracker" type="hdp:HadoopQuery"/>
+ <xs:element name="GetTaskTrackerResponse" type="hdp:HadoopQueryResponse"/>
</xs:schema>
10 years, 10 months
r5760 - trunk/sage/python/sage/aviary
by tmckay@fedoraproject.org
Author: tmckay
Date: 2013-06-20 14:56:58 +0000 (Thu, 20 Jun 2013)
New Revision: 5760
Modified:
trunk/sage/python/sage/aviary/aviaryoperations.py
Log:
Use Aviary api for addition of external name nodes and jt's
Modified: trunk/sage/python/sage/aviary/aviaryoperations.py
===================================================================
--- trunk/sage/python/sage/aviary/aviaryoperations.py 2013-06-20 11:52:20 UTC (rev 5759)
+++ trunk/sage/python/sage/aviary/aviaryoperations.py 2013-06-20 14:56:58 UTC (rev 5760)
@@ -1122,90 +1122,76 @@
def get_task_tracker_list(self, owner=None, callback=None):
return self._get_node_list(self.get_task_tracker, owner, callback)
- def add_external_name_node(self, ipc, url, callback=None):
- self._hadoop_lock.acquire()
- if not (ipc, url) in self._external_name_nodes:
- self._external_name_nodes.append((ipc, url))
- try:
- self._write_external(self._external_nn_path,
- self._external_name_nodes)
- except Exception:
- log.debug("Failed to write external name nodes")
- self._hadoop_lock.release()
- if callback:
- callback("OK")
- else:
- return "OK"
+ def add_external_name_node(self, host, ipc, url, callback):
+ # This is special because the HadoopID is special
+ assert callable(callback)
+
+ client = self.client_pool.get_object()
+ self._setup_client(client,
+ self.servers,
+ host,
+ "startNameNode")
+
+ def result_tuple(result, host):
+ data = None
+ result = self._pretty_result(result, host)
+ if isinstance(result, Exception):
+ status = result
+ else:
+ status = _AviaryCommon._get_status(result.status)
+ if status == "OK" and hasattr(result, "ref"):
+ data = result.ref
+ return (status, data)
+
+ def my_callback(result):
+ self.client_pool.return_object(client)
+ callback(*result_tuple(result, host))
+
+ ref = client.factory.create("ns1:HadoopID")
+ ref.id = ""
+ ref.ipc = ipc
+ ref.http = url
+
+ t = CallThread(self.call_client_retry, my_callback,
+ client, "startNameNode", "", "", "", ref)
+ t.start()
- def get_external_name_nodes(self, callback=None):
- status = "OK"
- res = []
- self._hadoop_lock.acquire()
- try:
- res = self._read_external(self._external_nn_path,
- self._external_name_nodes)
- except Exception:
- status = "FAILED"
- log.debug("Failed to read external name nodes")
- self._hadoop_lock.release()
- if callback:
- callback(status, res)
- else:
- return (status, res)
+ def add_external_job_tracker(self, host, ipc, url, callback):
+ # This is special because the HadoopID is special, and
+ # also because the id refers to the jt itself as an external
+ # and not a namenode
+ assert callable(callback)
- def add_external_job_tracker(self, ipc, url, callback=None):
- self._hadoop_lock.acquire()
- if not (ipc, url) in self._external_job_trackers:
- self._external_job_trackers.append((ipc, url))
- try:
- self._write_external(self._external_jt_path,
- self._external_job_trackers)
- except Exception:
- log.debug("Failed to write external job trackers")
- self._hadoop_lock.release()
- if callback:
- callback("OK")
- else:
- return "OK"
+ client = self.client_pool.get_object()
+ self._setup_client(client,
+ self.servers,
+ host,
+ "startJobTracker")
- def get_external_job_trackers(self, callback=None):
- status = "OK"
- res = []
- self._hadoop_lock.acquire()
- try:
- res = self._read_external(self._external_jt_path,
- self._external_job_trackers)
- except Exception:
- status = "FAILED"
- log.debug("Failed to read external job trackers")
- self._hadoop_lock.release()
- if callback:
- callback(status, res)
- else:
- return (status, res)
+ def result_tuple(result, host):
+ data = None
+ result = self._pretty_result(result, host)
+ if isinstance(result, Exception):
+ status = result
+ else:
+ status = _AviaryCommon._get_status(result.status)
+ if status == "OK" and hasattr(result, "ref"):
+ data = result.ref
+ return (status, data)
- def _read_external(self, path, vals):
- res = []
- file = open(path, "r")
- lines = file.readlines()
- for l in lines:
- ipc, url = l.split()
- res.append((ipc, url))
- return res
+ def my_callback(result):
+ self.client_pool.return_object(client)
+ callback(*result_tuple(result, host))
- def _write_external(self, path, vals):
- p = os.path.split(path)[0]
- if os.path.isdir(p):
- try:
- file = open(path, "w+")
- for v in vals:
- file.write(v[0] + " " + v[1] + "\n")
- file.close()
- return True
- except Exception:
- pass
- return False
+ ref = client.factory.create("ns1:HadoopID")
+ ref.id = ""
+ ref.ipc = ipc
+ ref.http = url
+ t = CallThread(self.call_client_retry, my_callback,
+ client, "startJobTracker", ref)
+ t.start()
+
def _make_id(self, client, val):
def url(v):
10 years, 10 months
r5759 - trunk/cumin/python/cumin/gridhadoop
by croberts@fedoraproject.org
Author: croberts
Date: 2013-06-20 11:52:20 +0000 (Thu, 20 Jun 2013)
New Revision: 5759
Modified:
trunk/cumin/python/cumin/gridhadoop/hadoop.py
Log:
One more tweak required to finish removing the scheduler selection page from hadoop.
Modified: trunk/cumin/python/cumin/gridhadoop/hadoop.py
===================================================================
--- trunk/cumin/python/cumin/gridhadoop/hadoop.py 2013-06-19 20:59:23 UTC (rev 5758)
+++ trunk/cumin/python/cumin/gridhadoop/hadoop.py 2013-06-20 11:52:20 UTC (rev 5759)
@@ -298,7 +298,7 @@
class HadoopNodeDeleteTask(ObjectSelectorTask):
def enter(self, session):
form_session = super(HadoopNodeDeleteTask, self).enter(session)
- self.form.hadoophost.set(form_session, self.module.frame.children_by_name[self.module.frame.name + ".ghschedframe"].host.get(session))
+ self.form.hadoophost.set(form_session, self.app.remote.get_hosts("SCHEDULER", "HADOOP")[0])
return form_session
def invoke(self, session, selection, *args):
10 years, 10 months
r5758 - trunk/cumin/python/cumin/gridhadoop
by croberts@fedoraproject.org
Author: croberts
Date: 2013-06-19 20:59:23 +0000 (Wed, 19 Jun 2013)
New Revision: 5758
Modified:
trunk/cumin/python/cumin/gridhadoop/hadoopobjectframe.py
trunk/cumin/python/cumin/gridhadoop/jobtracker.py
trunk/cumin/python/cumin/gridhadoop/namenode.py
Log:
Now passing ipc on the links to the frames for Name nodes and Job Trackers. This will be needed for getting the details of external infrastructure nodes or any node where id is blank.
Modified: trunk/cumin/python/cumin/gridhadoop/hadoopobjectframe.py
===================================================================
--- trunk/cumin/python/cumin/gridhadoop/hadoopobjectframe.py 2013-06-19 20:09:47 UTC (rev 5757)
+++ trunk/cumin/python/cumin/gridhadoop/hadoopobjectframe.py 2013-06-19 20:59:23 UTC (rev 5758)
@@ -20,6 +20,9 @@
self.host = StringParameter(app, "host")
self.add_parameter(self.host)
+
+ self.ipc = StringParameter(app, "ipc")
+ self.add_parameter(self.ipc)
# This will be given a value during the "process" pass after
# self.id is determined (lookup by id)
@@ -36,9 +39,11 @@
for task in self.tasks:
task.init()
- def get_href(self, session, id, host):
+ def get_href(self, session, id, host, ipc=None):
branch = session.branch()
self.id.set(branch, id)
+ if ipc:
+ self.ipc.set(branch, ipc)
self.host.set(branch, host)
self.view.show(branch)
return branch.marshal()
Modified: trunk/cumin/python/cumin/gridhadoop/jobtracker.py
===================================================================
--- trunk/cumin/python/cumin/gridhadoop/jobtracker.py 2013-06-19 20:09:47 UTC (rev 5757)
+++ trunk/cumin/python/cumin/gridhadoop/jobtracker.py 2013-06-19 20:59:23 UTC (rev 5758)
@@ -66,18 +66,22 @@
self.uptime_col = UptimeColumn(app, "uptimecol", cls.Uptime)
self.uptime_col.width = "10%"
self.owner_col = ObjectTableColumn(app, "ownercol", cls.Owner)
+ self.ipc_col = ObjectTableColumn(app, "ipccol", cls.Ipc)
+ self.ipc_col.visible = False
self.add_column(self.id_col)
self.add_column(self.http_col)
self.add_column(self.state_col)
self.add_column(self.uptime_col)
self.add_column(self.owner_col)
+ self.add_column(self.ipc_col)
class JobTrackerColumn(ObjectLinkColumn):
def render_cell_href(self, session, record):
id = unescape_entity(record[self.id_field.index])
+ ipc = unescape_entity(record[self.parent.ipc_col.field.index])
frame = self.frame.children_by_name[self.frame.name + self.frame_path]
- return frame.get_href(session, id, self.app.remote.get_hosts("SCHEDULER", "HADOOP")[0])
+ return frame.get_href(session, id, self.app.remote.get_hosts("SCHEDULER", "HADOOP")[0], ipc)
class JobTrackerAddExternal(Task):
def __init__(self, app, name, module):
Modified: trunk/cumin/python/cumin/gridhadoop/namenode.py
===================================================================
--- trunk/cumin/python/cumin/gridhadoop/namenode.py 2013-06-19 20:09:47 UTC (rev 5757)
+++ trunk/cumin/python/cumin/gridhadoop/namenode.py 2013-06-19 20:59:23 UTC (rev 5758)
@@ -62,21 +62,25 @@
self.uptime_col = UptimeColumn(app, "uptimecol", cls.Uptime)
self.uptime_col.width = "10%"
self.owner_col = ObjectTableColumn(app, "ownercol", cls.Owner)
+ self.ipc_col = ObjectTableColumn(app, "ipccol", cls.Ipc)
+ self.ipc_col.visible = False
self.add_column(self.id_col)
self.add_column(self.http_col)
self.add_column(self.state_col)
self.add_column(self.uptime_col)
self.add_column(self.owner_col)
+ self.add_column(self.ipc_col)
class NameNodeColumn(ObjectLinkColumn):
def render_cell_href(self, session, record):
id = unescape_entity(record[self.id_field.index])
+ ipc = unescape_entity(record[self.parent.ipc_col.field.index])
#TODO might need id to be http instead if regular id isn't available
#maybe pass all options along (id, ipc, http)
frame = self.frame.children_by_name[self.frame.name + self.frame_path]
hadoop_schedulers = self.app.remote.get_hosts("SCHEDULER", "HADOOP")
- return frame.get_href(session, id, hadoop_schedulers[0])
+ return frame.get_href(session, id, hadoop_schedulers[0], ipc)
class NameNodeAddExternal(Task):
def __init__(self, app, name, module):
10 years, 10 months
r5757 - trunk/sage/python/sage/aviary
by tmckay@fedoraproject.org
Author: tmckay
Date: 2013-06-19 20:09:47 +0000 (Wed, 19 Jun 2013)
New Revision: 5757
Modified:
trunk/sage/python/sage/aviary/aviaryoperations.py
Log:
Add file backing for external name nodes and job trackers
Modified: trunk/sage/python/sage/aviary/aviaryoperations.py
===================================================================
--- trunk/sage/python/sage/aviary/aviaryoperations.py 2013-06-19 18:46:21 UTC (rev 5756)
+++ trunk/sage/python/sage/aviary/aviaryoperations.py 2013-06-19 20:09:47 UTC (rev 5757)
@@ -1032,6 +1032,19 @@
self._external_name_nodes = []
self._external_job_trackers = []
+ path = os.path.join(os.sep, "var", "lib", "cumin")
+ if os.path.isdir(path):
+ self._external_nn_path = os.path.join(path, "name_nodes")
+ self._external_jt_path = os.path.join(path, "job_trackers")
+ else:
+ hdef = os.path.expanduser("~")
+ path = os.environ.get("CUMIN_HOME", hdef)
+ self._external_nn_path = os.path.join(path, ".name_nodes")
+ self._external_jt_path = os.path.join(path, ".job_trackers")
+
+ log.debug("External name nodes path %s" % self._external_nn_path)
+ log.debug("External job trackers path %s" % self._external_jt_path)
+
def start_name_node(self, host, bin_file, owner, description, callback):
assert callable(callback)
@@ -1113,25 +1126,42 @@
self._hadoop_lock.acquire()
if not (ipc, url) in self._external_name_nodes:
self._external_name_nodes.append((ipc, url))
+ try:
+ self._write_external(self._external_nn_path,
+ self._external_name_nodes)
+ except Exception:
+ log.debug("Failed to write external name nodes")
self._hadoop_lock.release()
if callback:
callback("OK")
else:
return "OK"
- def get_external_name_nodes(self, callback=None):
+ def get_external_name_nodes(self, callback=None):
+ status = "OK"
+ res = []
self._hadoop_lock.acquire()
- res = copy.deepcopy(self._external_name_nodes)
+ try:
+ res = self._read_external(self._external_nn_path,
+ self._external_name_nodes)
+ except Exception:
+ status = "FAILED"
+ log.debug("Failed to read external name nodes")
self._hadoop_lock.release()
if callback:
- callback("OK", res)
+ callback(status, res)
else:
- return ("OK", res)
+ return (status, res)
def add_external_job_tracker(self, ipc, url, callback=None):
self._hadoop_lock.acquire()
if not (ipc, url) in self._external_job_trackers:
self._external_job_trackers.append((ipc, url))
+ try:
+ self._write_external(self._external_jt_path,
+ self._external_job_trackers)
+ except Exception:
+ log.debug("Failed to write external job trackers")
self._hadoop_lock.release()
if callback:
callback("OK")
@@ -1139,14 +1169,43 @@
return "OK"
def get_external_job_trackers(self, callback=None):
+ status = "OK"
+ res = []
self._hadoop_lock.acquire()
- res = copy.deepcopy(self._external_job_trackers)
+ try:
+ res = self._read_external(self._external_jt_path,
+ self._external_job_trackers)
+ except Exception:
+ status = "FAILED"
+ log.debug("Failed to read external job trackers")
self._hadoop_lock.release()
if callback:
- callback("OK", res)
+ callback(status, res)
else:
- return ("OK", res)
+ return (status, res)
+ def _read_external(self, path, vals):
+ res = []
+ file = open(path, "r")
+ lines = file.readlines()
+ for l in lines:
+ ipc, url = l.split()
+ res.append((ipc, url))
+ return res
+
+ def _write_external(self, path, vals):
+ p = os.path.split(path)[0]
+ if os.path.isdir(p):
+ try:
+ file = open(path, "w+")
+ for v in vals:
+ file.write(v[0] + " " + v[1] + "\n")
+ file.close()
+ return True
+ except Exception:
+ pass
+ return False
+
def _make_id(self, client, val):
def url(v):
10 years, 10 months
r5756 - trunk/cumin/python/cumin/gridhadoop
by croberts@fedoraproject.org
Author: croberts
Date: 2013-06-19 18:46:21 +0000 (Wed, 19 Jun 2013)
New Revision: 5756
Modified:
trunk/cumin/python/cumin/gridhadoop/hadoop.py
trunk/cumin/python/cumin/gridhadoop/jobtracker.py
trunk/cumin/python/cumin/gridhadoop/namenode.py
Log:
Adding links to add external infrastructure name nodes/job trackers. Also including forms and calls to stubbed functionality to add the nodes. Not yet showing the external nodes in the lists.
Modified: trunk/cumin/python/cumin/gridhadoop/hadoop.py
===================================================================
--- trunk/cumin/python/cumin/gridhadoop/hadoop.py 2013-06-19 18:38:42 UTC (rev 5755)
+++ trunk/cumin/python/cumin/gridhadoop/hadoop.py 2013-06-19 18:46:21 UTC (rev 5756)
@@ -179,8 +179,16 @@
class Owner(StringField):
def render_title(self, session):
return "Username of the owner"
+
+class IpcField(StringField):
+ def render_title(self, session):
+ return "Ipc"
+class HttpField(StringField):
+ def render_title(self, session):
+ return "Http"
+
class NameNodeField(ScalarField):
def __init__(self, app, name):
super(NameNodeField, self).__init__(app, name, None)
@@ -329,3 +337,26 @@
def render_cell_href(self, session, record):
link = self.field.get_content(session, record)
return link
+
+class HadoopExternalAddForm(HadoopNodeCreateForm):
+ def __init__(self, app, name, task, cls):
+ super(HadoopExternalAddForm, self).__init__(app, name, task, cls)
+
+ self.ipc = IpcField(app, "ipc")
+ self.add_field(self.ipc)
+
+ self.http = HttpField(app, "http")
+ self.add_field(self.http)
+
+ def process_submit(self, session):
+ self.validate(session)
+
+ url = self.return_url.get(session)
+ self.page.redirect.set(session, url)
+
+ if not self.errors.get(session):
+ ipc = self.ipc.get(session)
+ http = self.http.get(session)
+ self.task.invoke(session, None, (ipc, http))
+ self.task.exit_with_redirect(session, url)
+
\ No newline at end of file
Modified: trunk/cumin/python/cumin/gridhadoop/jobtracker.py
===================================================================
--- trunk/cumin/python/cumin/gridhadoop/jobtracker.py 2013-06-19 18:38:42 UTC (rev 5755)
+++ trunk/cumin/python/cumin/gridhadoop/jobtracker.py 2013-06-19 18:46:21 UTC (rev 5756)
@@ -39,6 +39,10 @@
task = JobTrackerCreate(app, name + ".create", self.module)
link = TaskLink(app, "jobTracker_create", task)
self.links.add_child(link)
+
+ task = JobTrackerAddExternal(app, name + ".add", module)
+ link = TaskLink(app, "jobTracker_add", task)
+ self.links.add_child(link)
JobTrackerDelete(app, self, "jobTracker_delete", self.module)
@@ -75,6 +79,45 @@
frame = self.frame.children_by_name[self.frame.name + self.frame_path]
return frame.get_href(session, id, self.app.remote.get_hosts("SCHEDULER", "HADOOP")[0])
+class JobTrackerAddExternal(Task):
+ def __init__(self, app, name, module):
+ super(JobTrackerAddExternal, self).__init__(app, name)
+ cls = app.model.com_redhat_cumin_grid_hadoop.JobTracker
+
+ self.module = module
+
+ self.form = JobTrackerAddForm(app, self.name, self, cls)
+ self.invoc = None
+
+ def callback(self, result):
+ if result == False:
+ self.invoc.status = self.invoc.FAILED
+ self.invoc.end()
+
+ def do_invoke(self, session, object, invoc, args):
+ self.invoc = invoc
+ (ipc, http) = args
+ self.app.remote.add_external_job_tracker(ipc, http, invoc.make_callback())
+
+ def get_title(self, session, x):
+ return "Add an infrastructure job tracker"
+
+ def enter(self, session, obj):
+ form_session = wooly.Session(self.app.form_page)
+
+ if obj:
+ self.form.id.set(form_session, obj._id)
+
+ self.form.return_url.set(form_session, session.marshal())
+ self.form.show(form_session)
+
+ self.do_enter(session, obj, form_session)
+ return form_session
+
+class JobTrackerAddForm(HadoopExternalAddForm):
+ def render_title(self, session):
+ return "Add an infrastructure job tracker"
+
class JobTrackerDelete(HadoopNodeDeleteTask):
def __init__(self, app, selector, name, module):
super(JobTrackerDelete, self).__init__(app, selector)
Modified: trunk/cumin/python/cumin/gridhadoop/namenode.py
===================================================================
--- trunk/cumin/python/cumin/gridhadoop/namenode.py 2013-06-19 18:38:42 UTC (rev 5755)
+++ trunk/cumin/python/cumin/gridhadoop/namenode.py 2013-06-19 18:46:21 UTC (rev 5756)
@@ -35,6 +35,10 @@
task = NameNodeCreate(app, name + ".create", module)
link = TaskLink(app, "nameNode_create", task)
self.links.add_child(link)
+
+ task = NameNodeAddExternal(app, name + ".add", module)
+ link = TaskLink(app, "nameNode_add", task)
+ self.links.add_child(link)
NameNodeDelete(app, self, "nodeName_delete", module)
@@ -68,10 +72,51 @@
class NameNodeColumn(ObjectLinkColumn):
def render_cell_href(self, session, record):
id = unescape_entity(record[self.id_field.index])
+ #TODO might need id to be http instead if regular id isn't available
+ #maybe pass all options along (id, ipc, http)
frame = self.frame.children_by_name[self.frame.name + self.frame_path]
hadoop_schedulers = self.app.remote.get_hosts("SCHEDULER", "HADOOP")
return frame.get_href(session, id, hadoop_schedulers[0])
+class NameNodeAddExternal(Task):
+ def __init__(self, app, name, module):
+ super(NameNodeAddExternal, self).__init__(app, name)
+ cls = app.model.com_redhat_cumin_grid_hadoop.NameNode
+
+ self.module = module
+
+ self.form = NameNodeAddForm(app, self.name, self, cls)
+ self.invoc = None
+
+ def callback(self, result):
+ if result == False:
+ self.invoc.status = self.invoc.FAILED
+ self.invoc.end()
+
+ def do_invoke(self, session, object, invoc, args):
+ self.invoc = invoc
+ (ipc, http) = args
+ self.app.remote.add_external_name_node(ipc, http, invoc.make_callback())
+
+ def get_title(self, session, x):
+ return "Add an infrastructure name node"
+
+ def enter(self, session, obj):
+ form_session = wooly.Session(self.app.form_page)
+
+ if obj:
+ self.form.id.set(form_session, obj._id)
+
+ self.form.return_url.set(form_session, session.marshal())
+ self.form.show(form_session)
+
+ self.do_enter(session, obj, form_session)
+ return form_session
+
+class NameNodeAddForm(HadoopExternalAddForm):
+ def render_title(self, session):
+ return "Add an infrastructure name node"
+
class NameNodeCreate(Task):
def __init__(self, app, name, module):
super(NameNodeCreate, self).__init__(app, name)
10 years, 10 months