commit bd24262286af1363d998dea05e4b31cf126c81aa Author: W. David Ashley w.david.ashley@gmail.com Date: Wed Jul 8 10:08:51 2015 -0500
Domains Chapter General - finished the indentation corrections for the whole chapter
en-US/Guest_Domains.xml | 779 +++++++++++++++++++++++------------------------ 1 files changed, 384 insertions(+), 395 deletions(-) --- diff --git a/en-US/Guest_Domains.xml b/en-US/Guest_Domains.xml index 8464d53..5e7130a 100644 --- a/en-US/Guest_Domains.xml +++ b/en-US/Guest_Domains.xml @@ -761,7 +761,7 @@ </section>
<section id="libvirt_application_development_guide_using_python-Guest_Domains-Domain_Config-Memory_CPU"> - <title>Memory / CPU resources</title> + <title>Memory / CPU Resources</title>
<para> CPU and memory resources can be set at the time the domain is created or dynamically @@ -800,485 +800,474 @@ </example> </section>
- <section id="libvirt_application_development_guide_using_python-Guest_Domains-Domain_Config-Lifecycle"> - <title>Lifecycle controls</title> + <section id="libvirt_application_development_guide_using_python-Guest_Domains-Domain_Config-Lifecycle"> + <title>Lifecycle Controls</title>
- <para> - TBD - - </para> - - </section> - - <section id="libvirt_application_development_guide_using_python-Guest_Domains-Domain_Config-Clock"> - <title>Clock sync</title> + <para> + TBD + </para>
- <para> - TBD + </section>
- </para> + <section id="libvirt_application_development_guide_using_python-Guest_Domains-Domain_Config-Clock"> + <title>Clock Sync</title>
- </section> + <para> + TBD + </para>
- <section id="libvirt_application_development_guide_using_python-Guest_Domains-Domain_Config-Features"> - <title>Features</title> + </section>
- <para> - TBD + <section id="libvirt_application_development_guide_using_python-Guest_Domains-Domain_Config-Features"> + <title>Features</title>
- </para> + <para> + TBD + </para>
- </section> + </section>
</section>
<section id="libvirt_application_development_guide_using_python-Guest_Domains-Monitoring"> - <title>Monitoring performance</title> - - <para> - Statistical metrics are available for monitoring the utilization rates of domains, vCPUs, memory, block devices, and network interfaces. - </para> - - <section id="libvirt_application_development_guide_using_python-Guest_Domains-Monitoring-Domain"> - <title>Domain performance</title> + <title>Monitoring Performance</title>
<para> - TBD - - </para> - - </section> - - <section id="libvirt_application_development_guide_using_python-Guest_Domains-Monitoring-vCPU"> - <title>vCPU performance</title> - - <para> - TBD - + Statistical metrics are available for monitoring the utilization rates of domains, vCPUs, memory, + block devices, and network interfaces. </para>
- </section> - - <section id="libvirt_application_development_guide_using_python-Guest_Domains-Monitoring-IO_stats"> - <title>I/O statistics</title> - - <para> - TBD + <section id="libvirt_application_development_guide_using_python-Guest_Domains-Monitoring-Domain"> + <title>Domain Performance</title>
- </para> - - </section> - - </section> - - <section id="libvirt_application_development_guide_using_python-Guest_Domains-Device_Config"> - <title>Device configuration</title> - - <para> - TBD - - </para> - - <section id="libvirt_application_development_guide_using_python-Guest_Domains-Device_Config-Emulator"> - <title>Emulator</title> - - <para> - TBD - </para> - - </section> - - <section id="libvirt_application_development_guide_using_python-Guest_Domains-Device_Config-Disks"> - <title>Disks</title> - - <para> - TBD - - </para> - - </section> - - <section id="libvirt_application_development_guide_using_python-Guest_Domains-Device_Config-Networking"> - <title>Networking</title> - - <para> - TBD - </para> - - </section> - - <section id="libvirt_application_development_guide_using_python-Guest_Domains-Device_Config-Filesystems"> - <title>Filesystems</title> + <para> + TBD + </para>
- <para> - TBD - </para> + </section>
- </section> + <section id="libvirt_application_development_guide_using_python-Guest_Domains-Monitoring-vCPU"> + <title>vCPU Performance</title>
- <section id="libvirt_application_development_guide_using_python-Guest_Domains-Device_Config-Mice"> - <title>Mice & tablets</title> + <para> + TBD + </para>
- <para> - TBD - </para> + </section>
- </section> + <section id="libvirt_application_development_guide_using_python-Guest_Domains-Monitoring-IO_stats"> + <title>I/O Statistics</title>
- <section id="libvirt_application_development_guide_using_python-Guest_Domains-Device_Config-USB_Pass"> - <title>USB device passthrough</title> + <para> + TBD + </para>
- <para> - TBD - </para> + </section>
</section>
- <section id="libvirt_application_development_guide_using_python-Guest_Domains-Device_Config-PCI_Pass"> - <title>PCI device passthrough</title> - - <para> - The PCI device passthrough capability allows a physical PCI device from - the host machine to be assigned directly to a guest machine.The guest - OS drivers can use the device hardware directly without relying on any - driver capabilities from the host OS. - </para> - - <para> - Some caveats apply when using PCI device passthrough. When a PCI device is - directly assigned to a guest, migration will not be possible, without - first hot-unplugging the device from the guest. In addition - libvirt does not guarantee that direct device assignment is secure, leaving - security policy decisions to the underlying virtualization technology. Secure - PCI device passthrough typically requires special hardware capabilities, such - the VT-d feature for Intel chipset, or IOMMU for AMD chipsets. - </para> - - <para> - There are two modes in which a PCI device can be attached, "managed" or - "unmanaged" mode, although at time of writing only KVM supports "managed" - mode attachment. In managed mode, the configured device will be automatically - detached from the host OS drivers when the guest is started, and then - re-attached when the guest shuts down. In unmanaged mode, the device - must be explicit detached ahead of booting the guest. The guest will - refuse to start if the device is still attached to the host OS. The - libvirt 'Node Device' APIs provide a means to detach/reattach PCI devices - from/to host drivers. Alternatively the host OS may be configured to - blacklist the PCI devices used for guest, so that they never get attached - to host OS drivers. - </para> - - <para> - In both modes, the virtualization technology will always perform a reset - on the device before starting a guest, and after the guest shuts down. - This is critical to ensure isolation between host and guest OS. 
There - are a variety of ways in which a PCI device can be reset. Some reset - techniques are limited in scope to a single device/function, while - others may affect multiple devices at once. In the latter case, it will - be necessary to co-assign all affect devices to the same guest, - otherwise a reset will be impossible to do safely. The node device - APIs can be used to determine whether a device needs to be co-assigned, - by manually detaching the device and then attempting to perform the - reset operation. If this succeeds, then it will be possible to assign - the device to a guest on its own. If it fails, then it will be necessary - to co-assign the device will others on the same PCI bus. The section - documenting node device APIs covers this topic in detail, but as a - quick demonstration the following code checks whether a PCI device - (represented by a virNodeDevicePtr object instance) can be reset and - is thus assignable to a guest - </para> - <programlisting> - <![CDATA[ - virNodeDevicePtr dev = ....get virNodeDevicePtr for the PCI device... 
- - if (virNodeDeviceDettach(dev) < 0) { - fprintf(stderr, "Device cannot be dettached from the host OS drivers\n"); - return; - } + <section id="libvirt_application_development_guide_using_python-Guest_Domains-Device_Config"> + <title>Device configuration</title> + + <para> + TBD + </para> + + <section id="libvirt_application_development_guide_using_python-Guest_Domains-Device_Config-Emulator"> + <title>Emulator</title> + + <para> + TBD + </para> + + </section> + + <section id="libvirt_application_development_guide_using_python-Guest_Domains-Device_Config-Disks"> + <title>Disks</title> + + <para> + TBD + </para> + + </section> + + <section id="libvirt_application_development_guide_using_python-Guest_Domains-Device_Config-Networking"> + <title>Networking</title> + + <para> + TBD + </para> + + </section> + + <section id="libvirt_application_development_guide_using_python-Guest_Domains-Device_Config-Filesystems"> + <title>Filesystems</title> + + <para> + TBD + </para> + + </section> + + <section id="libvirt_application_development_guide_using_python-Guest_Domains-Device_Config-Mice"> + <title>Mice & Tablets</title> + + <para> + TBD + </para> + + </section> + + <section id="libvirt_application_development_guide_using_python-Guest_Domains-Device_Config-USB_Pass"> + <title>USB Device Passthrough</title> + + <para> + TBD + </para> + + </section> + + <section id="libvirt_application_development_guide_using_python-Guest_Domains-Device_Config-PCI_Pass"> + <title>PCI device passthrough</title> + + <para> + The PCI device passthrough capability allows a physical PCI device from + the host machine to be assigned directly to a guest machine.The guest + OS drivers can use the device hardware directly without relying on any + driver capabilities from the host OS. + </para> + + <para> + Some caveats apply when using PCI device passthrough. When a PCI device is + directly assigned to a guest, migration will not be possible, without + first hot-unplugging the device from the guest. 
In addition + libvirt does not guarantee that direct device assignment is secure, leaving + security policy decisions to the underlying virtualization technology. Secure + PCI device passthrough typically requires special hardware capabilities, such + the VT-d feature for Intel chipset, or IOMMU for AMD chipsets. + </para> + + <para> + There are two modes in which a PCI device can be attached, "managed" or + "unmanaged" mode, although at time of writing only KVM supports "managed" + mode attachment. In managed mode, the configured device will be automatically + detached from the host OS drivers when the guest is started, and then + re-attached when the guest shuts down. In unmanaged mode, the device + must be explicit detached ahead of booting the guest. The guest will + refuse to start if the device is still attached to the host OS. The + libvirt 'Node Device' APIs provide a means to detach/reattach PCI devices + from/to host drivers. Alternatively the host OS may be configured to + blacklist the PCI devices used for guest, so that they never get attached + to host OS drivers. + </para> + + <para> + In both modes, the virtualization technology will always perform a reset + on the device before starting a guest, and after the guest shuts down. + This is critical to ensure isolation between host and guest OS. There + are a variety of ways in which a PCI device can be reset. Some reset + techniques are limited in scope to a single device/function, while + others may affect multiple devices at once. In the latter case, it will + be necessary to co-assign all affect devices to the same guest, + otherwise a reset will be impossible to do safely. The node device + APIs can be used to determine whether a device needs to be co-assigned, + by manually detaching the device and then attempting to perform the + reset operation. If this succeeds, then it will be possible to assign + the device to a guest on its own. 
If it fails, then it will be necessary + to co-assign the device will others on the same PCI bus. The section + documenting node device APIs covers this topic in detail, but as a + quick demonstration the following code checks whether a PCI device + (represented by a virNodeDevicePtr object instance) can be reset and + is thus assignable to a guest + </para> + <programlisting> + <![CDATA[ + virNodeDevicePtr dev = ....get virNodeDevicePtr for the PCI device... + + if (virNodeDeviceDettach(dev) < 0) { + fprintf(stderr, "Device cannot be dettached from the host OS drivers\n"); + return; + } + + if (virNodeDeviceReset(dev) < 0) { + fprintf(stderr, "Device cannot be safely reset without affecting other devices\n"); + return; + } + + fprintf(stderr, "Device is suitable for passthrough to a guest\n"); + ]]> + </programlisting> + + <para> + A PCI device is attached to a guest using the 'hostdevice' element. + The 'mode' attribute should always be set to 'subsystem', and the + 'type' attribute to 'pci'. The 'managed' attribute can be either + 'yes' or 'no' as required by the application. Within the 'hostdevice' + element there is a 'source' element and within that a further 'address' + element is used to specify the PCI device to be attached. The address + element expects attributes for 'domain', 'bus', 'slot' and 'function'. + This is easiest to see with a short example + </para> + + <programlisting> + <![CDATA[ + <hostdev mode='subsystem' type='pci' managed='yes'> + <source> + <address domain='0x0000' + bus='0x06' + slot='0x12' + function='0x5'/> + </source> + </hostdev> + ]]> + </programlisting> + + </section>
- if (virNodeDeviceReset(dev) < 0) { - fprintf(stderr, "Device cannot be safely reset without affecting other devices\n"); - return; - } + </section>
- fprintf(stderr, "Device is suitable for passthrough to a guest\n"); - ]]> - </programlisting> + <section id="libvirt_application_development_guide_using_python-Guest_Domains-Live_Config"> + <title>Live Configuration Change</title>
<para> - A PCI device is attached to a guest using the 'hostdevice' element. - The 'mode' attribute should always be set to 'subsystem', and the - 'type' attribute to 'pci'. The 'managed' attribute can be either - 'yes' or 'no' as required by the application. Within the 'hostdevice' - element there is a 'source' element and within that a further 'address' - element is used to specify the PCI device to be attached. The address - element expects attributes for 'domain', 'bus', 'slot' and 'function'. - This is easiest to see with a short example + TBD </para>
- <programlisting> - <![CDATA[ - <hostdev mode='subsystem' type='pci' managed='yes'> - <source> - <address domain='0x0000' - bus='0x06' - slot='0x12' - function='0x5'/> - </source> - </hostdev> - ]]> - </programlisting> - - </section> - - </section> + <section id="libvirt_application_development_guide_using_python-Guest_Domains-Live_Config-Memory"> + <title>Memory Ballooning</title>
- <section id="libvirt_application_development_guide_using_python-Guest_Domains-Live_Config"> - <title>Live configuration change</title> + <para> + TBD + </para>
- <para> - TBD - </para> + </section>
- <section id="libvirt_application_development_guide_using_python-Guest_Domains-Live_Config-Memory"> - <title>Memory ballooning</title> + <section id="libvirt_application_development_guide_using_python-Guest_Domains-Live_Config-CPU"> + <title>CPU Hotplug</title>
- <para> - TBD - </para> + <para> + TBD + </para>
- </section> + </section>
- <section id="libvirt_application_development_guide_using_python-Guest_Domains-Live_Config-CPU"> - <title>CPU hotplug</title> + <section id="libvirt_application_development_guide_using_python-Guest_Domains-Live_Config-Device_Plug"> + <title>Device Hotplug / Unplug</title>
- <para> - TBD - </para> + <para> + TBD + </para>
- </section> + </section>
- <section id="libvirt_application_development_guide_using_python-Guest_Domains-Live_Config-Device_Plug"> - <title>Device hotplug / unplug</title> + <section id="libvirt_application_development_guide_using_python-Guest_Domains-Live_Config-Device_Media"> + <title>Device Media Change</title>
- <para> - TBD - </para> + <para> + TBD + </para>
- </section> + </section>
- <section id="libvirt_application_development_guide_using_python-Guest_Domains-Live_Config-Device_Media"> - <title>Device media change</title> + <section id="libvirt_application_development_guide_using_python-Guest_Domains-Live_Config-Block_Jobs"> + <title>Block Device Jobs</title>
- <para> - TBD - </para> + <para> + Libvirt provides a generic Block Job API that can be used to initiate + and manage operations on disks that belong to a domain. Jobs are + started by calling the function associated with the desired operation + (eg. <literal>virDomainBlockPull</literal>). Once started, all block + jobs are managed in the same manner. They can be aborted, throttled, + and queried. Upon completion, an asynchronous event is issued to + indicate the final status. + </para>
- </section> + <para> + The following block jobs can be started: + </para> + <orderedlist> + <listitem> + <para> + <literal>virDomainBlockPull()</literal> starts a block pull + operation for the specified disk. This operation is valid only for + specially configured disks. BlockPull will populate a disk image + with data from its backing image. Once all data from its backing + image has been pulled, the disk no longer depends on a backing + image. + </para> + </listitem> + </orderedlist>
- <section id="libvirt_application_development_guide_using_python-Guest_Domains-Live_Config-Block_Jobs"> - <title>Block Device Jobs</title> + <para> + A disk can be queried for active block jobs by using + <literal>virDomainGetBlockJobInfo()</literal>. If found, job + information is reported in a structure that contains: the job type, + bandwidth throttling setting, and progress information. + </para>
- <para> - Libvirt provides a generic Block Job API that can be used to initiate - and manage operations on disks that belong to a domain. Jobs are - started by calling the function associated with the desired operation - (eg. <literal>virDomainBlockPull</literal>). Once started, all block - jobs are managed in the same manner. They can be aborted, throttled, - and queried. Upon completion, an asynchronous event is issued to - indicate the final status. - </para> + <para> + <literal>virDomainBlockJobAbort()</literal> can be used to cancel the + active block job on the specified disk. + </para>
- <para> - The following block jobs can be started: - </para> - <orderedlist> - <listitem> <para> - <literal>virDomainBlockPull()</literal> starts a block pull - operation for the specified disk. This operation is valid only for - specially configured disks. BlockPull will populate a disk image - with data from its backing image. Once all data from its backing - image has been pulled, the disk no longer depends on a backing - image. + Use <literal>virDomainBlockJobSetSpeed()</literal> to limit the amount + of bandwidth that a block job may consume. Bandwidth is specified in + units of MB/sec. </para> - </listitem> - </orderedlist>
- <para> - A disk can be queried for active block jobs by using - <literal>virDomainGetBlockJobInfo()</literal>. If found, job - information is reported in a structure that contains: the job type, - bandwidth throttling setting, and progress information. - </para> + <para> + When a block job operation completes, the final status is reported using + an asynchronous event. To receive this event, register a + <literal>virConnectDomainEventBlockJobCallback</literal> function which + will receive the disk, event type, and status as parameters. + </para>
- <para> - <literal>virDomainBlockJobAbort()</literal> can be used to cancel the - active block job on the specified disk. - </para> + <programlisting> + <![CDATA[/* example blockpull-example.c */ + /* compile with: gcc -g -Wall blockpull-example.c -o blockpull-example -lvirt */ + #include <stdio.h> + #include <stdlib.h> + #include <unistd.h> + #include <libvirt/libvirt.h> + + int do_cmd(const char *cmdline) + { + int status = system(cmdline); + if (status < 0) + return -1; + else + return WEXITSTATUS(status); + }
- <para> - Use <literal>virDomainBlockJobSetSpeed()</literal> to limit the amount - of bandwidth that a block job may consume. Bandwidth is specified in - units of MB/sec. - </para> + virDomainPtr make_domain(virConnectPtr conn) + { + virDomainPtr dom; + char domxml[] = \ + "<domain type='kvm'> \ + <name>example</name> \ + <memory>131072</memory> \ + <vcpu>1</vcpu> \ + <os> \ + <type arch='x86_64' machine='pc-0.13'>hvm</type> \ + </os> \ + <devices> \ + <disk type='file' device='disk'> \ + <driver name='qemu' type='qed'/> \ + <source file='/var/lib/libvirt/images/example.qed' /> \ + <target dev='vda' bus='virtio'/> \ + </disk> \ + </devices> \ + </domain>"; + + do_cmd("qemu-img create -f raw /var/lib/libvirt/images/backing.qed 100M"); + do_cmd("qemu-img create -f qed -b /var/lib/libvirt/images/backing.qed \ + /var/lib/libvirt/images/example.qed"); + + dom = virDomainCreateXML(conn, domxml, 0); + return dom; + }
- <para> - When a block job operation completes, the final status is reported using - an asynchronous event. To receive this event, register a - <literal>virConnectDomainEventBlockJobCallback</literal> function which - will receive the disk, event type, and status as parameters. - </para> + int main(int argc, char *argv[]) + { + virConnectPtr conn; + virDomainPtr dom = NULL; + char disk[] = "/var/lib/libvirt/images/example.qed";
- <programlisting> - <![CDATA[/* example blockpull-example.c */ - /* compile with: gcc -g -Wall blockpull-example.c -o blockpull-example -lvirt */ - #include <stdio.h> - #include <stdlib.h> - #include <unistd.h> - #include <libvirt/libvirt.h> - - int do_cmd(const char *cmdline) - { - int status = system(cmdline); - if (status < 0) - return -1; - else - return WEXITSTATUS(status); - } - - virDomainPtr make_domain(virConnectPtr conn) - { - virDomainPtr dom; - char domxml[] = \ - "<domain type='kvm'> \ - <name>example</name> \ - <memory>131072</memory> \ - <vcpu>1</vcpu> \ - <os> \ - <type arch='x86_64' machine='pc-0.13'>hvm</type> \ - </os> \ - <devices> \ - <disk type='file' device='disk'> \ - <driver name='qemu' type='qed'/> \ - <source file='/var/lib/libvirt/images/example.qed' /> \ - <target dev='vda' bus='virtio'/> \ - </disk> \ - </devices> \ - </domain>"; - - do_cmd("qemu-img create -f raw /var/lib/libvirt/images/backing.qed 100M"); - do_cmd("qemu-img create -f qed -b /var/lib/libvirt/images/backing.qed \ - /var/lib/libvirt/images/example.qed"); - - dom = virDomainCreateXML(conn, domxml, 0); - return dom; - } - - int main(int argc, char *argv[]) - { - virConnectPtr conn; - virDomainPtr dom = NULL; - char disk[] = "/var/lib/libvirt/images/example.qed"; - - conn = virConnectOpen("qemu:///system"); - if (conn == NULL) { - fprintf(stderr, "Failed to open connection to qemu:///system\n"); - goto error; - } + conn = virConnectOpen("qemu:///system"); + if (conn == NULL) { + fprintf(stderr, "Failed to open connection to qemu:///system\n"); + goto error; + }
- dom = make_domain(conn); - if (dom == NULL) { - fprintf(stderr, "Failed to create domain\n"); - goto error; - } + dom = make_domain(conn); + if (dom == NULL) { + fprintf(stderr, "Failed to create domain\n"); + goto error; + }
- if ((virDomainBlockPull(dom, disk, 0, 0)) < 0) { - fprintf(stderr, "Failed to start block pull"); - goto error; - } + if ((virDomainBlockPull(dom, disk, 0, 0)) < 0) { + fprintf(stderr, "Failed to start block pull"); + goto error; + }
- while (1) { - virDomainBlockJobInfo info; - int ret = virDomainGetBlockJobInfo(dom, disk, &info, 0); - - if (ret == 1) { - printf("BlockPull progress: %0.0f %%\n", - (float)(100 * info.cur / info.end)); - } else if (ret == 0) { - printf("BlockPull complete\n"); - break; - } else { - fprintf(stderr, "Failed to query block jobs\n"); - break; + while (1) { + virDomainBlockJobInfo info; + int ret = virDomainGetBlockJobInfo(dom, disk, &info, 0); + + if (ret == 1) { + printf("BlockPull progress: %0.0f %%\n", + (float)(100 * info.cur / info.end)); + } else if (ret == 0) { + printf("BlockPull complete\n"); + break; + } else { + fprintf(stderr, "Failed to query block jobs\n"); + break; + } + usleep(100000); } - usleep(100000); - }
- error: - unlink("/var/lib/libvirt/images/backing.qed"); - unlink("/var/lib/libvirt/images/example.qed"); - if (dom != NULL) { - virDomainDestroy(dom); - virDomainFree(dom); - } - if (conn != NULL) - virConnectClose(conn); - return 0; - }]]> - </programlisting> + error: + unlink("/var/lib/libvirt/images/backing.qed"); + unlink("/var/lib/libvirt/images/example.qed"); + if (dom != NULL) { + virDomainDestroy(dom); + virDomainFree(dom); + } + if (conn != NULL) + virConnectClose(conn); + return 0; + }]]> + </programlisting>
- </section> + </section>
</section>
<section id="libvirt_application_development_guide_using_python-Guest_Domains-Guest_Domains-Security"> - <title>Security model</title> + <title>Security Model</title>
- <para> - TBD - </para> + <para> + TBD + </para>
</section>
<section id="libvirt_application_development_guide_using_python-Guest_Domains-Event_Not"> - <title>Event notifications</title> + <title>Event Notifications</title>
- <para> - TBD - - </para> + <para> + TBD + </para>
</section>
<section id="libvirt_application_development_guide_using_python-Guest_Domains-Tuning"> - <title>Tuning</title> - - <para> - TBD - - </para> - - <section id="libvirt_application_development_guide_using_python-Guest_Domains-Tuning-Schedular"> - <title>Scheduler parameters</title> + <title>Tuning</title>
<para> - TBD - + TBD </para>
- </section> + <section id="libvirt_application_development_guide_using_python-Guest_Domains-Tuning-Schedular"> + <title>Scheduler Parameters</title>
- <section id="libvirt_application_development_guide_using_python-Guest_Domains-Tuning-NUMA"> - <title>NUMA placement</title> + <para> + TBD + </para>
- <para> - TBD + </section>
- </para> + <section id="libvirt_application_development_guide_using_python-Guest_Domains-Tuning-NUMA"> + <title>NUMA Placement</title>
- </section> + <para> + TBD + </para>
- </section> + </section> + + </section>
</chapter>