[xorg-x11-drv-intel/f21] backport some SNA and MST fixes.
Dave Airlie
airlied at fedoraproject.org
Thu Sep 11 04:37:40 UTC 2014
commit daf827e84c083b86c80c42abee9bba2e42066dc8
Author: Dave Airlie <airlied at redhat.com>
Date: Thu Sep 11 14:31:59 2014 +1000
backport some SNA and MST fixes.
sna-fixes.patch | 1059 +++++++++++++++++++++++++++++++++++++++++++++++
xorg-x11-drv-intel.spec | 8 +-
2 files changed, 1066 insertions(+), 1 deletions(-)
---
diff --git a/sna-fixes.patch b/sna-fixes.patch
new file mode 100644
index 0000000..4c92c04
--- /dev/null
+++ b/sna-fixes.patch
@@ -0,0 +1,1059 @@
+diff --git a/src/sna/Makefile.am b/src/sna/Makefile.am
+index 7ec1c36..a995b44 100644
+--- a/src/sna/Makefile.am
++++ b/src/sna/Makefile.am
+@@ -106,6 +106,7 @@ libsna_la_SOURCES = \
+ gen8_render.h \
+ gen8_vertex.c \
+ gen8_vertex.h \
++ xassert.h \
+ $(NULL)
+
+ if DRI2
+diff --git a/src/sna/brw/brw_eu.c b/src/sna/brw/brw_eu.c
+index 9bd8ba5..c5705f5 100644
+--- a/src/sna/brw/brw_eu.c
++++ b/src/sna/brw/brw_eu.c
+@@ -29,6 +29,10 @@
+ * Keith Whitwell <keith at tungstengraphics.com>
+ */
+
++#ifdef HAVE_CONFIG_H
++#include "config.h"
++#endif
++
+ #include "brw_eu.h"
+
+ #include <string.h>
+diff --git a/src/sna/brw/brw_eu.h b/src/sna/brw/brw_eu.h
+index 0124ac2..e8210a1 100644
+--- a/src/sna/brw/brw_eu.h
++++ b/src/sna/brw/brw_eu.h
+@@ -29,13 +29,13 @@
+ * Keith Whitwell <keith at tungstengraphics.com>
+ */
+
+-
+ #ifndef BRW_EU_H
+ #define BRW_EU_H
+
+ #include <stdbool.h>
+ #include <stdint.h>
+ #include <stdio.h>
++
+ #include <assert.h>
+
+ #define BRW_SWIZZLE4(a,b,c,d) (((a)<<0) | ((b)<<2) | ((c)<<4) | ((d)<<6))
+diff --git a/src/sna/kgem.h b/src/sna/kgem.h
+index c3a9b13..8bd5715 100644
+--- a/src/sna/kgem.h
++++ b/src/sna/kgem.h
+@@ -428,6 +428,13 @@ static inline void _kgem_set_mode(struct kgem *kgem, enum kgem_mode mode)
+ kgem->mode = mode;
+ }
+
++static inline int kgem_batch_space(struct kgem *kgem)
++{
++ int rem = kgem->surface - kgem->nbatch;
++ assert(rem > 0);
++ return rem - KGEM_BATCH_RESERVED;
++}
++
+ static inline bool kgem_check_batch(struct kgem *kgem, int num_dwords)
+ {
+ assert(num_dwords > 0);
+diff --git a/src/sna/sna.h b/src/sna/sna.h
+index bb0cbb2..ad74870 100644
+--- a/src/sna/sna.h
++++ b/src/sna/sna.h
+@@ -76,6 +76,7 @@ SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ #include <signal.h>
+ #include <setjmp.h>
+
++#include "xassert.h"
+ #include "compiler.h"
+
+ #if HAS_DEBUG_FULL
+diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
+index 7df522b..8a54442 100644
+--- a/src/sna/sna_accel.c
++++ b/src/sna/sna_accel.c
+@@ -2635,7 +2635,7 @@ sna_drawable_move_region_to_cpu(DrawablePtr drawable,
+ discard_gpu = false;
+ }
+ }
+- sna_damage_add(&priv->cpu_damage, region);
++ sna_damage_add_to_pixmap(&priv->cpu_damage, region, pixmap);
+
+ if (dx | dy)
+ RegionTranslate(region, -dx, -dy);
+@@ -2723,7 +2723,7 @@ sna_drawable_move_region_to_cpu(DrawablePtr drawable,
+ if (flags & MOVE_WRITE) {
+ if (!DAMAGE_IS_ALL(priv->gpu_damage)) {
+ assert(!priv->clear);
+- sna_damage_add(&priv->gpu_damage, region);
++ sna_damage_add_to_pixmap(&priv->gpu_damage, region, pixmap);
+ if (sna_damage_is_all(&priv->gpu_damage,
+ pixmap->drawable.width,
+ pixmap->drawable.height)) {
+@@ -2800,7 +2800,7 @@ move_to_cpu:
+ if (flags & MOVE_WRITE) {
+ if (!DAMAGE_IS_ALL(priv->gpu_damage)) {
+ assert(!priv->clear);
+- sna_damage_add(&priv->gpu_damage, region);
++ sna_damage_add_to_pixmap(&priv->gpu_damage, region, pixmap);
+ if (sna_damage_is_all(&priv->gpu_damage,
+ pixmap->drawable.width,
+ pixmap->drawable.height)) {
+@@ -3040,7 +3040,7 @@ done:
+ DBG(("%s: applying cpu damage\n", __FUNCTION__));
+ assert(!DAMAGE_IS_ALL(priv->cpu_damage));
+ assert_pixmap_contains_box(pixmap, RegionExtents(region));
+- sna_damage_add(&priv->cpu_damage, region);
++ sna_damage_add_to_pixmap(&priv->cpu_damage, region, pixmap);
+ sna_damage_reduce_all(&priv->cpu_damage, pixmap);
+ if (DAMAGE_IS_ALL(priv->cpu_damage)) {
+ DBG(("%s: replaced entire pixmap\n", __FUNCTION__));
+@@ -4828,7 +4828,7 @@ done:
+ if (replaces) {
+ sna_damage_all(&priv->gpu_damage, pixmap);
+ } else {
+- sna_damage_add(&priv->gpu_damage, region);
++ sna_damage_add_to_pixmap(&priv->gpu_damage, region, pixmap);
+ sna_damage_reduce_all(&priv->gpu_damage, pixmap);
+ }
+ if (DAMAGE_IS_ALL(priv->gpu_damage))
+@@ -4916,7 +4916,7 @@ try_upload__blt(PixmapPtr pixmap, RegionRec *region,
+ if (region_subsumes_drawable(region, &pixmap->drawable)) {
+ sna_damage_all(&priv->gpu_damage, pixmap);
+ } else {
+- sna_damage_add(&priv->gpu_damage, region);
++ sna_damage_add_to_pixmap(&priv->gpu_damage, region, pixmap);
+ sna_damage_reduce_all(&priv->gpu_damage, pixmap);
+ }
+ if (DAMAGE_IS_ALL(priv->gpu_damage))
+@@ -5121,7 +5121,7 @@ sna_put_xybitmap_blt(DrawablePtr drawable, GCPtr gc, RegionPtr region,
+
+ assert_pixmap_contains_box(pixmap, RegionExtents(region));
+ if (damage)
+- sna_damage_add(damage, region);
++ sna_damage_add_to_pixmap(damage, region, pixmap);
+ assert_pixmap_damage(pixmap);
+
+ DBG(("%s: upload(%d, %d, %d, %d)\n", __FUNCTION__, x, y, w, h));
+@@ -5281,7 +5281,7 @@ sna_put_xypixmap_blt(DrawablePtr drawable, GCPtr gc, RegionPtr region,
+
+ assert_pixmap_contains_box(pixmap, RegionExtents(region));
+ if (damage)
+- sna_damage_add(damage, region);
++ sna_damage_add_to_pixmap(damage, region, pixmap);
+ assert_pixmap_damage(pixmap);
+
+ DBG(("%s: upload(%d, %d, %d, %d)\n", __FUNCTION__, x, y, w, h));
+@@ -5759,7 +5759,7 @@ sna_self_copy_boxes(DrawablePtr src, DrawablePtr dst, GCPtr gc,
+ sna_damage_all(&priv->gpu_damage, pixmap);
+ } else {
+ RegionTranslate(region, tx, ty);
+- sna_damage_add(&priv->gpu_damage, region);
++ sna_damage_add_to_pixmap(&priv->gpu_damage, region, pixmap);
+ }
+ }
+ assert_pixmap_damage(pixmap);
+@@ -6118,7 +6118,7 @@ upload_inplace:
+
+ if (!DAMAGE_IS_ALL(dst_priv->gpu_damage)) {
+ assert(!dst_priv->clear);
+- sna_damage_add(&dst_priv->gpu_damage, region);
++ sna_damage_add_to_pixmap(&dst_priv->gpu_damage, region, dst_pixmap);
+ if (sna_damage_is_all(&dst_priv->gpu_damage,
+ dst_pixmap->drawable.width,
+ dst_pixmap->drawable.height)) {
+@@ -6369,7 +6369,7 @@ discard_cow:
+ }
+
+ if (damage)
+- sna_damage_add(damage, region);
++ sna_damage_add_to_pixmap(damage, region, dst_pixmap);
+ return;
+ }
+
+@@ -6406,7 +6406,7 @@ discard_cow:
+ }
+
+ if (damage)
+- sna_damage_add(damage, region);
++ sna_damage_add_to_pixmap(damage, region, dst_pixmap);
+ return;
+ }
+
+@@ -6443,7 +6443,7 @@ discard_cow:
+ }
+
+ if (damage)
+- sna_damage_add(damage, region);
++ sna_damage_add_to_pixmap(damage, region, dst_pixmap);
+ return;
+ }
+
+@@ -6486,7 +6486,7 @@ discard_cow:
+ }
+
+ if (damage)
+- sna_damage_add(damage, region);
++ sna_damage_add_to_pixmap(damage, region, dst_pixmap);
+ return;
+ }
+
+@@ -6540,7 +6540,7 @@ discard_cow:
+
+ if (ok) {
+ if (damage)
+- sna_damage_add(damage, region);
++ sna_damage_add_to_pixmap(damage, region, dst_pixmap);
+ return;
+ }
+ }
+@@ -6618,7 +6618,7 @@ discard_cow:
+ tmp->drawable.pScreen->DestroyPixmap(tmp);
+
+ if (damage)
+- sna_damage_add(damage, region);
++ sna_damage_add_to_pixmap(damage, region, dst_pixmap);
+ return;
+ } else {
+ DBG(("%s: dst is on the GPU, src is on the CPU, uploading into dst\n",
+@@ -8294,7 +8294,7 @@ sna_copy_bitmap_blt(DrawablePtr _bitmap, DrawablePtr drawable, GCPtr gc,
+
+ if (arg->damage) {
+ RegionTranslate(region, dx, dy);
+- sna_damage_add(arg->damage, region);
++ sna_damage_add_to_pixmap(arg->damage, region, pixmap);
+ }
+ assert_pixmap_damage(pixmap);
+ sna->blt_state.fill_bo = 0;
+@@ -8526,7 +8526,7 @@ sna_copy_plane_blt(DrawablePtr source, DrawablePtr drawable, GCPtr gc,
+
+ if (arg->damage) {
+ RegionTranslate(region, dx, dy);
+- sna_damage_add(arg->damage, region);
++ sna_damage_add_to_pixmap(arg->damage, region, dst_pixmap);
+ }
+ assert_pixmap_damage(dst_pixmap);
+ sna->blt_state.fill_bo = 0;
+@@ -9917,7 +9917,7 @@ spans_fallback:
+ if (data.dx | data.dy)
+ pixman_region_translate(&data.region, data.dx, data.dy);
+ assert_pixmap_contains_box(data.pixmap, &data.region.extents);
+- sna_damage_add(data.damage, &data.region);
++ sna_damage_add_to_pixmap(data.damage, &data.region, data.pixmap);
+ assert_pixmap_damage(data.pixmap);
+ }
+ RegionUninit(&data.region);
+@@ -10765,7 +10765,7 @@ spans_fallback:
+ if (data.dx | data.dy)
+ pixman_region_translate(&data.region, data.dx, data.dy);
+ assert_pixmap_contains_box(data.pixmap, &data.region.extents);
+- sna_damage_add(data.damage, &data.region);
++ sna_damage_add_to_pixmap(data.damage, &data.region, data.pixmap);
+ }
+ assert_pixmap_damage(data.pixmap);
+ RegionUninit(&data.region);
+@@ -11576,7 +11576,7 @@ sna_poly_arc(DrawablePtr drawable, GCPtr gc, int n, xArc *arc)
+ if (data.dx | data.dy)
+ pixman_region_translate(&data.region, data.dx, data.dy);
+ assert_pixmap_contains_box(data.pixmap, &data.region.extents);
+- sna_damage_add(data.damage, &data.region);
++ sna_damage_add_to_pixmap(data.damage, &data.region, data.pixmap);
+ }
+ assert_pixmap_damage(data.pixmap);
+ RegionUninit(&data.region);
+@@ -11932,7 +11932,7 @@ sna_poly_fill_polygon(DrawablePtr draw, GCPtr gc,
+ if (data.dx | data.dy)
+ pixman_region_translate(&data.region, data.dx, data.dy);
+ assert_pixmap_contains_box(data.pixmap, &data.region.extents);
+- sna_damage_add(data.damage, &data.region);
++ sna_damage_add_to_pixmap(data.damage, &data.region, data.pixmap);
+ }
+ assert_pixmap_damage(data.pixmap);
+ RegionUninit(&data.region);
+@@ -12156,7 +12156,7 @@ sna_poly_fill_rect_tiled_8x8_blt(DrawablePtr drawable,
+ sna->kgem.nbatch += 6;
+ }
+ } else do {
+- int n_this_time;
++ int n_this_time, rem;
+
+ assert(sna->kgem.mode == KGEM_BLT);
+ b = sna->kgem.batch + sna->kgem.nbatch;
+@@ -12199,8 +12199,9 @@ sna_poly_fill_rect_tiled_8x8_blt(DrawablePtr drawable,
+ }
+
+ n_this_time = n;
+- if (3*n_this_time > sna->kgem.surface - sna->kgem.nbatch - KGEM_BATCH_RESERVED)
+- n_this_time = (sna->kgem.surface - sna->kgem.nbatch - KGEM_BATCH_RESERVED) / 3;
++ rem = kgem_batch_space(&sna->kgem);
++ if (3*n_this_time > rem)
++ n_this_time = rem / 3;
+ assert(n_this_time);
+ n -= n_this_time;
+
+@@ -13061,7 +13062,7 @@ sna_poly_fill_rect_stippled_8x8_blt(DrawablePtr drawable,
+ sna->kgem.nbatch += 9;
+ }
+ } else do {
+- int n_this_time;
++ int n_this_time, rem;
+
+ assert(sna->kgem.mode == KGEM_BLT);
+ b = sna->kgem.batch + sna->kgem.nbatch;
+@@ -13099,8 +13100,9 @@ sna_poly_fill_rect_stippled_8x8_blt(DrawablePtr drawable,
+ }
+
+ n_this_time = n;
+- if (3*n_this_time > sna->kgem.surface - sna->kgem.nbatch - KGEM_BATCH_RESERVED)
+- n_this_time = (sna->kgem.surface - sna->kgem.nbatch - KGEM_BATCH_RESERVED) / 3;
++ rem = kgem_batch_space(&sna->kgem);
++ if (3*n_this_time > rem)
++ n_this_time = rem / 3;
+ assert(n_this_time);
+ n -= n_this_time;
+
+@@ -14871,17 +14873,17 @@ sna_poly_fill_rect__gpu(DrawablePtr draw, GCPtr gc, int n, xRectangle *r)
+
+ if (gc_is_solid(gc, &color)) {
+ (void)sna_poly_fill_rect_blt(draw,
+- data->bo, data->damage,
++ data->bo, NULL,
+ gc, color, n, r,
+ &data->region.extents, true);
+ } else if (gc->fillStyle == FillTiled) {
+ (void)sna_poly_fill_rect_tiled_blt(draw,
+- data->bo, data->damage,
++ data->bo, NULL,
+ gc, n, r,
+ &data->region.extents, true);
+ } else {
+ (void)sna_poly_fill_rect_stippled_blt(draw,
+- data->bo, data->damage,
++ data->bo, NULL,
+ gc, n, r,
+ &data->region.extents, true);
+ }
+@@ -14985,7 +14987,7 @@ sna_poly_fill_arc(DrawablePtr draw, GCPtr gc, int n, xArc *arc)
+ if (data.dx | data.dy)
+ pixman_region_translate(&data.region, data.dx, data.dy);
+ assert_pixmap_contains_box(data.pixmap, &data.region.extents);
+- sna_damage_add(data.damage, &data.region);
++ sna_damage_add_to_pixmap(data.damage, &data.region, data.pixmap);
+ }
+ assert_pixmap_damage(data.pixmap);
+ RegionUninit(&data.region);
+@@ -16305,7 +16307,7 @@ sna_push_pixels_solid_blt(GCPtr gc,
+
+ assert_pixmap_contains_box(pixmap, RegionExtents(region));
+ if (damage)
+- sna_damage_add(damage, region);
++ sna_damage_add_to_pixmap(damage, region, pixmap);
+ assert_pixmap_damage(pixmap);
+
+ DBG(("%s: upload(%d, %d, %d, %d)\n", __FUNCTION__,
+diff --git a/src/sna/sna_blt.c b/src/sna/sna_blt.c
+index 4538f98..72f7f67 100644
+--- a/src/sna/sna_blt.c
++++ b/src/sna/sna_blt.c
+@@ -176,7 +176,8 @@ static bool sna_blt_fill_init(struct sna *sna,
+ {
+ uint32_t *b;
+
+- if (!kgem_check_reloc(kgem, 1)) {
++ if (!kgem_check_batch(kgem, 24) ||
++ !kgem_check_reloc(kgem, 1)) {
+ _kgem_submit(kgem);
+ if (!kgem_check_bo_fenced(kgem, bo))
+ return false;
+@@ -232,6 +233,7 @@ static bool sna_blt_fill_init(struct sna *sna,
+ sna->blt_state.fill_alu = alu;
+ }
+
++ assert(sna->kgem.mode == KGEM_BLT);
+ return true;
+ }
+
+@@ -1102,13 +1104,16 @@ inline static void _sna_blt_fill_boxes(struct sna *sna,
+
+ do {
+ uint32_t *b = kgem->batch + kgem->nbatch;
+- int nbox_this_time;
++ int nbox_this_time, rem;
+
+ assert(sna->kgem.mode == KGEM_BLT);
+ nbox_this_time = nbox;
+- if (3*nbox_this_time > kgem->surface - kgem->nbatch - KGEM_BATCH_RESERVED)
+- nbox_this_time = (kgem->surface - kgem->nbatch - KGEM_BATCH_RESERVED) / 3;
+- assert(nbox_this_time);
++ rem = kgem_batch_space(kgem);
++ if (3*nbox_this_time > rem)
++ nbox_this_time = rem / 3;
++ DBG(("%s: emitting %d boxes out of %d (batch space %d)\n",
++ __FUNCTION__, nbox_this_time, nbox, rem));
++ assert(nbox_this_time > 0);
+ nbox -= nbox_this_time;
+
+ kgem->nbatch += 3 * nbox_this_time;
+@@ -1198,13 +1203,16 @@ static void blt_composite_fill_boxes_no_offset__thread(struct sna *sna,
+
+ do {
+ uint32_t *b = kgem->batch + kgem->nbatch;
+- int nbox_this_time;
++ int nbox_this_time, rem;
+
+ assert(sna->kgem.mode == KGEM_BLT);
+ nbox_this_time = nbox;
+- if (3*nbox_this_time > kgem->surface - kgem->nbatch - KGEM_BATCH_RESERVED)
+- nbox_this_time = (kgem->surface - kgem->nbatch - KGEM_BATCH_RESERVED) / 3;
+- assert(nbox_this_time);
++ rem = kgem_batch_space(kgem);
++ if (3*nbox_this_time > rem)
++ nbox_this_time = rem / 3;
++ DBG(("%s: emitting %d boxes out of %d (batch space %d)\n",
++ __FUNCTION__, nbox_this_time, nbox, rem));
++ assert(nbox_this_time > 0);
+ nbox -= nbox_this_time;
+
+ kgem->nbatch += 3 * nbox_this_time;
+@@ -1310,13 +1318,16 @@ static void blt_composite_fill_boxes__thread(struct sna *sna,
+
+ do {
+ uint32_t *b = kgem->batch + kgem->nbatch;
+- int nbox_this_time;
++ int nbox_this_time, rem;
+
+ assert(sna->kgem.mode == KGEM_BLT);
+ nbox_this_time = nbox;
+- if (3*nbox_this_time > kgem->surface - kgem->nbatch - KGEM_BATCH_RESERVED)
+- nbox_this_time = (kgem->surface - kgem->nbatch - KGEM_BATCH_RESERVED) / 3;
+- assert(nbox_this_time);
++ rem = kgem_batch_space(kgem);
++ if (3*nbox_this_time > rem)
++ nbox_this_time = rem / 3;
++ DBG(("%s: emitting %d boxes out of %d (batch space %d)\n",
++ __FUNCTION__, nbox_this_time, nbox, rem));
++ assert(nbox_this_time > 0);
+ nbox -= nbox_this_time;
+
+ kgem->nbatch += 3 * nbox_this_time;
+@@ -1386,6 +1397,7 @@ static bool
+ begin_blt(struct sna *sna,
+ struct sna_composite_op *op)
+ {
++ assert(sna->kgem.mode == KGEM_BLT);
+ if (!kgem_check_bo_fenced(&sna->kgem, op->dst.bo)) {
+ kgem_submit(&sna->kgem);
+ if (!kgem_check_bo_fenced(&sna->kgem, op->dst.bo))
+@@ -1603,14 +1615,17 @@ static void blt_composite_copy_boxes__thread(struct sna *sna,
+ if ((dst_dx | dst_dy) == 0) {
+ uint64_t hdr = (uint64_t)br13 << 32 | cmd;
+ do {
+- int nbox_this_time;
++ int nbox_this_time, rem;
+
+ nbox_this_time = nbox;
+- if (8*nbox_this_time > kgem->surface - kgem->nbatch - KGEM_BATCH_RESERVED)
+- nbox_this_time = (kgem->surface - kgem->nbatch - KGEM_BATCH_RESERVED) / 8;
++ rem = kgem_batch_space(kgem);
++ if (8*nbox_this_time > rem)
++ nbox_this_time = rem / 8;
+ if (2*nbox_this_time > KGEM_RELOC_SIZE(kgem) - kgem->nreloc)
+ nbox_this_time = (KGEM_RELOC_SIZE(kgem) - kgem->nreloc)/2;
+- assert(nbox_this_time);
++ DBG(("%s: emitting %d boxes out of %d (batch space %d)\n",
++ __FUNCTION__, nbox_this_time, nbox, rem));
++ assert(nbox_this_time > 0);
+ nbox -= nbox_this_time;
+
+ assert(sna->kgem.mode == KGEM_BLT);
+@@ -1656,14 +1671,17 @@ static void blt_composite_copy_boxes__thread(struct sna *sna,
+ } while (1);
+ } else {
+ do {
+- int nbox_this_time;
++ int nbox_this_time, rem;
+
+ nbox_this_time = nbox;
+- if (8*nbox_this_time > kgem->surface - kgem->nbatch - KGEM_BATCH_RESERVED)
+- nbox_this_time = (kgem->surface - kgem->nbatch - KGEM_BATCH_RESERVED) / 8;
++ rem = kgem_batch_space(kgem);
++ if (8*nbox_this_time > rem)
++ nbox_this_time = rem / 8;
+ if (2*nbox_this_time > KGEM_RELOC_SIZE(kgem) - kgem->nreloc)
+ nbox_this_time = (KGEM_RELOC_SIZE(kgem) - kgem->nreloc)/2;
+- assert(nbox_this_time);
++ DBG(("%s: emitting %d boxes out of %d (batch space %d)\n",
++ __FUNCTION__, nbox_this_time, nbox, rem));
++ assert(nbox_this_time > 0);
+ nbox -= nbox_this_time;
+
+ assert(sna->kgem.mode == KGEM_BLT);
+@@ -1733,14 +1751,17 @@ static void blt_composite_copy_boxes__thread64(struct sna *sna,
+ if ((dst_dx | dst_dy) == 0) {
+ uint64_t hdr = (uint64_t)br13 << 32 | cmd;
+ do {
+- int nbox_this_time;
++ int nbox_this_time, rem;
+
+ nbox_this_time = nbox;
+- if (10*nbox_this_time > kgem->surface - kgem->nbatch - KGEM_BATCH_RESERVED)
+- nbox_this_time = (kgem->surface - kgem->nbatch - KGEM_BATCH_RESERVED) / 10;
++ rem = kgem_batch_space(kgem);
++ if (10*nbox_this_time > rem)
++ nbox_this_time = rem / 10;
+ if (2*nbox_this_time > KGEM_RELOC_SIZE(kgem) - kgem->nreloc)
+ nbox_this_time = (KGEM_RELOC_SIZE(kgem) - kgem->nreloc)/2;
+- assert(nbox_this_time);
++ DBG(("%s: emitting %d boxes out of %d (batch space %d)\n",
++ __FUNCTION__, nbox_this_time, nbox, rem));
++ assert(nbox_this_time > 0);
+ nbox -= nbox_this_time;
+
+ assert(kgem->mode == KGEM_BLT);
+@@ -1788,14 +1809,17 @@ static void blt_composite_copy_boxes__thread64(struct sna *sna,
+ } while (1);
+ } else {
+ do {
+- int nbox_this_time;
++ int nbox_this_time, rem;
+
+ nbox_this_time = nbox;
+- if (10*nbox_this_time > kgem->surface - kgem->nbatch - KGEM_BATCH_RESERVED)
+- nbox_this_time = (kgem->surface - kgem->nbatch - KGEM_BATCH_RESERVED) / 10;
++ rem = kgem_batch_space(kgem);
++ if (10*nbox_this_time > rem)
++ nbox_this_time = rem / 10;
+ if (2*nbox_this_time > KGEM_RELOC_SIZE(kgem) - kgem->nreloc)
+ nbox_this_time = (KGEM_RELOC_SIZE(kgem) - kgem->nreloc)/2;
+- assert(nbox_this_time);
++ DBG(("%s: emitting %d boxes out of %d (batch space %d)\n",
++ __FUNCTION__, nbox_this_time, nbox, rem));
++ assert(nbox_this_time > 0);
+ nbox -= nbox_this_time;
+
+ assert(kgem->mode == KGEM_BLT);
+@@ -3124,12 +3148,13 @@ fastcall static void sna_blt_fill_op_points(struct sna *sna,
+
+ do {
+ uint32_t *b = kgem->batch + kgem->nbatch;
+- int n_this_time;
++ int n_this_time, rem;
+
+ assert(sna->kgem.mode == KGEM_BLT);
+ n_this_time = n;
+- if (2*n_this_time > kgem->surface - kgem->nbatch - KGEM_BATCH_RESERVED)
+- n_this_time = (kgem->surface - kgem->nbatch - KGEM_BATCH_RESERVED) / 2;
++ rem = kgem_batch_space(kgem);
++ if (2*n_this_time > rem)
++ n_this_time = rem / 2;
+ assert(n_this_time);
+ n -= n_this_time;
+
+@@ -3226,6 +3251,7 @@ bool sna_blt_fill(struct sna *sna, uint8_t alu,
+ bo, bpp, alu, pixel))
+ return false;
+
++ assert(sna->kgem.mode == KGEM_BLT);
+ fill->blt = sna_blt_fill_op_blt;
+ fill->box = sna_blt_fill_op_box;
+ fill->boxes = sna_blt_fill_op_boxes;
+@@ -3486,7 +3512,8 @@ bool sna_blt_fill_boxes(struct sna *sna, uint8_t alu,
+ {
+ uint32_t *b;
+
+- if (!kgem_check_reloc(kgem, 1)) {
++ if (!kgem_check_batch(kgem, 24) ||
++ !kgem_check_reloc(kgem, 1)) {
+ _kgem_submit(kgem);
+ if (!kgem_check_bo_fenced(&sna->kgem, bo))
+ return false;
+@@ -3543,12 +3570,15 @@ bool sna_blt_fill_boxes(struct sna *sna, uint8_t alu,
+ }
+
+ do {
+- int nbox_this_time;
++ int nbox_this_time, rem;
+
+ nbox_this_time = nbox;
+- if (3*nbox_this_time > kgem->surface - kgem->nbatch - KGEM_BATCH_RESERVED)
+- nbox_this_time = (kgem->surface - kgem->nbatch - KGEM_BATCH_RESERVED) / 3;
+- assert(nbox_this_time);
++ rem = kgem_batch_space(kgem);
++ if (3*nbox_this_time > rem)
++ nbox_this_time = rem / 3;
++ DBG(("%s: emitting %d boxes out of %d (batch space %d)\n",
++ __FUNCTION__, nbox_this_time, nbox, rem));
++ assert(nbox_this_time > 0);
+ nbox -= nbox_this_time;
+
+ assert(sna->kgem.mode == KGEM_BLT);
+@@ -3622,6 +3652,7 @@ bool sna_blt_fill_boxes(struct sna *sna, uint8_t alu,
+ kgem->nbatch += 9;
+ }
+ assert(kgem->nbatch < kgem->surface);
++ assert(kgem_check_batch(kgem, 3));
+ }
+ } while (nbox);
+
+@@ -3728,14 +3759,17 @@ bool sna_blt_copy_boxes(struct sna *sna, uint8_t alu,
+ if (kgem->gen >= 0100) {
+ uint64_t hdr = (uint64_t)br13 << 32 | cmd | 8;
+ do {
+- int nbox_this_time;
++ int nbox_this_time, rem;
+
+ nbox_this_time = nbox;
+- if (10*nbox_this_time > kgem->surface - kgem->nbatch - KGEM_BATCH_RESERVED)
+- nbox_this_time = (kgem->surface - kgem->nbatch - KGEM_BATCH_RESERVED) / 8;
++ rem = kgem_batch_space(kgem);
++ if (10*nbox_this_time > rem)
++ nbox_this_time = rem / 8;
+ if (2*nbox_this_time > KGEM_RELOC_SIZE(kgem) - kgem->nreloc)
+ nbox_this_time = (KGEM_RELOC_SIZE(kgem) - kgem->nreloc)/2;
+- assert(nbox_this_time);
++ DBG(("%s: emitting %d boxes out of %d (batch space %d)\n",
++ __FUNCTION__, nbox_this_time, nbox, rem));
++ assert(nbox_this_time > 0);
+ nbox -= nbox_this_time;
+
+ assert(sna->kgem.mode == KGEM_BLT);
+@@ -3784,14 +3818,17 @@ bool sna_blt_copy_boxes(struct sna *sna, uint8_t alu,
+ } else {
+ uint64_t hdr = (uint64_t)br13 << 32 | cmd | 6;
+ do {
+- int nbox_this_time;
++ int nbox_this_time, rem;
+
+ nbox_this_time = nbox;
+- if (8*nbox_this_time > kgem->surface - kgem->nbatch - KGEM_BATCH_RESERVED)
+- nbox_this_time = (kgem->surface - kgem->nbatch - KGEM_BATCH_RESERVED) / 8;
++ rem = kgem_batch_space(kgem);
++ if (8*nbox_this_time > rem)
++ nbox_this_time = rem / 8;
+ if (2*nbox_this_time > KGEM_RELOC_SIZE(kgem) - kgem->nreloc)
+ nbox_this_time = (KGEM_RELOC_SIZE(kgem) - kgem->nreloc)/2;
+- assert(nbox_this_time);
++ DBG(("%s: emitting %d boxes out of %d (batch space %d)\n",
++ __FUNCTION__, nbox_this_time, nbox, rem));
++ assert(nbox_this_time > 0);
+ nbox -= nbox_this_time;
+
+ assert(sna->kgem.mode == KGEM_BLT);
+@@ -3840,14 +3877,17 @@ bool sna_blt_copy_boxes(struct sna *sna, uint8_t alu,
+ if (kgem->gen >= 0100) {
+ cmd |= 8;
+ do {
+- int nbox_this_time;
++ int nbox_this_time, rem;
+
+ nbox_this_time = nbox;
+- if (10*nbox_this_time > kgem->surface - kgem->nbatch - KGEM_BATCH_RESERVED)
+- nbox_this_time = (kgem->surface - kgem->nbatch - KGEM_BATCH_RESERVED) / 8;
++ rem = kgem_batch_space(kgem);
++ if (10*nbox_this_time > rem)
++ nbox_this_time = rem / 8;
+ if (2*nbox_this_time > KGEM_RELOC_SIZE(kgem) - kgem->nreloc)
+ nbox_this_time = (KGEM_RELOC_SIZE(kgem) - kgem->nreloc)/2;
+- assert(nbox_this_time);
++ DBG(("%s: emitting %d boxes out of %d (batch space %d)\n",
++ __FUNCTION__, nbox_this_time, nbox, rem));
++ assert(nbox_this_time > 0);
+ nbox -= nbox_this_time;
+
+ assert(sna->kgem.mode == KGEM_BLT);
+@@ -3896,14 +3936,17 @@ bool sna_blt_copy_boxes(struct sna *sna, uint8_t alu,
+ } else {
+ cmd |= 6;
+ do {
+- int nbox_this_time;
++ int nbox_this_time, rem;
+
+ nbox_this_time = nbox;
+- if (8*nbox_this_time > kgem->surface - kgem->nbatch - KGEM_BATCH_RESERVED)
+- nbox_this_time = (kgem->surface - kgem->nbatch - KGEM_BATCH_RESERVED) / 8;
++ rem = kgem_batch_space(kgem);
++ if (8*nbox_this_time > rem)
++ nbox_this_time = rem / 8;
+ if (2*nbox_this_time > KGEM_RELOC_SIZE(kgem) - kgem->nreloc)
+ nbox_this_time = (KGEM_RELOC_SIZE(kgem) - kgem->nreloc)/2;
+- assert(nbox_this_time);
++ DBG(("%s: emitting %d boxes out of %d (batch space %d)\n",
++ __FUNCTION__, nbox_this_time, nbox, rem));
++ assert(nbox_this_time > 0);
+ nbox -= nbox_this_time;
+
+ assert(sna->kgem.mode == KGEM_BLT);
+diff --git a/src/sna/sna_damage.h b/src/sna/sna_damage.h
+index a55f859..272e83b 100644
+--- a/src/sna/sna_damage.h
++++ b/src/sna/sna_damage.h
+@@ -29,6 +29,25 @@ struct sna_damage {
+
+ struct sna_damage *sna_damage_create(void);
+
++struct sna_damage *__sna_damage_all(struct sna_damage *damage,
++ int width, int height);
++static inline struct sna_damage *
++_sna_damage_all(struct sna_damage *damage,
++ int width, int height)
++{
++ damage = __sna_damage_all(damage, width, height);
++ return DAMAGE_MARK_ALL(damage);
++}
++
++static inline void sna_damage_all(struct sna_damage **damage,
++ PixmapPtr pixmap)
++{
++ if (!DAMAGE_IS_ALL(*damage))
++ *damage = _sna_damage_all(*damage,
++ pixmap->drawable.width,
++ pixmap->drawable.height);
++}
++
+ struct sna_damage *_sna_damage_combine(struct sna_damage *l,
+ struct sna_damage *r,
+ int dx, int dy);
+@@ -49,6 +68,24 @@ static inline void sna_damage_add(struct sna_damage **damage,
+ *damage = _sna_damage_add(*damage, region);
+ }
+
++static inline bool sna_damage_add_to_pixmap(struct sna_damage **damage,
++ RegionPtr region,
++ PixmapPtr pixmap)
++{
++ assert(!DAMAGE_IS_ALL(*damage));
++ if (region->data == NULL &&
++ region->extents.x2 - region->extents.x1 >= pixmap->drawable.width &&
++ region->extents.y2 - region->extents.y1 >= pixmap->drawable.height) {
++ *damage = _sna_damage_all(*damage,
++ pixmap->drawable.width,
++ pixmap->drawable.height);
++ return true;
++ } else {
++ *damage = _sna_damage_add(*damage, region);
++ return false;
++ }
++}
++
+ fastcall struct sna_damage *_sna_damage_add_box(struct sna_damage *damage,
+ const BoxRec *box);
+ static inline void sna_damage_add_box(struct sna_damage **damage,
+@@ -131,25 +168,6 @@ static inline bool sna_damage_is_all(struct sna_damage **_damage,
+ }
+ }
+
+-struct sna_damage *__sna_damage_all(struct sna_damage *damage,
+- int width, int height);
+-static inline struct sna_damage *
+-_sna_damage_all(struct sna_damage *damage,
+- int width, int height)
+-{
+- damage = __sna_damage_all(damage, width, height);
+- return DAMAGE_MARK_ALL(damage);
+-}
+-
+-static inline void sna_damage_all(struct sna_damage **damage,
+- PixmapPtr pixmap)
+-{
+- if (!DAMAGE_IS_ALL(*damage))
+- *damage = _sna_damage_all(*damage,
+- pixmap->drawable.width,
+- pixmap->drawable.height);
+-}
+-
+ fastcall struct sna_damage *_sna_damage_subtract(struct sna_damage *damage,
+ RegionPtr region);
+ static inline void sna_damage_subtract(struct sna_damage **damage,
+diff --git a/src/sna/sna_display.c b/src/sna/sna_display.c
+index a14f789..6e80d1b 100644
+--- a/src/sna/sna_display.c
++++ b/src/sna/sna_display.c
+@@ -3492,7 +3492,7 @@ static int name_from_path(struct sna *sna,
+ char *name)
+ {
+ struct drm_mode_get_blob blob;
+- char buf[32], *path = buf;
++ char *path;
+ int id;
+
+ id = find_property(sna, sna_output, "PATH");
+@@ -3502,20 +3502,19 @@ static int name_from_path(struct sna *sna,
+
+ VG_CLEAR(blob);
+ blob.blob_id = sna_output->prop_values[id];
+- blob.length = sizeof(buf)-1;
+- blob.data = (uintptr_t)path;
+- VG(memset(path, 0, blob.length));
++ blob.length = 0;
+ if (drmIoctl(sna->kgem.fd, DRM_IOCTL_MODE_GETPROPBLOB, &blob))
+ return 0;
+
+- if (blob.length >= sizeof(buf)) {
+- path = alloca(blob.length + 1);
++ do {
++ id = blob.length;
++ path = alloca(id + 1);
+ blob.data = (uintptr_t)path;
+- VG(memset(path, 0, blob.length));
+- DBG(("%s: reading %d bytes for path blob\n", __FUNCTION__, blob.length));
++ VG(memset(path, 0, id));
++ DBG(("%s: reading %d bytes for path blob\n", __FUNCTION__, id));
+ if (drmIoctl(sna->kgem.fd, DRM_IOCTL_MODE_GETPROPBLOB, &blob))
+ return 0;
+- }
++ } while (id != blob.length);
+
+ path[blob.length] = '\0'; /* paranoia */
+ DBG(("%s: PATH='%s'\n", __FUNCTION__, path));
+@@ -3540,7 +3539,8 @@ static int name_from_path(struct sna *sna,
+
+ for (n = 0; n < sna->mode.num_real_output; n++) {
+ if (to_sna_output(config->output[n])->id == id)
+- return snprintf(name, 32, "%s-%s", config->output[n]->name, c + 1);
++ return snprintf(name, 32, "%s-%s",
++ config->output[n]->name, c + 1);
+ }
+ }
+
+diff --git a/src/sna/sna_driver.c b/src/sna/sna_driver.c
+index 66f72dc..7eed214 100644
+--- a/src/sna/sna_driver.c
++++ b/src/sna/sna_driver.c
+@@ -37,7 +37,6 @@ USE OR OTHER DEALINGS IN THE SOFTWARE.
+ #include "config.h"
+ #endif
+
+-#include <assert.h>
+ #include <string.h>
+ #include <stdio.h>
+ #include <unistd.h>
+@@ -264,26 +263,20 @@ static Bool sna_create_screen_resources(ScreenPtr screen)
+ assert(sna->front == new_front);
+ screen->DestroyPixmap(new_front); /* transfer ownership to screen */
+
+- if (intel_get_master(sna->scrn)) {
+- xf86DrvMsg(screen->myNum, X_ERROR,
+- "[intel] Failed to become DRM master\n");
+- screen->DestroyPixmap(sna->front);
+- sna->front = NULL;
+- return FALSE;
+- }
+-
+ sna_mode_set_primary(sna);
+
+- /* Only preserve the fbcon, not any subsequent server regens */
+- if (serverGeneration == 1 && (sna->flags & SNA_IS_HOSTED) == 0)
+- sna_copy_fbcon(sna);
++ /* Try to become master and copy the current fbcon before the
++ * actual VT switch. If we fail here, we will try to reset the
++ * mode in the eventual VT switch. This can fail if systemd has
++ * already revoked our KMS privileges, so just carry on regardless,
++ * and hope that everything is sorted after the VT switch.
++ */
++ if (intel_get_master(sna->scrn) == 0) {
++ /* Only preserve the fbcon, not any subsequent server regens */
++ if (serverGeneration == 1 && (sna->flags & SNA_IS_HOSTED) == 0)
++ sna_copy_fbcon(sna);
+
+- if (!sna_set_desired_mode(sna)) {
+- xf86DrvMsg(screen->myNum, X_ERROR,
+- "[intel] Failed to set initial mode\n");
+- screen->DestroyPixmap(sna->front);
+- sna->front = NULL;
+- return FALSE;
++ (void)sna_set_desired_mode(sna);
+ }
+
+ return TRUE;
+diff --git a/src/sna/sna_glyphs.c b/src/sna/sna_glyphs.c
+index 94d702a..a4052c6 100644
+--- a/src/sna/sna_glyphs.c
++++ b/src/sna/sna_glyphs.c
+@@ -727,7 +727,7 @@ next_glyph:
+ }
+ list++;
+ }
+- if (glyph_atlas)
++ if (glyph_atlas != NO_ATLAS)
+ tmp.done(sna, &tmp);
+
+ return true;
+diff --git a/src/sna/sna_io.c b/src/sna/sna_io.c
+index 51d3af0..6eede4d 100644
+--- a/src/sna/sna_io.c
++++ b/src/sna/sna_io.c
+@@ -484,11 +484,12 @@ fallback:
+ if (sna->kgem.gen >= 0100) {
+ cmd |= 8;
+ do {
+- int nbox_this_time;
++ int nbox_this_time, rem;
+
+ nbox_this_time = tmp_nbox;
+- if (10*nbox_this_time > kgem->surface - kgem->nbatch - KGEM_BATCH_RESERVED)
+- nbox_this_time = (kgem->surface - kgem->nbatch - KGEM_BATCH_RESERVED) / 8;
++ rem = kgem_batch_space(kgem);
++ if (10*nbox_this_time > rem)
++ nbox_this_time = rem / 8;
+ if (2*nbox_this_time > KGEM_RELOC_SIZE(kgem) - kgem->nreloc)
+ nbox_this_time = (KGEM_RELOC_SIZE(kgem) - kgem->nreloc) / 2;
+ assert(nbox_this_time);
+@@ -543,11 +544,12 @@ fallback:
+ } else {
+ cmd |= 6;
+ do {
+- int nbox_this_time;
++ int nbox_this_time, rem;
+
+ nbox_this_time = tmp_nbox;
+- if (8*nbox_this_time > kgem->surface - kgem->nbatch - KGEM_BATCH_RESERVED)
+- nbox_this_time = (kgem->surface - kgem->nbatch - KGEM_BATCH_RESERVED) / 8;
++ rem = kgem_batch_space(kgem);
++ if (8*nbox_this_time > rem)
++ nbox_this_time = rem / 8;
+ if (2*nbox_this_time > KGEM_RELOC_SIZE(kgem) - kgem->nreloc)
+ nbox_this_time = (KGEM_RELOC_SIZE(kgem) - kgem->nreloc) / 2;
+ assert(nbox_this_time);
+@@ -1029,11 +1030,12 @@ tile:
+ if (kgem->gen >= 0100) {
+ cmd |= 8;
+ do {
+- int nbox_this_time;
++ int nbox_this_time, rem;
+
+ nbox_this_time = nbox;
+- if (10*nbox_this_time > kgem->surface - kgem->nbatch - KGEM_BATCH_RESERVED)
+- nbox_this_time = (kgem->surface - kgem->nbatch - KGEM_BATCH_RESERVED) / 8;
++ rem = kgem_batch_space(kgem);
++ if (10*nbox_this_time > rem)
++ nbox_this_time = rem / 8;
+ if (2*nbox_this_time > KGEM_RELOC_SIZE(kgem) - kgem->nreloc)
+ nbox_this_time = (KGEM_RELOC_SIZE(kgem) - kgem->nreloc) / 2;
+ assert(nbox_this_time);
+@@ -1122,11 +1124,12 @@ tile:
+ } else {
+ cmd |= 6;
+ do {
+- int nbox_this_time;
++ int nbox_this_time, rem;
+
+ nbox_this_time = nbox;
+- if (8*nbox_this_time > kgem->surface - kgem->nbatch - KGEM_BATCH_RESERVED)
+- nbox_this_time = (kgem->surface - kgem->nbatch - KGEM_BATCH_RESERVED) / 8;
++ rem = kgem_batch_space(kgem);
++ if (8*nbox_this_time > rem)
++ nbox_this_time = rem / 8;
+ if (2*nbox_this_time > KGEM_RELOC_SIZE(kgem) - kgem->nreloc)
+ nbox_this_time = (KGEM_RELOC_SIZE(kgem) - kgem->nreloc) / 2;
+ assert(nbox_this_time);
+@@ -1530,11 +1533,12 @@ tile:
+ if (sna->kgem.gen >= 0100) {
+ cmd |= 8;
+ do {
+- int nbox_this_time;
++ int nbox_this_time, rem;
+
+ nbox_this_time = nbox;
+- if (10*nbox_this_time > kgem->surface - kgem->nbatch - KGEM_BATCH_RESERVED)
+- nbox_this_time = (kgem->surface - kgem->nbatch - KGEM_BATCH_RESERVED) / 8;
++ rem = kgem_batch_space(kgem);
++ if (10*nbox_this_time > rem)
++ nbox_this_time = rem / 8;
+ if (2*nbox_this_time > KGEM_RELOC_SIZE(kgem) - kgem->nreloc)
+ nbox_this_time = (KGEM_RELOC_SIZE(kgem) - kgem->nreloc) / 2;
+ assert(nbox_this_time);
+@@ -1627,11 +1631,12 @@ tile:
+ } else {
+ cmd |= 6;
+ do {
+- int nbox_this_time;
++ int nbox_this_time, rem;
+
+ nbox_this_time = nbox;
+- if (8*nbox_this_time > kgem->surface - kgem->nbatch - KGEM_BATCH_RESERVED)
+- nbox_this_time = (kgem->surface - kgem->nbatch - KGEM_BATCH_RESERVED) / 8;
++ rem = kgem_batch_space(kgem);
++ if (8*nbox_this_time > rem)
++ nbox_this_time = rem / 8;
+ if (2*nbox_this_time > KGEM_RELOC_SIZE(kgem) - kgem->nreloc)
+ nbox_this_time = (KGEM_RELOC_SIZE(kgem) - kgem->nreloc) / 2;
+ assert(nbox_this_time);
+diff --git a/src/sna/sna_trapezoids_boxes.c b/src/sna/sna_trapezoids_boxes.c
+index a2045dc..9900e3f 100644
+--- a/src/sna/sna_trapezoids_boxes.c
++++ b/src/sna/sna_trapezoids_boxes.c
+@@ -85,7 +85,8 @@ static void apply_damage(struct sna_composite_op *op, RegionPtr region)
+ RegionTranslate(region, op->dst.x, op->dst.y);
+
+ assert_pixmap_contains_box(op->dst.pixmap, RegionExtents(region));
+- sna_damage_add(op->damage, region);
++ if (sna_damage_add_to_pixmap(op->damage, region, op->dst.pixmap))
++ op->damage = NULL;
+ }
+
+ static void _apply_damage_box(struct sna_composite_op *op, const BoxRec *box)
+diff --git a/src/sna/sna_video.c b/src/sna/sna_video.c
+index 80760ae..ed0e7b3 100644
+--- a/src/sna/sna_video.c
++++ b/src/sna/sna_video.c
+@@ -48,7 +48,6 @@
+ #include <inttypes.h>
+ #include <math.h>
+ #include <string.h>
+-#include <assert.h>
+ #include <errno.h>
+
+ #include <sys/mman.h>
+diff --git a/src/sna/xassert.h b/src/sna/xassert.h
+new file mode 100644
+index 0000000..7ab2591
+--- /dev/null
++++ b/src/sna/xassert.h
+@@ -0,0 +1,42 @@
++/*
++ * Copyright (c) 2014 Intel Corporation
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the next
++ * paragraph) shall be included in all copies or substantial portions of the
++ * Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
++ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
++ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
++ * SOFTWARE.
++ */
++
++#ifndef __XASSERT_H__
++#define __XASSERT_H__
++
++/* Rewrap the traditional assert so that we can capture the error message
++ * via Xorg.0.log
++ */
++
++#include <assert.h>
++
++#ifndef NDEBUG
++#include <os.h>
++#include "compiler.h"
++#undef assert
++#define assert(E) do { \
++ if (unlikely(!(E))) FatalError("%s:%d assertion '%s' failed\n", __func__, __LINE__, #E); \
++} while (0)
++#endif
++
++#endif /* __XASSERT_H__ */
diff --git a/xorg-x11-drv-intel.spec b/xorg-x11-drv-intel.spec
index 68c98be..6af6ecd 100644
--- a/xorg-x11-drv-intel.spec
+++ b/xorg-x11-drv-intel.spec
@@ -26,7 +26,7 @@
Summary: Xorg X11 Intel video driver
Name: xorg-x11-drv-intel
Version: 2.99.916
-Release: 1%{?gitrev}%{?dist}
+Release: 2%{?gitrev}%{?dist}
URL: http://www.x.org
License: MIT
Group: User Interface/X Hardware Support
@@ -40,6 +40,8 @@ Source1: make-intel-gpu-tools-snapshot.sh
Source3: http://xorg.freedesktop.org/archive/individual/app/intel-gpu-tools-%{gputoolsver}.tar.bz2
Source4: make-git-snapshot.sh
Patch1: 0001-sna-dri3-Mesa-relies-upon-implicit-fences.patch
+Patch2: sna-fixes.patch
+
ExclusiveArch: %{ix86} x86_64 ia64
@@ -89,6 +91,7 @@ Debugging tools for Intel graphics chips
%setup -q -n xf86-video-intel-%{?gitdate:%{gitdate}}%{!?gitdate:%{dirsuffix}} -b3
%patch1 -p1
+%patch2 -p1 -b .snafix
%build
%configure %{?kmsonly:--enable-kms-only}
make %{?_smp_mflags}
@@ -147,6 +150,9 @@ rm -f $RPM_BUILD_ROOT%{_libdir}/libI*XvMC.so
%{_mandir}/man1/intel_*.1*
%changelog
+* Thu Sep 11 2014 Dave Airlie <airlied at redhat.com> 2.99.916-2
+- backport some SNA and MST fixes.
+
* Wed Sep 10 2014 Dave Airlie <airlied at redhat.com> 2.99.916-1
- Rebase to 2.99.916
More information about the scm-commits
mailing list