
xserver-xorg-video-intel: Changes to 'upstream-experimental'



 NEWS                                         |   28 ++-
 configure.ac                                 |    8 
 src/intel_module.c                           |    9 +
 src/render_program/exa_wm_src_projective.g7a |    4 
 src/render_program/exa_wm_src_projective.g7b |    2 
 src/sna/compiler.h                           |    2 
 src/sna/gen4_render.c                        |   17 +
 src/sna/kgem.c                               |   22 +-
 src/sna/sna.h                                |   13 -
 src/sna/sna_accel.c                          |   32 ++-
 src/sna/sna_composite.c                      |    1 
 src/sna/sna_display.c                        |    1 
 src/sna/sna_dri.c                            |  157 +++++++++++++----
 src/sna/sna_render.c                         |   13 -
 src/sna/sna_trapezoids.c                     |  241 +++++++--------------------
 src/sna/sna_video.c                          |   10 -
 test/.gitignore                              |    1 
 17 files changed, 295 insertions(+), 266 deletions(-)

New commits:
commit d2442c74b8d41018f260f1da13f3fe5d2795792f
Author: Chris Wilson <chris@chris-wilson.co.uk>
Date:   Wed Feb 20 10:53:57 2013 +0000

    2.21.3 release

diff --git a/NEWS b/NEWS
index f1951a7..4c088bd 100644
--- a/NEWS
+++ b/NEWS
@@ -1,3 +1,25 @@
+Release 2.21.3 (2013-02-20)
+===========================
+A few minor bugfixes, another point release.
+
+ * Fix tracking of DRI pixmaps and their backing bo across reparenting. If
+   we tried to execute a SwapBuffers after a Window was reparented, but
+   before the DRI client has updated its references, then we would end up
+   manipulating an exported pixmap without a flush flag set. In the worst
+   case, this would culminate in a segfault in the driver.
+   https://bugs.launchpad.net/ubuntu/+source/xserver-xorg-video-intel/+bug/1127497
+
+ * Restore the gen4 workarounds for flickering rendering - a few cases still
+   remain, as the root cause persists.
+   https://bugs.freedesktop.org/show_bug.cgi?id=60402
+
+ * Double check that the device has KMS enabled before claiming. This allows
+   X to gracefully fallback to VESA/fbdev rather than bailing out.
+   https://bugs.freedesktop.org/show_bug.cgi?id=60987
+
+ * Fix the UXA render programs for projective transforms on Ivybridge.
+
+
 Release 2.21.2 (2013-02-10)
 ===========================
 Pass the brown paper bags, I need half a dozen or so. That seemingly
diff --git a/configure.ac b/configure.ac
index 6aa0e6c..97daee6 100644
--- a/configure.ac
+++ b/configure.ac
@@ -23,7 +23,7 @@
 # Initialize Autoconf
 AC_PREREQ([2.60])
 AC_INIT([xf86-video-intel],
-        [2.21.2],
+        [2.21.3],
         [https://bugs.freedesktop.org/enter_bug.cgi?product=xorg],
         [xf86-video-intel])
 AC_CONFIG_SRCDIR([Makefile.am])

commit 2cab7e80eb6955a7f8ea051633f6975a12248b69
Author: Chris Wilson <chris@chris-wilson.co.uk>
Date:   Wed Feb 20 12:36:22 2013 +0000

    sna/trapezoids: Clamp cells to valid range
    
    Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>

diff --git a/src/sna/sna_trapezoids.c b/src/sna/sna_trapezoids.c
index d0e1bd1..9396baf 100644
--- a/src/sna/sna_trapezoids.c
+++ b/src/sna/sna_trapezoids.c
@@ -262,6 +262,7 @@ struct cell_list {
 	/* Points to the left-most cell in the scan line. */
 	struct cell head, tail;
 
+	int16_t x1, x2;
 	int16_t count, size;
 	struct cell *cells;
 	struct cell embedded[256];
@@ -331,7 +332,7 @@ cell_list_rewind(struct cell_list *cells)
 }
 
 static bool
-cell_list_init(struct cell_list *cells, int width)
+cell_list_init(struct cell_list *cells, int x1, int x2)
 {
 	cells->tail.next = NULL;
 	cells->tail.x = INT_MAX;
@@ -339,7 +340,9 @@ cell_list_init(struct cell_list *cells, int width)
 	cells->head.next = &cells->tail;
 	cell_list_rewind(cells);
 	cells->count = 0;
-	cells->size = width+1;
+	cells->x1 = x1;
+	cells->x2 = x2;
+	cells->size = x2 - x1 + 1;
 	cells->cells = cells->embedded;
 	if (cells->size > ARRAY_SIZE(cells->embedded))
 		cells->cells = malloc(cells->size * sizeof(struct cell));
@@ -392,6 +395,15 @@ cell_list_find(struct cell_list *cells, int x)
 	if (tail->x == x)
 		return tail;
 
+	if (x >= cells->x2)
+		return &cells->tail;
+
+	if (x < cells->x1)
+		x = cells->x1;
+
+	if (tail->x == x)
+		return tail;
+
 	do {
 		if (tail->next->x > x)
 			break;
@@ -980,7 +992,7 @@ tor_init(struct tor *converter, const BoxRec *box, int num_edges)
 	converter->xmax = box->x2;
 	converter->ymax = box->y2;
 
-	if (!cell_list_init(converter->coverages, box->x2 - box->x1))
+	if (!cell_list_init(converter->coverages, box->x1, box->x2))
 		return false;
 
 	active_list_reset(converter->active);
@@ -1135,32 +1147,21 @@ tor_blt(struct sna *sna,
 	int xmin, int xmax,
 	int unbounded)
 {
-	struct cell *cell = cells->head.next;
+	struct cell *cell;
 	BoxRec box;
-	int cover = 0;
-
-	/* Skip cells to the left of the clip region. */
-	while (cell->x < xmin) {
-		__DBG(("%s: skipping cell (%d, %d, %d)\n",
-		       __FUNCTION__,
-		       cell->x, cell->covered_height, cell->uncovered_area));
-
-		cover += cell->covered_height;
-		cell = cell->next;
-	}
-	cover *= FAST_SAMPLES_X*2;
+	int cover;
 
 	box.y1 = y;
 	box.y2 = y + height;
 	box.x1 = xmin;
 
 	/* Form the spans from the coverages and areas. */
-	for (; cell != NULL; cell = cell->next) {
+	cover = 0;
+	for (cell = cells->head.next; cell != &cells->tail; cell = cell->next) {
 		int x = cell->x;
 
-		if (x >= xmax)
-			break;
-
+		assert(x >= xmin);
+		assert(x < xmax);
 		__DBG(("%s: cell=(%d, %d, %d), cover=%d, max=%d\n", __FUNCTION__,
 		       cell->x, cell->covered_height, cell->uncovered_area,
 		       cover, xmax));
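
    [Editor's note: the net effect of the two hunks above is that lookups
    are clamped to the preallocated cell range at the source, so the
    span-emission loop in tor_blt() can drop its skip-and-bounds-check
    logic entirely. A minimal sketch of the clamp-at-lookup idea, written
    as a standalone wrapper for clarity -- in the driver the clamp lives
    inside cell_list_find() itself, and the names below are hypothetical:]

	#include <limits.h>

	struct cell { int x; struct cell *next; };
	struct cell_list {
		struct cell head, tail;	/* sentinels at INT_MIN / INT_MAX */
		int x1, x2;		/* valid cell range, fixed at init */
	};

	/* The in-range linked-list search, unchanged from before. */
	struct cell *cell_list_find(struct cell_list *cells, int x);

	/* Clamp lookups into [x1, x2): coordinates at or beyond x2 fold
	 * into the sentinel tail, coordinates left of x1 are pinned to
	 * the edge, so consumers may walk head..tail without checks. */
	static struct cell *
	cell_list_find_clamped(struct cell_list *cells, int x)
	{
		if (x >= cells->x2)
			return &cells->tail;
		if (x < cells->x1)
			x = cells->x1;
		return cell_list_find(cells, x);
	}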

commit 14de90b251dd8a6ff106e989580ef01cf5c2944d
Author: Chris Wilson <chris@chris-wilson.co.uk>
Date:   Wed Feb 20 12:00:54 2013 +0000

    sna/trapezoids: Embed a few cells into the stack
    
    Avoid an allocation in the common case where the set of trapezoids is
    fairly narrow.
    
    Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>

diff --git a/src/sna/sna_trapezoids.c b/src/sna/sna_trapezoids.c
index 4ac8b8b..d0e1bd1 100644
--- a/src/sna/sna_trapezoids.c
+++ b/src/sna/sna_trapezoids.c
@@ -264,6 +264,7 @@ struct cell_list {
 
 	int16_t count, size;
 	struct cell *cells;
+	struct cell embedded[256];
 };
 
 /* The active list contains edges in the current scan line ordered by
@@ -339,14 +340,17 @@ cell_list_init(struct cell_list *cells, int width)
 	cell_list_rewind(cells);
 	cells->count = 0;
 	cells->size = width+1;
-	cells->cells = malloc(cells->size * sizeof(struct cell));
+	cells->cells = cells->embedded;
+	if (cells->size > ARRAY_SIZE(cells->embedded))
+		cells->cells = malloc(cells->size * sizeof(struct cell));
 	return cells->cells != NULL;
 }
 
 static void
 cell_list_fini(struct cell_list *cells)
 {
-	free(cells->cells);
+	if (cells->cells != cells->embedded)
+		free(cells->cells);
 }
 
 inline static void
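
    [Editor's note: this is the classic small-buffer optimisation -- keep a
    fixed-size array embedded in the structure and only touch the heap when
    a request outgrows it. A self-contained sketch of the pattern, with
    hypothetical names rather than the driver's code:]

	#include <stdbool.h>
	#include <stdlib.h>

	#define N_EMBEDDED 256

	struct small_array {
		int size;
		int *data;
		int embedded[N_EMBEDDED];
	};

	/* Use the embedded storage when it suffices, malloc otherwise. */
	static bool small_array_init(struct small_array *a, int size)
	{
		a->size = size;
		a->data = a->embedded;
		if (size > N_EMBEDDED)
			a->data = malloc(size * sizeof(*a->data));
		return a->data != NULL; /* the embedded path cannot fail */
	}

	/* Only a heap allocation needs freeing. */
	static void small_array_fini(struct small_array *a)
	{
		if (a->data != a->embedded)
			free(a->data);
	}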

commit 3eca4ea1a5d8ce04598b8d42e93e0dcb93e42e9a
Author: Chris Wilson <chris@chris-wilson.co.uk>
Date:   Wed Feb 20 11:20:54 2013 +0000

    sna/trapezoids: Perform the cell allocation upfront
    
    As we know the maximum extents of the trapezoids, we know the maximum
    number of cells we will need and so can preallocate them.
    
    Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
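
    [Editor's note: the gist, as the diff below shows, is that with the
    extents known at most one cell per x column can ever exist, so one
    worst-case allocation up front reduces cell allocation to bumping a
    counter. A simplified sketch under that assumption, with hypothetical
    names:]

	#include <assert.h>
	#include <stdbool.h>
	#include <stdlib.h>

	struct cell { int x, covered_height, uncovered_area; };

	struct cell_array {
		int count, size;
		struct cell *cells;
	};

	/* Preallocate the worst case: one cell per column, plus one. */
	static bool cell_array_init(struct cell_array *c, int width)
	{
		c->count = 0;
		c->size = width + 1;
		c->cells = malloc(c->size * sizeof(struct cell));
		return c->cells != NULL;
	}

	/* Allocation degenerates to a bump; it cannot overflow by
	 * construction, hence the assert rather than a resize path. */
	static struct cell *cell_array_alloc(struct cell_array *c)
	{
		assert(c->count < c->size);
		return &c->cells[c->count++];
	}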

diff --git a/src/sna/sna_trapezoids.c b/src/sna/sna_trapezoids.c
index c547fb5..4ac8b8b 100644
--- a/src/sna/sna_trapezoids.c
+++ b/src/sna/sna_trapezoids.c
@@ -160,26 +160,6 @@ struct quorem {
 	int32_t rem;
 };
 
-struct _pool_chunk {
-	size_t size;
-	struct _pool_chunk *prev_chunk;
-	/* Actual data starts here.	 Well aligned for pointers. */
-};
-
-/* A memory pool.  This is supposed to be embedded on the stack or
- * within some other structure.	 It may optionally be followed by an
- * embedded array from which requests are fulfilled until
- * malloc needs to be called to allocate a first real chunk. */
-struct pool {
-	struct _pool_chunk *current;
-	struct _pool_chunk *first_free;
-
-	/* Header for the sentinel chunk.  Directly following the pool
-	 * struct should be some space for embedded elements from which
-	 * the sentinel chunk allocates from. */
-	struct _pool_chunk sentinel[1];
-};
-
 struct edge {
 	struct edge *next, *prev;
 
@@ -277,17 +257,13 @@ struct cell {
  * ascending x.  It is geared towards scanning the cells in order
  * using an internal cursor. */
 struct cell_list {
+	struct cell *cursor;
+
 	/* Points to the left-most cell in the scan line. */
 	struct cell head, tail;
 
-	struct cell *cursor;
-
-	/* Cells in the cell list are owned by the cell list and are
-	 * allocated from this pool.  */
-	struct {
-		struct pool base[1];
-		struct cell embedded[256];
-	} cell_pool;
+	int16_t count, size;
+	struct cell *cells;
 };
 
 /* The active list contains edges in the current scan line ordered by
@@ -345,103 +321,6 @@ floored_muldivrem(int32_t x, int32_t a, int32_t b)
 	return qr;
 }
 
-static inline void
-_pool_chunk_init(struct _pool_chunk *p,
-		 struct _pool_chunk *prev_chunk)
-{
-	p->prev_chunk = prev_chunk;
-	p->size = sizeof(*p);
-}
-
-static struct _pool_chunk *
-_pool_chunk_create(struct _pool_chunk *prev_chunk)
-{
-	size_t size = 256*sizeof(struct cell);
-	struct _pool_chunk *p;
-
-	p = malloc(size + sizeof(struct _pool_chunk));
-	if (unlikely (p == NULL))
-		abort();
-
-	_pool_chunk_init(p, prev_chunk);
-	return p;
-}
-
-static void
-pool_init(struct pool *pool)
-{
-	pool->current = pool->sentinel;
-	pool->first_free = NULL;
-	_pool_chunk_init(pool->sentinel, NULL);
-}
-
-static void
-pool_fini(struct pool *pool)
-{
-	struct _pool_chunk *p = pool->current;
-	do {
-		while (NULL != p) {
-			struct _pool_chunk *prev = p->prev_chunk;
-			if (p != pool->sentinel)
-				free(p);
-			p = prev;
-		}
-		p = pool->first_free;
-		pool->first_free = NULL;
-	} while (NULL != p);
-}
-
-static void *
-_pool_alloc_from_new_chunk(struct pool *pool)
-{
-	struct _pool_chunk *chunk;
-	void *obj;
-
-	chunk = pool->first_free;
-	if (chunk) {
-		pool->first_free = chunk->prev_chunk;
-		_pool_chunk_init(chunk, pool->current);
-	} else {
-		chunk = _pool_chunk_create(pool->current);
-	}
-	pool->current = chunk;
-
-	obj = (unsigned char*)chunk + chunk->size;
-	chunk->size += sizeof(struct cell);
-	return obj;
-}
-
-inline static void *
-pool_alloc(struct pool *pool)
-{
-	struct _pool_chunk *chunk = pool->current;
-
-	if (chunk->size < 256*sizeof(struct cell)+sizeof(*chunk)) {
-		void *obj = (unsigned char*)chunk + chunk->size;
-		chunk->size += sizeof(struct cell);
-		return obj;
-	} else
-		return _pool_alloc_from_new_chunk(pool);
-}
-
-static void
-pool_reset(struct pool *pool)
-{
-	/* Transfer all used chunks to the chunk free list. */
-	struct _pool_chunk *chunk = pool->current;
-	if (chunk != pool->sentinel) {
-		while (chunk->prev_chunk != pool->sentinel)
-			chunk = chunk->prev_chunk;
-
-		chunk->prev_chunk = pool->first_free;
-		pool->first_free = pool->current;
-	}
-
-	/* Reset the sentinel as the current chunk. */
-	pool->current = pool->sentinel;
-	pool->sentinel->size = sizeof(*chunk);
-}
-
 /* Rewinds the cell list's cursor to the beginning.  After rewinding
  * we're good to cell_list_find() the cell any x coordinate. */
 inline static void
@@ -450,21 +329,24 @@ cell_list_rewind(struct cell_list *cells)
 	cells->cursor = &cells->head;
 }
 
-static void
-cell_list_init(struct cell_list *cells)
+static bool
+cell_list_init(struct cell_list *cells, int width)
 {
-	pool_init(cells->cell_pool.base);
 	cells->tail.next = NULL;
 	cells->tail.x = INT_MAX;
 	cells->head.x = INT_MIN;
 	cells->head.next = &cells->tail;
 	cell_list_rewind(cells);
+	cells->count = 0;
+	cells->size = width+1;
+	cells->cells = malloc(cells->size * sizeof(struct cell));
+	return cells->cells != NULL;
 }
 
 static void
 cell_list_fini(struct cell_list *cells)
 {
-	pool_fini(cells->cell_pool.base);
+	free(cells->cells);
 }
 
 inline static void
@@ -472,7 +354,7 @@ cell_list_reset(struct cell_list *cells)
 {
 	cell_list_rewind(cells);
 	cells->head.next = &cells->tail;
-	pool_reset(cells->cell_pool.base);
+	cells->count = 0;
 }
 
 inline static struct cell *
@@ -482,10 +364,11 @@ cell_list_alloc(struct cell_list *cells,
 {
 	struct cell *cell;
 
-	cell = pool_alloc(cells->cell_pool.base);
-
+	assert(cells->count < cells->size);
+	cell = cells->cells + cells->count++;
 	cell->next = tail->next;
 	tail->next = cell;
+
 	cell->x = x;
 	cell->uncovered_area = 0;
 	cell->covered_height = 0;
@@ -594,7 +477,7 @@ polygon_fini(struct polygon *polygon)
 		free(polygon->edges);
 }
 
-static int
+static bool
 polygon_init(struct polygon *polygon,
 	     int num_edges,
 	     grid_scaled_y_t ymin,
@@ -627,11 +510,11 @@ polygon_init(struct polygon *polygon,
 
 	polygon->ymin = ymin;
 	polygon->ymax = ymax;
-	return 0;
+	return true;
 
 bail_no_mem:
 	polygon_fini(polygon);
-	return -1;
+	return false;
 }
 
 static void
@@ -1079,7 +962,7 @@ tor_fini(struct tor *converter)
 	cell_list_fini(converter->coverages);
 }
 
-static int
+static bool
 tor_init(struct tor *converter, const BoxRec *box, int num_edges)
 {
 	__DBG(("%s: (%d, %d),(%d, %d) x (%d, %d), num_edges=%d\n",
@@ -1093,12 +976,19 @@ tor_init(struct tor *converter, const BoxRec *box, int num_edges)
 	converter->xmax = box->x2;
 	converter->ymax = box->y2;
 
-	cell_list_init(converter->coverages);
+	if (!cell_list_init(converter->coverages, box->x2 - box->x1))
+		return false;
+
 	active_list_reset(converter->active);
-	return polygon_init(converter->polygon,
+	if (!polygon_init(converter->polygon,
 			    num_edges,
 			    box->y1 * FAST_SAMPLES_Y,
-			    box->y2 * FAST_SAMPLES_Y);
+			    box->y2 * FAST_SAMPLES_Y)) {
+		cell_list_fini(converter->coverages);
+		return false;
+	}
+
+	return true;
 }
 
 static void
@@ -4591,7 +4481,7 @@ span_thread(void *arg)
 	const xTrapezoid *t;
 	int n, y1, y2;
 
-	if (tor_init(&tor, &thread->extents, 2*thread->ntrap))
+	if (!tor_init(&tor, &thread->extents, 2*thread->ntrap))
 		return;
 
 	boxes.op = thread->op;
@@ -4753,7 +4643,7 @@ trapezoid_span_converter(struct sna *sna,
 	if (num_threads == 1) {
 		struct tor tor;
 
-		if (tor_init(&tor, &extents, 2*ntrap))
+		if (!tor_init(&tor, &extents, 2*ntrap))
 			goto skip;
 
 		for (n = 0; n < ntrap; n++) {
@@ -4774,7 +4664,6 @@ trapezoid_span_converter(struct sna *sna,
 			   choose_span(&tmp, dst, maskFormat, &clip),
 			   !was_clear && maskFormat && !operator_is_bounded(op));
 
-skip:
 		tor_fini(&tor);
 	} else {
 		struct span_thread threads[num_threads];
@@ -4815,6 +4704,7 @@ skip:
 
 		sna_threads_wait();
 	}
+skip:
 	tmp.done(sna, &tmp);
 
 	REGION_UNINIT(NULL, &clip);
@@ -4938,7 +4828,7 @@ trapezoid_mask_converter(CARD8 op, PicturePtr src, PicturePtr dst,
 	DBG(("%s: created buffer %p, stride %d\n",
 	     __FUNCTION__, scratch->devPrivate.ptr, scratch->devKind));
 
-	if (tor_init(&tor, &extents, 2*ntrap)) {
+	if (!tor_init(&tor, &extents, 2*ntrap)) {
 		sna_pixmap_destroy(scratch);
 		return true;
 	}
@@ -5690,7 +5580,7 @@ static void inplace_x8r8g8b8_thread(void *arg)
 	RegionPtr clip;
 	int y1, y2, n;
 
-	if (tor_init(&tor, &thread->extents, 2*thread->ntrap))
+	if (!tor_init(&tor, &thread->extents, 2*thread->ntrap))
 		return;
 
 	y1 = thread->extents.y1 - thread->dst->pDrawable->y;
@@ -5884,7 +5774,7 @@ trapezoid_span_inplace__x8r8g8b8(CARD8 op,
 		struct tor tor;
 		span_func_t span;
 
-		if (tor_init(&tor, &region.extents, 2*ntrap))
+		if (!tor_init(&tor, &region.extents, 2*ntrap))
 			return true;
 
 		for (n = 0; n < ntrap; n++) {
@@ -6037,7 +5927,7 @@ static void inplace_thread(void *arg)
 	struct tor tor;
 	int n;
 
-	if (tor_init(&tor, &thread->extents, 2*thread->ntrap))
+	if (!tor_init(&tor, &thread->extents, 2*thread->ntrap))
 		return;
 
 	for (n = 0; n < thread->ntrap; n++) {
@@ -6247,7 +6137,7 @@ trapezoid_span_inplace(struct sna *sna,
 	if (num_threads == 1) {
 		struct tor tor;
 
-		if (tor_init(&tor, &region.extents, 2*ntrap))
+		if (!tor_init(&tor, &region.extents, 2*ntrap))
 			return true;
 
 		for (n = 0; n < ntrap; n++) {
@@ -6385,7 +6275,7 @@ trapezoid_span_fallback(CARD8 op, PicturePtr src, PicturePtr dst,
 	DBG(("%s: created buffer %p, stride %d\n",
 	     __FUNCTION__, scratch->devPrivate.ptr, scratch->devKind));
 
-	if (tor_init(&tor, &extents, 2*ntrap)) {
+	if (!tor_init(&tor, &extents, 2*ntrap)) {
 		sna_pixmap_destroy(scratch);
 		return true;
 	}
@@ -6750,7 +6640,7 @@ trap_span_converter(struct sna *sna,
 
 	dx *= FAST_SAMPLES_X;
 	dy *= FAST_SAMPLES_Y;
-	if (tor_init(&tor, &extents, 2*ntrap))
+	if (!tor_init(&tor, &extents, 2*ntrap))
 		goto skip;
 
 	for (n = 0; n < ntrap; n++) {
@@ -6780,8 +6670,8 @@ trap_span_converter(struct sna *sna,
 	tor_render(sna, &tor, &tmp, clip,
 		   choose_span(&tmp, dst, NULL, clip), false);
 
-skip:
 	tor_fini(&tor);
+skip:
 	tmp.done(sna, &tmp);
 	return true;
 }
@@ -6870,7 +6760,7 @@ trap_mask_converter(struct sna *sna,
 	dy = picture->pDrawable->y;
 	dx *= FAST_SAMPLES_X;
 	dy *= FAST_SAMPLES_Y;
-	if (tor_init(&tor, &extents, 2*ntrap)) {
+	if (!tor_init(&tor, &extents, 2*ntrap)) {
 		sna_pixmap_destroy(scratch);
 		return true;
 	}
@@ -7322,7 +7212,7 @@ triangles_span_converter(struct sna *sna,
 
 	dx *= FAST_SAMPLES_X;
 	dy *= FAST_SAMPLES_Y;
-	if (tor_init(&tor, &extents, 3*count))
+	if (!tor_init(&tor, &extents, 3*count))
 		goto skip;
 
 	for (n = 0; n < count; n++) {
@@ -7340,8 +7230,8 @@ triangles_span_converter(struct sna *sna,
 		   choose_span(&tmp, dst, maskFormat, &clip),
 		   !was_clear && maskFormat && !operator_is_bounded(op));
 
-skip:
 	tor_fini(&tor);
+skip:
 	tmp.done(sna, &tmp);
 
 	REGION_UNINIT(NULL, &clip);
@@ -7422,7 +7312,7 @@ triangles_mask_converter(CARD8 op, PicturePtr src, PicturePtr dst,
 	DBG(("%s: created buffer %p, stride %d\n",
 	     __FUNCTION__, scratch->devPrivate.ptr, scratch->devKind));
 
-	if (tor_init(&tor, &extents, 3*count)) {
+	if (!tor_init(&tor, &extents, 3*count)) {
 		sna_pixmap_destroy(scratch);
 		return true;
 	}
@@ -7687,7 +7577,7 @@ tristrip_span_converter(struct sna *sna,
 
 	dx *= FAST_SAMPLES_X;
 	dy *= FAST_SAMPLES_Y;
-	if (tor_init(&tor, &extents, 2*count))
+	if (!tor_init(&tor, &extents, 2*count))
 		goto skip;
 
 	cw = ccw = 0;
@@ -7715,8 +7605,8 @@ tristrip_span_converter(struct sna *sna,
 		   choose_span(&tmp, dst, maskFormat, &clip),
 		   !was_clear && maskFormat && !operator_is_bounded(op));
 
-skip:
 	tor_fini(&tor);
+skip:
 	tmp.done(sna, &tmp);
 
 	REGION_UNINIT(NULL, &clip);

commit 9df5e48c582e5c4edffdece75b5395c230a50b09
Author: Chris Wilson <chris@chris-wilson.co.uk>
Date:   Tue Feb 19 10:08:29 2013 +0000

    sna/video: Only setup XvMC if we first setup Xv
    
    Under certain circumstances, XvScreenInit can indeed fail, so do not
    bother with creating XvMC (as it triggers internal assertions if it
    cannot find our adaptor amongst Xv's).
    
    Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>

diff --git a/src/sna/sna_video.c b/src/sna/sna_video.c
index 07fa829..ecf52d2 100644
--- a/src/sna/sna_video.c
+++ b/src/sna/sna_video.c
@@ -569,13 +569,13 @@ void sna_video_init(struct sna *sna, ScreenPtr screen)
 	if (overlay && !prefer_overlay)
 		adaptors[num_adaptors++] = overlay;
 
-	if (num_adaptors)
-		xf86XVScreenInit(screen, adaptors, num_adaptors);
-	else
+	if (num_adaptors) {
+		Bool ok = xf86XVScreenInit(screen, adaptors, num_adaptors);
+		if (ok && textured)
+			sna_video_xvmc_setup(sna, screen, textured);
+	} else
 		xf86DrvMsg(sna->scrn->scrnIndex, X_WARNING,
 			   "Disabling Xv because no adaptors could be initialized.\n");
-	if (textured)
-		sna_video_xvmc_setup(sna, screen, textured);
 
 	free(adaptors);
 }

commit b6588c48077600a3e015b6d37b101393a806ae1a
Author: Chris Wilson <chris@chris-wilson.co.uk>
Date:   Tue Feb 19 08:59:12 2013 +0000

    test: Add generated vsync.avi to gitignore

diff --git a/test/.gitignore b/test/.gitignore
index d3e59c5..b1c350e 100644
--- a/test/.gitignore
+++ b/test/.gitignore
@@ -14,3 +14,4 @@ render-copyarea-size
 render-copy-alphaless
 mixed-stress
 lowlevel-blt-bench
+test/vsync.avi

commit 1e2fd66ade6bdbf1e6011f3d59e423fada3f12f6
Author: Chris Wilson <chris@chris-wilson.co.uk>
Date:   Tue Feb 19 08:57:47 2013 +0000

    sna: Assert that the GPU damage is NULL before destroying a proxy
    
    If the GPU bo is a proxy, then it really is a pointer into an upload
    buffer for CPU data. In these cases, there should never be any GPU
    damage lying around.
    
    Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>

diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index 0f9a725..a577af6 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -11783,6 +11783,7 @@ sna_poly_fill_rect(DrawablePtr draw, GCPtr gc, int n, xRectangle *rect)
 			if (region_subsumes_damage(&region, priv->cpu_damage)) {
 				DBG(("%s: discarding existing CPU damage\n", __FUNCTION__));
 				if (priv->gpu_bo && priv->gpu_bo->proxy) {
+					assert(priv->gpu_damage == NULL);
 					kgem_bo_destroy(&sna->kgem, priv->gpu_bo);
 					priv->gpu_bo = NULL;
 				}
diff --git a/src/sna/sna_composite.c b/src/sna/sna_composite.c
index a4b85fe..e067799 100644
--- a/src/sna/sna_composite.c
+++ b/src/sna/sna_composite.c
@@ -921,6 +921,7 @@ sna_composite_rectangles(CARD8		 op,
 		    region_subsumes_damage(&region, priv->cpu_damage)) {
 			DBG(("%s: discarding existing CPU damage\n", __FUNCTION__));
 			if (priv->gpu_bo && priv->gpu_bo->proxy) {
+				assert(priv->gpu_damage == NULL);
 				kgem_bo_destroy(&sna->kgem, priv->gpu_bo);
 				priv->gpu_bo = NULL;
 			}

commit d90a123db7ac99cf017167bf89df31c635df7e1e
Author: Chris Wilson <chris@chris-wilson.co.uk>
Date:   Mon Feb 18 20:06:45 2013 +0000

    sna: Assert that the gpu_bo exists if it is entirely damaged
    
    This should help catch the error slightly earlier.
    
    Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>

diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index b5e41ee..0f9a725 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -675,6 +675,8 @@ bool sna_pixmap_attach_to_bo(PixmapPtr pixmap, struct kgem_bo *bo)
 {
 	struct sna_pixmap *priv;
 
+	assert(bo);
+
 	priv = sna_pixmap_attach(pixmap);
 	if (!priv)
 		return false;
@@ -1995,6 +1997,7 @@ sna_drawable_move_region_to_cpu(DrawablePtr drawable,
 	    __kgem_bo_is_busy(&sna->kgem, priv->cpu_bo)) {
 		sna_damage_subtract(&priv->cpu_damage, region);
 		if (sna_pixmap_move_to_gpu(pixmap, MOVE_READ | MOVE_ASYNC_HINT)) {
+			assert(priv->gpu_bo);
 			sna_damage_all(&priv->gpu_damage,
 				       pixmap->drawable.width,
 				       pixmap->drawable.height);
@@ -2413,6 +2416,7 @@ sna_pixmap_move_area_to_gpu(PixmapPtr pixmap, const BoxRec *box, unsigned int fl
 	if (sna_damage_is_all(&priv->gpu_damage,
 			      pixmap->drawable.width,
 			      pixmap->drawable.height)) {
+		assert(priv->gpu_bo);
 		sna_damage_destroy(&priv->cpu_damage);
 		list_del(&priv->list);
 		goto done;
@@ -2579,6 +2583,7 @@ done:
 		    box_inplace(pixmap, &r.extents)) {
 			DBG(("%s: large operation on undamaged, promoting to full GPU\n",
 			     __FUNCTION__));
+			assert(priv->gpu_bo);
 			assert(priv->gpu_bo->proxy == NULL);
 			sna_damage_all(&priv->gpu_damage,
 				       pixmap->drawable.width,
@@ -3052,6 +3057,7 @@ sna_pixmap_move_to_gpu(PixmapPtr pixmap, unsigned flags)
 			      pixmap->drawable.width,
 			      pixmap->drawable.height)) {
 		DBG(("%s: already all-damaged\n", __FUNCTION__));
+		assert(priv->gpu_bo);
 		sna_damage_destroy(&priv->cpu_damage);
 		list_del(&priv->list);
 		assert(priv->cpu == false || IS_CPU_MAP(priv->gpu_bo->map));
@@ -3116,6 +3122,7 @@ sna_pixmap_move_to_gpu(PixmapPtr pixmap, unsigned flags)
 			 * synchronisation that takes the most time. This is
 			 * mitigated by avoiding fallbacks in the first place.
 			 */
+			assert(priv->gpu_bo);
 			assert(priv->gpu_bo->proxy == NULL);
 			sna_damage_all(&priv->gpu_damage,
 				       pixmap->drawable.width,
@@ -3197,6 +3204,7 @@ sna_pixmap_move_to_gpu(PixmapPtr pixmap, unsigned flags)
 		DBG(("%s: disposing of system copy for large/source\n",
 		     __FUNCTION__));
 		assert(!priv->shm);
+		assert(priv->gpu_bo);
 		assert(priv->gpu_bo->proxy == NULL);
 		sna_damage_all(&priv->gpu_damage,
 			       pixmap->drawable.width,
@@ -4523,6 +4531,7 @@ sna_copy_boxes(DrawablePtr src, DrawablePtr dst, GCPtr gc,
 			assert(dst_priv->clear == false);
 			dst_priv->cpu = false;
 			if (damage) {
+				assert(dst_priv->gpu_bo);
 				assert(dst_priv->gpu_bo->proxy == NULL);
 				if (replaces) {
 					sna_damage_destroy(&dst_priv->cpu_damage);
diff --git a/src/sna/sna_dri.c b/src/sna/sna_dri.c
index 22b12f0..6979a30 100644
--- a/src/sna/sna_dri.c
+++ b/src/sna/sna_dri.c
@@ -470,6 +470,7 @@ static void damage(PixmapPtr pixmap, RegionPtr region)
 
 	priv = sna_pixmap(pixmap);
 	assert(priv != NULL);
+	assert(priv->gpu_bo);
 	if (DAMAGE_IS_ALL(priv->gpu_damage))
 		return;
 

commit a03aba6f7f55577e29c6a3267528e2614016222d
Author: Chris Wilson <chris@chris-wilson.co.uk>
Date:   Mon Feb 18 16:12:14 2013 +0000

    sna: A couple more assertions that we forcibly attach pixmaps correctly
    
    Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>

diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index 12f078e..b5e41ee 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -3041,6 +3041,9 @@ sna_pixmap_move_to_gpu(PixmapPtr pixmap, unsigned flags)
 		sna_damage_all(&priv->cpu_damage,
 			       pixmap->drawable.width,
 			       pixmap->drawable.height);
+
+		assert(priv->gpu_bo == NULL);
+		assert(priv->gpu_damage == NULL);
 	}
 
 	assert(priv->gpu_damage == NULL || priv->gpu_bo);
diff --git a/src/sna/sna_dri.c b/src/sna/sna_dri.c
index 5d5ea17..22b12f0 100644
--- a/src/sna/sna_dri.c
+++ b/src/sna/sna_dri.c
@@ -177,6 +177,7 @@ static struct kgem_bo *sna_pixmap_set_dri(struct sna *sna,
 
 	assert(priv->flush == false);
 	assert(priv->cpu_damage == NULL);
+	assert(priv->gpu_bo);
 	assert(priv->gpu_bo->proxy == NULL);
 	assert(priv->gpu_bo->flush == false);
 

commit 1f16d854264ea923303b79379266bd789fd9dd4d
Author: Chris Wilson <chris@chris-wilson.co.uk>
Date:   Mon Feb 18 14:30:55 2013 +0000

    sna/dri: Prevent swapping a decoupled DRI2Buffer
    
    If the DRI2Buffer is no longer valid for the Drawable, for example
    because the window has just been reparented, complete the swap without
    triggering any assertions.
    
    Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
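
    [Editor's note: conceptually, as the diff below shows, the fix compares
    the pixmap the DRI2 buffer was created against with the pixmap the
    drawable currently resolves to; on a mismatch (e.g. after a reparent)
    the swap is reported complete without entering the flip/blit paths. An
    abstract sketch of the guard, with hypothetical types standing in for
    the driver's structures:]

	#include <stdbool.h>

	struct dri2_buffer {
		void *pixmap;	/* the backing pixmap at creation time */
	};

	/* Returns true if the swap should be completed as a no-op because
	 * the buffer no longer matches the drawable's current backing.
	 * Caller (sketch): if (swap_decoupled(...)) goto skip; where the
	 * skip label just calls DRI2SwapComplete(). */
	static bool swap_decoupled(const struct dri2_buffer *front,
				   const void *current_pixmap)
	{
		return front->pixmap != current_pixmap;
	}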

diff --git a/src/sna/sna_dri.c b/src/sna/sna_dri.c
index 7fea15f..5d5ea17 100644
--- a/src/sna/sna_dri.c
+++ b/src/sna/sna_dri.c
@@ -261,6 +261,7 @@ sna_dri_create_buffer(DrawablePtr draw,
 			     private->bo->handle, buffer->name));
 
 			assert(private->pixmap == pixmap);
+			assert(sna_pixmap(pixmap)->flush);
 			assert(sna_pixmap(pixmap)->gpu_bo == private->bo);
 			assert(sna_pixmap(pixmap)->pinned & PIN_DRI);
 			assert(kgem_bo_flink(&sna->kgem, private->bo) == buffer->name);
@@ -1309,7 +1310,7 @@ can_exchange(struct sna * sna,
 		     __FUNCTION__));
 		return false;
 	}
-	assert(get_private(front)->pixmap == sna->front);
+	assert(get_private(front)->pixmap != sna->front);
 
 	if (!get_private(back)->scanout) {
 		DBG(("%s: no, DRI2 drawable was too small at time of creation)\n",
@@ -1767,6 +1768,7 @@ sna_dri_page_flip_handler(struct sna *sna,
 	struct sna_dri_frame_event *info = to_frame_event(event->user_data);
 
 	DBG(("%s: pending flip_count=%d\n", __FUNCTION__, info->count));
+	assert(info->count > 0);
 
 	/* Is this the event whose info shall be delivered to higher level? */
 	if (event->user_data & 1) {
@@ -2105,9 +2107,7 @@ sna_dri_schedule_swap(ClientPtr client, DrawablePtr draw, DRI2BufferPtr front,
 		      DRI2BufferPtr back, CARD64 *target_msc, CARD64 divisor,
 		      CARD64 remainder, DRI2SwapEventPtr func, void *data)
 {
-	ScreenPtr screen = draw->pScreen;
-	ScrnInfoPtr scrn = xf86ScreenToScrn(screen);
-	struct sna *sna = to_sna(scrn);
+	struct sna *sna = to_sna_from_drawable(draw);
 	drmVBlank vbl;
 	int pipe;
 	struct sna_dri_frame_event *info = NULL;
@@ -2135,13 +2135,16 @@ sna_dri_schedule_swap(ClientPtr client, DrawablePtr draw, DRI2BufferPtr front,
 	assert(get_private(back)->bo->refcnt);
 	assert(get_private(back)->bo->flush);
 
+	if (get_private(front)->pixmap != get_drawable_pixmap(draw))
+		goto skip;
+
 	assert(sna_pixmap_from_drawable(draw)->flush);
 
 	/* Drawable not displayed... just complete the swap */
 	pipe = sna_dri_get_pipe(draw);
 	if (pipe == -1) {
 		DBG(("%s: off-screen, immediate update\n", __FUNCTION__));
-		goto blit_fallback;
+		goto blit;
 	}
 
 	if (can_flip(sna, draw, front, back) &&
@@ -2154,7 +2157,7 @@ sna_dri_schedule_swap(ClientPtr client, DrawablePtr draw, DRI2BufferPtr front,
 
 	info = calloc(1, sizeof(struct sna_dri_frame_event));
 	if (!info)
-		goto blit_fallback;
+		goto blit;
 
 	info->draw = draw;
 	info->client = client;
@@ -2210,7 +2213,7 @@ sna_dri_schedule_swap(ClientPtr client, DrawablePtr draw, DRI2BufferPtr front,
 		vbl.request.sequence = *target_msc;
 		vbl.request.signal = (unsigned long)info;
 		if (sna_wait_vblank(sna, &vbl))
-			goto blit_fallback;
+			goto blit;
 
 		return TRUE;
 	}
@@ -2250,11 +2253,11 @@ sna_dri_schedule_swap(ClientPtr client, DrawablePtr draw, DRI2BufferPtr front,
 	vbl.request.sequence -= 1;
 	vbl.request.signal = (unsigned long)info;
 	if (sna_wait_vblank(sna, &vbl))
-		goto blit_fallback;
+		goto blit;
 
 	return TRUE;
 
-blit_fallback:
+blit:
 	pipe = DRI2_BLIT_COMPLETE;
 	if (can_exchange(sna, draw, front, back)) {
 		DBG(("%s -- xchg\n", __FUNCTION__));
@@ -2269,6 +2272,7 @@ blit_fallback:
 	}
 	if (info)
 		sna_dri_frame_event_info_free(sna, draw, info);
+skip:
 	DRI2SwapComplete(client, draw, 0, 0, 0, pipe, func, data);
 	*target_msc = 0; /* offscreen, so zero out target vblank count */
 	return TRUE;

commit b3ba758a0186c9abc6c0583f52775ea714165134
Author: Damien Lespiau <damien.lespiau@intel.com>
Date:   Thu Feb 14 14:20:19 2013 +0000

    uxa/gen7: Don't use a message register to store vl
    
    Turns out the "new" assembler that uses mesa's opcode emission hits the
    path that automatically transforms MRF registers into GRF ones in the
    exa_wm_src_projective shader.
    
    The diff with the new assembler is:
    
    $ intel-gen4disasm -g7 -
    -   { 0x00600041, 0x208077be, 0x008d03c0, 0x008d0180 },
    +   { 0x00600041, 0x2e8077bd, 0x008d03c0, 0x008d0180 },

