r44 - videolan/vlc/debian/patches
xtophe-guest at alioth.debian.org
Wed Mar 14 14:19:20 CET 2007
Author: xtophe-guest
Date: 2006-05-15 19:58:23 +0000 (Mon, 15 May 2006)
New Revision: 44
Removed:
videolan/vlc/debian/patches/030_x264_ppc_asm.diff
Modified:
videolan/vlc/debian/patches/series
Log:
This patch was applied upstream
Deleted: videolan/vlc/debian/patches/030_x264_ppc_asm.diff
===================================================================
--- videolan/vlc/debian/patches/030_x264_ppc_asm.diff 2006-05-15 18:43:37 UTC (rev 43)
+++ videolan/vlc/debian/patches/030_x264_ppc_asm.diff 2006-05-15 19:58:23 UTC (rev 44)
@@ -1,337 +0,0 @@
-Index: debian/official/pkg-multimedia/unstable/build-area/vlc-0.8.5-test3.debian/extras/x264/common/ppc/pixel.c
-===================================================================
---- debian.orig/official/pkg-multimedia/unstable/build-area/vlc-0.8.5-test3.debian/extras/x264/common/ppc/pixel.c 2006-05-14 09:08:09.000000000 +0200
-+++ debian/official/pkg-multimedia/unstable/build-area/vlc-0.8.5-test3.debian/extras/x264/common/ppc/pixel.c 2006-05-14 09:09:35.000000000 +0200
-@@ -610,19 +610,18 @@
- DECLARE_ALIGNED( int, sum3, 16 );
- int y;
-
-+ LOAD_ZERO;
- vec_u8_t temp_lv, temp_hv;
- vec_u8_t fencv, pix0v, pix1v, pix2v, pix3v;
- //vec_u8_t perm0v, perm1v, perm2v, perm3v;
- vec_u8_t perm0vA, perm1vA, perm2vA, perm3vA, perm0vB, perm1vB, perm2vB, perm3vB;
-
-- vec_u32_t sum0v, sum1v, sum2v, sum3v;
-- vec_s32_t zero;
-+ vec_s32_t sum0v, sum1v, sum2v, sum3v;
-
-- zero = vec_splat_s32(0);
-- sum0v = vec_splat_u32(0);
-- sum1v = vec_splat_u32(0);
-- sum2v = vec_splat_u32(0);
-- sum3v = vec_splat_u32(0);
-+ sum0v = vec_splat_s32(0);
-+ sum1v = vec_splat_s32(0);
-+ sum2v = vec_splat_s32(0);
-+ sum3v = vec_splat_s32(0);
-
- perm0vA = vec_lvsl(0, pix0);
- perm1vA = vec_lvsl(0, pix1);
-@@ -704,10 +703,10 @@
-
- }
-
-- sum0v = (vec_u32_t) vec_sums( sum0v, zero );
-- sum1v = (vec_u32_t) vec_sums( sum1v, zero );
-- sum2v = (vec_u32_t) vec_sums( sum2v, zero );
-- sum3v = (vec_u32_t) vec_sums( sum3v, zero );
-+ sum0v = vec_sums( sum0v, zero_s32v );
-+ sum1v = vec_sums( sum1v, zero_s32v );
-+ sum2v = vec_sums( sum2v, zero_s32v );
-+ sum3v = vec_sums( sum3v, zero_s32v );
-
- sum0v = vec_splat( sum0v, 3 );
- sum1v = vec_splat( sum1v, 3 );
-@@ -736,17 +735,16 @@
- DECLARE_ALIGNED( int, sum2, 16 );
- int y;
-
-+ LOAD_ZERO;
- vec_u8_t temp_lv, temp_hv; // temporary load vectors
- vec_u8_t fencv, pix0v, pix1v, pix2v;
- vec_u8_t perm0vA, perm1vA, perm2vA, perm0vB, perm1vB, perm2vB;
-
-- vec_u32_t sum0v, sum1v, sum2v;
-- vec_s32_t zero;
-+ vec_s32_t sum0v, sum1v, sum2v;
-
-- zero = vec_splat_s32(0);
-- sum0v = vec_splat_u32(0);
-- sum1v = vec_splat_u32(0);
-- sum2v = vec_splat_u32(0);
-+ sum0v = vec_splat_s32(0);
-+ sum1v = vec_splat_s32(0);
-+ sum2v = vec_splat_s32(0);
-
- perm0vA = vec_lvsl(0, pix0);
- perm1vA = vec_lvsl(0, pix1);
-@@ -814,9 +812,9 @@
-
- }
-
-- sum0v = (vec_u32_t) vec_sums( sum0v, zero );
-- sum1v = (vec_u32_t) vec_sums( sum1v, zero );
-- sum2v = (vec_u32_t) vec_sums( sum2v, zero );
-+ sum0v = vec_sums( sum0v, zero_s32v );
-+ sum1v = vec_sums( sum1v, zero_s32v );
-+ sum2v = vec_sums( sum2v, zero_s32v );
-
- sum0v = vec_splat( sum0v, 3 );
- sum1v = vec_splat( sum1v, 3 );
-@@ -840,18 +838,17 @@
- DECLARE_ALIGNED( int, sum3, 16 );
- int y;
-
-+ LOAD_ZERO;
- vec_u8_t temp_lv, temp_hv;
- vec_u8_t fencv, pix0v, pix1v, pix2v, pix3v;
- vec_u8_t perm0vA, perm1vA, perm2vA, perm3vA, perm0vB, perm1vB, perm2vB, perm3vB;
-
-- vec_u32_t sum0v, sum1v, sum2v, sum3v;
-- vec_s32_t zero;
-+ vec_s32_t sum0v, sum1v, sum2v, sum3v;
-
-- zero = vec_splat_s32(0);
-- sum0v = vec_splat_u32(0);
-- sum1v = vec_splat_u32(0);
-- sum2v = vec_splat_u32(0);
-- sum3v = vec_splat_u32(0);
-+ sum0v = vec_splat_s32(0);
-+ sum1v = vec_splat_s32(0);
-+ sum2v = vec_splat_s32(0);
-+ sum3v = vec_splat_s32(0);
-
- perm0vA = vec_lvsl(0, pix0);
- perm1vA = vec_lvsl(0, pix1);
-@@ -934,10 +931,10 @@
-
- }
-
-- sum0v = (vec_u32_t) vec_sums( sum0v, zero );
-- sum1v = (vec_u32_t) vec_sums( sum1v, zero );
-- sum2v = (vec_u32_t) vec_sums( sum2v, zero );
-- sum3v = (vec_u32_t) vec_sums( sum3v, zero );
-+ sum0v = vec_sums( sum0v, zero_s32v );
-+ sum1v = vec_sums( sum1v, zero_s32v );
-+ sum2v = vec_sums( sum2v, zero_s32v );
-+ sum3v = vec_sums( sum3v, zero_s32v );
-
- sum0v = vec_splat( sum0v, 3 );
- sum1v = vec_splat( sum1v, 3 );
-@@ -966,17 +963,16 @@
- DECLARE_ALIGNED( int, sum2, 16 );
- int y;
-
-+ LOAD_ZERO;
- vec_u8_t temp_lv, temp_hv;
- vec_u8_t fencv, pix0v, pix1v, pix2v;
- vec_u8_t perm0vA, perm1vA, perm2vA, perm0vB, perm1vB, perm2vB;
-
-- vec_u32_t sum0v, sum1v, sum2v;
-- vec_s32_t zero;
-+ vec_s32_t sum0v, sum1v, sum2v;
-
-- zero = vec_splat_s32(0);
-- sum0v = vec_splat_u32(0);
-- sum1v = vec_splat_u32(0);
-- sum2v = vec_splat_u32(0);
-+ sum0v = vec_splat_s32(0);
-+ sum1v = vec_splat_s32(0);
-+ sum2v = vec_splat_s32(0);
-
-
- perm0vA = vec_lvsl(0, pix0);
-@@ -1044,9 +1040,9 @@
-
- }
-
-- sum0v = (vec_u32_t) vec_sums( sum0v, zero );
-- sum1v = (vec_u32_t) vec_sums( sum1v, zero );
-- sum2v = (vec_u32_t) vec_sums( sum2v, zero );
-+ sum0v = vec_sums( sum0v, zero_s32v );
-+ sum1v = vec_sums( sum1v, zero_s32v );
-+ sum2v = vec_sums( sum2v, zero_s32v );
-
- sum0v = vec_splat( sum0v, 3 );
- sum1v = vec_splat( sum1v, 3 );
-@@ -1071,18 +1067,17 @@
- DECLARE_ALIGNED( int, sum3, 16 );
- int y;
-
-+ LOAD_ZERO;
- vec_u8_t temp_lv, temp_hv;
- vec_u8_t fencv, pix0v, pix1v, pix2v, pix3v;
- vec_u8_t perm0vA, perm1vA, perm2vA, perm3vA, perm0vB, perm1vB, perm2vB, perm3vB, permEncv;
-
-- vec_u32_t sum0v, sum1v, sum2v, sum3v;
-- vec_s32_t zero;
-+ vec_s32_t sum0v, sum1v, sum2v, sum3v;
-
-- zero = vec_splat_s32(0);
-- sum0v = vec_splat_u32(0);
-- sum1v = vec_splat_u32(0);
-- sum2v = vec_splat_u32(0);
-- sum3v = vec_splat_u32(0);
-+ sum0v = vec_splat_s32(0);
-+ sum1v = vec_splat_s32(0);
-+ sum2v = vec_splat_s32(0);
-+ sum3v = vec_splat_s32(0);
-
- permEncv = vec_lvsl(0, fenc);
- perm0vA = vec_lvsl(0, pix0);
-@@ -1165,10 +1160,10 @@
- sum3v = (vec_s32_t) vec_sum4s( vec_sub( vec_max( fencv, pix3v ), vec_min( fencv, pix3v ) ), (vec_u32_t) sum3v );
- }
-
-- sum0v = (vec_u32_t) vec_sum2s( sum0v, zero );
-- sum1v = (vec_u32_t) vec_sum2s( sum1v, zero );
-- sum2v = (vec_u32_t) vec_sum2s( sum2v, zero );
-- sum3v = (vec_u32_t) vec_sum2s( sum3v, zero );
-+ sum0v = vec_sum2s( sum0v, zero_s32v );
-+ sum1v = vec_sum2s( sum1v, zero_s32v );
-+ sum2v = vec_sum2s( sum2v, zero_s32v );
-+ sum3v = vec_sum2s( sum3v, zero_s32v );
-
- sum0v = vec_splat( sum0v, 1 );
- sum1v = vec_splat( sum1v, 1 );
-@@ -1194,17 +1189,16 @@
- DECLARE_ALIGNED( int, sum2, 16 );
- int y;
-
-+ LOAD_ZERO;
- vec_u8_t temp_lv, temp_hv;
- vec_u8_t fencv, pix0v, pix1v, pix2v;
- vec_u8_t perm0vA, perm1vA, perm2vA, perm0vB, perm1vB, perm2vB,permEncv;
-
-- vec_u32_t sum0v, sum1v, sum2v;
-- vec_s32_t zero;
-+ vec_s32_t sum0v, sum1v, sum2v;
-
-- zero = vec_splat_s32(0);
-- sum0v = vec_splat_u32(0);
-- sum1v = vec_splat_u32(0);
-- sum2v = vec_splat_u32(0);
-+ sum0v = vec_splat_s32(0);
-+ sum1v = vec_splat_s32(0);
-+ sum2v = vec_splat_s32(0);
-
- permEncv = vec_lvsl(0, fenc);
- perm0vA = vec_lvsl(0, pix0);
-@@ -1275,9 +1269,9 @@
- }
-
-
-- sum0v = (vec_u32_t) vec_sum2s( sum0v, zero );
-- sum1v = (vec_u32_t) vec_sum2s( sum1v, zero );
-- sum2v = (vec_u32_t) vec_sum2s( sum2v, zero );
-+ sum0v = vec_sum2s( sum0v, zero_s32v );
-+ sum1v = vec_sum2s( sum1v, zero_s32v );
-+ sum2v = vec_sum2s( sum2v, zero_s32v );
-
- sum0v = vec_splat( sum0v, 1 );
- sum1v = vec_splat( sum1v, 1 );
-@@ -1301,18 +1295,17 @@
- DECLARE_ALIGNED( int, sum3, 16 );
- int y;
-
-+ LOAD_ZERO;
- vec_u8_t temp_lv, temp_hv;
- vec_u8_t fencv, pix0v, pix1v, pix2v, pix3v;
- vec_u8_t perm0vA, perm1vA, perm2vA, perm3vA, perm0vB, perm1vB, perm2vB, perm3vB, permEncv;
-
-- vec_u32_t sum0v, sum1v, sum2v, sum3v;
-- vec_s32_t zero;
-+ vec_s32_t sum0v, sum1v, sum2v, sum3v;
-
-- zero = vec_splat_s32(0);
-- sum0v = vec_splat_u32(0);
-- sum1v = vec_splat_u32(0);
-- sum2v = vec_splat_u32(0);
-- sum3v = vec_splat_u32(0);
-+ sum0v = vec_splat_s32(0);
-+ sum1v = vec_splat_s32(0);
-+ sum2v = vec_splat_s32(0);
-+ sum3v = vec_splat_s32(0);
-
- permEncv = vec_lvsl(0, fenc);
- perm0vA = vec_lvsl(0, pix0);
-@@ -1396,10 +1389,10 @@
- }
-
-
-- sum0v = (vec_u32_t) vec_sum2s( sum0v, zero );
-- sum1v = (vec_u32_t) vec_sum2s( sum1v, zero );
-- sum2v = (vec_u32_t) vec_sum2s( sum2v, zero );
-- sum3v = (vec_u32_t) vec_sum2s( sum3v, zero );
-+ sum0v = vec_sum2s( sum0v, zero_s32v );
-+ sum1v = vec_sum2s( sum1v, zero_s32v );
-+ sum2v = vec_sum2s( sum2v, zero_s32v );
-+ sum3v = vec_sum2s( sum3v, zero_s32v );
-
- sum0v = vec_splat( sum0v, 1 );
- sum1v = vec_splat( sum1v, 1 );
-@@ -1426,17 +1419,16 @@
- DECLARE_ALIGNED( int, sum2, 16 );
- int y;
-
-+ LOAD_ZERO;
- vec_u8_t temp_lv, temp_hv;
- vec_u8_t fencv, pix0v, pix1v, pix2v;
- vec_u8_t perm0vA, perm1vA, perm2vA, perm0vB, perm1vB, perm2vB, permEncv;
-
-- vec_u32_t sum0v, sum1v, sum2v;
-- vec_s32_t zero;
-+ vec_s32_t sum0v, sum1v, sum2v;
-
-- zero = vec_splat_s32(0);
-- sum0v = vec_splat_u32(0);
-- sum1v = vec_splat_u32(0);
-- sum2v = vec_splat_u32(0);
-+ sum0v = vec_splat_s32(0);
-+ sum1v = vec_splat_s32(0);
-+ sum2v = vec_splat_s32(0);
-
- permEncv = vec_lvsl(0, fenc);
- perm0vA = vec_lvsl(0, pix0);
-@@ -1507,9 +1499,9 @@
- }
-
-
-- sum0v = (vec_u32_t) vec_sum2s( sum0v, zero );
-- sum1v = (vec_u32_t) vec_sum2s( sum1v, zero );
-- sum2v = (vec_u32_t) vec_sum2s( sum2v, zero );
-+ sum0v = vec_sum2s( sum0v, zero_s32v );
-+ sum1v = vec_sum2s( sum1v, zero_s32v );
-+ sum2v = vec_sum2s( sum2v, zero_s32v );
-
- sum0v = vec_splat( sum0v, 1 );
- sum1v = vec_splat( sum1v, 1 );
-@@ -1534,14 +1526,13 @@
- DECLARE_ALIGNED( int, sum, 16 );
-
- int y;
-+ LOAD_ZERO;
- vec_u8_t pix1vA, pix2vA, pix1vB, pix2vB;
- vec_u32_t sumv;
-- vec_s32_t zero;
- vec_u8_t maxA, minA, diffA, maxB, minB, diffB;
- vec_u8_t temp_lv, temp_hv;
- vec_u8_t permA, permB;
-
-- zero = vec_splat_s32(0);
- sumv = vec_splat_u32(0);
-
- permA = vec_lvsl(0, pix2);
-@@ -1607,9 +1598,9 @@
- diffB = vec_sub(maxB, minB);
- sumv = vec_msum(diffB, diffB, sumv);
-
-- sumv = vec_sums(sumv, zero);
-+ sumv = (vec_u32_t) vec_sums((vec_s32_t) sumv, zero_s32v);
- sumv = vec_splat(sumv, 3);
-- vec_ste( sumv, 0, &sum);
-+ vec_ste((vec_s32_t) sumv, 0, &sum);
- return sum;
- }
-
Modified: videolan/vlc/debian/patches/series
===================================================================
--- videolan/vlc/debian/patches/series 2006-05-15 18:43:37 UTC (rev 43)
+++ videolan/vlc/debian/patches/series 2006-05-15 19:58:23 UTC (rev 44)
@@ -1,4 +1,3 @@
010_no-wx-updates.diff
010_osdmenu-paths.diff
020_freetype_font.diff
-030_x264_ppc_asm.diff
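
For context, the dropped patch changed the x264 AltiVec pixel helpers to accumulate
into signed vectors (vec_s32_t) initialised through x264's LOAD_ZERO macro, so the
vec_sums()/vec_sum2s() reductions no longer need unsigned casts. Below is a minimal
sketch of that signed-reduction pattern, assuming a PowerPC compiler with AltiVec
enabled (e.g. GCC with -maltivec) and <altivec.h>; the helper name reduce_sum is
illustrative only and does not appear in the patch.

    #include <altivec.h>

    /* Hypothetical helper, not part of the patch: horizontally reduce a
     * signed accumulator the way the patched pixel.c code does after its
     * SAD loops.  x264 typedefs "vector signed int" as vec_s32_t and its
     * LOAD_ZERO macro provides zero_s32v; plain AltiVec types are used
     * here so the sketch stands alone. */
    static int reduce_sum( vector signed int sumv )
    {
        int sum;
        /* signed zero vector, equivalent to the patch's zero_s32v */
        vector signed int zero_s32v = vec_splat_s32( 0 );

        sumv = vec_sums( sumv, zero_s32v );  /* sum all four elements into element 3 */
        sumv = vec_splat( sumv, 3 );         /* broadcast that total to every element */
        vec_ste( sumv, 0, &sum );            /* store one element back to memory */
        return sum;
    }

Keeping the accumulators signed end to end is what lets vec_sums() type-check
directly, which is why the removed hunks above delete the (vec_u32_t) casts.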