r151 - in unstable/ffmpeg/debian: . patches
Samuel Hocevar
sho at alioth.debian.org
Wed Mar 14 14:22:22 CET 2007
Author: sho
Date: 2007-01-29 11:35:11 +0000 (Mon, 29 Jan 2007)
New Revision: 151
Added:
unstable/ffmpeg/debian/patches/040_only_use_maltivec_when_needed.diff
Modified:
unstable/ffmpeg/debian/changelog
unstable/ffmpeg/debian/patches/series
Log:
* debian/patches/040_only_use_maltivec_when_needed.diff:
+ New patch, only use -maltivec with files that use AltiVec intrinsics,
and make sure no codepath leads to these files on a non-AltiVec
machine (Closes: #405926).
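
[Editorial note, not part of revision 151.] The strategy described above depends on a runtime guard in the plain-C caller: only files that actually contain AltiVec intrinsics are built with -maltivec, and the caller branches into them only when the CPU reports AltiVec. A minimal sketch of that guard is below; v_resample16_altivec() and mm_flags appear in the patch itself, while MM_ALTIVEC and the scalar fallback v_resample16_c() are assumed names used purely for illustration.

    #include <stdint.h>

    extern int mm_flags;              /* CPU feature bits (see dsputil.h in the patch) */
    #define MM_ALTIVEC 0x0001         /* assumed flag name/value, for illustration only */

    #ifdef HAVE_ALTIVEC
    /* Implemented in imgresample_altivec.c, the only object built with -maltivec. */
    void v_resample16_altivec(uint8_t *dst, int dst_width, const uint8_t *src,
                              int wrap, int16_t *filter);
    #endif

    /* Hypothetical stand-in for the existing scalar filter loop. */
    static void v_resample16_c(uint8_t *dst, int dst_width, const uint8_t *src,
                               int wrap, int16_t *filter)
    {
        (void)dst; (void)dst_width; (void)src; (void)wrap; (void)filter;
    }

    static void v_resample16(uint8_t *dst, int dst_width, const uint8_t *src,
                             int wrap, int16_t *filter)
    {
    #ifdef HAVE_ALTIVEC
        /* Never enter the -maltivec-compiled object on a CPU without the
         * vector unit, so no illegal-instruction trap can be raised. */
        if (mm_flags & MM_ALTIVEC) {
            v_resample16_altivec(dst, dst_width, src, wrap, filter);
            return;
        }
    #endif
        v_resample16_c(dst, dst_width, src, wrap, filter);
    }
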
Modified: unstable/ffmpeg/debian/changelog
===================================================================
--- unstable/ffmpeg/debian/changelog 2007-01-28 12:49:33 UTC (rev 150)
+++ unstable/ffmpeg/debian/changelog 2007-01-29 11:35:11 UTC (rev 151)
@@ -20,6 +20,10 @@
+ New patch, detect AltiVec earlier on and only once so that we don't
risk using signal handlers in a multithreaded environment or when
the caller already installed a SIGILL handler.
+ * debian/patches/040_only_use_maltivec_when_needed.diff:
+ + New patch, only use -maltivec with files that use AltiVec intrinsics,
+ and make sure no codepath leads to these files on a non-AltiVec
+ machine (Closes: #405926).
-- Loic Minier <lool at dooz.org> Wed, 24 Jan 2007 12:01:12 +0100
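
[Editorial note.] The changelog entry above also references 040_early_altivec_detection.diff, which detects AltiVec once and early so that SIGILL-based probing never runs in a multithreaded process or behind a caller's own SIGILL handler. That patch is not included in this commit, so the sketch below is only one common signal-handler-free way to probe for AltiVec on PowerPC Linux (scanning /proc/cpuinfo), not the patch's actual code.

    #include <stdio.h>
    #include <string.h>

    /* Probe AltiVec without installing a SIGILL handler: PowerPC Linux
     * reports "altivec supported" on the cpu line of /proc/cpuinfo.
     * The probe runs once and the result is cached. */
    static int has_altivec(void)
    {
        static int cached = -1;
        char line[256];
        FILE *f;

        if (cached >= 0)
            return cached;

        cached = 0;
        f = fopen("/proc/cpuinfo", "r");
        if (f) {
            while (fgets(line, sizeof(line), f)) {
                if (strstr(line, "altivec")) {
                    cached = 1;
                    break;
                }
            }
            fclose(f);
        }
        return cached;
    }
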
Added: unstable/ffmpeg/debian/patches/040_only_use_maltivec_when_needed.diff
===================================================================
--- unstable/ffmpeg/debian/patches/040_only_use_maltivec_when_needed.diff 2007-01-28 12:49:33 UTC (rev 150)
+++ unstable/ffmpeg/debian/patches/040_only_use_maltivec_when_needed.diff 2007-01-29 11:35:11 UTC (rev 151)
@@ -0,0 +1,1060 @@
+Index: ffmpeg-0.cvs20060823/configure
+===================================================================
+--- ffmpeg-0.cvs20060823.orig/configure 2007-01-29 04:53:04.000000000 -0600
++++ ffmpeg-0.cvs20060823/configure 2007-01-29 04:53:07.000000000 -0600
+@@ -1136,11 +1136,13 @@
+ CFLAGS="$CFLAGS -faltivec"
+ else
+ #CFLAGS="$CFLAGS -maltivec -mabi=altivec"
+- CFLAGS="$CFLAGS -maltivec"
++ ALTIVECFLAGS="$ALTIVECFLAGS -maltivec"
+ fi
+ fi
+ fi
+
++save_flags
++temp_cflags $ALTIVECFLAGS
+ check_header altivec.h && _altivec_h=yes || _altivec_h=no
+
+ # check if our compiler supports Motorola AltiVec C API
+@@ -1159,6 +1161,7 @@
+ }
+ EOF
+ fi
++restore_flags
+
+ # mmi only available on mips
+ if test $mmi = "default"; then
+@@ -1599,6 +1602,7 @@
+ test "$needmdynamicnopic" = yes && add_cflags -mdynamic-no-pic
+
+ echo "OPTFLAGS=$CFLAGS" >> config.mak
++echo "ALTIVECFLAGS=$ALTIVECFLAGS" >> config.mak
+ echo "VHOOKCFLAGS=$VHOOKCFLAGS">>config.mak
+ echo "LDFLAGS=$LDFLAGS" >> config.mak
+ echo "LDCONFIG=$LDCONFIG" >> config.mak
+Index: ffmpeg-0.cvs20060823/libavcodec/Makefile
+===================================================================
+--- ffmpeg-0.cvs20060823.orig/libavcodec/Makefile 2007-01-29 04:53:04.000000000 -0600
++++ ffmpeg-0.cvs20060823/libavcodec/Makefile 2007-01-29 04:53:07.000000000 -0600
+@@ -282,7 +282,8 @@
+ OBJS-$(HAVE_XVMC_ACCEL) += xvmcvideo.o
+
+ ifneq ($(CONFIG_SWSCALER),yes)
+-OBJS += imgresample.o
++OBJS += imgresample.o imgresample_altivec.o
++imgresample_altivec.o: CFLAGS+= $(ALTIVECFLAGS)
+ endif
+
+ # i386 mmx specific stuff
+@@ -355,6 +356,16 @@
+ ppc/snow_altivec.o \
+ ppc/vc1dsp_altivec.o \
+ ppc/float_altivec.o
++ppc/dsputil_altivec.o: CFLAGS+= $(ALTIVECFLAGS)
++ppc/mpegvideo_altivec.o: CFLAGS+= $(ALTIVECFLAGS)
++ppc/idct_altivec.o: CFLAGS+= $(ALTIVECFLAGS)
++ppc/fft_altivec.o: CFLAGS+= $(ALTIVECFLAGS)
++ppc/gmc_altivec.o: CFLAGS+= $(ALTIVECFLAGS)
++ppc/fdct_altivec.o: CFLAGS+= $(ALTIVECFLAGS)
++ppc/h264_altivec.o: CFLAGS+= $(ALTIVECFLAGS)
++ppc/snow_altivec.o: CFLAGS+= $(ALTIVECFLAGS)
++ppc/vc1dsp_altivec.o: CFLAGS+= $(ALTIVECFLAGS)
++ppc/float_altivec.o: CFLAGS+= $(ALTIVECFLAGS)
+
+ CFLAGS += $(CFLAGS-yes)
+ OBJS += $(OBJS-yes)
+Index: ffmpeg-0.cvs20060823/libavcodec/dsputil.h
+===================================================================
+--- ffmpeg-0.cvs20060823.orig/libavcodec/dsputil.h 2007-01-29 04:53:04.000000000 -0600
++++ ffmpeg-0.cvs20060823/libavcodec/dsputil.h 2007-01-29 04:53:07.000000000 -0600
+@@ -541,12 +541,6 @@
+
+ extern int mm_flags;
+
+-#if defined(HAVE_ALTIVEC) && !defined(CONFIG_DARWIN)
+-#define pixel altivec_pixel
+-#include <altivec.h>
+-#undef pixel
+-#endif
+-
+ #define DECLARE_ALIGNED_8(t,v) t v __attribute__ ((aligned (16)))
+ #define STRIDE_ALIGN 16
+
+Index: ffmpeg-0.cvs20060823/libavcodec/imgresample.c
+===================================================================
+--- ffmpeg-0.cvs20060823.orig/libavcodec/imgresample.c 2007-01-29 04:53:04.000000000 -0600
++++ ffmpeg-0.cvs20060823/libavcodec/imgresample.c 2007-01-29 04:53:07.000000000 -0600
+@@ -277,133 +277,6 @@
+ }
+ #endif
+
+-#ifdef HAVE_ALTIVEC
+-typedef union {
+- vector unsigned char v;
+- unsigned char c[16];
+-} vec_uc_t;
+-
+-typedef union {
+- vector signed short v;
+- signed short s[8];
+-} vec_ss_t;
+-
+-void v_resample16_altivec(uint8_t *dst, int dst_width, const uint8_t *src,
+- int wrap, int16_t *filter)
+-{
+- int sum, i;
+- const uint8_t *s;
+- vector unsigned char *tv, tmp, dstv, zero;
+- vec_ss_t srchv[4], srclv[4], fv[4];
+- vector signed short zeros, sumhv, sumlv;
+- s = src;
+-
+- for(i=0;i<4;i++)
+- {
+- /*
+- The vec_madds later on does an implicit >>15 on the result.
+- Since FILTER_BITS is 8, and we have 15 bits of magnitude in
+- a signed short, we have just enough bits to pre-shift our
+- filter constants <<7 to compensate for vec_madds.
+- */
+- fv[i].s[0] = filter[i] << (15-FILTER_BITS);
+- fv[i].v = vec_splat(fv[i].v, 0);
+- }
+-
+- zero = vec_splat_u8(0);
+- zeros = vec_splat_s16(0);
+-
+-
+- /*
+- When we're resampling, we'd ideally like both our input buffers,
+- and output buffers to be 16-byte aligned, so we can do both aligned
+- reads and writes. Sadly we can't always have this at the moment, so
+- we opt for aligned writes, as unaligned writes have a huge overhead.
+- To do this, do enough scalar resamples to get dst 16-byte aligned.
+- */
+- i = (-(int)dst) & 0xf;
+- while(i>0) {
+- sum = s[0 * wrap] * filter[0] +
+- s[1 * wrap] * filter[1] +
+- s[2 * wrap] * filter[2] +
+- s[3 * wrap] * filter[3];
+- sum = sum >> FILTER_BITS;
+- if (sum<0) sum = 0; else if (sum>255) sum=255;
+- dst[0] = sum;
+- dst++;
+- s++;
+- dst_width--;
+- i--;
+- }
+-
+- /* Do our altivec resampling on 16 pixels at once. */
+- while(dst_width>=16) {
+- /*
+- Read 16 (potentially unaligned) bytes from each of
+- 4 lines into 4 vectors, and split them into shorts.
+- Interleave the multipy/accumulate for the resample
+- filter with the loads to hide the 3 cycle latency
+- the vec_madds have.
+- */
+- tv = (vector unsigned char *) &s[0 * wrap];
+- tmp = vec_perm(tv[0], tv[1], vec_lvsl(0, &s[i * wrap]));
+- srchv[0].v = (vector signed short) vec_mergeh(zero, tmp);
+- srclv[0].v = (vector signed short) vec_mergel(zero, tmp);
+- sumhv = vec_madds(srchv[0].v, fv[0].v, zeros);
+- sumlv = vec_madds(srclv[0].v, fv[0].v, zeros);
+-
+- tv = (vector unsigned char *) &s[1 * wrap];
+- tmp = vec_perm(tv[0], tv[1], vec_lvsl(0, &s[1 * wrap]));
+- srchv[1].v = (vector signed short) vec_mergeh(zero, tmp);
+- srclv[1].v = (vector signed short) vec_mergel(zero, tmp);
+- sumhv = vec_madds(srchv[1].v, fv[1].v, sumhv);
+- sumlv = vec_madds(srclv[1].v, fv[1].v, sumlv);
+-
+- tv = (vector unsigned char *) &s[2 * wrap];
+- tmp = vec_perm(tv[0], tv[1], vec_lvsl(0, &s[2 * wrap]));
+- srchv[2].v = (vector signed short) vec_mergeh(zero, tmp);
+- srclv[2].v = (vector signed short) vec_mergel(zero, tmp);
+- sumhv = vec_madds(srchv[2].v, fv[2].v, sumhv);
+- sumlv = vec_madds(srclv[2].v, fv[2].v, sumlv);
+-
+- tv = (vector unsigned char *) &s[3 * wrap];
+- tmp = vec_perm(tv[0], tv[1], vec_lvsl(0, &s[3 * wrap]));
+- srchv[3].v = (vector signed short) vec_mergeh(zero, tmp);
+- srclv[3].v = (vector signed short) vec_mergel(zero, tmp);
+- sumhv = vec_madds(srchv[3].v, fv[3].v, sumhv);
+- sumlv = vec_madds(srclv[3].v, fv[3].v, sumlv);
+-
+- /*
+- Pack the results into our destination vector,
+- and do an aligned write of that back to memory.
+- */
+- dstv = vec_packsu(sumhv, sumlv) ;
+- vec_st(dstv, 0, (vector unsigned char *) dst);
+-
+- dst+=16;
+- s+=16;
+- dst_width-=16;
+- }
+-
+- /*
+- If there are any leftover pixels, resample them
+- with the slow scalar method.
+- */
+- while(dst_width>0) {
+- sum = s[0 * wrap] * filter[0] +
+- s[1 * wrap] * filter[1] +
+- s[2 * wrap] * filter[2] +
+- s[3 * wrap] * filter[3];
+- sum = sum >> FILTER_BITS;
+- if (sum<0) sum = 0; else if (sum>255) sum=255;
+- dst[0] = sum;
+- dst++;
+- s++;
+- dst_width--;
+- }
+-}
+-#endif
+-
+ /* slow version to handle limit cases. Does not need optimisation */
+ static void h_resample_slow(uint8_t *dst, int dst_width,
+ const uint8_t *src, int src_width,
+Index: ffmpeg-0.cvs20060823/libavcodec/imgresample_altivec.c
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ ffmpeg-0.cvs20060823/libavcodec/imgresample_altivec.c 2007-01-29 04:53:07.000000000 -0600
+@@ -0,0 +1,164 @@
++/*
++ * High quality image resampling with polyphase filters
++ * Copyright (c) 2001 Fabrice Bellard.
++ *
++ * This library is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU Lesser General Public
++ * License as published by the Free Software Foundation; either
++ * version 2 of the License, or (at your option) any later version.
++ *
++ * This library is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * Lesser General Public License for more details.
++ *
++ * You should have received a copy of the GNU Lesser General Public
++ * License along with this library; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
++ */
++
++/**
++ * @file imgresample_altivec.c
++ * High quality image resampling with polyphase filters, AltiVec version.
++ */
++
++#include "avcodec.h"
++#include "swscale.h"
++#include "dsputil.h"
++
++#if defined(HAVE_ALTIVEC) && !defined(CONFIG_DARWIN)
++#define pixel altivec_pixel
++#include <altivec.h>
++#undef pixel
++#endif
++
++/* 6 bits precision is needed for MMX */
++#define FILTER_BITS 8
++
++#ifdef HAVE_ALTIVEC
++typedef union {
++ vector unsigned char v;
++ unsigned char c[16];
++} vec_uc_t;
++
++typedef union {
++ vector signed short v;
++ signed short s[8];
++} vec_ss_t;
++
++void v_resample16_altivec(uint8_t *dst, int dst_width, const uint8_t *src,
++ int wrap, int16_t *filter)
++{
++ int sum, i;
++ const uint8_t *s;
++ vector unsigned char *tv, tmp, dstv, zero;
++ vec_ss_t srchv[4], srclv[4], fv[4];
++ vector signed short zeros, sumhv, sumlv;
++ s = src;
++
++ for(i=0;i<4;i++)
++ {
++ /*
++ The vec_madds later on does an implicit >>15 on the result.
++ Since FILTER_BITS is 8, and we have 15 bits of magnitude in
++ a signed short, we have just enough bits to pre-shift our
++ filter constants <<7 to compensate for vec_madds.
++ */
++ fv[i].s[0] = filter[i] << (15-FILTER_BITS);
++ fv[i].v = vec_splat(fv[i].v, 0);
++ }
++
++ zero = vec_splat_u8(0);
++ zeros = vec_splat_s16(0);
++
++
++ /*
++ When we're resampling, we'd ideally like both our input buffers,
++ and output buffers to be 16-byte aligned, so we can do both aligned
++ reads and writes. Sadly we can't always have this at the moment, so
++ we opt for aligned writes, as unaligned writes have a huge overhead.
++ To do this, do enough scalar resamples to get dst 16-byte aligned.
++ */
++ i = (-(int)dst) & 0xf;
++ while(i>0) {
++ sum = s[0 * wrap] * filter[0] +
++ s[1 * wrap] * filter[1] +
++ s[2 * wrap] * filter[2] +
++ s[3 * wrap] * filter[3];
++ sum = sum >> FILTER_BITS;
++ if (sum<0) sum = 0; else if (sum>255) sum=255;
++ dst[0] = sum;
++ dst++;
++ s++;
++ dst_width--;
++ i--;
++ }
++
++ /* Do our altivec resampling on 16 pixels at once. */
++ while(dst_width>=16) {
++ /*
++ Read 16 (potentially unaligned) bytes from each of
++ 4 lines into 4 vectors, and split them into shorts.
++ Interleave the multipy/accumulate for the resample
++ filter with the loads to hide the 3 cycle latency
++ the vec_madds have.
++ */
++ tv = (vector unsigned char *) &s[0 * wrap];
++ tmp = vec_perm(tv[0], tv[1], vec_lvsl(0, &s[i * wrap]));
++ srchv[0].v = (vector signed short) vec_mergeh(zero, tmp);
++ srclv[0].v = (vector signed short) vec_mergel(zero, tmp);
++ sumhv = vec_madds(srchv[0].v, fv[0].v, zeros);
++ sumlv = vec_madds(srclv[0].v, fv[0].v, zeros);
++
++ tv = (vector unsigned char *) &s[1 * wrap];
++ tmp = vec_perm(tv[0], tv[1], vec_lvsl(0, &s[1 * wrap]));
++ srchv[1].v = (vector signed short) vec_mergeh(zero, tmp);
++ srclv[1].v = (vector signed short) vec_mergel(zero, tmp);
++ sumhv = vec_madds(srchv[1].v, fv[1].v, sumhv);
++ sumlv = vec_madds(srclv[1].v, fv[1].v, sumlv);
++
++ tv = (vector unsigned char *) &s[2 * wrap];
++ tmp = vec_perm(tv[0], tv[1], vec_lvsl(0, &s[2 * wrap]));
++ srchv[2].v = (vector signed short) vec_mergeh(zero, tmp);
++ srclv[2].v = (vector signed short) vec_mergel(zero, tmp);
++ sumhv = vec_madds(srchv[2].v, fv[2].v, sumhv);
++ sumlv = vec_madds(srclv[2].v, fv[2].v, sumlv);
++
++ tv = (vector unsigned char *) &s[3 * wrap];
++ tmp = vec_perm(tv[0], tv[1], vec_lvsl(0, &s[3 * wrap]));
++ srchv[3].v = (vector signed short) vec_mergeh(zero, tmp);
++ srclv[3].v = (vector signed short) vec_mergel(zero, tmp);
++ sumhv = vec_madds(srchv[3].v, fv[3].v, sumhv);
++ sumlv = vec_madds(srclv[3].v, fv[3].v, sumlv);
++
++ /*
++ Pack the results into our destination vector,
++ and do an aligned write of that back to memory.
++ */
++ dstv = vec_packsu(sumhv, sumlv) ;
++ vec_st(dstv, 0, (vector unsigned char *) dst);
++
++ dst+=16;
++ s+=16;
++ dst_width-=16;
++ }
++
++ /*
++ If there are any leftover pixels, resample them
++ with the slow scalar method.
++ */
++ while(dst_width>0) {
++ sum = s[0 * wrap] * filter[0] +
++ s[1 * wrap] * filter[1] +
++ s[2 * wrap] * filter[2] +
++ s[3 * wrap] * filter[3];
++ sum = sum >> FILTER_BITS;
++ if (sum<0) sum = 0; else if (sum>255) sum=255;
++ dst[0] = sum;
++ dst++;
++ s++;
++ dst_width--;
++ }
++}
++#endif
++
+Index: ffmpeg-0.cvs20060823/libpostproc/Makefile
+===================================================================
+--- ffmpeg-0.cvs20060823.orig/libpostproc/Makefile 2007-01-29 04:55:08.000000000 -0600
++++ ffmpeg-0.cvs20060823/libpostproc/Makefile 2007-01-29 04:56:36.000000000 -0600
+@@ -10,8 +10,10 @@
+ LIBMAJOR=$(SPPMAJOR)
+ endif
+
+-STATIC_OBJS=postprocess.o
+-SHARED_OBJS=postprocess_pic.o
++STATIC_OBJS=postprocess.o postprocess_altivec.o
++SHARED_OBJS=postprocess_pic.o postprocess_altivec_pic.o
++postprocess_altivec.o: CFLAGS+= $(ALTIVECFLAGS)
++postprocess_altivec_pic.o: CFLAGS+= $(ALTIVECFLAGS)
+
+ HEADERS = postprocess.h
+
+@@ -23,5 +25,7 @@
+ ifeq ($(BUILD_SHARED),yes)
+ postprocess_pic.o: postprocess.c
+ $(CC) -c $(CFLAGS) -fomit-frame-pointer -fPIC -DPIC -o $@ $<
++postprocess_altivec_pic.o: postprocess_altivec.c
++ $(CC) -c $(CFLAGS) -fomit-frame-pointer -fPIC -DPIC -o $@ $<
+ endif
+
+Index: ffmpeg-0.cvs20060823/libpostproc/postprocess.c
+===================================================================
+--- ffmpeg-0.cvs20060823.orig/libpostproc/postprocess.c 2007-01-29 04:54:58.000000000 -0600
++++ ffmpeg-0.cvs20060823/libpostproc/postprocess.c 2007-01-29 04:58:28.000000000 -0600
+@@ -92,10 +92,6 @@
+
+ #include "mangle.h" //FIXME should be supressed
+
+-#ifdef HAVE_ALTIVEC_H
+-#include <altivec.h>
+-#endif
+-
+ #ifndef HAVE_MEMALIGN
+ #define memalign(a,b) malloc(b)
+ #endif
+@@ -630,8 +626,8 @@
+ #undef RENAME
+ #define HAVE_ALTIVEC
+ #define RENAME(a) a ## _altivec
+-#include "postprocess_altivec_template.c"
+-#include "postprocess_template.c"
++//#include "postprocess_altivec_template.c"
++//#include "postprocess_template.c"
+ #endif
+ #endif //ARCH_POWERPC
+
+Index: ffmpeg-0.cvs20060823/libpostproc/postprocess_altivec.c
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ ffmpeg-0.cvs20060823/libpostproc/postprocess_altivec.c 2007-01-29 04:59:29.000000000 -0600
+@@ -0,0 +1,614 @@
++/*
++ Copyright (C) 2001-2003 Michael Niedermayer (michaelni at gmx.at)
++
++ AltiVec optimizations (C) 2004 Romain Dolbeau <romain at dolbeau.org>
++
++ This program is free software; you can redistribute it and/or modify
++ it under the terms of the GNU General Public License as published by
++ the Free Software Foundation; either version 2 of the License, or
++ (at your option) any later version.
++
++ This program is distributed in the hope that it will be useful,
++ but WITHOUT ANY WARRANTY; without even the implied warranty of
++ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ GNU General Public License for more details.
++
++ You should have received a copy of the GNU General Public License
++ along with this program; if not, write to the Free Software
++ Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
++*/
++
++/**
++ * @file postprocess.c
++ * postprocessing.
++ */
++
++/*
++ C MMX MMX2 3DNow AltiVec
++isVertDC Ec Ec Ec
++isVertMinMaxOk Ec Ec Ec
++doVertLowPass E e e Ec
++doVertDefFilter Ec Ec e e Ec
++isHorizDC Ec Ec Ec
++isHorizMinMaxOk a E Ec
++doHorizLowPass E e e Ec
++doHorizDefFilter Ec Ec e e Ec
++do_a_deblock Ec E Ec E
++deRing E e e* Ecp
++Vertical RKAlgo1 E a a
++Horizontal RKAlgo1 a a
++Vertical X1# a E E
++Horizontal X1# a E E
++LinIpolDeinterlace e E E*
++CubicIpolDeinterlace a e e*
++LinBlendDeinterlace e E E*
++MedianDeinterlace# E Ec Ec
++TempDeNoiser# E e e Ec
++
++* i dont have a 3dnow CPU -> its untested, but noone said it doesnt work so it seems to work
++# more or less selfinvented filters so the exactness isnt too meaningfull
++E = Exact implementation
++e = allmost exact implementation (slightly different rounding,...)
++a = alternative / approximate impl
++c = checked against the other implementations (-vo md5)
++p = partially optimized, still some work to do
++*/
++
++/*
++TODO:
++reduce the time wasted on the mem transfer
++unroll stuff if instructions depend too much on the prior one
++move YScale thing to the end instead of fixing QP
++write a faster and higher quality deblocking filter :)
++make the mainloop more flexible (variable number of blocks at once
++ (the if/else stuff per block is slowing things down)
++compare the quality & speed of all filters
++split this huge file
++optimize c versions
++try to unroll inner for(x=0 ... loop to avoid these damn if(x ... checks
++...
++*/
++
++//Changelog: use the Subversion log
++
++#include "config.h"
++#include <inttypes.h>
++#include <stdio.h>
++#include <stdlib.h>
++#include <string.h>
++#ifdef HAVE_MALLOC_H
++#include <malloc.h>
++#endif
++//#undef HAVE_MMX2
++//#define HAVE_3DNOW
++//#undef HAVE_MMX
++//#undef ARCH_X86
++//#define DEBUG_BRIGHTNESS
++#ifdef USE_FASTMEMCPY
++#include "libvo/fastmemcpy.h"
++#endif
++#include "postprocess.h"
++#include "postprocess_internal.h"
++
++#include "mangle.h" //FIXME should be supressed
++
++#ifdef HAVE_ALTIVEC_H
++#include <altivec.h>
++#endif
++
++#ifndef HAVE_MEMALIGN
++#define memalign(a,b) malloc(b)
++#endif
++
++#define MIN(a,b) ((a) > (b) ? (b) : (a))
++#define MAX(a,b) ((a) < (b) ? (b) : (a))
++#define ABS(a) ((a) > 0 ? (a) : (-(a)))
++#define SIGN(a) ((a) > 0 ? 1 : -1)
++
++#define GET_MODE_BUFFER_SIZE 500
++#define OPTIONS_ARRAY_SIZE 10
++#define BLOCK_SIZE 8
++#define TEMP_STRIDE 8
++//#define NUM_BLOCKS_AT_ONCE 16 //not used yet
++
++#if defined(__GNUC__) && (__GNUC__ > 3 || __GNUC__ == 3 && __GNUC_MINOR__ > 0)
++# define attribute_used __attribute__((used))
++# define always_inline __attribute__((always_inline)) inline
++#else
++# define attribute_used
++# define always_inline inline
++#endif
++
++#if defined(ARCH_X86) || defined(ARCH_X86_64)
++static uint64_t __attribute__((aligned(8))) attribute_used w05= 0x0005000500050005LL;
++static uint64_t __attribute__((aligned(8))) attribute_used w04= 0x0004000400040004LL;
++static uint64_t __attribute__((aligned(8))) attribute_used w20= 0x0020002000200020LL;
++static uint64_t __attribute__((aligned(8))) attribute_used b00= 0x0000000000000000LL;
++static uint64_t __attribute__((aligned(8))) attribute_used b01= 0x0101010101010101LL;
++static uint64_t __attribute__((aligned(8))) attribute_used b02= 0x0202020202020202LL;
++static uint64_t __attribute__((aligned(8))) attribute_used b08= 0x0808080808080808LL;
++static uint64_t __attribute__((aligned(8))) attribute_used b80= 0x8080808080808080LL;
++#endif
++
++static uint8_t clip_table[3*256];
++static uint8_t * const clip_tab= clip_table + 256;
++
++static const int verbose= 0;
++
++static const int attribute_used deringThreshold= 20;
++
++
++static struct PPFilter filters[]=
++{
++ {"hb", "hdeblock", 1, 1, 3, H_DEBLOCK},
++ {"vb", "vdeblock", 1, 2, 4, V_DEBLOCK},
++/* {"hr", "rkhdeblock", 1, 1, 3, H_RK1_FILTER},
++ {"vr", "rkvdeblock", 1, 2, 4, V_RK1_FILTER},*/
++ {"h1", "x1hdeblock", 1, 1, 3, H_X1_FILTER},
++ {"v1", "x1vdeblock", 1, 2, 4, V_X1_FILTER},
++ {"ha", "ahdeblock", 1, 1, 3, H_A_DEBLOCK},
++ {"va", "avdeblock", 1, 2, 4, V_A_DEBLOCK},
++ {"dr", "dering", 1, 5, 6, DERING},
++ {"al", "autolevels", 0, 1, 2, LEVEL_FIX},
++ {"lb", "linblenddeint", 1, 1, 4, LINEAR_BLEND_DEINT_FILTER},
++ {"li", "linipoldeint", 1, 1, 4, LINEAR_IPOL_DEINT_FILTER},
++ {"ci", "cubicipoldeint", 1, 1, 4, CUBIC_IPOL_DEINT_FILTER},
++ {"md", "mediandeint", 1, 1, 4, MEDIAN_DEINT_FILTER},
++ {"fd", "ffmpegdeint", 1, 1, 4, FFMPEG_DEINT_FILTER},
++ {"l5", "lowpass5", 1, 1, 4, LOWPASS5_DEINT_FILTER},
++ {"tn", "tmpnoise", 1, 7, 8, TEMP_NOISE_FILTER},
++ {"fq", "forcequant", 1, 0, 0, FORCE_QUANT},
++ {NULL, NULL,0,0,0,0} //End Marker
++};
++
++static const char *replaceTable[]=
++{
++ "default", "hdeblock:a,vdeblock:a,dering:a",
++ "de", "hdeblock:a,vdeblock:a,dering:a",
++ "fast", "x1hdeblock:a,x1vdeblock:a,dering:a",
++ "fa", "x1hdeblock:a,x1vdeblock:a,dering:a",
++ "ac", "ha:a:128:7,va:a,dering:a",
++ NULL //End Marker
++};
++
++
++#if defined(ARCH_X86) || defined(ARCH_X86_64)
++static inline void prefetchnta(void *p)
++{
++ asm volatile( "prefetchnta (%0)\n\t"
++ : : "r" (p)
++ );
++}
++
++static inline void prefetcht0(void *p)
++{
++ asm volatile( "prefetcht0 (%0)\n\t"
++ : : "r" (p)
++ );
++}
++
++static inline void prefetcht1(void *p)
++{
++ asm volatile( "prefetcht1 (%0)\n\t"
++ : : "r" (p)
++ );
++}
++
++static inline void prefetcht2(void *p)
++{
++ asm volatile( "prefetcht2 (%0)\n\t"
++ : : "r" (p)
++ );
++}
++#endif
++
++// The horizontal Functions exist only in C cuz the MMX code is faster with vertical filters and transposing
++
++/**
++ * Check if the given 8x8 Block is mostly "flat"
++ */
++static inline int isHorizDC_C(uint8_t src[], int stride, PPContext *c)
++{
++ int numEq= 0;
++ int y;
++ const int dcOffset= ((c->nonBQP*c->ppMode.baseDcDiff)>>8) + 1;
++ const int dcThreshold= dcOffset*2 + 1;
++
++ for(y=0; y<BLOCK_SIZE; y++)
++ {
++ if(((unsigned)(src[0] - src[1] + dcOffset)) < dcThreshold) numEq++;
++ if(((unsigned)(src[1] - src[2] + dcOffset)) < dcThreshold) numEq++;
++ if(((unsigned)(src[2] - src[3] + dcOffset)) < dcThreshold) numEq++;
++ if(((unsigned)(src[3] - src[4] + dcOffset)) < dcThreshold) numEq++;
++ if(((unsigned)(src[4] - src[5] + dcOffset)) < dcThreshold) numEq++;
++ if(((unsigned)(src[5] - src[6] + dcOffset)) < dcThreshold) numEq++;
++ if(((unsigned)(src[6] - src[7] + dcOffset)) < dcThreshold) numEq++;
++ src+= stride;
++ }
++ return numEq > c->ppMode.flatnessThreshold;
++}
++
++/**
++ * Check if the middle 8x8 Block in the given 8x16 block is flat
++ */
++static inline int isVertDC_C(uint8_t src[], int stride, PPContext *c){
++ int numEq= 0;
++ int y;
++ const int dcOffset= ((c->nonBQP*c->ppMode.baseDcDiff)>>8) + 1;
++ const int dcThreshold= dcOffset*2 + 1;
++
++ src+= stride*4; // src points to begin of the 8x8 Block
++ for(y=0; y<BLOCK_SIZE-1; y++)
++ {
++ if(((unsigned)(src[0] - src[0+stride] + dcOffset)) < dcThreshold) numEq++;
++ if(((unsigned)(src[1] - src[1+stride] + dcOffset)) < dcThreshold) numEq++;
++ if(((unsigned)(src[2] - src[2+stride] + dcOffset)) < dcThreshold) numEq++;
++ if(((unsigned)(src[3] - src[3+stride] + dcOffset)) < dcThreshold) numEq++;
++ if(((unsigned)(src[4] - src[4+stride] + dcOffset)) < dcThreshold) numEq++;
++ if(((unsigned)(src[5] - src[5+stride] + dcOffset)) < dcThreshold) numEq++;
++ if(((unsigned)(src[6] - src[6+stride] + dcOffset)) < dcThreshold) numEq++;
++ if(((unsigned)(src[7] - src[7+stride] + dcOffset)) < dcThreshold) numEq++;
++ src+= stride;
++ }
++ return numEq > c->ppMode.flatnessThreshold;
++}
++
++static inline int isHorizMinMaxOk_C(uint8_t src[], int stride, int QP)
++{
++ int i;
++#if 1
++ for(i=0; i<2; i++){
++ if((unsigned)(src[0] - src[5] + 2*QP) > 4*QP) return 0;
++ src += stride;
++ if((unsigned)(src[2] - src[7] + 2*QP) > 4*QP) return 0;
++ src += stride;
++ if((unsigned)(src[4] - src[1] + 2*QP) > 4*QP) return 0;
++ src += stride;
++ if((unsigned)(src[6] - src[3] + 2*QP) > 4*QP) return 0;
++ src += stride;
++ }
++#else
++ for(i=0; i<8; i++){
++ if((unsigned)(src[0] - src[7] + 2*QP) > 4*QP) return 0;
++ src += stride;
++ }
++#endif
++ return 1;
++}
++
++static inline int isVertMinMaxOk_C(uint8_t src[], int stride, int QP)
++{
++#if 1
++#if 1
++ int x;
++ src+= stride*4;
++ for(x=0; x<BLOCK_SIZE; x+=4)
++ {
++ if((unsigned)(src[ x + 0*stride] - src[ x + 5*stride] + 2*QP) > 4*QP) return 0;
++ if((unsigned)(src[1+x + 2*stride] - src[1+x + 7*stride] + 2*QP) > 4*QP) return 0;
++ if((unsigned)(src[2+x + 4*stride] - src[2+x + 1*stride] + 2*QP) > 4*QP) return 0;
++ if((unsigned)(src[3+x + 6*stride] - src[3+x + 3*stride] + 2*QP) > 4*QP) return 0;
++ }
++#else
++ int x;
++ src+= stride*3;
++ for(x=0; x<BLOCK_SIZE; x++)
++ {
++ if((unsigned)(src[x + stride] - src[x + (stride<<3)] + 2*QP) > 4*QP) return 0;
++ }
++#endif
++ return 1;
++#else
++ int x;
++ src+= stride*4;
++ for(x=0; x<BLOCK_SIZE; x++)
++ {
++ int min=255;
++ int max=0;
++ int y;
++ for(y=0; y<8; y++){
++ int v= src[x + y*stride];
++ if(v>max) max=v;
++ if(v<min) min=v;
++ }
++ if(max-min > 2*QP) return 0;
++ }
++ return 1;
++#endif
++}
++
++static inline int horizClassify_C(uint8_t src[], int stride, PPContext *c){
++ if( isHorizDC_C(src, stride, c) ){
++ if( isHorizMinMaxOk_C(src, stride, c->QP) )
++ return 1;
++ else
++ return 0;
++ }else{
++ return 2;
++ }
++}
++
++static inline int vertClassify_C(uint8_t src[], int stride, PPContext *c){
++ if( isVertDC_C(src, stride, c) ){
++ if( isVertMinMaxOk_C(src, stride, c->QP) )
++ return 1;
++ else
++ return 0;
++ }else{
++ return 2;
++ }
++}
++
++static inline void doHorizDefFilter_C(uint8_t dst[], int stride, PPContext *c)
++{
++ int y;
++ for(y=0; y<BLOCK_SIZE; y++)
++ {
++ const int middleEnergy= 5*(dst[4] - dst[3]) + 2*(dst[2] - dst[5]);
++
++ if(ABS(middleEnergy) < 8*c->QP)
++ {
++ const int q=(dst[3] - dst[4])/2;
++ const int leftEnergy= 5*(dst[2] - dst[1]) + 2*(dst[0] - dst[3]);
++ const int rightEnergy= 5*(dst[6] - dst[5]) + 2*(dst[4] - dst[7]);
++
++ int d= ABS(middleEnergy) - MIN( ABS(leftEnergy), ABS(rightEnergy) );
++ d= MAX(d, 0);
++
++ d= (5*d + 32) >> 6;
++ d*= SIGN(-middleEnergy);
++
++ if(q>0)
++ {
++ d= d<0 ? 0 : d;
++ d= d>q ? q : d;
++ }
++ else
++ {
++ d= d>0 ? 0 : d;
++ d= d<q ? q : d;
++ }
++
++ dst[3]-= d;
++ dst[4]+= d;
++ }
++ dst+= stride;
++ }
++}
++
++/**
++ * Do a horizontal low pass filter on the 10x8 block (dst points to middle 8x8 Block)
++ * using the 9-Tap Filter (1,1,2,2,4,2,2,1,1)/16 (C version)
++ */
++static inline void doHorizLowPass_C(uint8_t dst[], int stride, PPContext *c)
++{
++ int y;
++ for(y=0; y<BLOCK_SIZE; y++)
++ {
++ const int first= ABS(dst[-1] - dst[0]) < c->QP ? dst[-1] : dst[0];
++ const int last= ABS(dst[8] - dst[7]) < c->QP ? dst[8] : dst[7];
++
++ int sums[10];
++ sums[0] = 4*first + dst[0] + dst[1] + dst[2] + 4;
++ sums[1] = sums[0] - first + dst[3];
++ sums[2] = sums[1] - first + dst[4];
++ sums[3] = sums[2] - first + dst[5];
++ sums[4] = sums[3] - first + dst[6];
++ sums[5] = sums[4] - dst[0] + dst[7];
++ sums[6] = sums[5] - dst[1] + last;
++ sums[7] = sums[6] - dst[2] + last;
++ sums[8] = sums[7] - dst[3] + last;
++ sums[9] = sums[8] - dst[4] + last;
++
++ dst[0]= (sums[0] + sums[2] + 2*dst[0])>>4;
++ dst[1]= (sums[1] + sums[3] + 2*dst[1])>>4;
++ dst[2]= (sums[2] + sums[4] + 2*dst[2])>>4;
++ dst[3]= (sums[3] + sums[5] + 2*dst[3])>>4;
++ dst[4]= (sums[4] + sums[6] + 2*dst[4])>>4;
++ dst[5]= (sums[5] + sums[7] + 2*dst[5])>>4;
++ dst[6]= (sums[6] + sums[8] + 2*dst[6])>>4;
++ dst[7]= (sums[7] + sums[9] + 2*dst[7])>>4;
++
++ dst+= stride;
++ }
++}
++
++/**
++ * Experimental Filter 1 (Horizontal)
++ * will not damage linear gradients
++ * Flat blocks should look like they where passed through the (1,1,2,2,4,2,2,1,1) 9-Tap filter
++ * can only smooth blocks at the expected locations (it cant smooth them if they did move)
++ * MMX2 version does correct clipping C version doesnt
++ * not identical with the vertical one
++ */
++static inline void horizX1Filter(uint8_t *src, int stride, int QP)
++{
++ int y;
++ static uint64_t *lut= NULL;
++ if(lut==NULL)
++ {
++ int i;
++ lut= (uint64_t*)memalign(8, 256*8);
++ for(i=0; i<256; i++)
++ {
++ int v= i < 128 ? 2*i : 2*(i-256);
++/*
++//Simulate 112242211 9-Tap filter
++ uint64_t a= (v/16) & 0xFF;
++ uint64_t b= (v/8) & 0xFF;
++ uint64_t c= (v/4) & 0xFF;
++ uint64_t d= (3*v/8) & 0xFF;
++*/
++//Simulate piecewise linear interpolation
++ uint64_t a= (v/16) & 0xFF;
++ uint64_t b= (v*3/16) & 0xFF;
++ uint64_t c= (v*5/16) & 0xFF;
++ uint64_t d= (7*v/16) & 0xFF;
++ uint64_t A= (0x100 - a)&0xFF;
++ uint64_t B= (0x100 - b)&0xFF;
++ uint64_t C= (0x100 - c)&0xFF;
++ uint64_t D= (0x100 - c)&0xFF;
++
++ lut[i] = (a<<56) | (b<<48) | (c<<40) | (d<<32) |
++ (D<<24) | (C<<16) | (B<<8) | (A);
++ //lut[i] = (v<<32) | (v<<24);
++ }
++ }
++
++ for(y=0; y<BLOCK_SIZE; y++)
++ {
++ int a= src[1] - src[2];
++ int b= src[3] - src[4];
++ int c= src[5] - src[6];
++
++ int d= MAX(ABS(b) - (ABS(a) + ABS(c))/2, 0);
++
++ if(d < QP)
++ {
++ int v = d * SIGN(-b);
++
++ src[1] +=v/8;
++ src[2] +=v/4;
++ src[3] +=3*v/8;
++ src[4] -=3*v/8;
++ src[5] -=v/4;
++ src[6] -=v/8;
++
++ }
++ src+=stride;
++ }
++}
++
++/**
++ * accurate deblock filter
++ */
++static always_inline void do_a_deblock_C(uint8_t *src, int step, int stride, PPContext *c){
++ int y;
++ const int QP= c->QP;
++ const int dcOffset= ((c->nonBQP*c->ppMode.baseDcDiff)>>8) + 1;
++ const int dcThreshold= dcOffset*2 + 1;
++//START_TIMER
++ src+= step*4; // src points to begin of the 8x8 Block
++ for(y=0; y<8; y++){
++ int numEq= 0;
++
++ if(((unsigned)(src[-1*step] - src[0*step] + dcOffset)) < dcThreshold) numEq++;
++ if(((unsigned)(src[ 0*step] - src[1*step] + dcOffset)) < dcThreshold) numEq++;
++ if(((unsigned)(src[ 1*step] - src[2*step] + dcOffset)) < dcThreshold) numEq++;
++ if(((unsigned)(src[ 2*step] - src[3*step] + dcOffset)) < dcThreshold) numEq++;
++ if(((unsigned)(src[ 3*step] - src[4*step] + dcOffset)) < dcThreshold) numEq++;
++ if(((unsigned)(src[ 4*step] - src[5*step] + dcOffset)) < dcThreshold) numEq++;
++ if(((unsigned)(src[ 5*step] - src[6*step] + dcOffset)) < dcThreshold) numEq++;
++ if(((unsigned)(src[ 6*step] - src[7*step] + dcOffset)) < dcThreshold) numEq++;
++ if(((unsigned)(src[ 7*step] - src[8*step] + dcOffset)) < dcThreshold) numEq++;
++ if(numEq > c->ppMode.flatnessThreshold){
++ int min, max, x;
++
++ if(src[0] > src[step]){
++ max= src[0];
++ min= src[step];
++ }else{
++ max= src[step];
++ min= src[0];
++ }
++ for(x=2; x<8; x+=2){
++ if(src[x*step] > src[(x+1)*step]){
++ if(src[x *step] > max) max= src[ x *step];
++ if(src[(x+1)*step] < min) min= src[(x+1)*step];
++ }else{
++ if(src[(x+1)*step] > max) max= src[(x+1)*step];
++ if(src[ x *step] < min) min= src[ x *step];
++ }
++ }
++ if(max-min < 2*QP){
++ const int first= ABS(src[-1*step] - src[0]) < QP ? src[-1*step] : src[0];
++ const int last= ABS(src[8*step] - src[7*step]) < QP ? src[8*step] : src[7*step];
++
++ int sums[10];
++ sums[0] = 4*first + src[0*step] + src[1*step] + src[2*step] + 4;
++ sums[1] = sums[0] - first + src[3*step];
++ sums[2] = sums[1] - first + src[4*step];
++ sums[3] = sums[2] - first + src[5*step];
++ sums[4] = sums[3] - first + src[6*step];
++ sums[5] = sums[4] - src[0*step] + src[7*step];
++ sums[6] = sums[5] - src[1*step] + last;
++ sums[7] = sums[6] - src[2*step] + last;
++ sums[8] = sums[7] - src[3*step] + last;
++ sums[9] = sums[8] - src[4*step] + last;
++
++ src[0*step]= (sums[0] + sums[2] + 2*src[0*step])>>4;
++ src[1*step]= (sums[1] + sums[3] + 2*src[1*step])>>4;
++ src[2*step]= (sums[2] + sums[4] + 2*src[2*step])>>4;
++ src[3*step]= (sums[3] + sums[5] + 2*src[3*step])>>4;
++ src[4*step]= (sums[4] + sums[6] + 2*src[4*step])>>4;
++ src[5*step]= (sums[5] + sums[7] + 2*src[5*step])>>4;
++ src[6*step]= (sums[6] + sums[8] + 2*src[6*step])>>4;
++ src[7*step]= (sums[7] + sums[9] + 2*src[7*step])>>4;
++ }
++ }else{
++ const int middleEnergy= 5*(src[4*step] - src[3*step]) + 2*(src[2*step] - src[5*step]);
++
++ if(ABS(middleEnergy) < 8*QP)
++ {
++ const int q=(src[3*step] - src[4*step])/2;
++ const int leftEnergy= 5*(src[2*step] - src[1*step]) + 2*(src[0*step] - src[3*step]);
++ const int rightEnergy= 5*(src[6*step] - src[5*step]) + 2*(src[4*step] - src[7*step]);
++
++ int d= ABS(middleEnergy) - MIN( ABS(leftEnergy), ABS(rightEnergy) );
++ d= MAX(d, 0);
++
++ d= (5*d + 32) >> 6;
++ d*= SIGN(-middleEnergy);
++
++ if(q>0)
++ {
++ d= d<0 ? 0 : d;
++ d= d>q ? q : d;
++ }
++ else
++ {
++ d= d>0 ? 0 : d;
++ d= d<q ? q : d;
++ }
++
++ src[3*step]-= d;
++ src[4*step]+= d;
++ }
++ }
++
++ src += stride;
++ }
++/*if(step==16){
++ STOP_TIMER("step16")
++}else{
++ STOP_TIMER("stepX")
++}*/
++}
++
++//Note: we have C, MMX, MMX2, 3DNOW version there is no 3DNOW+MMX2 one
++//Plain C versions
++#if !defined (HAVE_MMX) || defined (RUNTIME_CPUDETECT)
++#define COMPILE_C
++#endif
++
++#ifdef ARCH_POWERPC
++#ifdef HAVE_ALTIVEC
++#define COMPILE_ALTIVEC
++#endif //HAVE_ALTIVEC
++#endif //ARCH_POWERPC
++
++#undef HAVE_MMX
++#undef HAVE_MMX2
++#undef HAVE_3DNOW
++#undef HAVE_ALTIVEC
++
++#ifdef ARCH_POWERPC
++#ifdef COMPILE_ALTIVEC
++#undef RENAME
++#define HAVE_ALTIVEC
++#define RENAME(a) a ## _altivec
++#include "postprocess_altivec_template.c"
++#include "postprocess_template.c"
++#endif
++#endif //ARCH_POWERPC
++
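
[Editorial note.] The libavcodec and libpostproc hunks above all enforce the same boundary: headers shared with non-AltiVec objects must not pull in <altivec.h> or vector types, and the intrinsics stay inside the few files built with $(ALTIVECFLAGS). A minimal sketch of that split follows, using hypothetical foo_* names; it is an illustration of the layout, not code from the patch.

    /* foo_altivec.h -- shared declaration: plain C types only, no <altivec.h>,
     * so any object (AltiVec-enabled or not) can include it safely. */
    #ifndef FOO_ALTIVEC_H
    #define FOO_ALTIVEC_H
    #include <stdint.h>
    void foo_copy_altivec(uint8_t *dst, const uint8_t *src, int len);
    #endif

    /* foo_altivec.c -- the only translation unit built with -maltivec
     * ($(ALTIVECFLAGS)); vector types and intrinsics never leave this file. */
    #include <altivec.h>

    void foo_copy_altivec(uint8_t *dst, const uint8_t *src, int len)
    {
        int i = 0;
        /* Purely illustrative 16-byte vector copy; assumes dst and src are
         * 16-byte aligned, since vec_ld/vec_st ignore the low address bits. */
        for (; i + 16 <= len; i += 16) {
            vector unsigned char v = vec_ld(i, src);
            vec_st(v, i, dst);
        }
        for (; i < len; i++)        /* scalar tail */
            dst[i] = src[i];
    }
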
Modified: unstable/ffmpeg/debian/patches/series
===================================================================
--- unstable/ffmpeg/debian/patches/series 2007-01-28 12:49:33 UTC (rev 150)
+++ unstable/ffmpeg/debian/patches/series 2007-01-29 11:35:11 UTC (rev 151)
@@ -12,6 +12,7 @@
020_really_use_liba52.diff
030_arm_cpu_detect.diff
030_arm_workaround.diff
+040_early_altivec_detection.diff
+040_only_use_maltivec_when_needed.diff
050_h264-misc-security-fixes.diff
051_asf-misc-security-fixes.diff
-040_early_altivec_detection.diff