codeclib: make selective inclusion of macros and inline functions from the codeclib_misc.h header easier and clean out some old libtremor stuff.

git-svn-id: svn://svn.rockbox.org/rockbox/trunk@29985 a1c6a512-1295-4272-9138-f99709370657
Nils Wallménius 2011-06-08 10:35:27 +00:00
parent 6672766dfe
commit d5ceb4ce2b
3 changed files with 54 additions and 41 deletions
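The pattern these diffs introduce: each architecture header defines an INCL_OPTIMIZED_* marker next to the routine it implements in assembly, and codeclib_misc.h compiles its portable fallback only when no marker is set, so codecs can pick up individual optimized routines instead of relying on the old all-or-nothing _V_WIDE_MATH guard. A minimal sketch of the mechanism, using MULT32 as the example (illustrative, not a line-for-line excerpt of the files below):

    #include <stdint.h>

    /* architecture header (e.g. asm_arm.h): advertise the optimized routine */
    #define INCL_OPTIMIZED_MULT32
    static inline int32_t MULT32(int32_t x, int32_t y) {
        /* the real header uses inline assembly here */
        return (int32_t)(((int64_t)x * y) >> 32);
    }

    /* codeclib_misc.h: generic fallback, skipped when an optimized one exists */
    #ifndef INCL_OPTIMIZED_MULT32
    #define INCL_OPTIMIZED_MULT32
    static inline int32_t MULT32(int32_t x, int32_t y) {
        return (int32_t)(((int64_t)x * y) >> 32); /* top 32 bits of the product */
    }
    #endif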

asm_arm.h

@@ -16,9 +16,7 @@
  ********************************************************************/
 
 #ifdef CPU_ARM
-#if !defined(_V_WIDE_MATH) && !defined(_LOW_ACCURACY_)
-#define _V_WIDE_MATH
-
+#define INCL_OPTIMIZED_MULT32
 #if ARM_ARCH >= 6
 static inline int32_t MULT32(int32_t x, int32_t y) {
     int32_t hi;
@@ -37,10 +35,12 @@ static inline int32_t MULT32(int32_t x, int32_t y) {
 }
 #endif
 
+#define INCL_OPTIMIZED_MULT31
 static inline int32_t MULT31(int32_t x, int32_t y) {
     return MULT32(x,y)<<1;
 }
 
+#define INCL_OPTIMIZED_MULT31_SHIFT15
 static inline int32_t MULT31_SHIFT15(int32_t x, int32_t y) {
     int32_t lo,hi;
     asm volatile("smull %0, %1, %2, %3\n\t"
@@ -52,6 +52,7 @@ static inline int32_t MULT31_SHIFT15(int32_t x, int32_t y) {
     return(hi);
 }
 
+#define INCL_OPTIMIZED_MULT31_SHIFT16
 static inline int32_t MULT31_SHIFT16(int32_t x, int32_t y) {
     int32_t lo,hi;
     asm volatile("smull %0, %1, %2, %3\n\t"
@@ -63,6 +64,7 @@ static inline int32_t MULT31_SHIFT16(int32_t x, int32_t y) {
     return(hi);
 }
 
+#define INCL_OPTIMIZED_XPROD32
 #define XPROD32(a, b, t, v, x, y) \
 { \
     int32_t l; \
@@ -75,6 +77,8 @@ static inline int32_t MULT31_SHIFT16(int32_t x, int32_t y) {
                  : "r" ((a)), "r" ((b)), "r" ((t)), "r" ((v)) ); \
 }
 
+#define INCL_OPTIMIZED_XPROD31_R
+#define INCL_OPTIMIZED_XNPROD31_R
 #if ARM_ARCH >= 6
 /* These may yield slightly different result from the macros below
    because only the high 32 bits of the multiplications are accumulated while
@@ -134,6 +138,7 @@ static inline int32_t MULT31_SHIFT16(int32_t x, int32_t y) {
 }
 #endif
 
+#define INCL_OPTIMIZED_XPROD31
 static inline void XPROD31(int32_t a, int32_t b,
                            int32_t t, int32_t v,
                            int32_t *x, int32_t *y)
@@ -144,6 +149,7 @@ static inline void XPROD31(int32_t a, int32_t b,
     *y = _y1;
 }
 
+#define INCL_OPTIMIZED_XNPROD31
 static inline void XNPROD31(int32_t a, int32_t b,
                             int32_t t, int32_t v,
                             int32_t *x, int32_t *y)
@@ -261,7 +267,6 @@ void vect_mult_bw(int32_t *data, int32_t *window, int n)
 #endif
 
-#endif
 /* not used anymore */
 /*
 #ifndef _V_CLIP_MATH
@@ -282,11 +287,6 @@ static inline int32_t CLIP_TO_15(int32_t x) {
 #endif
 */
 
-#ifndef _V_LSP_MATH_ASM
-#define _V_LSP_MATH_ASM
-#endif
-
 #endif
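A note on the ARM_ARCH >= 6 comment above: the v6 versions truncate each 32x32 product to its high 32 bits before summing, whereas the smull-based versions accumulate the full 64-bit products and truncate once at the end, so results can differ in the least significant bit. In portable C the two behaviours look roughly like this (an illustrative sketch with hypothetical helper names, not code from the commit):

    #include <stdint.h>

    /* ARMv6 style: each product truncated to its high 32 bits, then summed;
       carries out of the low words are lost */
    static inline int32_t xprod_hi(int32_t a, int32_t b, int32_t t, int32_t v) {
        return (int32_t)(((int64_t)a * t) >> 32)
             + (int32_t)(((int64_t)b * v) >> 32);
    }

    /* pre-v6 style: full 64-bit accumulation, truncated once at the end */
    static inline int32_t xprod_full(int32_t a, int32_t b, int32_t t, int32_t v) {
        return (int32_t)(((int64_t)a * t + (int64_t)b * v) >> 32);
    }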

asm_mcf5249.h

@@ -21,9 +21,7 @@
 
 #if defined(CPU_COLDFIRE)
 
-#ifndef _V_WIDE_MATH
-#define _V_WIDE_MATH
-
+#define INCL_OPTIMIZED_MULT32
 static inline int32_t MULT32(int32_t x, int32_t y) {
     asm volatile ("mac.l %[x], %[y], %%acc0;" /* multiply & shift */
@@ -35,6 +33,7 @@ static inline int32_t MULT32(int32_t x, int32_t y) {
     return x;
 }
 
+#define INCL_OPTIMIZED_MULT31
 static inline int32_t MULT31(int32_t x, int32_t y) {
     asm volatile ("mac.l %[x], %[y], %%acc0;" /* multiply */
                   "movclr.l %%acc0, %[x];"    /* move and clear */
@@ -44,6 +43,7 @@ static inline int32_t MULT31(int32_t x, int32_t y) {
     return x;
 }
 
+#define INCL_OPTIMIZED_MULT31_SHIFT15
 static inline int32_t MULT31_SHIFT15(int32_t x, int32_t y) {
     int32_t r;
 
@@ -61,6 +61,7 @@ static inline int32_t MULT31_SHIFT15(int32_t x, int32_t y) {
     return r;
 }
 
+#define INCL_OPTIMIZED_MULT31_SHIFT16
 static inline int32_t MULT31_SHIFT16(int32_t x, int32_t y) {
     int32_t r;
 
@@ -76,6 +77,7 @@ static inline int32_t MULT31_SHIFT16(int32_t x, int32_t y) {
     return x;
 }
 
+#define INCL_OPTIMIZED_XPROD31
 static inline
 void XPROD31(int32_t a, int32_t b,
              int32_t t, int32_t v,
@@ -95,6 +97,7 @@ void XPROD31(int32_t a, int32_t b,
                   : "cc", "memory");
 }
 
+#define INCL_OPTIMIZED_XNPROD31
 static inline
 void XNPROD31(int32_t a, int32_t b,
               int32_t t, int32_t v,
@@ -114,15 +117,11 @@ void XNPROD31(int32_t a, int32_t b,
                   : "cc", "memory");
 }
 
-#if 0 /* canonical Tremor definition */
-#define XPROD32(_a, _b, _t, _v, _x, _y) \
-{ (_x)=MULT32(_a,_t)+MULT32(_b,_v); \
-  (_y)=MULT32(_b,_t)-MULT32(_a,_v); }
-#endif
 
 /* this could lose the LSB by overflow, but i don't think it'll ever happen.
    if anyone think they can hear a bug caused by this, please try the above
    version. */
+#define INCL_OPTIMIZED_XPROD32
 #define XPROD32(_a, _b, _t, _v, _x, _y) \
     asm volatile ("mac.l %[a], %[t], %%acc0;" \
                   "mac.l %[b], %[v], %%acc0;" \
@@ -137,6 +136,7 @@ void XNPROD31(int32_t a, int32_t b,
                   [t] "r" (_t), [v] "r" (_v) \
                   : "cc");
 
+#define INCL_OPTIMIZED_XPROD31_R
 #define XPROD31_R(_a, _b, _t, _v, _x, _y) \
     asm volatile ("mac.l %[a], %[t], %%acc0;" \
                   "mac.l %[b], %[v], %%acc0;" \
@@ -149,6 +149,7 @@ void XNPROD31(int32_t a, int32_t b,
                   [t] "r" (_t), [v] "r" (_v) \
                   : "cc");
 
+#define INCL_OPTIMIZED_XNPROD31_R
 #define XNPROD31_R(_a, _b, _t, _v, _x, _y) \
     asm volatile ("mac.l %[a], %[t], %%acc0;" \
                   "msac.l %[b], %[v], %%acc0;" \
@@ -336,7 +337,6 @@ void vect_mult_bw(int32_t *data, int32_t *window, int n)
 #endif
 
-#endif
 /* not used anymore */
 /*
 #ifndef _V_CLIP_MATH
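For reference, the canonical XPROD32 that the deleted #if 0 block preserved truncates each term with MULT32 (top 32 bits of the 64-bit product) before combining, while the EMAC macros above sum full-precision products in the accumulator first; that difference is the possible last-bit change the "could lose the LSB" comment describes. A portable restatement of the canonical semantics (an illustrative sketch; the helper name is hypothetical):

    #include <stdint.h>

    /* canonical cross product: (x, y) = (a*t + b*v, b*t - a*v), each term
       truncated to the top 32 bits of its 64-bit product before combining */
    static inline void xprod32_ref(int32_t a, int32_t b, int32_t t, int32_t v,
                                   int32_t *x, int32_t *y)
    {
        *x = (int32_t)(((int64_t)a * t) >> 32) + (int32_t)(((int64_t)b * v) >> 32);
        *y = (int32_t)(((int64_t)b * t) >> 32) - (int32_t)(((int64_t)a * v) >> 32);
    }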

codeclib_misc.h

@@ -15,32 +15,15 @@
  ********************************************************************/
 
-//#include "config-tremor.h"
+#ifndef _CODECLIB_MISC_H_
+#define _CODECLIB_MISC_H_
 
-#ifndef _V_RANDOM_H_
-#define _V_RANDOM_H_
-//#include "ivorbiscodec.h"
-//#include "os_types.h"
-
+#include <stdint.h>
 #include "asm_arm.h"
 #include "asm_mcf5249.h"
 
-/* Some prototypes that were not defined elsewhere */
-//void *_vorbis_block_alloc(vorbis_block *vb,long bytes);
-//void _vorbis_block_ripcord(vorbis_block *vb);
-//extern int _ilog(unsigned int v);
-
-#ifndef _V_WIDE_MATH
-#define _V_WIDE_MATH
-
-#ifndef ROCKBOX
-#include <inttypes.h>
-#endif /* ROCKBOX */
-
 #ifndef _LOW_ACCURACY_
 /* 64 bit multiply */
-/* #include <sys/types.h> */
 
 #ifdef ROCKBOX_LITTLE_ENDIAN
 union magic {
@@ -60,29 +43,43 @@ union magic {
 };
 #endif
 
+#ifndef INCL_OPTIMIZED_MULT32
+#define INCL_OPTIMIZED_MULT32
 static inline int32_t MULT32(int32_t x, int32_t y) {
     union magic magic;
     magic.whole = (int64_t)x * y;
     return magic.halves.hi;
 }
+#endif
 
+#ifndef INCL_OPTIMIZED_MULT31
+#define INCL_OPTIMIZED_MULT31
 static inline int32_t MULT31(int32_t x, int32_t y) {
     return MULT32(x,y)<<1;
 }
+#endif
 
+#ifndef INCL_OPTIMIZED_MULT31_SHIFT15
+#define INCL_OPTIMIZED_MULT31_SHIFT15
 static inline int32_t MULT31_SHIFT15(int32_t x, int32_t y) {
     union magic magic;
     magic.whole = (int64_t)x * y;
     return ((uint32_t)(magic.halves.lo)>>15) | ((magic.halves.hi)<<17);
 }
+#endif
 
+#ifndef INCL_OPTIMIZED_MULT31_SHIFT16
+#define INCL_OPTIMIZED_MULT31_SHIFT16
 static inline int32_t MULT31_SHIFT16(int32_t x, int32_t y) {
     union magic magic;
     magic.whole = (int64_t)x * y;
     return ((uint32_t)(magic.halves.lo)>>16) | ((magic.halves.hi)<<16);
 }
+#endif
 
 #else
+/* Rockbox: unused */
+#if 0
 /* 32 bit multiply, more portable but less accurate */
 /*
@@ -110,6 +107,7 @@ static inline int32_t MULT31_SHIFT15(int32_t x, int32_t y) {
     return (x >> 6) * y;  /* y preshifted >>9 */
 }
 #endif
+#endif
 
 /*
  * The XPROD functions are meant to optimize the cross products found all
@@ -121,13 +119,17 @@ static inline int32_t MULT31_SHIFT15(int32_t x, int32_t y) {
  * macros.
  */
 
+#ifndef INCL_OPTIMIZED_XPROD32
+#define INCL_OPTIMIZED_XPROD32
 /* replaced XPROD32 with a macro to avoid memory reference
    _x, _y are the results (must be l-values) */
 #define XPROD32(_a, _b, _t, _v, _x, _y) \
 { (_x)=MULT32(_a,_t)+MULT32(_b,_v); \
   (_y)=MULT32(_b,_t)-MULT32(_a,_v); }
+#endif
 
+/* Rockbox: Unused */
+/*
 #ifdef __i386__
 
 #define XPROD31(_a, _b, _t, _v, _x, _y) \
@@ -138,7 +140,10 @@ static inline int32_t MULT31_SHIFT15(int32_t x, int32_t y) {
   *(_y)=MULT31(_b,_t)+MULT31(_a,_v); }
 
 #else
+*/
 
+#ifndef INCL_OPTIMIZED_XPROD31
+#define INCL_OPTIMIZED_XPROD31
 static inline void XPROD31(int32_t a, int32_t b,
                            int32_t t, int32_t v,
                            int32_t *x, int32_t *y)
@@ -146,7 +151,10 @@ static inline void XPROD31(int32_t a, int32_t b,
     *x = MULT31(a, t) + MULT31(b, v);
     *y = MULT31(b, t) - MULT31(a, v);
 }
+#endif
 
+#ifndef INCL_OPTIMIZED_XNPROD31
+#define INCL_OPTIMIZED_XNPROD31
 static inline void XNPROD31(int32_t a, int32_t b,
                             int32_t t, int32_t v,
                             int32_t *x, int32_t *y)
@@ -155,19 +163,25 @@ static inline void XNPROD31(int32_t a, int32_t b,
     *y = MULT31(b, t) + MULT31(a, v);
 }
 #endif
+/*#endif*/
 
+#ifndef INCL_OPTIMIZED_XPROD31_R
+#define INCL_OPTIMIZED_XPROD31_R
 #define XPROD31_R(_a, _b, _t, _v, _x, _y)\
 {\
     _x = MULT31(_a, _t) + MULT31(_b, _v);\
     _y = MULT31(_b, _t) - MULT31(_a, _v);\
 }
+#endif
 
+#ifndef INCL_OPTIMIZED_XNPROD31_R
+#define INCL_OPTIMIZED_XNPROD31_R
 #define XNPROD31_R(_a, _b, _t, _v, _x, _y)\
 {\
     _x = MULT31(_a, _t) - MULT31(_b, _v);\
     _y = MULT31(_b, _t) + MULT31(_a, _v);\
 }
+#endif
 
 #ifndef _V_VECT_OPS
 #define _V_VECT_OPS
@@ -213,7 +227,6 @@ void vect_mult_bw(int32_t *data, int32_t *window, int n)
 }
 #endif
 
-#endif
 /* not used anymore */
 /*
 #ifndef _V_CLIP_MATH