/*
 * AltiVec optimizations for libjpeg-turbo
 *
 * Copyright (C) 2014, D. R. Commander.  All Rights Reserved.
 *
 * This software is provided 'as-is', without any express or implied
 * warranty.  In no event will the authors be held liable for any damages
 * arising from the use of this software.
 *
 * Permission is granted to anyone to use this software for any purpose,
 * including commercial applications, and to alter it and redistribute it
 * freely, subject to the following restrictions:
 *
 * 1. The origin of this software must not be misrepresented; you must not
 *    claim that you wrote the original software. If you use this software
 *    in a product, an acknowledgment in the product documentation would be
 *    appreciated but is not required.
 * 2. Altered source versions must be plainly marked as such, and must not be
 *    misrepresented as being the original software.
 * 3. This notice may not be removed or altered from any source distribution.
 */

/* FAST INTEGER FORWARD DCT
 *
 * This is similar to the SSE2 implementation, except that we left-shift the
 * constants by 1 less bit (the -1 in CONST_SHIFT.)  This is because
 * vec_madds(arg1, arg2, arg3) generates the 16-bit saturated sum of:
 *   the elements in arg3 + the most significant 17 bits of
 *     (the elements in arg1 * the elements in arg2).
 */
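
/* Concretely (ignoring rounding and saturation), vec_madds() computes
 * ((arg1 * arg2) >> 15) + arg3 for each element.  The inputs below are
 * pre-scaled left by PRE_MULTIPLY_SCALE_BITS (2) and the constants by
 * CONST_SHIFT (5), so a product carries 2 + 8 + 5 = 15 extra fraction bits
 * and the implicit >>15 returns the result to natural scale.  For example,
 * with F_0_707:
 *   ((x << 2) * (181 << 5)) >> 15 == (x * 181) / 256 ~= x * 0.707106781
 */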

#include "jsimd_altivec.h"


#define F_0_382  98   /* FIX(0.382683433) */
#define F_0_541  139  /* FIX(0.541196100) */
#define F_0_707  181  /* FIX(0.707106781) */
#define F_1_306  334  /* FIX(1.306562965) */

#define CONST_BITS  8
#define PRE_MULTIPLY_SCALE_BITS  2
#define CONST_SHIFT  (16 - PRE_MULTIPLY_SCALE_BITS - CONST_BITS - 1)
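/* FIX(x) above denotes x * 2^CONST_BITS rounded to the nearest integer
 * (e.g. 0.707106781 * 256 ~= 181), so each constant carries CONST_BITS = 8
 * fraction bits before the CONST_SHIFT left-shift is applied. */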


#define DO_FDCT() { \
  /* Even part */ \
  \
  tmp10 = vec_add(tmp0, tmp3); \
  tmp13 = vec_sub(tmp0, tmp3); \
  tmp11 = vec_add(tmp1, tmp2); \
  tmp12 = vec_sub(tmp1, tmp2); \
  \
  out0  = vec_add(tmp10, tmp11); \
  out4  = vec_sub(tmp10, tmp11); \
  \
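  /* z1 = (tmp12 + tmp13) * 0.707106781 (c4), as in the scalar ifast DCT. \
   * vec_sl() pre-scales the operand so that the >>15 implicit in \
   * vec_madds() leaves the result at the inputs' natural scale. */ \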
  z1 = vec_add(tmp12, tmp13); \
  z1 = vec_sl(z1, pre_multiply_scale_bits); \
  z1 = vec_madds(z1, pw_0707, pw_zero); \
  \
  out2 = vec_add(tmp13, z1); \
  out6 = vec_sub(tmp13, z1); \
  \
  /* Odd part */ \
  \
  tmp10 = vec_add(tmp4, tmp5); \
  tmp11 = vec_add(tmp5, tmp6); \
  tmp12 = vec_add(tmp6, tmp7); \
  \
  tmp10 = vec_sl(tmp10, pre_multiply_scale_bits); \
  tmp12 = vec_sl(tmp12, pre_multiply_scale_bits); \
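  /* Rotation, as in the scalar ifast DCT: \
   *   z5 = (tmp10 - tmp12) * 0.382683433  (c6) \
   *   z2 = tmp10 * 0.541196100 + z5       (c2-c6) \
   *   z4 = tmp12 * 1.306562965 + z5       (c2+c6) \
   * The third argument of vec_madds() folds the "+ z5" into the multiply. \
   */ \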
  z5 = vec_sub(tmp10, tmp12); \
  z5 = vec_madds(z5, pw_0382, pw_zero); \
  \
  z2 = vec_madds(tmp10, pw_0541, z5); \
  z4 = vec_madds(tmp12, pw_1306, z5); \
  \
  tmp11 = vec_sl(tmp11, pre_multiply_scale_bits); \
  z3 = vec_madds(tmp11, pw_0707, pw_zero); \
  \
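  /* z11 and z13 correspond to the "phase 5" butterflies of the scalar \
   * ifast DCT; combining them with z2 and z4 yields the odd outputs. */ \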
  z11 = vec_add(tmp7, z3); \
  z13 = vec_sub(tmp7, z3); \
  \
  out5 = vec_add(z13, z2); \
  out3 = vec_sub(z13, z2); \
  out1 = vec_add(z11, z4); \
  out7 = vec_sub(z11, z4); \
}


void jsimd_fdct_ifast_altivec(DCTELEM *data)
{
  __vector short row0, row1, row2, row3, row4, row5, row6, row7,
    col0, col1, col2, col3, col4, col5, col6, col7,
    tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7, tmp10, tmp11, tmp12, tmp13,
    z1, z2, z3, z4, z5, z11, z13,
    out0, out1, out2, out3, out4, out5, out6, out7;

  /* Constants */
  __vector short pw_zero = { __8X(0) },
    pw_0382 = { __8X(F_0_382 << CONST_SHIFT) },
    pw_0541 = { __8X(F_0_541 << CONST_SHIFT) },
    pw_0707 = { __8X(F_0_707 << CONST_SHIFT) },
    pw_1306 = { __8X(F_1_306 << CONST_SHIFT) };
  __vector unsigned short
    pre_multiply_scale_bits = { __8X(PRE_MULTIPLY_SCALE_BITS) };

  /* Pass 1: process rows */
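
  /* Load the 8x8 block: each row is eight 16-bit coefficients, i.e. one
   * 16-byte vector.  vec_ld() only loads from 16-byte-aligned addresses,
   * which the caller is expected to guarantee for the coefficient block. */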
  row0 = vec_ld(0, data);
  row1 = vec_ld(16, data);
  row2 = vec_ld(32, data);
  row3 = vec_ld(48, data);
  row4 = vec_ld(64, data);
  row5 = vec_ld(80, data);
  row6 = vec_ld(96, data);
  row7 = vec_ld(112, data);
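
  /* Transpose the block (TRANSPOSE() comes from jsimd_altivec.h) so that
   * each vector holds one sample position from all eight rows; the eight
   * row DCTs then proceed in parallel, one row per SIMD lane. */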
  TRANSPOSE(row, col);
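
  /* Initial butterflies: the sums (tmp0..tmp3) feed the even part of the
   * DCT, and the differences (tmp4..tmp7) feed the odd part. */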
  tmp0 = vec_add(col0, col7);
  tmp7 = vec_sub(col0, col7);
  tmp1 = vec_add(col1, col6);
  tmp6 = vec_sub(col1, col6);
  tmp2 = vec_add(col2, col5);
  tmp5 = vec_sub(col2, col5);
  tmp3 = vec_add(col3, col4);
  tmp4 = vec_sub(col3, col4);

  DO_FDCT();

  /* Pass 2: process columns */

  TRANSPOSE(out, row);

  tmp0 = vec_add(row0, row7);
  tmp7 = vec_sub(row0, row7);
  tmp1 = vec_add(row1, row6);
  tmp6 = vec_sub(row1, row6);
  tmp2 = vec_add(row2, row5);
  tmp5 = vec_sub(row2, row5);
  tmp3 = vec_add(row3, row4);
  tmp4 = vec_sub(row3, row4);

  DO_FDCT();

  vec_st(out0, 0, data);
  vec_st(out1, 16, data);
  vec_st(out2, 32, data);
  vec_st(out3, 48, data);
  vec_st(out4, 64, data);
  vec_st(out5, 80, data);
  vec_st(out6, 96, data);
  vec_st(out7, 112, data);
}