/* multi_arith.h: multi-precision integer arithmetic functions, needed
   to do extended-precision floating point.

   (c) 1998 David Huggins-Daines.

   Somewhat based on arch/alpha/math-emu/ieee-math.c, which is (c)
   David Mosberger-Tang.

   You may copy, modify, and redistribute this file under the terms of
   the GNU General Public License, version 2, or any later version, at
   your convenience. */

/* Note:

   These are not general multi-precision math routines.  Rather, they
   implement the subset of integer arithmetic that we need in order to
   multiply, divide, and normalize 128-bit unsigned mantissae. */

#ifndef MULTI_ARITH_H
#define MULTI_ARITH_H

/* Shift the mantissa right by cnt bits and raise the exponent to match,
   keeping the top eight shifted-out bits in the lowmant guard byte; any
   bits below those are ORed into its least significant (sticky) bit. */
extern inline void fp_denormalize(struct fp_ext *reg, unsigned int cnt)
{
	reg->exp += cnt;

	switch (cnt) {
	case 0 ... 8:
		reg->lowmant = reg->mant.m32[1] << (8 - cnt);
		reg->mant.m32[1] = (reg->mant.m32[1] >> cnt) |
				   (reg->mant.m32[0] << (32 - cnt));
		reg->mant.m32[0] = reg->mant.m32[0] >> cnt;
		break;
	case 9 ... 32:
		reg->lowmant = reg->mant.m32[1] >> (cnt - 8);
		if (reg->mant.m32[1] << (40 - cnt))
			reg->lowmant |= 1;
		reg->mant.m32[1] = (reg->mant.m32[1] >> cnt) |
				   (reg->mant.m32[0] << (32 - cnt));
		reg->mant.m32[0] = reg->mant.m32[0] >> cnt;
		break;
	case 33 ... 39:
		asm volatile ("bfextu %1{%2,#8},%0" : "=d" (reg->lowmant)
			: "m" (reg->mant.m32[0]), "d" (64 - cnt));
		if (reg->mant.m32[1] << (40 - cnt))
			reg->lowmant |= 1;
		reg->mant.m32[1] = reg->mant.m32[0] >> (cnt - 32);
		reg->mant.m32[0] = 0;
		break;
	case 40 ... 71:
		reg->lowmant = reg->mant.m32[0] >> (cnt - 40);
		if ((reg->mant.m32[0] << (72 - cnt)) || reg->mant.m32[1])
			reg->lowmant |= 1;
		reg->mant.m32[1] = reg->mant.m32[0] >> (cnt - 32);
		reg->mant.m32[0] = 0;
		break;
	default:
		reg->lowmant = reg->mant.m32[0] || reg->mant.m32[1];
		reg->mant.m32[0] = 0;
		reg->mant.m32[1] = 0;
		break;
	}
}

/* Shift the mantissa left until its high bit is set ("bfffo" finds the
   first one bit); returns the shift count. */
extern inline int fp_overnormalize(struct fp_ext *reg)
{
	int shift;

	if (reg->mant.m32[0]) {
		asm ("bfffo %1{#0,#32},%0" : "=d" (shift)
			: "dm" (reg->mant.m32[0]));
		reg->mant.m32[0] = (reg->mant.m32[0] << shift) |
				   (reg->mant.m32[1] >> (32 - shift));
		reg->mant.m32[1] = (reg->mant.m32[1] << shift);
	} else {
		asm ("bfffo %1{#0,#32},%0" : "=d" (shift)
			: "dm" (reg->mant.m32[1]));
		reg->mant.m32[0] = (reg->mant.m32[1] << shift);
		reg->mant.m32[1] = 0;
		shift += 32;
	}

	return shift;
}

/* 72-bit add of the lowmant guard byte and the 64-bit mantissa,
   chaining the carry with "addx"; returns the carry out of the top
   word. */
extern inline int fp_addmant(struct fp_ext *dest, struct fp_ext *src)
{
	int carry;

	/* we assume here that gcc only inserts move and clr instructions,
	   which leave the X flag used by "addx" intact */
	asm volatile ("add.b %1,%0" : "=d,g" (dest->lowmant)
		: "g,d" (src->lowmant), "0,0" (dest->lowmant));
	asm volatile ("addx.l %1,%0" : "=d" (dest->mant.m32[1])
		: "d" (src->mant.m32[1]), "0" (dest->mant.m32[1]));
	asm volatile ("addx.l %1,%0" : "=d" (dest->mant.m32[0])
		: "d" (src->mant.m32[0]), "0" (dest->mant.m32[0]));
	asm volatile ("addx.l %0,%0" : "=d" (carry) : "0" (0));

	return carry;
}
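
/* An illustrative sketch, not part of the original file: assuming a
   64-bit "unsigned long long", the carry chain above is roughly the
   following portable C, with lowmant acting as an 8-bit guard byte
   below the 64-bit mantissa:

	unsigned long long t;

	t = (unsigned long long)dest->lowmant + src->lowmant;
	dest->lowmant = t;				// low 8 bits
	t = (t >> 8) + (unsigned long long)dest->mant.m32[1]
		     + src->mant.m32[1];
	dest->mant.m32[1] = t;				// middle 32 bits
	t = (t >> 32) + (unsigned long long)dest->mant.m32[0]
		     + src->mant.m32[0];
	dest->mant.m32[0] = t;				// top 32 bits
	return t >> 32;					// carry out
*/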

/* Absorb a carry out of fp_addmant(): shift the mantissa right one bit,
   reinsert the carry as the new top bit, and bump the exponent.
   Returns 0 if the exponent overflows, 1 otherwise. */
extern inline int fp_addcarry(struct fp_ext *reg)
{
	if (++reg->exp == 0x7fff) {
		if (reg->mant.m64)
			fp_set_sr(FPSR_EXC_INEX2);
		reg->mant.m64 = 0;
		fp_set_sr(FPSR_EXC_OVFL);
		return 0;
	}
	reg->lowmant = (reg->mant.m32[1] << 7) | (reg->lowmant ? 1 : 0);
	reg->mant.m32[1] = (reg->mant.m32[1] >> 1) |
			   (reg->mant.m32[0] << 31);
	reg->mant.m32[0] = (reg->mant.m32[0] >> 1) | 0x80000000;

	return 1;
}

/* 72-bit subtract, dest = src1 - src2, chaining the borrow with
   "subx". */
extern inline void fp_submant(struct fp_ext *dest, struct fp_ext *src1,
			      struct fp_ext *src2)
{
	/* we assume here that gcc only inserts move and clr instructions,
	   which leave the X flag used by "subx" intact */
	asm volatile ("sub.b %1,%0" : "=d,g" (dest->lowmant)
		: "g,d" (src2->lowmant), "0,0" (src1->lowmant));
	asm volatile ("subx.l %1,%0" : "=d" (dest->mant.m32[1])
		: "d" (src2->mant.m32[1]), "0" (src1->mant.m32[1]));
	asm volatile ("subx.l %1,%0" : "=d" (dest->mant.m32[0])
		: "d" (src2->mant.m32[0]), "0" (src1->mant.m32[0]));
}

/* 32x32 -> 64-bit multiply ("mulu.l"), high word in desth. */
#define fp_mul64(desth, destl, src1, src2) ({			\
	asm ("mulu.l %2,%1:%0" : "=d" (destl), "=d" (desth)	\
		: "g" (src1), "0" (src2));			\
})
/* 64/32 -> 32-bit divide ("divu.l"), quotient in quot, remainder
   in rem. */
#define fp_div64(quot, rem, srch, srcl, div)			\
	asm ("divu.l %2,%1:%0" : "=d" (quot), "=d" (rem)	\
		: "dm" (div), "1" (srch), "0" (srcl))
/* 64-bit add, (dest1:dest2) += (src1:src2). */
#define fp_add64(dest1, dest2, src1, src2) ({			\
	asm ("add.l %1,%0" : "=d,dm" (dest2)			\
		: "dm,d" (src2), "0,0" (dest2));		\
	asm ("addx.l %1,%0" : "=d" (dest1)			\
		: "d" (src1), "0" (dest1));			\
})
/* Add the 64-bit src into the top 96 bits of the 128-bit dest. */
#define fp_addx96(dest, src) ({					\
	/* we assume here that gcc only inserts move and clr instructions */ \
	asm volatile ("add.l %1,%0" : "=d,g" (dest->m32[2])	\
		: "g,d" (src.m32[1]), "0,0" (dest->m32[2]));	\
	asm volatile ("addx.l %1,%0" : "=d" (dest->m32[1])	\
		: "d" (src.m32[0]), "0" (dest->m32[1]));	\
	asm volatile ("addx.l %1,%0" : "=d" (dest->m32[0])	\
		: "d" (0), "0" (dest->m32[0]));			\
})
/* 64-bit subtract, dest -= src. */
#define fp_sub64(dest, src) ({					\
	asm ("sub.l %1,%0" : "=d,dm" (dest.m32[1])		\
		: "dm,d" (src.m32[1]), "0,0" (dest.m32[1]));	\
	asm ("subx.l %1,%0" : "=d" (dest.m32[0])		\
		: "d" (src.m32[0]), "0" (dest.m32[0]));		\
})
/* 96-bit subtract of (srch:srcm:srcl) from dest in place; returns
   nonzero if the subtraction borrowed ("scs" latches the carry). */
#define fp_sub96c(dest, srch, srcm, srcl) ({			\
	char carry;						\
	asm ("sub.l %1,%0" : "=d,dm" (dest.m32[2])		\
		: "dm,d" (srcl), "0,0" (dest.m32[2]));		\
	asm ("subx.l %1,%0" : "=d" (dest.m32[1])		\
		: "d" (srcm), "0" (dest.m32[1]));		\
	asm ("subx.l %2,%1; scs %0" : "=d" (carry), "=d" (dest.m32[0]) \
		: "d" (srch), "1" (dest.m32[0]));		\
	carry;							\
})

/* 64x64 -> 128-bit schoolbook multiply: the two aligned partial
   products fill dest, then the two cross products are added in at a
   32-bit offset. */
extern inline void fp_multiplymant(union fp_mant128 *dest, struct fp_ext *src1,
				   struct fp_ext *src2)
{
	union fp_mant64 temp;

	fp_mul64(dest->m32[0], dest->m32[1], src1->mant.m32[0], src2->mant.m32[0]);
	fp_mul64(dest->m32[2], dest->m32[3], src1->mant.m32[1], src2->mant.m32[1]);

	fp_mul64(temp.m32[0], temp.m32[1], src1->mant.m32[0], src2->mant.m32[1]);
	fp_addx96(dest, temp);

	fp_mul64(temp.m32[0], temp.m32[1], src1->mant.m32[1], src2->mant.m32[0]);
	fp_addx96(dest, temp);
}

extern inline void fp_dividemant(union fp_mant128 *dest, struct fp_ext *src,
				 struct fp_ext *div)
{
	union fp_mant128 tmp;
	union fp_mant64 tmp64;
	unsigned long *mantp = dest->m32;
	unsigned long fix, rem, first, dummy;
	int i;

	/* the algorithm below requires src to be smaller than div,
	   but both have the high bit set */
	if (src->mant.m64 >= div->mant.m64) {
		fp_sub64(src->mant, div->mant);
		*mantp = 1;
	} else
		*mantp = 0;
	mantp++;

	/* The basic idea behind this algorithm: we can't divide two
	   64-bit numbers (AB/CD) directly, but we can calculate AB/C0.
	   That quotient is off by a factor of C0/CD, so we multiply the
	   first result by this factor to fix it; after that the result
	   is nearly correct and only a few small corrections are
	   needed. */
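
	/* An illustrative decimal analogy, not from the original file:
	   to divide 4711 by 29, divide by the truncated divisor first,
	   4711/20 = 235, then scale by the precomputed factor
	   20/29 ~= 0.69: 235 * 0.69 ~= 162, which already matches the
	   exact quotient (4711/29 = 162.4...); in general at most a
	   small correction step remains. */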

	/* C0/CD can be precalculated, but that is again a 64-bit
	   division.  We can make it a bit easier by first dividing
	   through C, giving 10/1D; after a single shift the value fits
	   into 32 bits. */
	fix = 0x80000000;
	dummy = div->mant.m32[1] / div->mant.m32[0] + 1;
	dummy = (dummy >> 1) | fix;
	fp_div64(fix, dummy, fix, 0, dummy);
	fix--;

	for (i = 0; i < 3; i++, mantp++) {
		if (src->mant.m32[0] == div->mant.m32[0]) {
			fp_div64(first, rem, 0, src->mant.m32[1], div->mant.m32[0]);

			fp_mul64(*mantp, dummy, first, fix);
			*mantp += fix;
		} else {
			fp_div64(first, rem, src->mant.m32[0], src->mant.m32[1], div->mant.m32[0]);

			fp_mul64(*mantp, dummy, first, fix);
		}

		fp_mul64(tmp.m32[0], tmp.m32[1], div->mant.m32[0], first - *mantp);
		fp_add64(tmp.m32[0], tmp.m32[1], 0, rem);
		tmp.m32[2] = 0;

		fp_mul64(tmp64.m32[0], tmp64.m32[1], *mantp, div->mant.m32[1]);
		fp_sub96c(tmp, 0, tmp64.m32[0], tmp64.m32[1]);

		src->mant.m32[0] = tmp.m32[1];
		src->mant.m32[1] = tmp.m32[2];

		while (!fp_sub96c(tmp, 0, div->mant.m32[0], div->mant.m32[1])) {
			src->mant.m32[0] = tmp.m32[1];
			src->mant.m32[1] = tmp.m32[2];
			*mantp += 1;
		}
	}
}

/* Pack a 128-bit intermediate mantissa, left-shifted by "shift" bits,
   into a 64-bit mantissa plus the lowmant guard byte, ORing any lower
   bits into the sticky bit.  Only the shift counts the callers actually
   generate (0, 1, 31 and 32) are handled. */
extern inline void fp_putmant128(struct fp_ext *dest, union fp_mant128 *src,
				 int shift)
{
	unsigned long tmp;

	switch (shift) {
	case 0:
		dest->mant.m64 = src->m64[0];
		dest->lowmant = src->m32[2] >> 24;
		if (src->m32[3] || (src->m32[2] << 8))
			dest->lowmant |= 1;
		break;
	case 1:
		asm volatile ("lsl.l #1,%0"
			: "=d" (tmp) : "0" (src->m32[2]));
		asm volatile ("roxl.l #1,%0"
			: "=d" (dest->mant.m32[1]) : "0" (src->m32[1]));
		asm volatile ("roxl.l #1,%0"
			: "=d" (dest->mant.m32[0]) : "0" (src->m32[0]));
		dest->lowmant = tmp >> 24;
		if (src->m32[3] || (tmp << 8))
			dest->lowmant |= 1;
		break;
	case 31:
		asm volatile ("lsr.l #1,%1; roxr.l #1,%0"
			: "=d" (dest->mant.m32[0])
			: "d" (src->m32[0]), "0" (src->m32[1]));
		asm volatile ("roxr.l #1,%0"
			: "=d" (dest->mant.m32[1]) : "0" (src->m32[2]));
		asm volatile ("roxr.l #1,%0"
			: "=d" (tmp) : "0" (src->m32[3]));
		dest->lowmant = tmp >> 24;
		if (src->m32[3] << 7)
			dest->lowmant |= 1;
		break;
	case 32:
		dest->mant.m32[0] = src->m32[1];
		dest->mant.m32[1] = src->m32[2];
		dest->lowmant = src->m32[3] >> 24;
		if (src->m32[3] << 8)
			dest->lowmant |= 1;
		break;
	}
}

#endif	/* MULTI_ARITH_H */