/* mpn_mod_1 (dividend_ptr, dividend_size, divisor_limb) --
   Divide (DIVIDEND_PTR,,DIVIDEND_SIZE) by DIVISOR_LIMB.
   Return the single-limb remainder.
   There are no constraints on the value of the divisor.

Copyright (C) 1991-2019 Free Software Foundation, Inc.

This file is part of the GNU MP Library.

The GNU MP Library is free software; you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published by
the Free Software Foundation; either version 2.1 of the License, or (at your
option) any later version.

The GNU MP Library is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public
License for more details.

You should have received a copy of the GNU Lesser General Public License
along with the GNU MP Library; see the file COPYING.LIB.  If not, see
<http://www.gnu.org/licenses/>.  */

#include <gmp.h>
#include "gmp-impl.h"
#include "longlong.h"

#ifndef UMUL_TIME
#define UMUL_TIME 1
#endif

#ifndef UDIV_TIME
#define UDIV_TIME UMUL_TIME
#endif
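
/* Note (added): UMUL_TIME and UDIV_TIME are rough cycle-count estimates for
   umul_ppmm and udiv_qrnnd; machines without tuned values in longlong.h get
   the defaults above.  With those defaults UDIV_TIME is not greater than
   2*UMUL_TIME + 6, so the pre-inversion branch below is never taken.  */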

/* FIXME: We should be using invert_limb (or invert_normalized_limb)
   here (not udiv_qrnnd).  */

mp_limb_t
mpn_mod_1 (mp_srcptr dividend_ptr, mp_size_t dividend_size,
           mp_limb_t divisor_limb)
{
  mp_size_t i;
  mp_limb_t n1, n0, r;
  mp_limb_t dummy __attribute__ ((unused));

  /* Botch: Should this be handled at all?  Rely on callers?  */
  if (dividend_size == 0)
    return 0;

  /* If multiplication is much faster than division, and the
     dividend is large, pre-invert the divisor, and use
     only multiplications in the inner loop.  */

  /* This test should be read:
       Does it ever help to use udiv_qrnnd_preinv?
       && Does what we save compensate for the inversion overhead?  */
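  /* Illustrative numbers (not measured): with UDIV_TIME = 20 and
     UMUL_TIME = 1 the overhead term is 2*1 + 6 = 8, so the pre-inverted
     loop wins once (20 - 8) * dividend_size > 20, i.e. from 2 limbs up.  */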
  if (UDIV_TIME > (2 * UMUL_TIME + 6)
      && (UDIV_TIME - (2 * UMUL_TIME + 6)) * dividend_size > UDIV_TIME)
    {
      int normalization_steps;

      count_leading_zeros (normalization_steps, divisor_limb);
      if (normalization_steps != 0)
        {
          mp_limb_t divisor_limb_inverted;

          divisor_limb <<= normalization_steps;

          /* Compute (2**2N - 2**N * DIVISOR_LIMB) / DIVISOR_LIMB.  The
             result is a (N+1)-bit approximation to 1/DIVISOR_LIMB, with the
             most significant bit (with weight 2**N) implicit.  */

          /* Special case for DIVISOR_LIMB == 100...000.  */
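          /* For that divisor the quotient in the formula above is exactly
             2**N, one bit too wide for a limb, so the stored inverse is
             clamped to ~0 (= 2**N - 1).  */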
          if (divisor_limb << 1 == 0)
            divisor_limb_inverted = ~(mp_limb_t) 0;
          else
            udiv_qrnnd (divisor_limb_inverted, dummy,
                        -divisor_limb, 0, divisor_limb);

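          /* Conceptually the dividend is shifted left by
             NORMALIZATION_STEPS as well; the bits shifted out of its top
             limb become the initial remainder.  */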
          n1 = dividend_ptr[dividend_size - 1];
          r = n1 >> (BITS_PER_MP_LIMB - normalization_steps);

          /* Possible optimization:
             if (r == 0
                 && divisor_limb > ((n1 << normalization_steps)
                                    | (dividend_ptr[dividend_size - 2] >> ...)))
               ...one division less...  */

          for (i = dividend_size - 2; i >= 0; i--)
            {
              n0 = dividend_ptr[i];
              udiv_qrnnd_preinv (dummy, r, r,
                                 ((n1 << normalization_steps)
                                  | (n0 >> (BITS_PER_MP_LIMB - normalization_steps))),
                                 divisor_limb, divisor_limb_inverted);
              n1 = n0;
            }
          udiv_qrnnd_preinv (dummy, r, r,
                             n1 << normalization_steps,
                             divisor_limb, divisor_limb_inverted);
          return r >> normalization_steps;
        }
      else
        {
          mp_limb_t divisor_limb_inverted;

          /* Compute (2**2N - 2**N * DIVISOR_LIMB) / DIVISOR_LIMB.  The
             result is a (N+1)-bit approximation to 1/DIVISOR_LIMB, with the
             most significant bit (with weight 2**N) implicit.  */

          /* Special case for DIVISOR_LIMB == 100...000.  */
          if (divisor_limb << 1 == 0)
            divisor_limb_inverted = ~(mp_limb_t) 0;
          else
            udiv_qrnnd (divisor_limb_inverted, dummy,
                        -divisor_limb, 0, divisor_limb);

          i = dividend_size - 1;
          r = dividend_ptr[i];

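          /* If the most significant limb is already smaller than the
             divisor, it is a valid partial remainder by itself and the loop
             can skip it; otherwise start from r = 0 and let the loop reduce
             the top limb too.  */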
          if (r >= divisor_limb)
            r = 0;
          else
            i--;

          for (; i >= 0; i--)
            {
              n0 = dividend_ptr[i];
              udiv_qrnnd_preinv (dummy, r, r,
                                 n0, divisor_limb, divisor_limb_inverted);
            }
          return r;
        }
    }
  else
    {
      if (UDIV_NEEDS_NORMALIZATION)
        {
          int normalization_steps;

          count_leading_zeros (normalization_steps, divisor_limb);
          if (normalization_steps != 0)
            {
              divisor_limb <<= normalization_steps;

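              /* As in the pre-inverted path, the top NORMALIZATION_STEPS
                 bits of the most significant limb seed the remainder.  */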
              n1 = dividend_ptr[dividend_size - 1];
              r = n1 >> (BITS_PER_MP_LIMB - normalization_steps);

              /* Possible optimization:
                 if (r == 0
                     && divisor_limb > ((n1 << normalization_steps)
                                        | (dividend_ptr[dividend_size - 2] >> ...)))
                   ...one division less...  */

              for (i = dividend_size - 2; i >= 0; i--)
                {
                  n0 = dividend_ptr[i];
                  udiv_qrnnd (dummy, r, r,
                              ((n1 << normalization_steps)
                               | (n0 >> (BITS_PER_MP_LIMB - normalization_steps))),
                              divisor_limb);
                  n1 = n0;
                }
              udiv_qrnnd (dummy, r, r,
                          n1 << normalization_steps,
                          divisor_limb);
              return r >> normalization_steps;
            }
        }
      /* No normalization needed, either because udiv_qrnnd doesn't require
         it, or because DIVISOR_LIMB is already normalized.  */

      i = dividend_size - 1;
      r = dividend_ptr[i];

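      /* Same top-limb shortcut as in the pre-inverted branch above.  */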
      if (r >= divisor_limb)
        r = 0;
      else
        i--;

      for (; i >= 0; i--)
        {
          n0 = dividend_ptr[i];
          udiv_qrnnd (dummy, r, r, n0, divisor_limb);
        }
      return r;
    }
}
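
/* A minimal usage sketch, illustrative only: the MOD_1_EXAMPLE guard is not
   a GMP convention, and the expected result assumes 64-bit limbs.  The limb
   array { 5, 7 } encodes 7*2**64 + 5, least significant limb first, and
   7*2**64 + 5 == 7 (mod 10) when limbs are 64 bits wide.  */
#ifdef MOD_1_EXAMPLE
#include <stdio.h>

int
main (void)
{
  mp_limb_t limbs[2] = { 5, 7 };   /* 7*2**64 + 5, low limb first */
  mp_limb_t r = mpn_mod_1 (limbs, 2, 10);
  printf ("remainder = %lu\n", (unsigned long) r);   /* prints 7 */
  return 0;
}
#endif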