Lines matching refs:x0 (every reference to the fold accumulator x0, grouped by function; the numbers are source line numbers, so gaps are lines that do not touch x0)

crc32_pclmul_batch():

   74  __m128i x0, x1, x2, k, shuf_mask;        (local declaration)

  Seed the accumulator: load the 32-bit CRC, byte-shift it into the top
  dword (byte lanes 12..15), and XOR it into the first data block (x1):
   81  x0 = _mm_cvtsi32_si128(*crc);
   83  x0 = _mm_slli_si128(x0, 12);
   85  x0 = _mm_xor_si128(x1, x0);
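
A detail that is easy to misread here: _mm_slli_si128 shifts by bytes, not
bits, so the CRC lands in byte lanes 12..15, the most significant dword of
the register, where it lines up with input that the non-reflected path
byte-reverses (note shuf_mask in the declaration at line 74). A standalone
check of that placement (hypothetical demo code, SSE2 only, not taken from
the listed source):

    #include <emmintrin.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t crc = 0xDEADBEEF;

        __m128i x0 = _mm_cvtsi32_si128((int)crc);  /* CRC in byte lanes 0..3 */
        x0 = _mm_slli_si128(x0, 12);               /* byte shift: lanes 12..15 */

        uint8_t b[16];
        _mm_storeu_si128((__m128i *)b, x0);
        for (int i = 0; i < 16; i++)
            printf("%02x ", b[i]);   /* twelve 00s, then ef be ad de */
        putchar('\n');
        return 0;
    }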
  Main folding loop; only x0's four references per iteration match, and the
  other lanes on the surrounding lines are handled identically. Selector
  0x00 multiplies the two low 64-bit halves, 0x11 the two high halves:
  105  x4 = _mm_clmulepi64_si128(x0, k, 0x00);
  109  x0 = _mm_clmulepi64_si128(x0, k, 0x11);
  121  x0 = _mm_xor_si128(x0, x4);
  125  x0 = _mm_xor_si128(x0, x8);
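
Each k above is a folding constant: a residue x^n mod P(x), where n is how
far, in bits, a chunk is carried forward, so the two carry-less products
plus the XORs reproduce the longer message polynomial's remainder without
ever materializing it. The listing does not show the constant tables, so
the distances below are illustrative of a 64-byte-stride folder rather than
values read from the source; the helper itself is just a shift-and-reduce
loop over GF(2):

    #include <stdint.h>
    #include <stdio.h>

    /* x^n mod P(x) over GF(2); the x^32 term of P is implicit. */
    static uint32_t xpow_mod(unsigned n, uint32_t poly)
    {
        uint32_t r = 1;                        /* x^0 */
        while (n--) {
            uint32_t msb = r & 0x80000000u;    /* coefficient of x^31 */
            r <<= 1;                           /* multiply by x */
            if (msb)
                r ^= poly;                     /* reduce mod P */
        }
        return r;
    }

    int main(void)
    {
        const uint32_t P = 0x04C11DB7;  /* CRC-32 polynomial */

        /* A folder that consumes 64 bytes (512 bits) per iteration moves
         * each 128-bit lane forward 512 bits; the high and low qwords of
         * a lane differ by a further 64 bits. */
        printf("x^(512+64) mod P = 0x%08x\n", (unsigned)xpow_mod(512 + 64, P));
        printf("x^512      mod P = 0x%08x\n", (unsigned)xpow_mod(512, P));
        printf("x^(128+64) mod P = 0x%08x\n", (unsigned)xpow_mod(128 + 64, P));
        printf("x^128      mod P = 0x%08x\n", (unsigned)xpow_mod(128, P));
        return 0;
    }

The reflected variant further down uses bit-reversed forms of such residues.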
  After the loop the four lane accumulators collapse into x0; the same
  two-multiply fold is repeated to absorb x1, x2, and x3:
  136  x4 = _mm_clmulepi64_si128(x0, k, 0x00);
  137  x0 = _mm_clmulepi64_si128(x0, k, 0x11);
  138  x0 = _mm_xor_si128(x0, x1);
  139  x0 = _mm_xor_si128(x0, x4);
  140  x4 = _mm_clmulepi64_si128(x0, k, 0x00);
  141  x0 = _mm_clmulepi64_si128(x0, k, 0x11);
  142  x0 = _mm_xor_si128(x0, x2);
  143  x0 = _mm_xor_si128(x0, x4);
  144  x4 = _mm_clmulepi64_si128(x0, k, 0x00);
  145  x0 = _mm_clmulepi64_si128(x0, k, 0x11);
  146  x0 = _mm_xor_si128(x0, x3);
  147  x0 = _mm_xor_si128(x0, x4);
  Remaining whole 16-byte blocks are folded in one at a time (x2 holds the
  next block here):
  156  x1 = _mm_clmulepi64_si128(x0, k, 0x00);
  157  x0 = _mm_clmulepi64_si128(x0, k, 0x11);
  158  x0 = _mm_xor_si128(x0, x2);
  159  x0 = _mm_xor_si128(x0, x1);
  Reduce the 128-bit accumulator to 64 bits; the two byte shifts move the
  low qword 32 bits up so the partial products line up:
  166  x1 = _mm_clmulepi64_si128(x0, k, 0x11);
  167  x0 = _mm_slli_si128(x0, 8);
  168  x0 = _mm_srli_si128(x0, 4);
  169  x0 = _mm_xor_si128(x0, x1);
  171  x1 = _mm_clmulepi64_si128(x0, k, 0x01);
  172  x0 = _mm_xor_si128(x0, x1);
  Barrett reduction down to the final 32-bit CRC, read from dword lane 0
  (the intervening multiplies by the Barrett constants do not reference x0,
  so those lines are not listed):
  176  x1 = _mm_move_epi64(x0);
  181  x0 = _mm_xor_si128(x1, x0);
  182  *crc = _mm_extract_epi32(x0, 0);
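
Lines 176..182 are the two ends of a Barrett reduction: one carry-less
multiply by mu = floor(x^64 / P) estimates the quotient, a second multiply
by P cancels it, and only the 32-bit remainder survives the final XOR.
Below is a scalar model of that trick (hypothetical helper names, plain C,
relies on GCC/Clang's unsigned __int128); it derives mu by polynomial long
division and cross-checks the Barrett result against a naive mod:

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    typedef unsigned __int128 u128;

    /* Scalar stand-in for PCLMULQDQ: carry-less 64x64 -> 128-bit multiply. */
    static u128 clmul64(uint64_t a, uint64_t b)
    {
        u128 r = 0;
        for (int i = 0; i < 64; i++)
            if ((b >> i) & 1)
                r ^= (u128)a << i;
        return r;
    }

    /* Naive remainder of a 128-bit polynomial mod P (33-bit, degree 32). */
    static uint32_t poly_mod(u128 a, uint64_t p33)
    {
        for (int i = 127; i >= 32; i--)
            if ((a >> i) & 1)
                a ^= (u128)p33 << (i - 32);
        return (uint32_t)a;
    }

    int main(void)
    {
        const uint64_t P33 = 0x104C11DB7ull;  /* x^32 + ... + 1 (CRC-32) */

        /* mu = floor(x^64 / P), by polynomial long division. */
        u128 num = (u128)1 << 64;
        uint64_t mu = 0;
        for (int i = 32; i >= 0; i--)
            if ((num >> (i + 32)) & 1) {
                mu |= 1ull << i;
                num ^= (u128)P33 << i;
            }
        printf("mu = 0x%llx\n", (unsigned long long)mu);

        /* Barrett: estimate the quotient with mu, multiply it back by P,
         * XOR, and only the 32-bit remainder survives. */
        uint64_t R = 0x0123456789abcdefull;
        for (int t = 0; t < 8; t++) {
            uint64_t q = (uint64_t)(clmul64(R >> 32, mu) >> 32);
            uint32_t barrett = (uint32_t)(R ^ (uint64_t)clmul64(q, P33));
            assert(barrett == poly_mod(R, P33));
            R = R * 6364136223846793005ull + 1442695040888963407ull;
        }
        puts("Barrett reduction matches naive polynomial mod");
        return 0;
    }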
crc32_pclmul_reflected_batch():

  191  __m128i x0, x1, x2, k;                   (local declaration)

  Seed: in the reflected (bit-reversed) convention no byte swap is needed,
  so the CRC is XORed straight into the low dword of the first block:
  197  x0 = _mm_loadu_si128((__m128i *)(p + 0x00));
  198  x0 = _mm_xor_si128(x0, _mm_cvtsi32_si128(*crc));
  Main folding loop, same shape as above (again only the x0 lane's
  references match):
  214  x4 = _mm_clmulepi64_si128(x0, k, 0x00);
  218  x0 = _mm_clmulepi64_si128(x0, k, 0x11);
  226  x0 = _mm_xor_si128(x0, x4);
  230  x0 = _mm_xor_si128(x0, x8);
  Collapse the four lane accumulators into x0, one fold per accumulator:
  241  x4 = _mm_clmulepi64_si128(x0, k, 0x00);
  242  x0 = _mm_clmulepi64_si128(x0, k, 0x11);
  243  x0 = _mm_xor_si128(x0, x1);
  244  x0 = _mm_xor_si128(x0, x4);
  245  x4 = _mm_clmulepi64_si128(x0, k, 0x00);
  246  x0 = _mm_clmulepi64_si128(x0, k, 0x11);
  247  x0 = _mm_xor_si128(x0, x2);
  248  x0 = _mm_xor_si128(x0, x4);
  249  x4 = _mm_clmulepi64_si128(x0, k, 0x00);
  250  x0 = _mm_clmulepi64_si128(x0, k, 0x11);
  251  x0 = _mm_xor_si128(x0, x3);
  252  x0 = _mm_xor_si128(x0, x4);
  Fold in any remaining whole 16-byte blocks:
  260  x1 = _mm_clmulepi64_si128(x0, k, 0x00);
  261  x0 = _mm_clmulepi64_si128(x0, k, 0x11);
  262  x0 = _mm_xor_si128(x0, x2);
  263  x0 = _mm_xor_si128(x0, x1);
  Reduce 128 bits to 64: selector 0x10 pairs the low qword of x0 with the
  high qword of k, and the product folds into the high half:
  269  x1 = _mm_clmulepi64_si128(x0, k, 0x10);
  270  x0 = _mm_srli_si128(x0, 8);
  271  x0 = _mm_xor_si128(x0, x1);
  Reduce 64 bits to 32; pshufd lane moves stand in for masks and shifts
  (the top dword lane is zero at this point, so 0xfc keeps only lane 0 and
  0xf9 acts as a 32-bit right shift):
  273  x1 = _mm_shuffle_epi32(x0, 0xfc);
  274  x0 = _mm_shuffle_epi32(x0, 0xf9);
  277  x0 = _mm_xor_si128(x0, x1);
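
The three shuffle immediates used in this ending are just dword-lane
permutes; with a known-zero top lane they behave as a low-32-bit mask
(0xfc), a 32-bit right shift (0xf9), and a one-lane lift (0xf3, used
below). A throwaway SSE2 demo (hypothetical, not from the listed source)
decoding the mappings:

    #include <emmintrin.h>
    #include <stdio.h>

    static void show(const char *name, __m128i v)
    {
        int lane[4];
        _mm_storeu_si128((__m128i *)lane, v);
        printf("%s -> { %d, %d, %d, %d }\n",
               name, lane[0], lane[1], lane[2], lane[3]);
    }

    int main(void)
    {
        __m128i x = _mm_setr_epi32(0, 1, 2, 3);    /* lane i holds i */

        show("0xfc", _mm_shuffle_epi32(x, 0xfc));  /* { 0, 3, 3, 3 } */
        show("0xf9", _mm_shuffle_epi32(x, 0xf9));  /* { 1, 2, 3, 3 } */
        show("0xf3", _mm_shuffle_epi32(x, 0xf3));  /* { 3, 0, 3, 3 } */
        return 0;
    }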
  Barrett reduction; 0xf3 lifts the low dword one lane up, and after the
  final XOR the CRC sits in dword lane 2 (the intervening multiplies by the
  Barrett constants do not reference x0, so they are not listed):
  280  x1 = _mm_shuffle_epi32(x0, 0xf3);
  281  x0 = _mm_slli_si128(x0, 4);
  285  x0 = _mm_xor_si128(x1, x0);
  286  *crc = _mm_extract_epi32(x0, 2);
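
Read end to end, the reflected listing is the classic fold/reduce pipeline.
The sketch below reassembles it as a fold-by-1 variant (one 128-bit lane
instead of four), using the well-known folding constants for the CRC-32
polynomial 0x04C11DB7 (reflected) from Intel's PCLMULQDQ white paper, in
the arrangement popularized by zlib's SIMD folding code. It is a
reconstruction for illustration, not the listed source: the function name
is made up, len must be a nonzero multiple of 16, and a mask-and-shift
ending replaces the listing's pshufd form (the CRC lands in lane 1 here,
lane 2 in the listing). Build with gcc -O2 -msse4.1 -mpclmul; main()
cross-checks against a bit-at-a-time CRC:

    #include <immintrin.h>
    #include <assert.h>
    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    uint32_t crc32_fold_by_1(uint32_t crc, const uint8_t *buf, size_t len)
    {
        const __m128i k3k4 = _mm_set_epi64x(0x00ccaa009e, 0x01751997d0);
        const __m128i k5   = _mm_set_epi64x(0,            0x0163cd6124);
        const __m128i pmu  = _mm_set_epi64x(0x01f7011641, 0x01db710641);
        const __m128i m32  = _mm_set_epi32(0, -1, 0, -1); /* low 32 bits/qword */

        /* Seed: XOR the conditioned CRC into the low dword of block 0. */
        __m128i x1 = _mm_loadu_si128((const __m128i *)buf);
        x1 = _mm_xor_si128(x1, _mm_cvtsi32_si128((int)~crc));
        buf += 16, len -= 16;

        /* Fold the accumulator forward 128 bits per remaining block. */
        while (len >= 16) {
            __m128i lo = _mm_clmulepi64_si128(x1, k3k4, 0x00); /* lo * k3 */
            x1 = _mm_clmulepi64_si128(x1, k3k4, 0x11);         /* hi * k4 */
            x1 = _mm_xor_si128(x1, lo);
            x1 = _mm_xor_si128(x1, _mm_loadu_si128((const __m128i *)buf));
            buf += 16, len -= 16;
        }

        /* Reduce 128 -> 64 bits. */
        __m128i x2 = _mm_clmulepi64_si128(x1, k3k4, 0x10);     /* lo * k4 */
        x1 = _mm_srli_si128(x1, 8);
        x1 = _mm_xor_si128(x1, x2);

        /* Reduce 64 -> 32 bits. */
        x2 = _mm_srli_si128(x1, 4);
        x1 = _mm_and_si128(x1, m32);
        x1 = _mm_clmulepi64_si128(x1, k5, 0x00);               /* lo32 * k5 */
        x1 = _mm_xor_si128(x1, x2);

        /* Barrett reduction to the final 32-bit remainder. */
        x2 = _mm_and_si128(x1, m32);
        x2 = _mm_clmulepi64_si128(x2, pmu, 0x10);              /* * mu */
        x2 = _mm_and_si128(x2, m32);
        x2 = _mm_clmulepi64_si128(x2, pmu, 0x00);              /* * P' */
        x1 = _mm_xor_si128(x1, x2);

        return ~(uint32_t)_mm_extract_epi32(x1, 1);
    }

    /* Bit-at-a-time reference CRC-32 (reflected, zlib-compatible). */
    static uint32_t crc32_bitwise(uint32_t crc, const uint8_t *p, size_t n)
    {
        crc = ~crc;
        while (n--) {
            crc ^= *p++;
            for (int k = 0; k < 8; k++)
                crc = (crc >> 1) ^ (0xEDB88320u & (0u - (crc & 1)));
        }
        return ~crc;
    }

    int main(void)
    {
        uint8_t buf[48];
        for (int i = 0; i < 48; i++)
            buf[i] = (uint8_t)(i * 7 + 1);
        assert(crc32_fold_by_1(0, buf, sizeof buf) ==
               crc32_bitwise(0, buf, sizeof buf));
        puts("pclmul fold matches bitwise CRC-32");
        return 0;
    }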