Add fast ARM asm; clean up code; add optimization setting. (#50)
diff --git a/asm_arm.inc b/asm_arm.inc
deleted file mode 100644
index 24e57e3..0000000
--- a/asm_arm.inc
+++ /dev/null
@@ -1,2455 +0,0 @@
-#define DEC_5 4
-#define DEC_6 5
-#define DEC_7 6
-#define DEC_8 7
-
-#define DEC(N) uECC_CONCAT(DEC_, N)
-
-#define REPEAT_1(stuff) stuff
-#define REPEAT_2(stuff) REPEAT_1(stuff) stuff
-#define REPEAT_3(stuff) REPEAT_2(stuff) stuff
-#define REPEAT_4(stuff) REPEAT_3(stuff) stuff
-#define REPEAT_5(stuff) REPEAT_4(stuff) stuff
-#define REPEAT_6(stuff) REPEAT_5(stuff) stuff
-#define REPEAT_7(stuff) REPEAT_6(stuff) stuff
-#define REPEAT_8(stuff) REPEAT_7(stuff) stuff
-
-#define REPEAT(N, stuff) uECC_CONCAT(REPEAT_, N)(stuff)
-
-#define STR2(thing) #thing
-#define STR(thing) STR2(thing)
-
-#if (uECC_ASM == uECC_asm_fast)
-
-static uint32_t vli_add(uint32_t *result, const uint32_t *left, const uint32_t *right) {
- uint32_t carry = 0;
- uint32_t left_word;
- uint32_t right_word;
-
- __asm__ volatile (
- ".syntax unified \n\t"
- "ldmia %[lptr]!, {%[left]} \n\t" /* Load left word. */
- "ldmia %[rptr]!, {%[right]} \n\t" /* Load right word. */
- "adds %[left], %[right] \n\t" /* Add first word. */
- "stmia %[dptr]!, {%[left]} \n\t" /* Store result word. */
-
- /* Now we just do the remaining words with the carry bit (using ADC) */
- REPEAT(DEC(uECC_WORDS),
- "ldmia %[lptr]!, {%[left]} \n\t"
- "ldmia %[rptr]!, {%[right]} \n\t"
- "adcs %[left], %[right] \n\t"
- "stmia %[dptr]!, {%[left]} \n\t")
-
- "adcs %[carry], %[carry] \n\t" /* Store carry bit. */
- #if (uECC_PLATFORM != uECC_arm_thumb2)
- ".syntax divided \n\t"
- #endif
- #if (uECC_PLATFORM == uECC_arm_thumb)
- : [dptr] "+l" (result), [lptr] "+l" (left), [rptr] "+l" (right),
- [carry] "+l" (carry), [left] "=l" (left_word), [right] "=l" (right_word)
- #else
- : [dptr] "+r" (result), [lptr] "+r" (left), [rptr] "+r" (right),
- [carry] "+r" (carry), [left] "=r" (left_word), [right] "=r" (right_word)
- #endif
- :
- : "cc", "memory"
- );
- return carry;
-}
-#define asm_add 1
-
-static uint32_t vli_sub(uint32_t *result, const uint32_t *left, const uint32_t *right) {
- uint32_t carry = 0;
- uint32_t left_word;
- uint32_t right_word;
-
- __asm__ volatile (
- ".syntax unified \n\t"
- "ldmia %[lptr]!, {%[left]} \n\t" /* Load left word. */
- "ldmia %[rptr]!, {%[right]} \n\t" /* Load right word. */
- "subs %[left], %[right] \n\t" /* Subtract. */
- "stmia %[dptr]!, {%[left]} \n\t" /* Store result word. */
-
- /* Now we just do the remaining words with the carry bit (using SBC) */
- REPEAT(DEC(uECC_WORDS),
- "ldmia %[lptr]!, {%[left]} \n\t"
- "ldmia %[rptr]!, {%[right]} \n\t"
- "sbcs %[left], %[right] \n\t"
- "stmia %[dptr]!, {%[left]} \n\t")
-
- "adcs %[carry], %[carry] \n\t" /* Store carry bit. */
- #if (uECC_PLATFORM != uECC_arm_thumb2)
- ".syntax divided \n\t"
- #endif
- #if (uECC_PLATFORM == uECC_arm_thumb)
- : [dptr] "+l" (result), [lptr] "+l" (left), [rptr] "+l" (right),
- [carry] "+l" (carry), [left] "=l" (left_word), [right] "=l" (right_word)
- #else
- : [dptr] "+r" (result), [lptr] "+r" (left), [rptr] "+r" (right),
- [carry] "+r" (carry), [left] "=r" (left_word), [right] "=r" (right_word)
- #endif
- :
- : "cc", "memory"
- );
- return !carry; // note that on ARM, carry flag set means "no borrow" when subtracting
- // (for some reason...)
-}
-#define asm_sub 1
-
-#if (uECC_PLATFORM != uECC_arm_thumb)
-#if (uECC_WORDS == 5)
-static void vli_mult(uint32_t *result, const uint32_t *left, const uint32_t *right) {
- register uint32_t *r0 __asm__("r0") = result;
- register const uint32_t *r1 __asm__("r1") = left;
- register const uint32_t *r2 __asm__("r2") = right;
-
- __asm__ volatile (
- ".syntax unified \n\t"
- "add r0, 12 \n\t"
- "add r2, 12 \n\t"
- "ldmia r1!, {r3,r4} \n\t"
- "ldmia r2!, {r6,r7} \n\t"
-
- "umull r11, r12, r3, r6 \n\t"
- "stmia r0!, {r11} \n\t"
-
- "mov r10, #0 \n\t"
- "umull r11, r9, r3, r7 \n\t"
- "adds r12, r11 \n\t"
- "adc r9, #0 \n\t"
- "umull r11, r14, r4, r6 \n\t"
- "adds r12, r11 \n\t"
- "adcs r9, r14 \n\t"
- "adc r10, #0 \n\t"
- "stmia r0!, {r12} \n\t"
-
- "umull r12, r14, r4, r7 \n\t"
- "adds r9, r12 \n\t"
- "adc r10, r14 \n\t"
- "stmia r0!, {r9, r10} \n\t"
-
- "sub r0, 28 \n\t"
- "sub r2, 20 \n\t"
- "ldmia r2!, {r6,r7,r8} \n\t"
- "ldmia r1!, {r5} \n\t"
-
- "umull r11, r12, r3, r6 \n\t"
- "stmia r0!, {r11} \n\t"
-
- "mov r10, #0 \n\t"
- "umull r11, r9, r3, r7 \n\t"
- "adds r12, r11 \n\t"
- "adc r9, #0 \n\t"
- "umull r11, r14, r4, r6 \n\t"
- "adds r12, r11 \n\t"
- "adcs r9, r14 \n\t"
- "adc r10, #0 \n\t"
- "stmia r0!, {r12} \n\t"
-
- "mov r11, #0 \n\t"
- "umull r12, r14, r3, r8 \n\t"
- "adds r9, r12 \n\t"
- "adcs r10, r14 \n\t"
- "adc r11, #0 \n\t"
- "umull r12, r14, r4, r7 \n\t"
- "adds r9, r12 \n\t"
- "adcs r10, r14 \n\t"
- "adc r11, #0 \n\t"
- "umull r12, r14, r5, r6 \n\t"
- "adds r9, r12 \n\t"
- "adcs r10, r14 \n\t"
- "adc r11, #0 \n\t"
- "stmia r0!, {r9} \n\t"
-
- "ldmia r1!, {r3} \n\t"
- "mov r12, #0 \n\t"
- "umull r14, r9, r4, r8 \n\t"
- "adds r10, r14 \n\t"
- "adcs r11, r9 \n\t"
- "adc r12, #0 \n\t"
- "umull r14, r9, r5, r7 \n\t"
- "adds r10, r14 \n\t"
- "adcs r11, r9 \n\t"
- "adc r12, #0 \n\t"
- "umull r14, r9, r3, r6 \n\t"
- "adds r10, r14 \n\t"
- "adcs r11, r9 \n\t"
- "adc r12, #0 \n\t"
- "ldr r14, [r0] \n\t"
- "adds r10, r14 \n\t"
- "adcs r11, #0 \n\t"
- "adc r12, #0 \n\t"
- "stmia r0!, {r10} \n\t"
-
- "ldmia r1!, {r4} \n\t"
- "mov r14, #0 \n\t"
- "umull r9, r10, r5, r8 \n\t"
- "adds r11, r9 \n\t"
- "adcs r12, r10 \n\t"
- "adc r14, #0 \n\t"
- "umull r9, r10, r3, r7 \n\t"
- "adds r11, r9 \n\t"
- "adcs r12, r10 \n\t"
- "adc r14, #0 \n\t"
- "umull r9, r10, r4, r6 \n\t"
- "adds r11, r9 \n\t"
- "adcs r12, r10 \n\t"
- "adc r14, #0 \n\t"
- "ldr r9, [r0] \n\t"
- "adds r11, r9 \n\t"
- "adcs r12, #0 \n\t"
- "adc r14, #0 \n\t"
- "stmia r0!, {r11} \n\t"
-
- "ldmia r2!, {r6} \n\t"
- "mov r9, #0 \n\t"
- "umull r10, r11, r5, r6 \n\t"
- "adds r12, r10 \n\t"
- "adcs r14, r11 \n\t"
- "adc r9, #0 \n\t"
- "umull r10, r11, r3, r8 \n\t"
- "adds r12, r10 \n\t"
- "adcs r14, r11 \n\t"
- "adc r9, #0 \n\t"
- "umull r10, r11, r4, r7 \n\t"
- "adds r12, r10 \n\t"
- "adcs r14, r11 \n\t"
- "adc r9, #0 \n\t"
- "ldr r10, [r0] \n\t"
- "adds r12, r10 \n\t"
- "adcs r14, #0 \n\t"
- "adc r9, #0 \n\t"
- "stmia r0!, {r12} \n\t"
-
- "ldmia r2!, {r7} \n\t"
- "mov r10, #0 \n\t"
- "umull r11, r12, r5, r7 \n\t"
- "adds r14, r11 \n\t"
- "adcs r9, r12 \n\t"
- "adc r10, #0 \n\t"
- "umull r11, r12, r3, r6 \n\t"
- "adds r14, r11 \n\t"
- "adcs r9, r12 \n\t"
- "adc r10, #0 \n\t"
- "umull r11, r12, r4, r8 \n\t"
- "adds r14, r11 \n\t"
- "adcs r9, r12 \n\t"
- "adc r10, #0 \n\t"
- "ldr r11, [r0] \n\t"
- "adds r14, r11 \n\t"
- "adcs r9, #0 \n\t"
- "adc r10, #0 \n\t"
- "stmia r0!, {r14} \n\t"
-
- "mov r11, #0 \n\t"
- "umull r12, r14, r3, r7 \n\t"
- "adds r9, r12 \n\t"
- "adcs r10, r14 \n\t"
- "adc r11, #0 \n\t"
- "umull r12, r14, r4, r6 \n\t"
- "adds r9, r12 \n\t"
- "adcs r10, r14 \n\t"
- "adc r11, #0 \n\t"
- "stmia r0!, {r9} \n\t"
-
- "umull r14, r9, r4, r7 \n\t"
- "adds r10, r14 \n\t"
- "adc r11, r9 \n\t"
- "stmia r0!, {r10, r11} \n\t"
- #if (uECC_PLATFORM != uECC_arm_thumb2)
- ".syntax divided \n\t"
- #endif
- : "+r" (r0), "+r" (r1), "+r" (r2)
- :
- : "r3", "r4", "r5", "r6", "r7", "r8", "r9", "r10", "r11", "r12", "r14", "cc", "memory"
- );
-}
-#define asm_mult 1
-#endif /* (uECC_WORDS == 5) */
-
-#if (uECC_WORDS == 6)
-static void vli_mult(uint32_t *result, const uint32_t *left, const uint32_t *right) {
- register uint32_t *r0 __asm__("r0") = result;
- register const uint32_t *r1 __asm__("r1") = left;
- register const uint32_t *r2 __asm__("r2") = right;
-
- __asm__ volatile (
- ".syntax unified \n\t"
- "add r0, 12 \n\t"
- "add r2, 12 \n\t"
- "ldmia r1!, {r3,r4,r5} \n\t"
- "ldmia r2!, {r6,r7,r8} \n\t"
-
- "umull r11, r12, r3, r6 \n\t"
- "stmia r0!, {r11} \n\t"
-
- "mov r10, #0 \n\t"
- "umull r11, r9, r3, r7 \n\t"
- "adds r12, r11 \n\t"
- "adc r9, #0 \n\t"
- "umull r11, r14, r4, r6 \n\t"
- "adds r12, r11 \n\t"
- "adcs r9, r14 \n\t"
- "adc r10, #0 \n\t"
- "stmia r0!, {r12} \n\t"
-
- "mov r11, #0 \n\t"
- "umull r12, r14, r3, r8 \n\t"
- "adds r9, r12 \n\t"
- "adcs r10, r14 \n\t"
- "adc r11, #0 \n\t"
- "umull r12, r14, r4, r7 \n\t"
- "adds r9, r12 \n\t"
- "adcs r10, r14 \n\t"
- "adc r11, #0 \n\t"
- "umull r12, r14, r5, r6 \n\t"
- "adds r9, r12 \n\t"
- "adcs r10, r14 \n\t"
- "adc r11, #0 \n\t"
- "stmia r0!, {r9} \n\t"
-
- "mov r12, #0 \n\t"
- "umull r14, r9, r4, r8 \n\t"
- "adds r10, r14 \n\t"
- "adcs r11, r9 \n\t"
- "adc r12, #0 \n\t"
- "umull r14, r9, r5, r7 \n\t"
- "adds r10, r14 \n\t"
- "adcs r11, r9 \n\t"
- "adc r12, #0 \n\t"
- "stmia r0!, {r10} \n\t"
-
- "umull r9, r10, r5, r8 \n\t"
- "adds r11, r9 \n\t"
- "adc r12, r10 \n\t"
- "stmia r0!, {r11, r12} \n\t"
-
- "sub r0, 36 \n\t"
- "sub r2, 24 \n\t"
- "ldmia r2!, {r6,r7,r8} \n\t"
-
- "umull r11, r12, r3, r6 \n\t"
- "stmia r0!, {r11} \n\t"
-
- "mov r10, #0 \n\t"
- "umull r11, r9, r3, r7 \n\t"
- "adds r12, r11 \n\t"
- "adc r9, #0 \n\t"
- "umull r11, r14, r4, r6 \n\t"
- "adds r12, r11 \n\t"
- "adcs r9, r14 \n\t"
- "adc r10, #0 \n\t"
- "stmia r0!, {r12} \n\t"
-
- "mov r11, #0 \n\t"
- "umull r12, r14, r3, r8 \n\t"
- "adds r9, r12 \n\t"
- "adcs r10, r14 \n\t"
- "adc r11, #0 \n\t"
- "umull r12, r14, r4, r7 \n\t"
- "adds r9, r12 \n\t"
- "adcs r10, r14 \n\t"
- "adc r11, #0 \n\t"
- "umull r12, r14, r5, r6 \n\t"
- "adds r9, r12 \n\t"
- "adcs r10, r14 \n\t"
- "adc r11, #0 \n\t"
- "stmia r0!, {r9} \n\t"
-
- "ldmia r1!, {r3} \n\t"
- "mov r12, #0 \n\t"
- "umull r14, r9, r4, r8 \n\t"
- "adds r10, r14 \n\t"
- "adcs r11, r9 \n\t"
- "adc r12, #0 \n\t"
- "umull r14, r9, r5, r7 \n\t"
- "adds r10, r14 \n\t"
- "adcs r11, r9 \n\t"
- "adc r12, #0 \n\t"
- "umull r14, r9, r3, r6 \n\t"
- "adds r10, r14 \n\t"
- "adcs r11, r9 \n\t"
- "adc r12, #0 \n\t"
- "ldr r14, [r0] \n\t"
- "adds r10, r14 \n\t"
- "adcs r11, #0 \n\t"
- "adc r12, #0 \n\t"
- "stmia r0!, {r10} \n\t"
-
- "ldmia r1!, {r4} \n\t"
- "mov r14, #0 \n\t"
- "umull r9, r10, r5, r8 \n\t"
- "adds r11, r9 \n\t"
- "adcs r12, r10 \n\t"
- "adc r14, #0 \n\t"
- "umull r9, r10, r3, r7 \n\t"
- "adds r11, r9 \n\t"
- "adcs r12, r10 \n\t"
- "adc r14, #0 \n\t"
- "umull r9, r10, r4, r6 \n\t"
- "adds r11, r9 \n\t"
- "adcs r12, r10 \n\t"
- "adc r14, #0 \n\t"
- "ldr r9, [r0] \n\t"
- "adds r11, r9 \n\t"
- "adcs r12, #0 \n\t"
- "adc r14, #0 \n\t"
- "stmia r0!, {r11} \n\t"
-
- "ldmia r1!, {r5} \n\t"
- "mov r9, #0 \n\t"
- "umull r10, r11, r3, r8 \n\t"
- "adds r12, r10 \n\t"
- "adcs r14, r11 \n\t"
- "adc r9, #0 \n\t"
- "umull r10, r11, r4, r7 \n\t"
- "adds r12, r10 \n\t"
- "adcs r14, r11 \n\t"
- "adc r9, #0 \n\t"
- "umull r10, r11, r5, r6 \n\t"
- "adds r12, r10 \n\t"
- "adcs r14, r11 \n\t"
- "adc r9, #0 \n\t"
- "ldr r10, [r0] \n\t"
- "adds r12, r10 \n\t"
- "adcs r14, #0 \n\t"
- "adc r9, #0 \n\t"
- "stmia r0!, {r12} \n\t"
-
- "ldmia r2!, {r6} \n\t"
- "mov r10, #0 \n\t"
- "umull r11, r12, r3, r6 \n\t"
- "adds r14, r11 \n\t"
- "adcs r9, r12 \n\t"
- "adc r10, #0 \n\t"
- "umull r11, r12, r4, r8 \n\t"
- "adds r14, r11 \n\t"
- "adcs r9, r12 \n\t"
- "adc r10, #0 \n\t"
- "umull r11, r12, r5, r7 \n\t"
- "adds r14, r11 \n\t"
- "adcs r9, r12 \n\t"
- "adc r10, #0 \n\t"
- "ldr r11, [r0] \n\t"
- "adds r14, r11 \n\t"
- "adcs r9, #0 \n\t"
- "adc r10, #0 \n\t"
- "stmia r0!, {r14} \n\t"
-
- "ldmia r2!, {r7} \n\t"
- "mov r11, #0 \n\t"
- "umull r12, r14, r3, r7 \n\t"
- "adds r9, r12 \n\t"
- "adcs r10, r14 \n\t"
- "adc r11, #0 \n\t"
- "umull r12, r14, r4, r6 \n\t"
- "adds r9, r12 \n\t"
- "adcs r10, r14 \n\t"
- "adc r11, #0 \n\t"
- "umull r12, r14, r5, r8 \n\t"
- "adds r9, r12 \n\t"
- "adcs r10, r14 \n\t"
- "adc r11, #0 \n\t"
- "ldr r12, [r0] \n\t"
- "adds r9, r12 \n\t"
- "adcs r10, #0 \n\t"
- "adc r11, #0 \n\t"
- "stmia r0!, {r9} \n\t"
-
- "ldmia r2!, {r8} \n\t"
- "mov r12, #0 \n\t"
- "umull r14, r9, r3, r8 \n\t"
- "adds r10, r14 \n\t"
- "adcs r11, r9 \n\t"
- "adc r12, #0 \n\t"
- "umull r14, r9, r4, r7 \n\t"
- "adds r10, r14 \n\t"
- "adcs r11, r9 \n\t"
- "adc r12, #0 \n\t"
- "umull r14, r9, r5, r6 \n\t"
- "adds r10, r14 \n\t"
- "adcs r11, r9 \n\t"
- "adc r12, #0 \n\t"
- "ldr r14, [r0] \n\t"
- "adds r10, r14 \n\t"
- "adcs r11, #0 \n\t"
- "adc r12, #0 \n\t"
- "stmia r0!, {r10} \n\t"
-
- "mov r14, #0 \n\t"
- "umull r9, r10, r4, r8 \n\t"
- "adds r11, r9 \n\t"
- "adcs r12, r10 \n\t"
- "adc r14, #0 \n\t"
- "umull r9, r10, r5, r7 \n\t"
- "adds r11, r9 \n\t"
- "adcs r12, r10 \n\t"
- "adc r14, #0 \n\t"
- "stmia r0!, {r11} \n\t"
-
- "umull r10, r11, r5, r8 \n\t"
- "adds r12, r10 \n\t"
- "adc r14, r11 \n\t"
- "stmia r0!, {r12, r14} \n\t"
- #if (uECC_PLATFORM != uECC_arm_thumb2)
- ".syntax divided \n\t"
- #endif
- : "+r" (r0), "+r" (r1), "+r" (r2)
- :
- : "r3", "r4", "r5", "r6", "r7", "r8", "r9", "r10", "r11", "r12", "r14", "cc", "memory"
- );
-}
-#define asm_mult 1
-#endif /* (uECC_WORDS == 6) */
-
-#if (uECC_WORDS == 7)
-static void vli_mult(uint32_t *result, const uint32_t *left, const uint32_t *right) {
- register uint32_t *r0 __asm__("r0") = result;
- register const uint32_t *r1 __asm__("r1") = left;
- register const uint32_t *r2 __asm__("r2") = right;
-
- __asm__ volatile (
- ".syntax unified \n\t"
- "add r0, 24 \n\t"
- "add r2, 24 \n\t"
- "ldmia r1!, {r3} \n\t"
- "ldmia r2!, {r6} \n\t"
-
- "umull r9, r10, r3, r6 \n\t"
- "stmia r0!, {r9, r10} \n\t"
-
- "sub r0, 20 \n\t"
- "sub r2, 16 \n\t"
- "ldmia r2!, {r6, r7, r8} \n\t"
- "ldmia r1!, {r4, r5} \n\t"
-
- "umull r9, r10, r3, r6 \n\t"
- "stmia r0!, {r9} \n\t"
-
- "mov r14, #0 \n\t"
- "umull r9, r12, r3, r7 \n\t"
- "adds r10, r9 \n\t"
- "adc r12, #0 \n\t"
- "umull r9, r11, r4, r6 \n\t"
- "adds r10, r9 \n\t"
- "adcs r12, r11 \n\t"
- "adc r14, #0 \n\t"
- "stmia r0!, {r10} \n\t"
-
- "mov r9, #0 \n\t"
- "umull r10, r11, r3, r8 \n\t"
- "adds r12, r10 \n\t"
- "adcs r14, r11 \n\t"
- "adc r9, #0 \n\t"
- "umull r10, r11, r4, r7 \n\t"
- "adds r12, r10 \n\t"
- "adcs r14, r11 \n\t"
- "adc r9, #0 \n\t"
- "umull r10, r11, r5, r6 \n\t"
- "adds r12, r10 \n\t"
- "adcs r14, r11 \n\t"
- "adc r9, #0 \n\t"
- "stmia r0!, {r12} \n\t"
-
- "ldmia r1!, {r3} \n\t"
- "mov r10, #0 \n\t"
- "umull r11, r12, r4, r8 \n\t"
- "adds r14, r11 \n\t"
- "adcs r9, r12 \n\t"
- "adc r10, #0 \n\t"
- "umull r11, r12, r5, r7 \n\t"
- "adds r14, r11 \n\t"
- "adcs r9, r12 \n\t"
- "adc r10, #0 \n\t"
- "umull r11, r12, r3, r6 \n\t"
- "adds r14, r11 \n\t"
- "adcs r9, r12 \n\t"
- "adc r10, #0 \n\t"
- "ldr r11, [r0] \n\t"
- "adds r14, r11 \n\t"
- "adcs r9, #0 \n\t"
- "adc r10, #0 \n\t"
- "stmia r0!, {r14} \n\t"
-
- "ldmia r2!, {r6} \n\t"
- "mov r11, #0 \n\t"
- "umull r12, r14, r4, r6 \n\t"
- "adds r9, r12 \n\t"
- "adcs r10, r14 \n\t"
- "adc r11, #0 \n\t"
- "umull r12, r14, r5, r8 \n\t"
- "adds r9, r12 \n\t"
- "adcs r10, r14 \n\t"
- "adc r11, #0 \n\t"
- "umull r12, r14, r3, r7 \n\t"
- "adds r9, r12 \n\t"
- "adcs r10, r14 \n\t"
- "adc r11, #0 \n\t"
- "ldr r12, [r0] \n\t"
- "adds r9, r12 \n\t"
- "adcs r10, #0 \n\t"
- "adc r11, #0 \n\t"
- "stmia r0!, {r9} \n\t"
-
- "mov r12, #0 \n\t"
- "umull r14, r9, r5, r6 \n\t"
- "adds r10, r14 \n\t"
- "adcs r11, r9 \n\t"
- "adc r12, #0 \n\t"
- "umull r14, r9, r3, r8 \n\t"
- "adds r10, r14 \n\t"
- "adcs r11, r9 \n\t"
- "adc r12, #0 \n\t"
- "stmia r0!, {r10} \n\t"
-
- "umull r9, r10, r3, r6 \n\t"
- "adds r11, r9 \n\t"
- "adc r12, r10 \n\t"
- "stmia r0!, {r11, r12} \n\t"
-
- "sub r0, 44 \n\t"
- "sub r1, 16 \n\t"
- "sub r2, 28 \n\t"
- "ldmia r1!, {r3,r4,r5} \n\t"
- "ldmia r2!, {r6,r7,r8} \n\t"
-
- "umull r9, r10, r3, r6 \n\t"
- "stmia r0!, {r9} \n\t"
-
- "mov r14, #0 \n\t"
- "umull r9, r12, r3, r7 \n\t"
- "adds r10, r9 \n\t"
- "adc r12, #0 \n\t"
- "umull r9, r11, r4, r6 \n\t"
- "adds r10, r9 \n\t"
- "adcs r12, r11 \n\t"
- "adc r14, #0 \n\t"
- "stmia r0!, {r10} \n\t"
-
- "mov r9, #0 \n\t"
- "umull r10, r11, r3, r8 \n\t"
- "adds r12, r10 \n\t"
- "adcs r14, r11 \n\t"
- "adc r9, #0 \n\t"
- "umull r10, r11, r4, r7 \n\t"
- "adds r12, r10 \n\t"
- "adcs r14, r11 \n\t"
- "adc r9, #0 \n\t"
- "umull r10, r11, r5, r6 \n\t"
- "adds r12, r10 \n\t"
- "adcs r14, r11 \n\t"
- "adc r9, #0 \n\t"
- "stmia r0!, {r12} \n\t"
-
- "ldmia r1!, {r3} \n\t"
- "mov r10, #0 \n\t"
- "umull r11, r12, r4, r8 \n\t"
- "adds r14, r11 \n\t"
- "adcs r9, r12 \n\t"
- "adc r10, #0 \n\t"
- "umull r11, r12, r5, r7 \n\t"
- "adds r14, r11 \n\t"
- "adcs r9, r12 \n\t"
- "adc r10, #0 \n\t"
- "umull r11, r12, r3, r6 \n\t"
- "adds r14, r11 \n\t"
- "adcs r9, r12 \n\t"
- "adc r10, #0 \n\t"
- "ldr r11, [r0] \n\t"
- "adds r14, r11 \n\t"
- "adcs r9, #0 \n\t"
- "adc r10, #0 \n\t"
- "stmia r0!, {r14} \n\t"
-
- "ldmia r1!, {r4} \n\t"
- "mov r11, #0 \n\t"
- "umull r12, r14, r5, r8 \n\t"
- "adds r9, r12 \n\t"
- "adcs r10, r14 \n\t"
- "adc r11, #0 \n\t"
- "umull r12, r14, r3, r7 \n\t"
- "adds r9, r12 \n\t"
- "adcs r10, r14 \n\t"
- "adc r11, #0 \n\t"
- "umull r12, r14, r4, r6 \n\t"
- "adds r9, r12 \n\t"
- "adcs r10, r14 \n\t"
- "adc r11, #0 \n\t"
- "ldr r12, [r0] \n\t"
- "adds r9, r12 \n\t"
- "adcs r10, #0 \n\t"
- "adc r11, #0 \n\t"
- "stmia r0!, {r9} \n\t"
-
- "ldmia r1!, {r5} \n\t"
- "mov r12, #0 \n\t"
- "umull r14, r9, r3, r8 \n\t"
- "adds r10, r14 \n\t"
- "adcs r11, r9 \n\t"
- "adc r12, #0 \n\t"
- "umull r14, r9, r4, r7 \n\t"
- "adds r10, r14 \n\t"
- "adcs r11, r9 \n\t"
- "adc r12, #0 \n\t"
- "umull r14, r9, r5, r6 \n\t"
- "adds r10, r14 \n\t"
- "adcs r11, r9 \n\t"
- "adc r12, #0 \n\t"
- "ldr r14, [r0] \n\t"
- "adds r10, r14 \n\t"
- "adcs r11, #0 \n\t"
- "adc r12, #0 \n\t"
- "stmia r0!, {r10} \n\t"
-
- "ldmia r1!, {r3} \n\t"
- "mov r14, #0 \n\t"
- "umull r9, r10, r4, r8 \n\t"
- "adds r11, r9 \n\t"
- "adcs r12, r10 \n\t"
- "adc r14, #0 \n\t"
- "umull r9, r10, r5, r7 \n\t"
- "adds r11, r9 \n\t"
- "adcs r12, r10 \n\t"
- "adc r14, #0 \n\t"
- "umull r9, r10, r3, r6 \n\t"
- "adds r11, r9 \n\t"
- "adcs r12, r10 \n\t"
- "adc r14, #0 \n\t"
- "ldr r9, [r0] \n\t"
- "adds r11, r9 \n\t"
- "adcs r12, #0 \n\t"
- "adc r14, #0 \n\t"
- "stmia r0!, {r11} \n\t"
-
- "ldmia r2!, {r6} \n\t"
- "mov r9, #0 \n\t"
- "umull r10, r11, r4, r6 \n\t"
- "adds r12, r10 \n\t"
- "adcs r14, r11 \n\t"
- "adc r9, #0 \n\t"
- "umull r10, r11, r5, r8 \n\t"
- "adds r12, r10 \n\t"
- "adcs r14, r11 \n\t"
- "adc r9, #0 \n\t"
- "umull r10, r11, r3, r7 \n\t"
- "adds r12, r10 \n\t"
- "adcs r14, r11 \n\t"
- "adc r9, #0 \n\t"
- "ldr r10, [r0] \n\t"
- "adds r12, r10 \n\t"
- "adcs r14, #0 \n\t"
- "adc r9, #0 \n\t"
- "stmia r0!, {r12} \n\t"
-
- "ldmia r2!, {r7} \n\t"
- "mov r10, #0 \n\t"
- "umull r11, r12, r4, r7 \n\t"
- "adds r14, r11 \n\t"
- "adcs r9, r12 \n\t"
- "adc r10, #0 \n\t"
- "umull r11, r12, r5, r6 \n\t"
- "adds r14, r11 \n\t"
- "adcs r9, r12 \n\t"
- "adc r10, #0 \n\t"
- "umull r11, r12, r3, r8 \n\t"
- "adds r14, r11 \n\t"
- "adcs r9, r12 \n\t"
- "adc r10, #0 \n\t"
- "ldr r11, [r0] \n\t"
- "adds r14, r11 \n\t"
- "adcs r9, #0 \n\t"
- "adc r10, #0 \n\t"
- "stmia r0!, {r14} \n\t"
-
- "ldmia r2!, {r8} \n\t"
- "mov r11, #0 \n\t"
- "umull r12, r14, r4, r8 \n\t"
- "adds r9, r12 \n\t"
- "adcs r10, r14 \n\t"
- "adc r11, #0 \n\t"
- "umull r12, r14, r5, r7 \n\t"
- "adds r9, r12 \n\t"
- "adcs r10, r14 \n\t"
- "adc r11, #0 \n\t"
- "umull r12, r14, r3, r6 \n\t"
- "adds r9, r12 \n\t"
- "adcs r10, r14 \n\t"
- "adc r11, #0 \n\t"
- "ldr r12, [r0] \n\t"
- "adds r9, r12 \n\t"
- "adcs r10, #0 \n\t"
- "adc r11, #0 \n\t"
- "stmia r0!, {r9} \n\t"
-
- "ldmia r2!, {r6} \n\t"
- "mov r12, #0 \n\t"
- "umull r14, r9, r4, r6 \n\t"
- "adds r10, r14 \n\t"
- "adcs r11, r9 \n\t"
- "adc r12, #0 \n\t"
- "umull r14, r9, r5, r8 \n\t"
- "adds r10, r14 \n\t"
- "adcs r11, r9 \n\t"
- "adc r12, #0 \n\t"
- "umull r14, r9, r3, r7 \n\t"
- "adds r10, r14 \n\t"
- "adcs r11, r9 \n\t"
- "adc r12, #0 \n\t"
- "ldr r14, [r0] \n\t"
- "adds r10, r14 \n\t"
- "adcs r11, #0 \n\t"
- "adc r12, #0 \n\t"
- "stmia r0!, {r10} \n\t"
-
- "mov r14, #0 \n\t"
- "umull r9, r10, r5, r6 \n\t"
- "adds r11, r9 \n\t"
- "adcs r12, r10 \n\t"
- "adc r14, #0 \n\t"
- "umull r9, r10, r3, r8 \n\t"
- "adds r11, r9 \n\t"
- "adcs r12, r10 \n\t"
- "adc r14, #0 \n\t"
- "stmia r0!, {r11} \n\t"
-
- "umull r10, r11, r3, r6 \n\t"
- "adds r12, r10 \n\t"
- "adc r14, r11 \n\t"
- "stmia r0!, {r12, r14} \n\t"
- #if (uECC_PLATFORM != uECC_arm_thumb2)
- ".syntax divided \n\t"
- #endif
- : "+r" (r0), "+r" (r1), "+r" (r2)
- :
- : "r3", "r4", "r5", "r6", "r7", "r8", "r9", "r10", "r11", "r12", "r14", "cc", "memory"
- );
-}
-#define asm_mult 1
-#endif /* (uECC_WORDS == 7) */
-
-#if (uECC_WORDS == 8)
-static void vli_mult(uint32_t *result, const uint32_t *left, const uint32_t *right) {
- register uint32_t *r0 __asm__("r0") = result;
- register const uint32_t *r1 __asm__("r1") = left;
- register const uint32_t *r2 __asm__("r2") = right;
-
- __asm__ volatile (
- ".syntax unified \n\t"
- "add r0, 24 \n\t"
- "add r2, 24 \n\t"
- "ldmia r1!, {r3,r4} \n\t"
- "ldmia r2!, {r6,r7} \n\t"
-
- "umull r11, r12, r3, r6 \n\t"
- "stmia r0!, {r11} \n\t"
-
- "mov r10, #0 \n\t"
- "umull r11, r9, r3, r7 \n\t"
- "adds r12, r11 \n\t"
- "adc r9, #0 \n\t"
- "umull r11, r14, r4, r6 \n\t"
- "adds r12, r11 \n\t"
- "adcs r9, r14 \n\t"
- "adc r10, #0 \n\t"
- "stmia r0!, {r12} \n\t"
-
- "umull r12, r14, r4, r7 \n\t"
- "adds r9, r12 \n\t"
- "adc r10, r14 \n\t"
- "stmia r0!, {r9, r10} \n\t"
-
- "sub r0, 28 \n\t"
- "sub r2, 20 \n\t"
- "ldmia r2!, {r6,r7,r8} \n\t"
- "ldmia r1!, {r5} \n\t"
-
- "umull r11, r12, r3, r6 \n\t"
- "stmia r0!, {r11} \n\t"
-
- "mov r10, #0 \n\t"
- "umull r11, r9, r3, r7 \n\t"
- "adds r12, r11 \n\t"
- "adc r9, #0 \n\t"
- "umull r11, r14, r4, r6 \n\t"
- "adds r12, r11 \n\t"
- "adcs r9, r14 \n\t"
- "adc r10, #0 \n\t"
- "stmia r0!, {r12} \n\t"
-
- "mov r11, #0 \n\t"
- "umull r12, r14, r3, r8 \n\t"
- "adds r9, r12 \n\t"
- "adcs r10, r14 \n\t"
- "adc r11, #0 \n\t"
- "umull r12, r14, r4, r7 \n\t"
- "adds r9, r12 \n\t"
- "adcs r10, r14 \n\t"
- "adc r11, #0 \n\t"
- "umull r12, r14, r5, r6 \n\t"
- "adds r9, r12 \n\t"
- "adcs r10, r14 \n\t"
- "adc r11, #0 \n\t"
- "stmia r0!, {r9} \n\t"
-
- "ldmia r1!, {r3} \n\t"
- "mov r12, #0 \n\t"
- "umull r14, r9, r4, r8 \n\t"
- "adds r10, r14 \n\t"
- "adcs r11, r9 \n\t"
- "adc r12, #0 \n\t"
- "umull r14, r9, r5, r7 \n\t"
- "adds r10, r14 \n\t"
- "adcs r11, r9 \n\t"
- "adc r12, #0 \n\t"
- "umull r14, r9, r3, r6 \n\t"
- "adds r10, r14 \n\t"
- "adcs r11, r9 \n\t"
- "adc r12, #0 \n\t"
- "ldr r14, [r0] \n\t"
- "adds r10, r14 \n\t"
- "adcs r11, #0 \n\t"
- "adc r12, #0 \n\t"
- "stmia r0!, {r10} \n\t"
-
- "ldmia r1!, {r4} \n\t"
- "mov r14, #0 \n\t"
- "umull r9, r10, r5, r8 \n\t"
- "adds r11, r9 \n\t"
- "adcs r12, r10 \n\t"
- "adc r14, #0 \n\t"
- "umull r9, r10, r3, r7 \n\t"
- "adds r11, r9 \n\t"
- "adcs r12, r10 \n\t"
- "adc r14, #0 \n\t"
- "umull r9, r10, r4, r6 \n\t"
- "adds r11, r9 \n\t"
- "adcs r12, r10 \n\t"
- "adc r14, #0 \n\t"
- "ldr r9, [r0] \n\t"
- "adds r11, r9 \n\t"
- "adcs r12, #0 \n\t"
- "adc r14, #0 \n\t"
- "stmia r0!, {r11} \n\t"
-
- "ldmia r2!, {r6} \n\t"
- "mov r9, #0 \n\t"
- "umull r10, r11, r5, r6 \n\t"
- "adds r12, r10 \n\t"
- "adcs r14, r11 \n\t"
- "adc r9, #0 \n\t"
- "umull r10, r11, r3, r8 \n\t"
- "adds r12, r10 \n\t"
- "adcs r14, r11 \n\t"
- "adc r9, #0 \n\t"
- "umull r10, r11, r4, r7 \n\t"
- "adds r12, r10 \n\t"
- "adcs r14, r11 \n\t"
- "adc r9, #0 \n\t"
- "ldr r10, [r0] \n\t"
- "adds r12, r10 \n\t"
- "adcs r14, #0 \n\t"
- "adc r9, #0 \n\t"
- "stmia r0!, {r12} \n\t"
-
- "ldmia r2!, {r7} \n\t"
- "mov r10, #0 \n\t"
- "umull r11, r12, r5, r7 \n\t"
- "adds r14, r11 \n\t"
- "adcs r9, r12 \n\t"
- "adc r10, #0 \n\t"
- "umull r11, r12, r3, r6 \n\t"
- "adds r14, r11 \n\t"
- "adcs r9, r12 \n\t"
- "adc r10, #0 \n\t"
- "umull r11, r12, r4, r8 \n\t"
- "adds r14, r11 \n\t"
- "adcs r9, r12 \n\t"
- "adc r10, #0 \n\t"
- "ldr r11, [r0] \n\t"
- "adds r14, r11 \n\t"
- "adcs r9, #0 \n\t"
- "adc r10, #0 \n\t"
- "stmia r0!, {r14} \n\t"
-
- "mov r11, #0 \n\t"
- "umull r12, r14, r3, r7 \n\t"
- "adds r9, r12 \n\t"
- "adcs r10, r14 \n\t"
- "adc r11, #0 \n\t"
- "umull r12, r14, r4, r6 \n\t"
- "adds r9, r12 \n\t"
- "adcs r10, r14 \n\t"
- "adc r11, #0 \n\t"
- "stmia r0!, {r9} \n\t"
-
- "umull r14, r9, r4, r7 \n\t"
- "adds r10, r14 \n\t"
- "adc r11, r9 \n\t"
- "stmia r0!, {r10, r11} \n\t"
-
- "sub r0, 52 \n\t"
- "sub r1, 20 \n\t"
- "sub r2, 32 \n\t"
- "ldmia r1!, {r3,r4,r5} \n\t"
- "ldmia r2!, {r6,r7,r8} \n\t"
-
- "umull r11, r12, r3, r6 \n\t"
- "stmia r0!, {r11} \n\t"
-
- "mov r10, #0 \n\t"
- "umull r11, r9, r3, r7 \n\t"
- "adds r12, r11 \n\t"
- "adc r9, #0 \n\t"
- "umull r11, r14, r4, r6 \n\t"
- "adds r12, r11 \n\t"
- "adcs r9, r14 \n\t"
- "adc r10, #0 \n\t"
- "stmia r0!, {r12} \n\t"
-
- "mov r11, #0 \n\t"
- "umull r12, r14, r3, r8 \n\t"
- "adds r9, r12 \n\t"
- "adcs r10, r14 \n\t"
- "adc r11, #0 \n\t"
- "umull r12, r14, r4, r7 \n\t"
- "adds r9, r12 \n\t"
- "adcs r10, r14 \n\t"
- "adc r11, #0 \n\t"
- "umull r12, r14, r5, r6 \n\t"
- "adds r9, r12 \n\t"
- "adcs r10, r14 \n\t"
- "adc r11, #0 \n\t"
- "stmia r0!, {r9} \n\t"
-
- "ldmia r1!, {r3} \n\t"
- "mov r12, #0 \n\t"
- "umull r14, r9, r4, r8 \n\t"
- "adds r10, r14 \n\t"
- "adcs r11, r9 \n\t"
- "adc r12, #0 \n\t"
- "umull r14, r9, r5, r7 \n\t"
- "adds r10, r14 \n\t"
- "adcs r11, r9 \n\t"
- "adc r12, #0 \n\t"
- "umull r14, r9, r3, r6 \n\t"
- "adds r10, r14 \n\t"
- "adcs r11, r9 \n\t"
- "adc r12, #0 \n\t"
- "ldr r14, [r0] \n\t"
- "adds r10, r14 \n\t"
- "adcs r11, #0 \n\t"
- "adc r12, #0 \n\t"
- "stmia r0!, {r10} \n\t"
-
- "ldmia r1!, {r4} \n\t"
- "mov r14, #0 \n\t"
- "umull r9, r10, r5, r8 \n\t"
- "adds r11, r9 \n\t"
- "adcs r12, r10 \n\t"
- "adc r14, #0 \n\t"
- "umull r9, r10, r3, r7 \n\t"
- "adds r11, r9 \n\t"
- "adcs r12, r10 \n\t"
- "adc r14, #0 \n\t"
- "umull r9, r10, r4, r6 \n\t"
- "adds r11, r9 \n\t"
- "adcs r12, r10 \n\t"
- "adc r14, #0 \n\t"
- "ldr r9, [r0] \n\t"
- "adds r11, r9 \n\t"
- "adcs r12, #0 \n\t"
- "adc r14, #0 \n\t"
- "stmia r0!, {r11} \n\t"
-
- "ldmia r1!, {r5} \n\t"
- "mov r9, #0 \n\t"
- "umull r10, r11, r3, r8 \n\t"
- "adds r12, r10 \n\t"
- "adcs r14, r11 \n\t"
- "adc r9, #0 \n\t"
- "umull r10, r11, r4, r7 \n\t"
- "adds r12, r10 \n\t"
- "adcs r14, r11 \n\t"
- "adc r9, #0 \n\t"
- "umull r10, r11, r5, r6 \n\t"
- "adds r12, r10 \n\t"
- "adcs r14, r11 \n\t"
- "adc r9, #0 \n\t"
- "ldr r10, [r0] \n\t"
- "adds r12, r10 \n\t"
- "adcs r14, #0 \n\t"
- "adc r9, #0 \n\t"
- "stmia r0!, {r12} \n\t"
-
- "ldmia r1!, {r3} \n\t"
- "mov r10, #0 \n\t"
- "umull r11, r12, r4, r8 \n\t"
- "adds r14, r11 \n\t"
- "adcs r9, r12 \n\t"
- "adc r10, #0 \n\t"
- "umull r11, r12, r5, r7 \n\t"
- "adds r14, r11 \n\t"
- "adcs r9, r12 \n\t"
- "adc r10, #0 \n\t"
- "umull r11, r12, r3, r6 \n\t"
- "adds r14, r11 \n\t"
- "adcs r9, r12 \n\t"
- "adc r10, #0 \n\t"
- "ldr r11, [r0] \n\t"
- "adds r14, r11 \n\t"
- "adcs r9, #0 \n\t"
- "adc r10, #0 \n\t"
- "stmia r0!, {r14} \n\t"
-
- "ldmia r1!, {r4} \n\t"
- "mov r11, #0 \n\t"
- "umull r12, r14, r5, r8 \n\t"
- "adds r9, r12 \n\t"
- "adcs r10, r14 \n\t"
- "adc r11, #0 \n\t"
- "umull r12, r14, r3, r7 \n\t"
- "adds r9, r12 \n\t"
- "adcs r10, r14 \n\t"
- "adc r11, #0 \n\t"
- "umull r12, r14, r4, r6 \n\t"
- "adds r9, r12 \n\t"
- "adcs r10, r14 \n\t"
- "adc r11, #0 \n\t"
- "ldr r12, [r0] \n\t"
- "adds r9, r12 \n\t"
- "adcs r10, #0 \n\t"
- "adc r11, #0 \n\t"
- "stmia r0!, {r9} \n\t"
-
- "ldmia r2!, {r6} \n\t"
- "mov r12, #0 \n\t"
- "umull r14, r9, r5, r6 \n\t"
- "adds r10, r14 \n\t"
- "adcs r11, r9 \n\t"
- "adc r12, #0 \n\t"
- "umull r14, r9, r3, r8 \n\t"
- "adds r10, r14 \n\t"
- "adcs r11, r9 \n\t"
- "adc r12, #0 \n\t"
- "umull r14, r9, r4, r7 \n\t"
- "adds r10, r14 \n\t"
- "adcs r11, r9 \n\t"
- "adc r12, #0 \n\t"
- "ldr r14, [r0] \n\t"
- "adds r10, r14 \n\t"
- "adcs r11, #0 \n\t"
- "adc r12, #0 \n\t"
- "stmia r0!, {r10} \n\t"
-
- "ldmia r2!, {r7} \n\t"
- "mov r14, #0 \n\t"
- "umull r9, r10, r5, r7 \n\t"
- "adds r11, r9 \n\t"
- "adcs r12, r10 \n\t"
- "adc r14, #0 \n\t"
- "umull r9, r10, r3, r6 \n\t"
- "adds r11, r9 \n\t"
- "adcs r12, r10 \n\t"
- "adc r14, #0 \n\t"
- "umull r9, r10, r4, r8 \n\t"
- "adds r11, r9 \n\t"
- "adcs r12, r10 \n\t"
- "adc r14, #0 \n\t"
- "ldr r9, [r0] \n\t"
- "adds r11, r9 \n\t"
- "adcs r12, #0 \n\t"
- "adc r14, #0 \n\t"
- "stmia r0!, {r11} \n\t"
-
- "ldmia r2!, {r8} \n\t"
- "mov r9, #0 \n\t"
- "umull r10, r11, r5, r8 \n\t"
- "adds r12, r10 \n\t"
- "adcs r14, r11 \n\t"
- "adc r9, #0 \n\t"
- "umull r10, r11, r3, r7 \n\t"
- "adds r12, r10 \n\t"
- "adcs r14, r11 \n\t"
- "adc r9, #0 \n\t"
- "umull r10, r11, r4, r6 \n\t"
- "adds r12, r10 \n\t"
- "adcs r14, r11 \n\t"
- "adc r9, #0 \n\t"
- "ldr r10, [r0] \n\t"
- "adds r12, r10 \n\t"
- "adcs r14, #0 \n\t"
- "adc r9, #0 \n\t"
- "stmia r0!, {r12} \n\t"
-
- "ldmia r2!, {r6} \n\t"
- "mov r10, #0 \n\t"
- "umull r11, r12, r5, r6 \n\t"
- "adds r14, r11 \n\t"
- "adcs r9, r12 \n\t"
- "adc r10, #0 \n\t"
- "umull r11, r12, r3, r8 \n\t"
- "adds r14, r11 \n\t"
- "adcs r9, r12 \n\t"
- "adc r10, #0 \n\t"
- "umull r11, r12, r4, r7 \n\t"
- "adds r14, r11 \n\t"
- "adcs r9, r12 \n\t"
- "adc r10, #0 \n\t"
- "ldr r11, [r0] \n\t"
- "adds r14, r11 \n\t"
- "adcs r9, #0 \n\t"
- "adc r10, #0 \n\t"
- "stmia r0!, {r14} \n\t"
-
- "ldmia r2!, {r7} \n\t"
- "mov r11, #0 \n\t"
- "umull r12, r14, r5, r7 \n\t"
- "adds r9, r12 \n\t"
- "adcs r10, r14 \n\t"
- "adc r11, #0 \n\t"
- "umull r12, r14, r3, r6 \n\t"
- "adds r9, r12 \n\t"
- "adcs r10, r14 \n\t"
- "adc r11, #0 \n\t"
- "umull r12, r14, r4, r8 \n\t"
- "adds r9, r12 \n\t"
- "adcs r10, r14 \n\t"
- "adc r11, #0 \n\t"
- "ldr r12, [r0] \n\t"
- "adds r9, r12 \n\t"
- "adcs r10, #0 \n\t"
- "adc r11, #0 \n\t"
- "stmia r0!, {r9} \n\t"
-
- "mov r12, #0 \n\t"
- "umull r14, r9, r3, r7 \n\t"
- "adds r10, r14 \n\t"
- "adcs r11, r9 \n\t"
- "adc r12, #0 \n\t"
- "umull r14, r9, r4, r6 \n\t"
- "adds r10, r14 \n\t"
- "adcs r11, r9 \n\t"
- "adc r12, #0 \n\t"
- "stmia r0!, {r10} \n\t"
-
- "umull r9, r10, r4, r7 \n\t"
- "adds r11, r9 \n\t"
- "adc r12, r10 \n\t"
- "stmia r0!, {r11, r12} \n\t"
- #if (uECC_PLATFORM != uECC_arm_thumb2)
- ".syntax divided \n\t"
- #endif
- : "+r" (r0), "+r" (r1), "+r" (r2)
- :
- : "r3", "r4", "r5", "r6", "r7", "r8", "r9", "r10", "r11", "r12", "r14", "cc", "memory"
- );
-}
-#define asm_mult 1
-#endif /* (uECC_WORDS == 8) */
-
-#if uECC_SQUARE_FUNC
-#if (uECC_WORDS == 5)
-static void vli_square(uint32_t *result, const uint32_t *left) {
- register uint32_t *r0 __asm__("r0") = result;
- register const uint32_t *r1 __asm__("r1") = left;
-
- __asm__ volatile (
- ".syntax unified \n\t"
- "ldmia r1!, {r2,r3,r4,r5,r6} \n\t"
-
- "umull r11, r12, r2, r2 \n\t"
- "stmia r0!, {r11} \n\t"
-
- "mov r9, #0 \n\t"
- "umull r10, r11, r2, r3 \n\t"
- "adds r12, r10 \n\t"
- "adcs r8, r11, #0 \n\t"
- "adc r9, #0 \n\t"
- "adds r12, r10 \n\t"
- "adcs r8, r11 \n\t"
- "adc r9, #0 \n\t"
- "stmia r0!, {r12} \n\t"
-
- "mov r10, #0 \n\t"
- "umull r11, r12, r2, r4 \n\t"
- "adds r11, r11 \n\t"
- "adcs r12, r12 \n\t"
- "adc r10, #0 \n\t"
- "adds r8, r11 \n\t"
- "adcs r9, r12 \n\t"
- "adc r10, #0 \n\t"
- "umull r11, r12, r3, r3 \n\t"
- "adds r8, r11 \n\t"
- "adcs r9, r12 \n\t"
- "adc r10, #0 \n\t"
- "stmia r0!, {r8} \n\t"
-
- "mov r12, #0 \n\t"
- "umull r8, r11, r2, r5 \n\t"
- "umull r1, r14, r3, r4 \n\t"
- "adds r8, r1 \n\t"
- "adcs r11, r14 \n\t"
- "adc r12, #0 \n\t"
- "adds r8, r8 \n\t"
- "adcs r11, r11 \n\t"
- "adc r12, r12 \n\t"
- "adds r8, r9 \n\t"
- "adcs r11, r10 \n\t"
- "adc r12, #0 \n\t"
- "stmia r0!, {r8} \n\t"
-
- "mov r10, #0 \n\t"
- "umull r8, r9, r2, r6 \n\t"
- "umull r1, r14, r3, r5 \n\t"
- "adds r8, r1 \n\t"
- "adcs r9, r14 \n\t"
- "adc r10, #0 \n\t"
- "adds r8, r8 \n\t"
- "adcs r9, r9 \n\t"
- "adc r10, r10 \n\t"
- "umull r1, r14, r4, r4 \n\t"
- "adds r8, r1 \n\t"
- "adcs r9, r14 \n\t"
- "adc r10, #0 \n\t"
- "adds r8, r11 \n\t"
- "adcs r9, r12 \n\t"
- "adc r10, #0 \n\t"
- "stmia r0!, {r8} \n\t"
-
- "mov r12, #0 \n\t"
- "umull r8, r11, r3, r6 \n\t"
- "umull r1, r14, r4, r5 \n\t"
- "adds r8, r1 \n\t"
- "adcs r11, r14 \n\t"
- "adc r12, #0 \n\t"
- "adds r8, r8 \n\t"
- "adcs r11, r11 \n\t"
- "adc r12, r12 \n\t"
- "adds r8, r9 \n\t"
- "adcs r11, r10 \n\t"
- "adc r12, #0 \n\t"
- "stmia r0!, {r8} \n\t"
-
- "mov r8, #0 \n\t"
- "umull r1, r10, r4, r6 \n\t"
- "adds r1, r1 \n\t"
- "adcs r10, r10 \n\t"
- "adc r8, #0 \n\t"
- "adds r11, r1 \n\t"
- "adcs r12, r10 \n\t"
- "adc r8, #0 \n\t"
- "umull r1, r10, r5, r5 \n\t"
- "adds r11, r1 \n\t"
- "adcs r12, r10 \n\t"
- "adc r8, #0 \n\t"
- "stmia r0!, {r11} \n\t"
-
- "mov r11, #0 \n\t"
- "umull r1, r10, r5, r6 \n\t"
- "adds r1, r1 \n\t"
- "adcs r10, r10 \n\t"
- "adc r11, #0 \n\t"
- "adds r12, r1 \n\t"
- "adcs r8, r10 \n\t"
- "adc r11, #0 \n\t"
- "stmia r0!, {r12} \n\t"
-
- "umull r1, r10, r6, r6 \n\t"
- "adds r8, r1 \n\t"
- "adcs r11, r10 \n\t"
- "stmia r0!, {r8, r11} \n\t"
- #if (uECC_PLATFORM != uECC_arm_thumb2)
- ".syntax divided \n\t"
- #endif
- : "+r" (r0), "+r" (r1)
- :
- : "r2", "r3", "r4", "r5", "r6", "r7", "r8", "r9", "r10", "r11", "r12", "r14", "cc", "memory"
- );
-}
-#define asm_square 1
-#endif /* (uECC_WORDS == 5) */
-
-#if (uECC_WORDS == 6)
-static void vli_square(uint32_t *result, const uint32_t *left) {
- register uint32_t *r0 __asm__("r0") = result;
- register const uint32_t *r1 __asm__("r1") = left;
-
- __asm__ volatile (
- ".syntax unified \n\t"
- "ldmia r1!, {r2,r3,r4,r5,r6,r7} \n\t"
-
- "umull r11, r12, r2, r2 \n\t"
- "stmia r0!, {r11} \n\t"
-
- "mov r9, #0 \n\t"
- "umull r10, r11, r2, r3 \n\t"
- "adds r12, r10 \n\t"
- "adcs r8, r11, #0 \n\t"
- "adc r9, #0 \n\t"
- "adds r12, r10 \n\t"
- "adcs r8, r11 \n\t"
- "adc r9, #0 \n\t"
- "stmia r0!, {r12} \n\t"
-
- "mov r10, #0 \n\t"
- "umull r11, r12, r2, r4 \n\t"
- "adds r11, r11 \n\t"
- "adcs r12, r12 \n\t"
- "adc r10, #0 \n\t"
- "adds r8, r11 \n\t"
- "adcs r9, r12 \n\t"
- "adc r10, #0 \n\t"
- "umull r11, r12, r3, r3 \n\t"
- "adds r8, r11 \n\t"
- "adcs r9, r12 \n\t"
- "adc r10, #0 \n\t"
- "stmia r0!, {r8} \n\t"
-
- "mov r12, #0 \n\t"
- "umull r8, r11, r2, r5 \n\t"
- "umull r1, r14, r3, r4 \n\t"
- "adds r8, r1 \n\t"
- "adcs r11, r14 \n\t"
- "adc r12, #0 \n\t"
- "adds r8, r8 \n\t"
- "adcs r11, r11 \n\t"
- "adc r12, r12 \n\t"
- "adds r8, r9 \n\t"
- "adcs r11, r10 \n\t"
- "adc r12, #0 \n\t"
- "stmia r0!, {r8} \n\t"
-
- "mov r10, #0 \n\t"
- "umull r8, r9, r2, r6 \n\t"
- "umull r1, r14, r3, r5 \n\t"
- "adds r8, r1 \n\t"
- "adcs r9, r14 \n\t"
- "adc r10, #0 \n\t"
- "adds r8, r8 \n\t"
- "adcs r9, r9 \n\t"
- "adc r10, r10 \n\t"
- "umull r1, r14, r4, r4 \n\t"
- "adds r8, r1 \n\t"
- "adcs r9, r14 \n\t"
- "adc r10, #0 \n\t"
- "adds r8, r11 \n\t"
- "adcs r9, r12 \n\t"
- "adc r10, #0 \n\t"
- "stmia r0!, {r8} \n\t"
-
- "mov r12, #0 \n\t"
- "umull r8, r11, r2, r7 \n\t"
- "umull r1, r14, r3, r6 \n\t"
- "adds r8, r1 \n\t"
- "adcs r11, r14 \n\t"
- "adc r12, #0 \n\t"
- "umull r1, r14, r4, r5 \n\t"
- "adds r8, r1 \n\t"
- "adcs r11, r14 \n\t"
- "adc r12, #0 \n\t"
- "adds r8, r8 \n\t"
- "adcs r11, r11 \n\t"
- "adc r12, r12 \n\t"
- "adds r8, r9 \n\t"
- "adcs r11, r10 \n\t"
- "adc r12, #0 \n\t"
- "stmia r0!, {r8} \n\t"
-
- "mov r10, #0 \n\t"
- "umull r8, r9, r3, r7 \n\t"
- "umull r1, r14, r4, r6 \n\t"
- "adds r8, r1 \n\t"
- "adcs r9, r14 \n\t"
- "adc r10, #0 \n\t"
- "adds r8, r8 \n\t"
- "adcs r9, r9 \n\t"
- "adc r10, r10 \n\t"
- "umull r1, r14, r5, r5 \n\t"
- "adds r8, r1 \n\t"
- "adcs r9, r14 \n\t"
- "adc r10, #0 \n\t"
- "adds r8, r11 \n\t"
- "adcs r9, r12 \n\t"
- "adc r10, #0 \n\t"
- "stmia r0!, {r8} \n\t"
-
- "mov r12, #0 \n\t"
- "umull r8, r11, r4, r7 \n\t"
- "umull r1, r14, r5, r6 \n\t"
- "adds r8, r1 \n\t"
- "adcs r11, r14 \n\t"
- "adc r12, #0 \n\t"
- "adds r8, r8 \n\t"
- "adcs r11, r11 \n\t"
- "adc r12, r12 \n\t"
- "adds r8, r9 \n\t"
- "adcs r11, r10 \n\t"
- "adc r12, #0 \n\t"
- "stmia r0!, {r8} \n\t"
-
- "mov r8, #0 \n\t"
- "umull r1, r10, r5, r7 \n\t"
- "adds r1, r1 \n\t"
- "adcs r10, r10 \n\t"
- "adc r8, #0 \n\t"
- "adds r11, r1 \n\t"
- "adcs r12, r10 \n\t"
- "adc r8, #0 \n\t"
- "umull r1, r10, r6, r6 \n\t"
- "adds r11, r1 \n\t"
- "adcs r12, r10 \n\t"
- "adc r8, #0 \n\t"
- "stmia r0!, {r11} \n\t"
-
- "mov r11, #0 \n\t"
- "umull r1, r10, r6, r7 \n\t"
- "adds r1, r1 \n\t"
- "adcs r10, r10 \n\t"
- "adc r11, #0 \n\t"
- "adds r12, r1 \n\t"
- "adcs r8, r10 \n\t"
- "adc r11, #0 \n\t"
- "stmia r0!, {r12} \n\t"
-
- "umull r1, r10, r7, r7 \n\t"
- "adds r8, r1 \n\t"
- "adcs r11, r10 \n\t"
- "stmia r0!, {r8, r11} \n\t"
- #if (uECC_PLATFORM != uECC_arm_thumb2)
- ".syntax divided \n\t"
- #endif
- : "+r" (r0), "+r" (r1)
- :
- : "r2", "r3", "r4", "r5", "r6", "r7", "r8", "r9", "r10", "r11", "r12", "r14", "cc", "memory"
- );
-}
-#define asm_square 1
-#endif /* (uECC_WORDS == 6) */
-
-#if (uECC_WORDS == 7)
-static void vli_square(uint32_t *result, const uint32_t *left) {
- register uint32_t *r0 __asm__("r0") = result;
- register const uint32_t *r1 __asm__("r1") = left;
-
- __asm__ volatile (
- ".syntax unified \n\t"
- "ldmia r1!, {r2} \n\t"
- "add r1, 20 \n\t"
- "ldmia r1!, {r5} \n\t"
- "add r0, 24 \n\t"
- "umull r8, r9, r2, r5 \n\t"
- "stmia r0!, {r8, r9} \n\t"
- "sub r0, 32 \n\t"
- "sub r1, 28 \n\t"
-
- "ldmia r1!, {r2, r3, r4, r5, r6, r7} \n\t"
-
- "umull r11, r12, r2, r2 \n\t"
- "stmia r0!, {r11} \n\t"
-
- "mov r9, #0 \n\t"
- "umull r10, r11, r2, r3 \n\t"
- "adds r12, r10 \n\t"
- "adcs r8, r11, #0 \n\t"
- "adc r9, #0 \n\t"
- "adds r12, r10 \n\t"
- "adcs r8, r11 \n\t"
- "adc r9, #0 \n\t"
- "stmia r0!, {r12} \n\t"
-
- "mov r10, #0 \n\t"
- "umull r11, r12, r2, r4 \n\t"
- "adds r11, r11 \n\t"
- "adcs r12, r12 \n\t"
- "adc r10, #0 \n\t"
- "adds r8, r11 \n\t"
- "adcs r9, r12 \n\t"
- "adc r10, #0 \n\t"
- "umull r11, r12, r3, r3 \n\t"
- "adds r8, r11 \n\t"
- "adcs r9, r12 \n\t"
- "adc r10, #0 \n\t"
- "stmia r0!, {r8} \n\t"
-
- "mov r12, #0 \n\t"
- "umull r8, r11, r2, r5 \n\t"
- "mov r14, r11 \n\t"
- "umlal r8, r11, r3, r4 \n\t"
- "cmp r14, r11 \n\t"
- "it hi \n\t"
- "adchi r12, #0 \n\t"
- "adds r8, r8 \n\t"
- "adcs r11, r11 \n\t"
- "adc r12, r12 \n\t"
- "adds r8, r9 \n\t"
- "adcs r11, r10 \n\t"
- "adc r12, #0 \n\t"
- "stmia r0!, {r8} \n\t"
-
- "mov r10, #0 \n\t"
- "umull r8, r9, r2, r6 \n\t"
- "mov r14, r9 \n\t"
- "umlal r8, r9, r3, r5 \n\t"
- "cmp r14, r9 \n\t"
- "it hi \n\t"
- "adchi r10, #0 \n\t"
- "adds r8, r8 \n\t"
- "adcs r9, r9 \n\t"
- "adc r10, r10 \n\t"
- "mov r14, r9 \n\t"
- "umlal r8, r9, r4, r4 \n\t"
- "cmp r14, r9 \n\t"
- "it hi \n\t"
- "adchi r10, #0 \n\t"
- "adds r8, r11 \n\t"
- "adcs r9, r12 \n\t"
- "adc r10, #0 \n\t"
- "stmia r0!, {r8} \n\t"
-
- "mov r12, #0 \n\t"
- "umull r8, r11, r2, r7 \n\t"
- "mov r14, r11 \n\t"
- "umlal r8, r11, r3, r6 \n\t"
- "cmp r14, r11 \n\t"
- "it hi \n\t"
- "adchi r12, #0 \n\t"
- "mov r14, r11 \n\t"
- "umlal r8, r11, r4, r5 \n\t"
- "cmp r14, r11 \n\t"
- "it hi \n\t"
- "adchi r12, #0 \n\t"
- "adds r8, r8 \n\t"
- "adcs r11, r11 \n\t"
- "adc r12, r12 \n\t"
- "adds r8, r9 \n\t"
- "adcs r11, r10 \n\t"
- "adc r12, #0 \n\t"
- "stmia r0!, {r8} \n\t"
-
- "ldmia r1!, {r2} \n\t"
- "mov r10, #0 \n\t"
- "umull r8, r9, r3, r7 \n\t"
- "mov r14, r9 \n\t"
- "umlal r8, r9, r4, r6 \n\t"
- "cmp r14, r9 \n\t"
- "it hi \n\t"
- "adchi r10, #0 \n\t"
- "ldr r14, [r0] \n\t"
- "adds r8, r14 \n\t"
- "adcs r9, #0 \n\t"
- "adc r10, #0 \n\t"
- "adds r8, r8 \n\t"
- "adcs r9, r9 \n\t"
- "adc r10, r10 \n\t"
- "mov r14, r9 \n\t"
- "umlal r8, r9, r5, r5 \n\t"
- "cmp r14, r9 \n\t"
- "it hi \n\t"
- "adchi r10, #0 \n\t"
- "adds r8, r11 \n\t"
- "adcs r9, r12 \n\t"
- "adc r10, #0 \n\t"
- "stmia r0!, {r8} \n\t"
-
- "mov r12, #0 \n\t"
- "umull r8, r11, r3, r2 \n\t"
- "mov r14, r11 \n\t"
- "umlal r8, r11, r4, r7 \n\t"
- "cmp r14, r11 \n\t"
- "it hi \n\t"
- "adchi r12, #0 \n\t"
- "mov r14, r11 \n\t"
- "umlal r8, r11, r5, r6 \n\t"
- "cmp r14, r11 \n\t"
- "it hi \n\t"
- "adchi r12, #0 \n\t"
- "ldr r14, [r0] \n\t"
- "adds r8, r14 \n\t"
- "adcs r11, #0 \n\t"
- "adc r12, #0 \n\t"
- "adds r8, r8 \n\t"
- "adcs r11, r11 \n\t"
- "adc r12, r12 \n\t"
- "adds r8, r9 \n\t"
- "adcs r11, r10 \n\t"
- "adc r12, #0 \n\t"
- "stmia r0!, {r8} \n\t"
-
- "mov r10, #0 \n\t"
- "umull r8, r9, r4, r2 \n\t"
- "mov r14, r9 \n\t"
- "umlal r8, r9, r5, r7 \n\t"
- "cmp r14, r9 \n\t"
- "it hi \n\t"
- "adchi r10, #0 \n\t"
- "adds r8, r8 \n\t"
- "adcs r9, r9 \n\t"
- "adc r10, r10 \n\t"
- "mov r14, r9 \n\t"
- "umlal r8, r9, r6, r6 \n\t"
- "cmp r14, r9 \n\t"
- "it hi \n\t"
- "adchi r10, #0 \n\t"
- "adds r8, r11 \n\t"
- "adcs r9, r12 \n\t"
- "adc r10, #0 \n\t"
- "stmia r0!, {r8} \n\t"
-
- "mov r12, #0 \n\t"
- "umull r8, r11, r5, r2 \n\t"
- "mov r14, r11 \n\t"
- "umlal r8, r11, r6, r7 \n\t"
- "cmp r14, r11 \n\t"
- "it hi \n\t"
- "adchi r12, #0 \n\t"
- "adds r8, r8 \n\t"
- "adcs r11, r11 \n\t"
- "adc r12, r12 \n\t"
- "adds r8, r9 \n\t"
- "adcs r11, r10 \n\t"
- "adc r12, #0 \n\t"
- "stmia r0!, {r8} \n\t"
-
- "mov r8, #0 \n\t"
- "umull r1, r10, r6, r2 \n\t"
- "adds r1, r1 \n\t"
- "adcs r10, r10 \n\t"
- "adc r8, #0 \n\t"
- "adds r11, r1 \n\t"
- "adcs r12, r10 \n\t"
- "adc r8, #0 \n\t"
- "umull r1, r10, r7, r7 \n\t"
- "adds r11, r1 \n\t"
- "adcs r12, r10 \n\t"
- "adc r8, #0 \n\t"
- "stmia r0!, {r11} \n\t"
-
- "mov r11, #0 \n\t"
- "umull r1, r10, r7, r2 \n\t"
- "adds r1, r1 \n\t"
- "adcs r10, r10 \n\t"
- "adc r11, #0 \n\t"
- "adds r12, r1 \n\t"
- "adcs r8, r10 \n\t"
- "adc r11, #0 \n\t"
- "stmia r0!, {r12} \n\t"
-
- "umull r1, r10, r2, r2 \n\t"
- "adds r8, r1 \n\t"
- "adcs r11, r10 \n\t"
- "stmia r0!, {r8, r11} \n\t"
- #if (uECC_PLATFORM != uECC_arm_thumb2)
- ".syntax divided \n\t"
- #endif
- : "+r" (r0), "+r" (r1)
- :
- : "r2", "r3", "r4", "r5", "r6", "r7", "r8", "r9", "r10", "r11", "r12", "r14", "cc", "memory"
- );
-}
-#define asm_square 1
-#endif /* (uECC_WORDS == 7) */
-
-#if (uECC_WORDS == 8)
-static void vli_square(uint32_t *result, const uint32_t *left) {
- register uint32_t *r0 __asm__("r0") = result;
- register const uint32_t *r1 __asm__("r1") = left;
-
- __asm__ volatile (
- ".syntax unified \n\t"
- "ldmia r1!, {r2, r3} \n\t"
- "add r1, 16 \n\t"
- "ldmia r1!, {r5, r6} \n\t"
- "add r0, 24 \n\t"
-
- "umull r8, r9, r2, r5 \n\t"
- "stmia r0!, {r8} \n\t"
-
- "umull r12, r10, r2, r6 \n\t"
- "adds r9, r12 \n\t"
- "adc r10, #0 \n\t"
- "stmia r0!, {r9} \n\t"
-
- "umull r8, r9, r3, r6 \n\t"
- "adds r10, r8 \n\t"
- "adc r11, r9, #0 \n\t"
- "stmia r0!, {r10, r11} \n\t"
-
- "sub r0, 40 \n\t"
- "sub r1, 32 \n\t"
- "ldmia r1!, {r2,r3,r4,r5,r6,r7} \n\t"
-
- "umull r11, r12, r2, r2 \n\t"
- "stmia r0!, {r11} \n\t"
-
- "mov r9, #0 \n\t"
- "umull r10, r11, r2, r3 \n\t"
- "adds r12, r10 \n\t"
- "adcs r8, r11, #0 \n\t"
- "adc r9, #0 \n\t"
- "adds r12, r10 \n\t"
- "adcs r8, r11 \n\t"
- "adc r9, #0 \n\t"
- "stmia r0!, {r12} \n\t"
-
- "mov r10, #0 \n\t"
- "umull r11, r12, r2, r4 \n\t"
- "adds r11, r11 \n\t"
- "adcs r12, r12 \n\t"
- "adc r10, #0 \n\t"
- "adds r8, r11 \n\t"
- "adcs r9, r12 \n\t"
- "adc r10, #0 \n\t"
- "umull r11, r12, r3, r3 \n\t"
- "adds r8, r11 \n\t"
- "adcs r9, r12 \n\t"
- "adc r10, #0 \n\t"
- "stmia r0!, {r8} \n\t"
-
- "mov r12, #0 \n\t"
- "umull r8, r11, r2, r5 \n\t"
- "mov r14, r11 \n\t"
- "umlal r8, r11, r3, r4 \n\t"
- "cmp r14, r11 \n\t"
- "it hi \n\t"
- "adchi r12, #0 \n\t"
- "adds r8, r8 \n\t"
- "adcs r11, r11 \n\t"
- "adc r12, r12 \n\t"
- "adds r8, r9 \n\t"
- "adcs r11, r10 \n\t"
- "adc r12, #0 \n\t"
- "stmia r0!, {r8} \n\t"
-
- "mov r10, #0 \n\t"
- "umull r8, r9, r2, r6 \n\t"
- "mov r14, r9 \n\t"
- "umlal r8, r9, r3, r5 \n\t"
- "cmp r14, r9 \n\t"
- "it hi \n\t"
- "adchi r10, #0 \n\t"
- "adds r8, r8 \n\t"
- "adcs r9, r9 \n\t"
- "adc r10, r10 \n\t"
- "mov r14, r9 \n\t"
- "umlal r8, r9, r4, r4 \n\t"
- "cmp r14, r9 \n\t"
- "it hi \n\t"
- "adchi r10, #0 \n\t"
- "adds r8, r11 \n\t"
- "adcs r9, r12 \n\t"
- "adc r10, #0 \n\t"
- "stmia r0!, {r8} \n\t"
-
- "mov r12, #0 \n\t"
- "umull r8, r11, r2, r7 \n\t"
- "mov r14, r11 \n\t"
- "umlal r8, r11, r3, r6 \n\t"
- "cmp r14, r11 \n\t"
- "it hi \n\t"
- "adchi r12, #0 \n\t"
- "mov r14, r11 \n\t"
- "umlal r8, r11, r4, r5 \n\t"
- "cmp r14, r11 \n\t"
- "it hi \n\t"
- "adchi r12, #0 \n\t"
- "adds r8, r8 \n\t"
- "adcs r11, r11 \n\t"
- "adc r12, r12 \n\t"
- "adds r8, r9 \n\t"
- "adcs r11, r10 \n\t"
- "adc r12, #0 \n\t"
- "stmia r0!, {r8} \n\t"
-
- "ldmia r1!, {r2} \n\t"
- "mov r10, #0 \n\t"
- "umull r8, r9, r3, r7 \n\t"
- "mov r14, r9 \n\t"
- "umlal r8, r9, r4, r6 \n\t"
- "cmp r14, r9 \n\t"
- "it hi \n\t"
- "adchi r10, #0 \n\t"
- "ldr r14, [r0] \n\t"
- "adds r8, r14 \n\t"
- "adcs r9, #0 \n\t"
- "adc r10, #0 \n\t"
- "adds r8, r8 \n\t"
- "adcs r9, r9 \n\t"
- "adc r10, r10 \n\t"
- "mov r14, r9 \n\t"
- "umlal r8, r9, r5, r5 \n\t"
- "cmp r14, r9 \n\t"
- "it hi \n\t"
- "adchi r10, #0 \n\t"
- "adds r8, r11 \n\t"
- "adcs r9, r12 \n\t"
- "adc r10, #0 \n\t"
- "stmia r0!, {r8} \n\t"
-
- "mov r12, #0 \n\t"
- "umull r8, r11, r3, r2 \n\t"
- "mov r14, r11 \n\t"
- "umlal r8, r11, r4, r7 \n\t"
- "cmp r14, r11 \n\t"
- "it hi \n\t"
- "adchi r12, #0 \n\t"
- "mov r14, r11 \n\t"
- "umlal r8, r11, r5, r6 \n\t"
- "cmp r14, r11 \n\t"
- "it hi \n\t"
- "adchi r12, #0 \n\t"
- "ldr r14, [r0] \n\t"
- "adds r8, r14 \n\t"
- "adcs r11, #0 \n\t"
- "adc r12, #0 \n\t"
- "adds r8, r8 \n\t"
- "adcs r11, r11 \n\t"
- "adc r12, r12 \n\t"
- "adds r8, r9 \n\t"
- "adcs r11, r10 \n\t"
- "adc r12, #0 \n\t"
- "stmia r0!, {r8} \n\t"
-
- "ldmia r1!, {r3} \n\t"
- "mov r10, #0 \n\t"
- "umull r8, r9, r4, r2 \n\t"
- "mov r14, r9 \n\t"
- "umlal r8, r9, r5, r7 \n\t"
- "cmp r14, r9 \n\t"
- "it hi \n\t"
- "adchi r10, #0 \n\t"
- "ldr r14, [r0] \n\t"
- "adds r8, r14 \n\t"
- "adcs r9, #0 \n\t"
- "adc r10, #0 \n\t"
- "adds r8, r8 \n\t"
- "adcs r9, r9 \n\t"
- "adc r10, r10 \n\t"
- "mov r14, r9 \n\t"
- "umlal r8, r9, r6, r6 \n\t"
- "cmp r14, r9 \n\t"
- "it hi \n\t"
- "adchi r10, #0 \n\t"
- "adds r8, r11 \n\t"
- "adcs r9, r12 \n\t"
- "adc r10, #0 \n\t"
- "stmia r0!, {r8} \n\t"
-
- "mov r12, #0 \n\t"
- "umull r8, r11, r4, r3 \n\t"
- "mov r14, r11 \n\t"
- "umlal r8, r11, r5, r2 \n\t"
- "cmp r14, r11 \n\t"
- "it hi \n\t"
- "adchi r12, #0 \n\t"
- "mov r14, r11 \n\t"
- "umlal r8, r11, r6, r7 \n\t"
- "cmp r14, r11 \n\t"
- "it hi \n\t"
- "adchi r12, #0 \n\t"
- "ldr r14, [r0] \n\t"
- "adds r8, r14 \n\t"
- "adcs r11, #0 \n\t"
- "adc r12, #0 \n\t"
- "adds r8, r8 \n\t"
- "adcs r11, r11 \n\t"
- "adc r12, r12 \n\t"
- "adds r8, r9 \n\t"
- "adcs r11, r10 \n\t"
- "adc r12, #0 \n\t"
- "stmia r0!, {r8} \n\t"
-
- "mov r10, #0 \n\t"
- "umull r8, r9, r5, r3 \n\t"
- "mov r14, r9 \n\t"
- "umlal r8, r9, r6, r2 \n\t"
- "cmp r14, r9 \n\t"
- "it hi \n\t"
- "adchi r10, #0 \n\t"
- "adds r8, r8 \n\t"
- "adcs r9, r9 \n\t"
- "adc r10, r10 \n\t"
- "mov r14, r9 \n\t"
- "umlal r8, r9, r7, r7 \n\t"
- "cmp r14, r9 \n\t"
- "it hi \n\t"
- "adchi r10, #0 \n\t"
- "adds r8, r11 \n\t"
- "adcs r9, r12 \n\t"
- "adc r10, #0 \n\t"
- "stmia r0!, {r8} \n\t"
-
- "mov r12, #0 \n\t"
- "umull r8, r11, r6, r3 \n\t"
- "mov r14, r11 \n\t"
- "umlal r8, r11, r7, r2 \n\t"
- "cmp r14, r11 \n\t"
- "it hi \n\t"
- "adchi r12, #0 \n\t"
- "adds r8, r8 \n\t"
- "adcs r11, r11 \n\t"
- "adc r12, r12 \n\t"
- "adds r8, r9 \n\t"
- "adcs r11, r10 \n\t"
- "adc r12, #0 \n\t"
- "stmia r0!, {r8} \n\t"
-
- "mov r8, #0 \n\t"
- "umull r1, r10, r7, r3 \n\t"
- "adds r1, r1 \n\t"
- "adcs r10, r10 \n\t"
- "adc r8, #0 \n\t"
- "adds r11, r1 \n\t"
- "adcs r12, r10 \n\t"
- "adc r8, #0 \n\t"
- "umull r1, r10, r2, r2 \n\t"
- "adds r11, r1 \n\t"
- "adcs r12, r10 \n\t"
- "adc r8, #0 \n\t"
- "stmia r0!, {r11} \n\t"
-
- "mov r11, #0 \n\t"
- "umull r1, r10, r2, r3 \n\t"
- "adds r1, r1 \n\t"
- "adcs r10, r10 \n\t"
- "adc r11, #0 \n\t"
- "adds r12, r1 \n\t"
- "adcs r8, r10 \n\t"
- "adc r11, #0 \n\t"
- "stmia r0!, {r12} \n\t"
-
- "umull r1, r10, r3, r3 \n\t"
- "adds r8, r1 \n\t"
- "adcs r11, r10 \n\t"
- "stmia r0!, {r8, r11} \n\t"
- #if (uECC_PLATFORM != uECC_arm_thumb2)
- ".syntax divided \n\t"
- #endif
- : "+r" (r0), "+r" (r1)
- :
- : "r2", "r3", "r4", "r5", "r6", "r7", "r8", "r9", "r10", "r11", "r12", "r14", "cc", "memory"
- );
-}
-#define asm_square 1
-#endif /* (uECC_WORDS == 8) */
-#endif /* uECC_SQUARE_FUNC */
-
-#endif /* (uECC_PLATFORM != uECC_arm_thumb) */
-#endif /* (uECC_ASM == uECC_asm_fast) */
-
-#if !asm_add
-static uint32_t vli_add(uint32_t *result, const uint32_t *left, const uint32_t *right) {
- uint32_t counter = uECC_WORDS;
- uint32_t carry = 0;
- uint32_t left_word;
- uint32_t right_word;
-
- __asm__ volatile (
- ".syntax unified \n\t"
- "1: \n\t"
- "ldmia %[lptr]!, {%[left]} \n\t" /* Load left word. */
- "ldmia %[rptr]!, {%[right]} \n\t" /* Load right word. */
- "lsrs %[carry], #1 \n\t" /* Set up carry flag (carry = 0 after this). */
- "adcs %[left], %[right] \n\t" /* Add with carry. */
- "adcs %[carry], %[carry] \n\t" /* Store carry bit. */
- "stmia %[dptr]!, {%[left]} \n\t" /* Store result word. */
- "subs %[ctr], #1 \n\t" /* Decrement counter. */
- "bne 1b \n\t" /* Loop until counter == 0. */
- #if (uECC_PLATFORM != uECC_arm_thumb2)
- ".syntax divided \n\t"
- #endif
- #if (uECC_PLATFORM == uECC_arm_thumb)
- : [dptr] "+l" (result), [lptr] "+l" (left), [rptr] "+l" (right),
- [ctr] "+l" (counter), [carry] "+l" (carry),
- [left] "=l" (left_word), [right] "=l" (right_word)
- #else
- : [dptr] "+r" (result), [lptr] "+r" (left), [rptr] "+r" (right),
- [ctr] "+r" (counter), [carry] "+r" (carry),
- [left] "=r" (left_word), [right] "=r" (right_word)
- #endif
- :
- : "cc", "memory"
- );
- return carry;
-}
-#define asm_add 1
-#endif
-
-#if !asm_sub
-static uint32_t vli_sub(uint32_t *result, const uint32_t *left, const uint32_t *right) {
- uint32_t counter = uECC_WORDS;
- uint32_t carry = 1; /* carry = 1 initially (means don't borrow) */
- uint32_t left_word;
- uint32_t right_word;
-
- __asm__ volatile (
- ".syntax unified \n\t"
- "1: \n\t"
- "ldmia %[lptr]!, {%[left]} \n\t" /* Load left word. */
- "ldmia %[rptr]!, {%[right]} \n\t" /* Load right word. */
- "lsrs %[carry], #1 \n\t" /* Set up carry flag (carry = 0 after this). */
- "sbcs %[left], %[right] \n\t" /* Subtract with borrow. */
- "adcs %[carry], %[carry] \n\t" /* Store carry bit. */
- "stmia %[dptr]!, {%[left]} \n\t" /* Store result word. */
- "subs %[ctr], #1 \n\t" /* Decrement counter. */
- "bne 1b \n\t" /* Loop until counter == 0. */
- #if (uECC_PLATFORM != uECC_arm_thumb2)
- ".syntax divided \n\t"
- #endif
- #if (uECC_PLATFORM == uECC_arm_thumb)
- : [dptr] "+l" (result), [lptr] "+l" (left), [rptr] "+l" (right),
- [ctr] "+l" (counter), [carry] "+l" (carry),
- [left] "=l" (left_word), [right] "=l" (right_word)
- #else
- : [dptr] "+r" (result), [lptr] "+r" (left), [rptr] "+r" (right),
- [ctr] "+r" (counter), [carry] "+r" (carry),
- [left] "=r" (left_word), [right] "=r" (right_word)
- #endif
- :
- : "cc", "memory"
- );
- return !carry;
-}
-#define asm_sub 1
-#endif
-
-#if !asm_mult
-static void vli_mult(uint32_t *result, const uint32_t *left, const uint32_t *right) {
-#if (uECC_PLATFORM != uECC_arm_thumb)
- uint32_t c0 = 0;
- uint32_t c1 = 0;
- uint32_t c2 = 0;
- uint32_t k = 0;
- uint32_t i;
- uint32_t t0, t1;
-
- __asm__ volatile (
- ".syntax unified \n\t"
-
- "1: \n\t" /* outer loop (k < uECC_WORDS) */
- "movs %[i], #0 \n\t" /* i = 0 */
- "b 3f \n\t"
-
- "2: \n\t" /* outer loop (k >= uECC_WORDS) */
- "movs %[i], %[k] \n\t" /* i = k */
- "subs %[i], %[eccdm1] \n\t" /* i = k - (uECC_WORDS - 1) (times 4) */
-
- "3: \n\t" /* inner loop */
- "subs %[t0], %[k], %[i] \n\t" /* t0 = k-i */
-
- "ldr %[t1], [%[right], %[t0]] \n\t" /* t1 = right[k - i] */
- "ldr %[t0], [%[left], %[i]] \n\t" /* t0 = left[i] */
-
- "umull %[t0], %[t1], %[t0], %[t1] \n\t" /* (t0, t1) = left[i] * right[k - i] */
-
- "adds %[c0], %[t0] \n\t" /* add low word to c0 */
- "adcs %[c1], %[t1] \n\t" /* add high word to c1, including carry */
- "adcs %[c2], #0 \n\t" /* add carry to c2 */
-
- "adds %[i], #4 \n\t" /* i += 4 */
- "cmp %[i], %[eccd] \n\t" /* i < uECC_WORDS (times 4)? */
- "bge 4f \n\t" /* if not, exit the loop */
- "cmp %[i], %[k] \n\t" /* i <= k? */
- "ble 3b \n\t" /* if so, continue looping */
-
- "4: \n\t" /* end inner loop */
-
- "str %[c0], [%[result], %[k]] \n\t" /* result[k] = c0 */
- "mov %[c0], %[c1] \n\t" /* c0 = c1 */
- "mov %[c1], %[c2] \n\t" /* c1 = c2 */
- "movs %[c2], #0 \n\t" /* c2 = 0 */
- "adds %[k], #4 \n\t" /* k += 4 */
- "cmp %[k], %[eccd] \n\t" /* k < uECC_WORDS (times 4) ? */
- "blt 1b \n\t" /* if not, loop back, start with i = 0 */
- "cmp %[k], %[eccd2m1] \n\t" /* k < uECC_WORDS * 2 - 1 (times 4) ? */
- "blt 2b \n\t" /* if not, loop back, start with i = (k + 1) - uECC_WORDS */
- /* end outer loop */
-
- "str %[c0], [%[result], %[k]] \n\t" /* result[uECC_WORDS * 2 - 1] = c0 */
- #if (uECC_PLATFORM != uECC_arm_thumb2)
- ".syntax divided \n\t"
- #endif
- : [c0] "+r" (c0), [c1] "+r" (c1), [c2] "+r" (c2),
- [k] "+r" (k), [i] "=&r" (i), [t0] "=&r" (t0), [t1] "=&r" (t1)
- : [result] "r" (result), [left] "r" (left), [right] "r" (right),
- [eccd] "I" (uECC_WORDS * 4), [eccdm1] "I" ((uECC_WORDS-1) * 4),
- [eccd2m1] "I" ((uECC_WORDS * 2 - 1) * 4)
- : "cc", "memory"
- );
-
-#else /* Thumb-1 */
-
- register uint32_t *r0 __asm__("r0") = result;
- register const uint32_t *r1 __asm__("r1") = left;
- register const uint32_t *r2 __asm__("r2") = right;
-
- __asm__ volatile (
- ".syntax unified \n\t"
- "movs r3, #0 \n\t" /* c0 = 0 */
- "movs r4, #0 \n\t" /* c1 = 0 */
- "movs r5, #0 \n\t" /* c2 = 0 */
- "movs r6, #0 \n\t" /* k = 0 */
-
- "push {r0} \n\t" /* keep result on the stack */
-
- "1: \n\t" /* outer loop (k < uECC_WORDS) */
- "movs r7, #0 \n\t" /* r7 = i = 0 */
- "b 3f \n\t"
-
- "2: \n\t" /* outer loop (k >= uECC_WORDS) */
- "movs r7, r6 \n\t" /* r7 = k */
- "subs r7, %[eccdm1] \n\t" /* r7 = i = k - (uECC_WORDS - 1) (times 4) */
-
- "3: \n\t" /* inner loop */
- "push {r3, r4, r5, r6} \n\t" /* push things, r3 (c0) is at the top of stack. */
- "subs r0, r6, r7 \n\t" /* r0 = k - i */
-
- "ldr r4, [r2, r0] \n\t" /* r4 = right[k - i] */
- "ldr r0, [r1, r7] \n\t" /* r0 = left[i] */
-
- "lsrs r3, r0, #16 \n\t" /* r3 = a1 */
- "uxth r0, r0 \n\t" /* r0 = a0 */
-
- "lsrs r5, r4, #16 \n\t" /* r5 = b1 */
- "uxth r4, r4 \n\t" /* r4 = b0 */
-
- "movs r6, r3 \n\t" /* r6 = a1 */
- "muls r6, r5, r6 \n\t" /* r6 = a1 * b1 */
- "muls r3, r4, r3 \n\t" /* r3 = b0 * a1 */
- "muls r5, r0, r5 \n\t" /* r5 = a0 * b1 */
- "muls r0, r4, r0 \n\t" /* r0 = a0 * b0 */
-
- "movs r4, #0 \n\t" /* r4 = 0 */
- "adds r3, r5 \n\t" /* r3 = b0 * a1 + a0 * b1 */
- "adcs r4, r4 \n\t" /* r4 = carry */
- "lsls r4, #16 \n\t" /* r4 = carry << 16 */
- "adds r6, r4 \n\t" /* r6 = a1 * b1 + carry */
-
- "lsls r4, r3, #16 \n\t" /* r4 = (b0 * a1 + a0 * b1) << 16 */
- "lsrs r3, #16 \n\t" /* r3 = (b0 * a1 + a0 * b1) >> 16 */
- "adds r0, r4 \n\t" /* r0 = low word = a0 * b0 + ((b0 * a1 + a0 * b1) << 16) */
- "adcs r6, r3 \n\t" /* r6 = high word = a1 * b1 + carry + ((b0 * a1 + a0 * b1) >> 16) */
-
- "pop {r3, r4, r5} \n\t" /* r3 = c0, r4 = c1, r5 = c2 */
- "adds r3, r0 \n\t" /* add low word to c0 */
- "adcs r4, r6 \n\t" /* add high word to c1, including carry */
- "movs r0, #0 \n\t" /* r0 = 0 (does not affect carry bit) */
- "adcs r5, r0 \n\t" /* add carry to c2 */
-
- "pop {r6} \n\t" /* r6 = k */
-
- "adds r7, #4 \n\t" /* i += 4 */
- "cmp r7, %[eccd] \n\t" /* i < uECC_WORDS (times 4)? */
- "bge 4f \n\t" /* if not, exit the loop */
- "cmp r7, r6 \n\t" /* i <= k? */
- "ble 3b \n\t" /* if so, continue looping */
-
- "4: \n\t" /* end inner loop */
-
- "ldr r0, [sp, #0] \n\t" /* r0 = result */
-
- "str r3, [r0, r6] \n\t" /* result[k] = c0 */
- "mov r3, r4 \n\t" /* c0 = c1 */
- "mov r4, r5 \n\t" /* c1 = c2 */
- "movs r5, #0 \n\t" /* c2 = 0 */
- "adds r6, #4 \n\t" /* k += 4 */
- "cmp r6, %[eccd] \n\t" /* k < uECC_WORDS (times 4) ? */
- "blt 1b \n\t" /* if not, loop back, start with i = 0 */
- "cmp r6, %[eccd2m1] \n\t" /* k < uECC_WORDS * 2 - 1 (times 4) ? */
- "blt 2b \n\t" /* if not, loop back, start with i = (k + 1) - uECC_WORDS */
- /* end outer loop */
-
- "str r3, [r0, r6] \n\t" /* result[uECC_WORDS * 2 - 1] = c0 */
- "pop {r0} \n\t" /* pop result off the stack */
-
- ".syntax divided \n\t"
- :
- : [r0] "l" (r0), [r1] "l" (r1), [r2] "l" (r2), [eccd] "I" (uECC_WORDS * 4), [eccdm1] "I" ((uECC_WORDS-1) * 4), [eccd2m1] "I" ((uECC_WORDS * 2 - 1) * 4)
- : "r3", "r4", "r5", "r6", "r7", "cc", "memory"
- );
-#endif
-}
-#define asm_mult 1
-#endif /* !asm_mult */
-
-#if uECC_SQUARE_FUNC
-#if !asm_square
-static void vli_square(uint32_t *result, const uint32_t *left) {
-#if (uECC_PLATFORM != uECC_arm_thumb)
- uint32_t c0 = 0;
- uint32_t c1 = 0;
- uint32_t c2 = 0;
- uint32_t k = 0;
- uint32_t i, tt;
- uint32_t t0, t1;
-
- __asm__ volatile (
- ".syntax unified \n\t"
-
- "1: \n\t" /* outer loop (k < uECC_WORDS) */
- "movs %[i], #0 \n\t" /* i = 0 */
- "b 3f \n\t"
-
- "2: \n\t" /* outer loop (k >= uECC_WORDS) */
- "movs %[i], %[k] \n\t" /* i = k */
- "subs %[i], %[eccdm1] \n\t" /* i = k - (uECC_WORDS - 1) (times 4) */
-
- "3: \n\t" /* inner loop */
- "subs %[tt], %[k], %[i] \n\t" /* tt = k-i */
-
- "ldr %[t1], [%[left], %[tt]] \n\t" /* t1 = left[k - i] */
- "ldr %[t0], [%[left], %[i]] \n\t" /* t0 = left[i] */
-
- "umull %[t0], %[t1], %[t0], %[t1] \n\t" /* (t0, t1) = left[i] * right[k - i] */
-
- "cmp %[i], %[tt] \n\t" /* (i < k - i) ? */
- "bge 4f \n\t" /* if i >= k - i, skip */
- "lsls %[t1], #1 \n\t" /* high word << 1 */
- "adc %[c2], #0 \n\t" /* add carry bit to c2 */
- "lsls %[t0], #1 \n\t" /* low word << 1 */
- "adc %[t1], #0 \n\t" /* add carry bit to high word */
-
- "4: \n\t"
-
- "adds %[c0], %[t0] \n\t" /* add low word to c0 */
- "adcs %[c1], %[t1] \n\t" /* add high word to c1, including carry */
- "adc %[c2], #0 \n\t" /* add carry to c2 */
-
- "adds %[i], #4 \n\t" /* i += 4 */
- "cmp %[i], %[k] \n\t" /* i <= k? */
- "bge 5f \n\t" /* if not, exit the loop */
- "subs %[tt], %[k], %[i] \n\t" /* tt = k - i */
- "cmp %[i], %[tt] \n\t" /* i <= k - i? */
- "ble 3b \n\t" /* if so, continue looping */
-
- "5: \n\t" /* end inner loop */
-
- "str %[c0], [%[result], %[k]] \n\t" /* result[k] = c0 */
- "mov %[c0], %[c1] \n\t" /* c0 = c1 */
- "mov %[c1], %[c2] \n\t" /* c1 = c2 */
- "movs %[c2], #0 \n\t" /* c2 = 0 */
- "adds %[k], #4 \n\t" /* k += 4 */
- "cmp %[k], %[eccd] \n\t" /* k < uECC_WORDS (times 4) ? */
- "blt 1b \n\t" /* if not, loop back, start with i = 0 */
- "cmp %[k], %[eccd2m1] \n\t" /* k < uECC_WORDS * 2 - 1 (times 4) ? */
- "blt 2b \n\t" /* if not, loop back, start with i = (k + 1) - uECC_WORDS */
- /* end outer loop */
-
- "str %[c0], [%[result], %[k]] \n\t" /* result[uECC_WORDS * 2 - 1] = c0 */
- #if (uECC_PLATFORM != uECC_arm_thumb2)
- ".syntax divided \n\t"
- #endif
- : [c0] "+r" (c0), [c1] "+r" (c1), [c2] "+r" (c2),
- [k] "+r" (k), [i] "=&r" (i), [tt] "=&r" (tt), [t0] "=&r" (t0), [t1] "=&r" (t1)
- : [result] "r" (result), [left] "r" (left),
- [eccd] "I" (uECC_WORDS * 4), [eccdm1] "I" ((uECC_WORDS-1) * 4),
- [eccd2m1] "I" ((uECC_WORDS * 2 - 1) * 4)
- : "cc", "memory"
- );
-
-#else
-
- register uint32_t *r0 __asm__("r0") = result;
- register const uint32_t *r1 __asm__("r1") = left;
-
- __asm__ volatile (
- ".syntax unified \n\t"
- "movs r2, #0 \n\t" /* c0 = 0 */
- "movs r3, #0 \n\t" /* c1 = 0 */
- "movs r4, #0 \n\t" /* c2 = 0 */
- "movs r5, #0 \n\t" /* k = 0 */
-
- "push {r0} \n\t" /* keep result on the stack */
-
- "1: \n\t" /* outer loop (k < uECC_WORDS) */
- "movs r6, #0 \n\t" /* r6 = i = 0 */
- "b 3f \n\t"
-
- "2: \n\t" /* outer loop (k >= uECC_WORDS) */
- "movs r6, r5 \n\t" /* r6 = k */
- "subs r6, %[eccdm1] \n\t" /* r6 = i = k - (uECC_WORDS - 1) (times 4) */
-
- "3: \n\t" /* inner loop */
- "push {r2, r3, r4, r5} \n\t" /* push things, r2 (c0) is at the top of stack. */
- "subs r7, r5, r6 \n\t" /* r7 = k - i */
-
- "ldr r3, [r1, r7] \n\t" /* r3 = left[k - i] */
- "ldr r0, [r1, r6] \n\t" /* r0 = left[i] */
-
- "lsrs r2, r0, #16 \n\t" /* r2 = a1 */
- "uxth r0, r0 \n\t" /* r0 = a0 */
-
- "lsrs r4, r3, #16 \n\t" /* r4 = b1 */
- "uxth r3, r3 \n\t" /* r3 = b0 */
-
- "movs r5, r2 \n\t" /* r5 = a1 */
- "muls r5, r4, r5 \n\t" /* r5 = a1 * b1 */
- "muls r2, r3, r2 \n\t" /* r2 = b0 * a1 */
- "muls r4, r0, r4 \n\t" /* r4 = a0 * b1 */
- "muls r0, r3, r0 \n\t" /* r0 = a0 * b0 */
-
- "movs r3, #0 \n\t" /* r3 = 0 */
- "adds r2, r4 \n\t" /* r2 = b0 * a1 + a0 * b1 */
- "adcs r3, r3 \n\t" /* r3 = carry */
- "lsls r3, #16 \n\t" /* r3 = carry << 16 */
- "adds r5, r3 \n\t" /* r5 = a1 * b1 + carry */
-
- "lsls r3, r2, #16 \n\t" /* r3 = (b0 * a1 + a0 * b1) << 16 */
- "lsrs r2, #16 \n\t" /* r2 = (b0 * a1 + a0 * b1) >> 16 */
- "adds r0, r3 \n\t" /* r0 = low word = a0 * b0 + ((b0 * a1 + a0 * b1) << 16) */
- "adcs r5, r2 \n\t" /* r5 = high word = a1 * b1 + carry + ((b0 * a1 + a0 * b1) >> 16) */
-
- "movs r3, #0 \n\t" /* r3 = 0 */
- "cmp r6, r7 \n\t" /* (i < k - i) ? */
- "mov r7, r3 \n\t" /* r7 = 0 (does not affect condition)*/
- "bge 4f \n\t" /* if i >= k - i, skip */
- "lsls r5, #1 \n\t" /* high word << 1 */
- "adcs r7, r3 \n\t" /* r7 = carry bit for c2 */
- "lsls r0, #1 \n\t" /* low word << 1 */
- "adcs r5, r3 \n\t" /* add carry from shift to high word */
-
- "4: \n\t"
- "pop {r2, r3, r4} \n\t" /* r2 = c0, r3 = c1, r4 = c2 */
- "adds r2, r0 \n\t" /* add low word to c0 */
- "adcs r3, r5 \n\t" /* add high word to c1, including carry */
- "movs r0, #0 \n\t" /* r0 = 0 (does not affect carry bit) */
- "adcs r4, r0 \n\t" /* add carry to c2 */
- "adds r4, r7 \n\t" /* add carry from doubling (if any) */
-
- "pop {r5} \n\t" /* r5 = k */
-
- "adds r6, #4 \n\t" /* i += 4 */
- "cmp r6, r5 \n\t" /* i <= k? */
- "bge 5f \n\t" /* if not, exit the loop */
- "subs r7, r5, r6 \n\t" /* r7 = k - i */
- "cmp r6, r7 \n\t" /* i <= k - i? */
- "ble 3b \n\t" /* if so, continue looping */
-
- "5: \n\t" /* end inner loop */
-
- "ldr r0, [sp, #0] \n\t" /* r0 = result */
-
- "str r2, [r0, r5] \n\t" /* result[k] = c0 */
- "mov r2, r3 \n\t" /* c0 = c1 */
- "mov r3, r4 \n\t" /* c1 = c2 */
- "movs r4, #0 \n\t" /* c2 = 0 */
- "adds r5, #4 \n\t" /* k += 4 */
- "cmp r5, %[eccd] \n\t" /* k < uECC_WORDS (times 4) ? */
- "blt 1b \n\t" /* if not, loop back, start with i = 0 */
- "cmp r5, %[eccd2m1] \n\t" /* k < uECC_WORDS * 2 - 1 (times 4) ? */
- "blt 2b \n\t" /* if not, loop back, start with i = (k + 1) - uECC_WORDS */
- /* end outer loop */
-
- "str r2, [r0, r5] \n\t" /* result[uECC_WORDS * 2 - 1] = c0 */
- "pop {r0} \n\t" /* pop result off the stack */
-
- ".syntax divided \n\t"
- : [r0] "+l" (r0), [r1] "+l" (r1)
- : [eccd] "I" (uECC_WORDS * 4), [eccdm1] "I" ((uECC_WORDS-1) * 4),
- [eccd2m1] "I" ((uECC_WORDS * 2 - 1) * 4)
- : "r2", "r3", "r4", "r5", "r6", "r7", "cc", "memory"
- );
-#endif
-}
-#define asm_square 1
-#endif /* !asm_square */
-#endif /* uECC_SQUARE_FUNC */
diff --git a/asm_arm_fast.inc b/asm_arm_fast.inc
new file mode 100644
index 0000000..150afa0
--- /dev/null
+++ b/asm_arm_fast.inc
@@ -0,0 +1,845 @@
+/* Copyright 2015, Kenneth MacKay. Licensed under the BSD 2-clause license. */
+
+#ifndef _UECC_ASM_ARM_FAST_H_
+#define _UECC_ASM_ARM_FAST_H_
+
+#if (uECC_SUPPORTS_secp256r1 || uECC_SUPPORTS_secp256k1)
+ #define uECC_MIN_WORDS 8
+#endif
+#if uECC_SUPPORTS_secp224r1
+ #undef uECC_MIN_WORDS
+ #define uECC_MIN_WORDS 7
+#endif
+#if uECC_SUPPORTS_secp192r1
+ #undef uECC_MIN_WORDS
+ #define uECC_MIN_WORDS 6
+#endif
+#if uECC_SUPPORTS_secp160r1
+ #undef uECC_MIN_WORDS
+ #define uECC_MIN_WORDS 5
+#endif
+
+#if (uECC_PLATFORM == uECC_arm_thumb)
+ #define REG_RW "+l"
+ #define REG_WRITE "=l"
+#else
+ #define REG_RW "+r"
+ #define REG_WRITE "=r"
+#endif
+
+#if (uECC_PLATFORM == uECC_arm_thumb2)
+ #define RESUME_SYNTAX
+#else
+ #define RESUME_SYNTAX ".syntax divided \n\t"
+#endif
+
+static uECC_word_t vli_add(uECC_word_t *result,
+ const uECC_word_t *left,
+ const uECC_word_t *right,
+ wordcount_t num_words) {
+#if (uECC_PLATFORM == uECC_arm_thumb) || (uECC_PLATFORM == uECC_arm_thumb2)
+ uint32_t jump = ((uECC_MAX_WORDS - num_words) * 4 + 5) * 2 + 1;
+#else /* ARM */
+ uint32_t jump = ((uECC_MAX_WORDS - num_words) * 4 + 5) * 4;
+#endif
+ uint32_t carry;
+ uint32_t left_word;
+ uint32_t right_word;
+
+ __asm__ volatile (
+ ".syntax unified \n\t"
+ "movs %[carry], #0 \n\t"
+ "mov %[left], pc \n\t"
+ "adds %[jump], %[left] \n\t"
+
+ "ldmia %[lptr]!, {%[left]} \n\t"
+ "ldmia %[rptr]!, {%[right]} \n\t"
+ "adds %[left], %[right] \n\t"
+ "stmia %[dptr]!, {%[left]} \n\t"
+
+ "bx %[jump] \n\t"
+
+ "ldmia %[lptr]!, {%[left]} \n\t"
+ "ldmia %[rptr]!, {%[right]} \n\t"
+ "adcs %[left], %[right] \n\t"
+ "stmia %[dptr]!, {%[left]} \n\t"
+
+ "ldmia %[lptr]!, {%[left]} \n\t"
+ "ldmia %[rptr]!, {%[right]} \n\t"
+ "adcs %[left], %[right] \n\t"
+ "stmia %[dptr]!, {%[left]} \n\t"
+
+ "ldmia %[lptr]!, {%[left]} \n\t"
+ "ldmia %[rptr]!, {%[right]} \n\t"
+ "adcs %[left], %[right] \n\t"
+ "stmia %[dptr]!, {%[left]} \n\t"
+
+ "ldmia %[lptr]!, {%[left]} \n\t"
+ "ldmia %[rptr]!, {%[right]} \n\t"
+ "adcs %[left], %[right] \n\t"
+ "stmia %[dptr]!, {%[left]} \n\t"
+
+ #if (uECC_MAX_WORDS >= 6)
+ "ldmia %[lptr]!, {%[left]} \n\t"
+ "ldmia %[rptr]!, {%[right]} \n\t"
+ "adcs %[left], %[right] \n\t"
+ "stmia %[dptr]!, {%[left]} \n\t"
+ #endif
+ #if (uECC_MAX_WORDS >= 7)
+ "ldmia %[lptr]!, {%[left]} \n\t"
+ "ldmia %[rptr]!, {%[right]} \n\t"
+ "adcs %[left], %[right] \n\t"
+ "stmia %[dptr]!, {%[left]} \n\t"
+ #endif
+ #if (uECC_MAX_WORDS >= 8)
+ "ldmia %[lptr]!, {%[left]} \n\t"
+ "ldmia %[rptr]!, {%[right]} \n\t"
+ "adcs %[left], %[right] \n\t"
+ "stmia %[dptr]!, {%[left]} \n\t"
+ #endif
+ "adcs %[carry], %[carry] \n\t"
+ RESUME_SYNTAX
+ : [dptr] REG_RW (result), [lptr] REG_RW (left), [rptr] REG_RW (right),
+ [carry] REG_WRITE (carry), [left] REG_WRITE (left_word), [right] REG_WRITE (right_word),
+ [jump] REG_RW (jump)
+ :
+ : "cc", "memory"
+ );
+ return carry;
+}
+#define asm_add 1
+
+static uECC_word_t vli_sub(uECC_word_t *result,
+ const uECC_word_t *left,
+ const uECC_word_t *right,
+ wordcount_t num_words) {
+#if (uECC_PLATFORM == uECC_arm_thumb) || (uECC_PLATFORM == uECC_arm_thumb2)
+ uint32_t jump = ((uECC_MAX_WORDS - num_words) * 4 + 5) * 2 + 1;
+#else /* ARM */
+ uint32_t jump = ((uECC_MAX_WORDS - num_words) * 4 + 5) * 4;
+#endif
+ uint32_t carry;
+ uint32_t left_word;
+ uint32_t right_word;
+
+ __asm__ volatile (
+ ".syntax unified \n\t"
+ "movs %[carry], #0 \n\t"
+ "mov %[left], pc \n\t"
+ "adds %[jump], %[left] \n\t"
+
+ "ldmia %[lptr]!, {%[left]} \n\t"
+ "ldmia %[rptr]!, {%[right]} \n\t"
+ "subs %[left], %[right] \n\t"
+ "stmia %[dptr]!, {%[left]} \n\t"
+
+ "bx %[jump] \n\t"
+
+ "ldmia %[lptr]!, {%[left]} \n\t"
+ "ldmia %[rptr]!, {%[right]} \n\t"
+ "sbcs %[left], %[right] \n\t"
+ "stmia %[dptr]!, {%[left]} \n\t"
+
+ "ldmia %[lptr]!, {%[left]} \n\t"
+ "ldmia %[rptr]!, {%[right]} \n\t"
+ "sbcs %[left], %[right] \n\t"
+ "stmia %[dptr]!, {%[left]} \n\t"
+
+ "ldmia %[lptr]!, {%[left]} \n\t"
+ "ldmia %[rptr]!, {%[right]} \n\t"
+ "sbcs %[left], %[right] \n\t"
+ "stmia %[dptr]!, {%[left]} \n\t"
+
+ "ldmia %[lptr]!, {%[left]} \n\t"
+ "ldmia %[rptr]!, {%[right]} \n\t"
+ "sbcs %[left], %[right] \n\t"
+ "stmia %[dptr]!, {%[left]} \n\t"
+
+ #if (uECC_MAX_WORDS >= 6)
+ "ldmia %[lptr]!, {%[left]} \n\t"
+ "ldmia %[rptr]!, {%[right]} \n\t"
+ "sbcs %[left], %[right] \n\t"
+ "stmia %[dptr]!, {%[left]} \n\t"
+ #endif
+ #if (uECC_MAX_WORDS >= 7)
+ "ldmia %[lptr]!, {%[left]} \n\t"
+ "ldmia %[rptr]!, {%[right]} \n\t"
+ "sbcs %[left], %[right] \n\t"
+ "stmia %[dptr]!, {%[left]} \n\t"
+ #endif
+ #if (uECC_MAX_WORDS >= 8)
+ "ldmia %[lptr]!, {%[left]} \n\t"
+ "ldmia %[rptr]!, {%[right]} \n\t"
+ "sbcs %[left], %[right] \n\t"
+ "stmia %[dptr]!, {%[left]} \n\t"
+ #endif
+ "adcs %[carry], %[carry] \n\t"
+ RESUME_SYNTAX
+ : [dptr] REG_RW (result), [lptr] REG_RW (left), [rptr] REG_RW (right),
+ [carry] REG_WRITE (carry), [left] REG_WRITE (left_word), [right] REG_WRITE (right_word),
+ [jump] REG_RW (jump)
+ :
+ : "cc", "memory"
+ );
+    return !carry; /* On ARM, the carry flag is an inverted borrow for subtraction:
+                      C is set when NO borrow occurred, so invert it for the caller. */
+}
+#define asm_sub 1
+
+#define FAST_MULT_ASM_5_TO_6 \
+ "cmp r3, #5 \n\t" \
+ "beq 1f \n\t" \
+ \
+ /* r4 = left high, r5 = right high */ \
+ "ldr r4, [r1] \n\t" \
+ "ldr r5, [r2] \n\t" \
+ \
+ "sub r0, #20 \n\t" \
+ "sub r1, #20 \n\t" \
+ "sub r2, #20 \n\t" \
+ \
+ "ldr r6, [r0] \n\t" \
+ "ldr r7, [r1], #4 \n\t" \
+ "ldr r8, [r2], #4 \n\t" \
+ "mov r14, #0 \n\t" \
+ "umull r9, r10, r4, r8 \n\t" \
+ "umull r11, r12, r5, r7 \n\t" \
+ "adds r9, r6 \n\t" \
+ "adc r10, #0 \n\t" \
+ "adds r9, r11 \n\t" \
+ "adcs r10, r12 \n\t" \
+ "adc r14, #0 \n\t" \
+ "str r9, [r0], #4 \n\t" \
+ \
+ "ldr r6, [r0] \n\t" \
+ "adds r10, r6 \n\t" \
+ "adcs r14, #0 \n\t" \
+ "ldr r7, [r1], #4 \n\t" \
+ "ldr r8, [r2], #4 \n\t" \
+ "mov r9, #0 \n\t" \
+ "umull r11, r12, r4, r8 \n\t" \
+ "adds r10, r11 \n\t" \
+ "adcs r14, r12 \n\t" \
+ "adc r9, #0 \n\t" \
+ "umull r11, r12, r5, r7 \n\t" \
+ "adds r10, r11 \n\t" \
+ "adcs r14, r12 \n\t" \
+ "adc r9, #0 \n\t" \
+ "str r10, [r0], #4 \n\t" \
+ \
+ "ldr r6, [r0] \n\t" \
+ "adds r14, r6 \n\t" \
+ "adcs r9, #0 \n\t" \
+ "ldr r7, [r1], #4 \n\t" \
+ "ldr r8, [r2], #4 \n\t" \
+ "mov r10, #0 \n\t" \
+ "umull r11, r12, r4, r8 \n\t" \
+ "adds r14, r11 \n\t" \
+ "adcs r9, r12 \n\t" \
+ "adc r10, #0 \n\t" \
+ "umull r11, r12, r5, r7 \n\t" \
+ "adds r14, r11 \n\t" \
+ "adcs r9, r12 \n\t" \
+ "adc r10, #0 \n\t" \
+ "str r14, [r0], #4 \n\t" \
+ \
+ "ldr r6, [r0] \n\t" \
+ "adds r9, r6 \n\t" \
+ "adcs r10, #0 \n\t" \
+ "ldr r7, [r1], #4 \n\t" \
+ "ldr r8, [r2], #4 \n\t" \
+ "mov r14, #0 \n\t" \
+ "umull r11, r12, r4, r8 \n\t" \
+ "adds r9, r11 \n\t" \
+ "adcs r10, r12 \n\t" \
+ "adc r14, #0 \n\t" \
+ "umull r11, r12, r5, r7 \n\t" \
+ "adds r9, r11 \n\t" \
+ "adcs r10, r12 \n\t" \
+ "adc r14, #0 \n\t" \
+ "str r9, [r0], #4 \n\t" \
+ \
+ "ldr r6, [r0] \n\t" \
+ "adds r10, r6 \n\t" \
+ "adcs r14, #0 \n\t" \
+ /* skip past already-loaded (r4, r5) */ \
+ "ldr r7, [r1], #8 \n\t" \
+ "ldr r8, [r2], #8 \n\t" \
+ "mov r9, #0 \n\t" \
+ "umull r11, r12, r4, r8 \n\t" \
+ "adds r10, r11 \n\t" \
+ "adcs r14, r12 \n\t" \
+ "adc r9, #0 \n\t" \
+ "umull r11, r12, r5, r7 \n\t" \
+ "adds r10, r11 \n\t" \
+ "adcs r14, r12 \n\t" \
+ "adc r9, #0 \n\t" \
+ "str r10, [r0], #4 \n\t" \
+ \
+ "umull r11, r12, r4, r5 \n\t" \
+ "adds r11, r14 \n\t" \
+ "adc r12, r9 \n\t" \
+ "stmia r0!, {r11, r12} \n\t"
+
+#define FAST_MULT_ASM_6_TO_7 \
+ "cmp r3, #6 \n\t" \
+ "beq 1f \n\t" \
+ \
+ /* r4 = left high, r5 = right high */ \
+ "ldr r4, [r1] \n\t" \
+ "ldr r5, [r2] \n\t" \
+ \
+ "sub r0, #24 \n\t" \
+ "sub r1, #24 \n\t" \
+ "sub r2, #24 \n\t" \
+ \
+ "ldr r6, [r0] \n\t" \
+ "ldr r7, [r1], #4 \n\t" \
+ "ldr r8, [r2], #4 \n\t" \
+ "mov r14, #0 \n\t" \
+ "umull r9, r10, r4, r8 \n\t" \
+ "umull r11, r12, r5, r7 \n\t" \
+ "adds r9, r6 \n\t" \
+ "adc r10, #0 \n\t" \
+ "adds r9, r11 \n\t" \
+ "adcs r10, r12 \n\t" \
+ "adc r14, #0 \n\t" \
+ "str r9, [r0], #4 \n\t" \
+ \
+ "ldr r6, [r0] \n\t" \
+ "adds r10, r6 \n\t" \
+ "adcs r14, #0 \n\t" \
+ "ldr r7, [r1], #4 \n\t" \
+ "ldr r8, [r2], #4 \n\t" \
+ "mov r9, #0 \n\t" \
+ "umull r11, r12, r4, r8 \n\t" \
+ "adds r10, r11 \n\t" \
+ "adcs r14, r12 \n\t" \
+ "adc r9, #0 \n\t" \
+ "umull r11, r12, r5, r7 \n\t" \
+ "adds r10, r11 \n\t" \
+ "adcs r14, r12 \n\t" \
+ "adc r9, #0 \n\t" \
+ "str r10, [r0], #4 \n\t" \
+ \
+ "ldr r6, [r0] \n\t" \
+ "adds r14, r6 \n\t" \
+ "adcs r9, #0 \n\t" \
+ "ldr r7, [r1], #4 \n\t" \
+ "ldr r8, [r2], #4 \n\t" \
+ "mov r10, #0 \n\t" \
+ "umull r11, r12, r4, r8 \n\t" \
+ "adds r14, r11 \n\t" \
+ "adcs r9, r12 \n\t" \
+ "adc r10, #0 \n\t" \
+ "umull r11, r12, r5, r7 \n\t" \
+ "adds r14, r11 \n\t" \
+ "adcs r9, r12 \n\t" \
+ "adc r10, #0 \n\t" \
+ "str r14, [r0], #4 \n\t" \
+ \
+ "ldr r6, [r0] \n\t" \
+ "adds r9, r6 \n\t" \
+ "adcs r10, #0 \n\t" \
+ "ldr r7, [r1], #4 \n\t" \
+ "ldr r8, [r2], #4 \n\t" \
+ "mov r14, #0 \n\t" \
+ "umull r11, r12, r4, r8 \n\t" \
+ "adds r9, r11 \n\t" \
+ "adcs r10, r12 \n\t" \
+ "adc r14, #0 \n\t" \
+ "umull r11, r12, r5, r7 \n\t" \
+ "adds r9, r11 \n\t" \
+ "adcs r10, r12 \n\t" \
+ "adc r14, #0 \n\t" \
+ "str r9, [r0], #4 \n\t" \
+ \
+ "ldr r6, [r0] \n\t" \
+ "adds r10, r6 \n\t" \
+ "adcs r14, #0 \n\t" \
+ "ldr r7, [r1], #4 \n\t" \
+ "ldr r8, [r2], #4 \n\t" \
+ "mov r9, #0 \n\t" \
+ "umull r11, r12, r4, r8 \n\t" \
+ "adds r10, r11 \n\t" \
+ "adcs r14, r12 \n\t" \
+ "adc r9, #0 \n\t" \
+ "umull r11, r12, r5, r7 \n\t" \
+ "adds r10, r11 \n\t" \
+ "adcs r14, r12 \n\t" \
+ "adc r9, #0 \n\t" \
+ "str r10, [r0], #4 \n\t" \
+ \
+ "ldr r6, [r0] \n\t" \
+ "adds r14, r6 \n\t" \
+ "adcs r9, #0 \n\t" \
+ /* skip past already-loaded (r4, r5) */ \
+ "ldr r7, [r1], #8 \n\t" \
+ "ldr r8, [r2], #8 \n\t" \
+ "mov r10, #0 \n\t" \
+ "umull r11, r12, r4, r8 \n\t" \
+ "adds r14, r11 \n\t" \
+ "adcs r9, r12 \n\t" \
+ "adc r10, #0 \n\t" \
+ "umull r11, r12, r5, r7 \n\t" \
+ "adds r14, r11 \n\t" \
+ "adcs r9, r12 \n\t" \
+ "adc r10, #0 \n\t" \
+ "str r14, [r0], #4 \n\t" \
+ \
+ "umull r11, r12, r4, r5 \n\t" \
+ "adds r11, r9 \n\t" \
+ "adc r12, r10 \n\t" \
+ "stmia r0!, {r11, r12} \n\t"
+
+#define FAST_MULT_ASM_7_TO_8 \
+ "cmp r3, #7 \n\t" \
+ "beq 1f \n\t" \
+ \
+ /* r4 = left high, r5 = right high */ \
+ "ldr r4, [r1] \n\t" \
+ "ldr r5, [r2] \n\t" \
+ \
+ "sub r0, #28 \n\t" \
+ "sub r1, #28 \n\t" \
+ "sub r2, #28 \n\t" \
+ \
+ "ldr r6, [r0] \n\t" \
+ "ldr r7, [r1], #4 \n\t" \
+ "ldr r8, [r2], #4 \n\t" \
+ "mov r14, #0 \n\t" \
+ "umull r9, r10, r4, r8 \n\t" \
+ "umull r11, r12, r5, r7 \n\t" \
+ "adds r9, r6 \n\t" \
+ "adc r10, #0 \n\t" \
+ "adds r9, r11 \n\t" \
+ "adcs r10, r12 \n\t" \
+ "adc r14, #0 \n\t" \
+ "str r9, [r0], #4 \n\t" \
+ \
+ "ldr r6, [r0] \n\t" \
+ "adds r10, r6 \n\t" \
+ "adcs r14, #0 \n\t" \
+ "ldr r7, [r1], #4 \n\t" \
+ "ldr r8, [r2], #4 \n\t" \
+ "mov r9, #0 \n\t" \
+ "umull r11, r12, r4, r8 \n\t" \
+ "adds r10, r11 \n\t" \
+ "adcs r14, r12 \n\t" \
+ "adc r9, #0 \n\t" \
+ "umull r11, r12, r5, r7 \n\t" \
+ "adds r10, r11 \n\t" \
+ "adcs r14, r12 \n\t" \
+ "adc r9, #0 \n\t" \
+ "str r10, [r0], #4 \n\t" \
+ \
+ "ldr r6, [r0] \n\t" \
+ "adds r14, r6 \n\t" \
+ "adcs r9, #0 \n\t" \
+ "ldr r7, [r1], #4 \n\t" \
+ "ldr r8, [r2], #4 \n\t" \
+ "mov r10, #0 \n\t" \
+ "umull r11, r12, r4, r8 \n\t" \
+ "adds r14, r11 \n\t" \
+ "adcs r9, r12 \n\t" \
+ "adc r10, #0 \n\t" \
+ "umull r11, r12, r5, r7 \n\t" \
+ "adds r14, r11 \n\t" \
+ "adcs r9, r12 \n\t" \
+ "adc r10, #0 \n\t" \
+ "str r14, [r0], #4 \n\t" \
+ \
+ "ldr r6, [r0] \n\t" \
+ "adds r9, r6 \n\t" \
+ "adcs r10, #0 \n\t" \
+ "ldr r7, [r1], #4 \n\t" \
+ "ldr r8, [r2], #4 \n\t" \
+ "mov r14, #0 \n\t" \
+ "umull r11, r12, r4, r8 \n\t" \
+ "adds r9, r11 \n\t" \
+ "adcs r10, r12 \n\t" \
+ "adc r14, #0 \n\t" \
+ "umull r11, r12, r5, r7 \n\t" \
+ "adds r9, r11 \n\t" \
+ "adcs r10, r12 \n\t" \
+ "adc r14, #0 \n\t" \
+ "str r9, [r0], #4 \n\t" \
+ \
+ "ldr r6, [r0] \n\t" \
+ "adds r10, r6 \n\t" \
+ "adcs r14, #0 \n\t" \
+ "ldr r7, [r1], #4 \n\t" \
+ "ldr r8, [r2], #4 \n\t" \
+ "mov r9, #0 \n\t" \
+ "umull r11, r12, r4, r8 \n\t" \
+ "adds r10, r11 \n\t" \
+ "adcs r14, r12 \n\t" \
+ "adc r9, #0 \n\t" \
+ "umull r11, r12, r5, r7 \n\t" \
+ "adds r10, r11 \n\t" \
+ "adcs r14, r12 \n\t" \
+ "adc r9, #0 \n\t" \
+ "str r10, [r0], #4 \n\t" \
+ \
+ "ldr r6, [r0] \n\t" \
+ "adds r14, r6 \n\t" \
+ "adcs r9, #0 \n\t" \
+ "ldr r7, [r1], #4 \n\t" \
+ "ldr r8, [r2], #4 \n\t" \
+ "mov r10, #0 \n\t" \
+ "umull r11, r12, r4, r8 \n\t" \
+ "adds r14, r11 \n\t" \
+ "adcs r9, r12 \n\t" \
+ "adc r10, #0 \n\t" \
+ "umull r11, r12, r5, r7 \n\t" \
+ "adds r14, r11 \n\t" \
+ "adcs r9, r12 \n\t" \
+ "adc r10, #0 \n\t" \
+ "str r14, [r0], #4 \n\t" \
+ \
+ "ldr r6, [r0] \n\t" \
+ "adds r9, r6 \n\t" \
+ "adcs r10, #0 \n\t" \
+ /* skip past already-loaded (r4, r5) */ \
+ "ldr r7, [r1], #8 \n\t" \
+ "ldr r8, [r2], #8 \n\t" \
+ "mov r14, #0 \n\t" \
+ "umull r11, r12, r4, r8 \n\t" \
+ "adds r9, r11 \n\t" \
+ "adcs r10, r12 \n\t" \
+ "adc r14, #0 \n\t" \
+ "umull r11, r12, r5, r7 \n\t" \
+ "adds r9, r11 \n\t" \
+ "adcs r10, r12 \n\t" \
+ "adc r14, #0 \n\t" \
+ "str r9, [r0], #4 \n\t" \
+ \
+ "umull r11, r12, r4, r5 \n\t" \
+ "adds r11, r10 \n\t" \
+ "adc r12, r14 \n\t" \
+ "stmia r0!, {r11, r12} \n\t"
+
+#if (uECC_PLATFORM != uECC_arm_thumb)
+static void vli_mult(uint32_t *result,
+ const uint32_t *left,
+ const uint32_t *right,
+ wordcount_t num_words) {
+ register uint32_t *r0 __asm__("r0") = result;
+ register const uint32_t *r1 __asm__("r1") = left;
+ register const uint32_t *r2 __asm__("r2") = right;
+ register uint32_t r3 __asm__("r3") = num_words;
+
+ __asm__ volatile (
+ ".syntax unified \n\t"
+ "push {r3} \n\t"
+
+#if (uECC_MIN_WORDS == 5)
+ FAST_MULT_ASM_5
+ "pop {r3} \n\t"
+ #if (uECC_MAX_WORDS > 5)
+ FAST_MULT_ASM_5_TO_6
+ #endif
+ #if (uECC_MAX_WORDS > 6)
+ FAST_MULT_ASM_6_TO_7
+ #endif
+ #if (uECC_MAX_WORDS > 7)
+ FAST_MULT_ASM_7_TO_8
+ #endif
+#elif (uECC_MIN_WORDS == 6)
+ FAST_MULT_ASM_6
+ "pop {r3} \n\t"
+ #if (uECC_MAX_WORDS > 6)
+ FAST_MULT_ASM_6_TO_7
+ #endif
+ #if (uECC_MAX_WORDS > 7)
+ FAST_MULT_ASM_7_TO_8
+ #endif
+#elif (uECC_MIN_WORDS == 7)
+ FAST_MULT_ASM_7
+ "pop {r3} \n\t"
+ #if (uECC_MAX_WORDS > 7)
+ FAST_MULT_ASM_7_TO_8
+ #endif
+#elif (uECC_MIN_WORDS == 8)
+ FAST_MULT_ASM_8
+ "pop {r3} \n\t"
+#endif
+
+ "1: \n\t"
+ RESUME_SYNTAX
+ : "+r" (r0), "+r" (r1), "+r" (r2)
+ : "r" (r3)
+ : "r4", "r5", "r6", "r7", "r8", "r9", "r10", "r11", "r12", "r14", "cc", "memory"
+ );
+}
+#define asm_mult 1
+
+#if uECC_SQUARE_FUNC
+
+#define FAST_SQUARE_ASM_5_TO_6 \
+ "cmp r2, #5 \n\t" \
+ "beq 1f \n\t" \
+ \
+ /* r3 = high */ \
+ "ldr r3, [r1] \n\t" \
+ \
+ "sub r0, #20 \n\t" \
+ "sub r1, #20 \n\t" \
+ \
+ /* Do off-center multiplication */ \
+ "ldr r14, [r1], #4 \n\t" \
+ "umull r4, r5, r3, r14 \n\t" \
+ "ldr r14, [r1], #4 \n\t" \
+ "umull r7, r6, r3, r14 \n\t" \
+ "adds r5, r7 \n\t" \
+ "ldr r14, [r1], #4 \n\t" \
+ "umull r8, r7, r3, r14 \n\t" \
+ "adcs r6, r8 \n\t" \
+ "ldr r14, [r1], #4 \n\t" \
+ "umull r9, r8, r3, r14 \n\t" \
+ "adcs r7, r9 \n\t" \
+ /* Skip already-loaded r3 */ \
+ "ldr r14, [r1], #8 \n\t" \
+ "umull r10, r9, r3, r14 \n\t" \
+ "adcs r8, r10 \n\t" \
+ "adcs r9, #0 \n\t" \
+ \
+ /* Multiply by 2 */ \
+ "mov r10, #0 \n\t" \
+ "adds r4, r4 \n\t" \
+ "adcs r5, r5 \n\t" \
+ "adcs r6, r6 \n\t" \
+ "adcs r7, r7 \n\t" \
+ "adcs r8, r8 \n\t" \
+ "adcs r9, r9 \n\t" \
+ "adcs r10, #0 \n\t" \
+ \
+ /* Add into previous */ \
+ "ldr r14, [r0] \n\t" \
+ "adds r4, r14 \n\t" \
+ "str r4, [r0], #4 \n\t" \
+ "ldr r14, [r0] \n\t" \
+ "adcs r5, r14 \n\t" \
+ "str r5, [r0], #4 \n\t" \
+ "ldr r14, [r0] \n\t" \
+ "adcs r6, r14 \n\t" \
+ "str r6, [r0], #4 \n\t" \
+ "ldr r14, [r0] \n\t" \
+ "adcs r7, r14 \n\t" \
+ "str r7, [r0], #4 \n\t" \
+ "ldr r14, [r0] \n\t" \
+ "adcs r8, r14 \n\t" \
+ "str r8, [r0], #4 \n\t" \
+ "adcs r9, #0 \n\t" \
+ "adcs r10, #0 \n\t" \
+ \
+ /* Perform center multiplication */ \
+ "umull r4, r5, r3, r3 \n\t" \
+ "adds r4, r9 \n\t" \
+ "adc r5, r10 \n\t" \
+ "stmia r0!, {r4, r5} \n\t"
+
+#define FAST_SQUARE_ASM_6_TO_7 \
+ "cmp r2, #6 \n\t" \
+ "beq 1f \n\t" \
+ \
+ /* r3 = high */ \
+ "ldr r3, [r1] \n\t" \
+ \
+ "sub r0, #24 \n\t" \
+ "sub r1, #24 \n\t" \
+ \
+ /* Do off-center multiplication */ \
+ "ldr r14, [r1], #4 \n\t" \
+ "umull r4, r5, r3, r14 \n\t" \
+ "ldr r14, [r1], #4 \n\t" \
+ "umull r7, r6, r3, r14 \n\t" \
+ "adds r5, r7 \n\t" \
+ "ldr r14, [r1], #4 \n\t" \
+ "umull r8, r7, r3, r14 \n\t" \
+ "adcs r6, r8 \n\t" \
+ "ldr r14, [r1], #4 \n\t" \
+ "umull r9, r8, r3, r14 \n\t" \
+ "adcs r7, r9 \n\t" \
+ "ldr r14, [r1], #4 \n\t" \
+ "umull r10, r9, r3, r14 \n\t" \
+ "adcs r8, r10 \n\t" \
+ /* Skip already-loaded r3 */ \
+ "ldr r14, [r1], #8 \n\t" \
+ "umull r11, r10, r3, r14 \n\t" \
+ "adcs r9, r11 \n\t" \
+ "adcs r10, #0 \n\t" \
+ \
+ /* Multiply by 2 */ \
+ "mov r11, #0 \n\t" \
+ "adds r4, r4 \n\t" \
+ "adcs r5, r5 \n\t" \
+ "adcs r6, r6 \n\t" \
+ "adcs r7, r7 \n\t" \
+ "adcs r8, r8 \n\t" \
+ "adcs r9, r9 \n\t" \
+ "adcs r10, r10 \n\t" \
+ "adcs r11, #0 \n\t" \
+ \
+ /* Add into previous */ \
+ "ldr r14, [r0] \n\t" \
+ "adds r4, r14 \n\t" \
+ "str r4, [r0], #4 \n\t" \
+ "ldr r14, [r0] \n\t" \
+ "adcs r5, r14 \n\t" \
+ "str r5, [r0], #4 \n\t" \
+ "ldr r14, [r0] \n\t" \
+ "adcs r6, r14 \n\t" \
+ "str r6, [r0], #4 \n\t" \
+ "ldr r14, [r0] \n\t" \
+ "adcs r7, r14 \n\t" \
+ "str r7, [r0], #4 \n\t" \
+ "ldr r14, [r0] \n\t" \
+ "adcs r8, r14 \n\t" \
+ "str r8, [r0], #4 \n\t" \
+ "ldr r14, [r0] \n\t" \
+ "adcs r9, r14 \n\t" \
+ "str r9, [r0], #4 \n\t" \
+ "adcs r10, #0 \n\t" \
+ "adcs r11, #0 \n\t" \
+ \
+ /* Perform center multiplication */ \
+ "umull r4, r5, r3, r3 \n\t" \
+ "adds r4, r10 \n\t" \
+ "adc r5, r11 \n\t" \
+ "stmia r0!, {r4, r5} \n\t"
+
+#define FAST_SQUARE_ASM_7_TO_8 \
+ "cmp r2, #7 \n\t" \
+ "beq 1f \n\t" \
+ \
+ /* r3 = high */ \
+ "ldr r3, [r1] \n\t" \
+ \
+ "sub r0, #28 \n\t" \
+ "sub r1, #28 \n\t" \
+ \
+ /* Do off-center multiplication */ \
+ "ldr r14, [r1], #4 \n\t" \
+ "umull r4, r5, r3, r14 \n\t" \
+ "ldr r14, [r1], #4 \n\t" \
+ "umull r7, r6, r3, r14 \n\t" \
+ "adds r5, r7 \n\t" \
+ "ldr r14, [r1], #4 \n\t" \
+ "umull r8, r7, r3, r14 \n\t" \
+ "adcs r6, r8 \n\t" \
+ "ldr r14, [r1], #4 \n\t" \
+ "umull r9, r8, r3, r14 \n\t" \
+ "adcs r7, r9 \n\t" \
+ "ldr r14, [r1], #4 \n\t" \
+ "umull r10, r9, r3, r14 \n\t" \
+ "adcs r8, r10 \n\t" \
+ "ldr r14, [r1], #4 \n\t" \
+ "umull r11, r10, r3, r14 \n\t" \
+ "adcs r9, r11 \n\t" \
+ /* Skip already-loaded r3 */ \
+ "ldr r14, [r1], #8 \n\t" \
+ "umull r12, r11, r3, r14 \n\t" \
+ "adcs r10, r12 \n\t" \
+ "adcs r11, #0 \n\t" \
+ \
+ /* Multiply by 2 */ \
+ "mov r12, #0 \n\t" \
+ "adds r4, r4 \n\t" \
+ "adcs r5, r5 \n\t" \
+ "adcs r6, r6 \n\t" \
+ "adcs r7, r7 \n\t" \
+ "adcs r8, r8 \n\t" \
+ "adcs r9, r9 \n\t" \
+ "adcs r10, r10 \n\t" \
+ "adcs r11, r11 \n\t" \
+ "adcs r12, #0 \n\t" \
+ \
+ /* Add into previous */ \
+ "ldr r14, [r0] \n\t" \
+ "adds r4, r14 \n\t" \
+ "str r4, [r0], #4 \n\t" \
+ "ldr r14, [r0] \n\t" \
+ "adcs r5, r14 \n\t" \
+ "str r5, [r0], #4 \n\t" \
+ "ldr r14, [r0] \n\t" \
+ "adcs r6, r14 \n\t" \
+ "str r6, [r0], #4 \n\t" \
+ "ldr r14, [r0] \n\t" \
+ "adcs r7, r14 \n\t" \
+ "str r7, [r0], #4 \n\t" \
+ "ldr r14, [r0] \n\t" \
+ "adcs r8, r14 \n\t" \
+ "str r8, [r0], #4 \n\t" \
+ "ldr r14, [r0] \n\t" \
+ "adcs r9, r14 \n\t" \
+ "str r9, [r0], #4 \n\t" \
+ "ldr r14, [r0] \n\t" \
+ "adcs r10, r14 \n\t" \
+ "str r10, [r0], #4 \n\t" \
+ "adcs r11, #0 \n\t" \
+ "adcs r12, #0 \n\t" \
+ \
+ /* Perform center multiplication */ \
+ "umull r4, r5, r3, r3 \n\t" \
+ "adds r4, r11 \n\t" \
+ "adc r5, r12 \n\t" \
+ "stmia r0!, {r4, r5} \n\t"
+
+static void vli_square(uECC_word_t *result, const uECC_word_t *left, wordcount_t num_words) {
+ register uint32_t *r0 __asm__("r0") = result;
+ register const uint32_t *r1 __asm__("r1") = left;
+ register uint32_t r2 __asm__("r2") = num_words;
+
+ __asm__ volatile (
+ ".syntax unified \n\t"
+ "push {r1, r2} \n\t"
+
+#if (uECC_MIN_WORDS == 5)
+ FAST_SQUARE_ASM_5
+ "pop {r1, r2} \n\t"
+ #if (uECC_MAX_WORDS > 5)
+ "add r1, #20 \n\t"
+ FAST_SQUARE_ASM_5_TO_6
+ #endif
+ #if (uECC_MAX_WORDS > 6)
+ FAST_SQUARE_ASM_6_TO_7
+ #endif
+ #if (uECC_MAX_WORDS > 7)
+ FAST_SQUARE_ASM_7_TO_8
+ #endif
+#elif (uECC_MIN_WORDS == 6)
+ FAST_SQUARE_ASM_6
+ "pop {r1, r2} \n\t"
+ #if (uECC_MAX_WORDS > 6)
+ "add r1, #24 \n\t"
+ FAST_SQUARE_ASM_6_TO_7
+ #endif
+ #if (uECC_MAX_WORDS > 7)
+ FAST_SQUARE_ASM_7_TO_8
+ #endif
+#elif (uECC_MIN_WORDS == 7)
+ FAST_SQUARE_ASM_7
+ "pop {r1, r2} \n\t"
+ #if (uECC_MAX_WORDS > 7)
+ "add r1, #28 \n\t"
+ FAST_SQUARE_ASM_7_TO_8
+ #endif
+#elif (uECC_MIN_WORDS == 8)
+ FAST_SQUARE_ASM_8
+ "pop {r1, r2} \n\t"
+#endif
+
+ "1: \n\t"
+ RESUME_SYNTAX
+ : "+r" (r0), "+r" (r1)
+ : "r" (r2)
+ : "r3", "r4", "r5", "r6", "r7", "r8", "r9", "r10", "r11", "r12", "r14", "cc", "memory"
+ );
+}
+#define asm_square 1
+#endif /* uECC_SQUARE_FUNC */
+
+#endif /* uECC_PLATFORM != uECC_arm_thumb */
+
+#endif /* _UECC_ASM_ARM_FAST_H_ */
diff --git a/asm_arm_mult_square.inc b/asm_arm_mult_square.inc
new file mode 100644
index 0000000..d46af78
--- /dev/null
+++ b/asm_arm_mult_square.inc
@@ -0,0 +1,1808 @@
+/* Copyright 2015, Kenneth MacKay. Licensed under the BSD 2-clause license. */
+
+#ifndef _UECC_ASM_ARM_MULT_SQUARE_H_
+#define _UECC_ASM_ARM_MULT_SQUARE_H_
+
+#define FAST_MULT_ASM_5 \
+ "add r0, 12 \n\t" \
+ "add r2, 12 \n\t" \
+ "ldmia r1!, {r3,r4} \n\t" \
+ "ldmia r2!, {r6,r7} \n\t" \
+ \
+ "umull r11, r12, r3, r6 \n\t" \
+ "stmia r0!, {r11} \n\t" \
+ \
+ "mov r10, #0 \n\t" \
+ "umull r11, r9, r3, r7 \n\t" \
+ "adds r12, r11 \n\t" \
+ "adc r9, #0 \n\t" \
+ "umull r11, r14, r4, r6 \n\t" \
+ "adds r12, r11 \n\t" \
+ "adcs r9, r14 \n\t" \
+ "adc r10, #0 \n\t" \
+ "stmia r0!, {r12} \n\t" \
+ \
+ "umull r12, r14, r4, r7 \n\t" \
+ "adds r9, r12 \n\t" \
+ "adc r10, r14 \n\t" \
+ "stmia r0!, {r9, r10} \n\t" \
+ \
+ "sub r0, 28 \n\t" \
+ "sub r2, 20 \n\t" \
+ "ldmia r2!, {r6,r7,r8} \n\t" \
+ "ldmia r1!, {r5} \n\t" \
+ \
+ "umull r11, r12, r3, r6 \n\t" \
+ "stmia r0!, {r11} \n\t" \
+ \
+ "mov r10, #0 \n\t" \
+ "umull r11, r9, r3, r7 \n\t" \
+ "adds r12, r11 \n\t" \
+ "adc r9, #0 \n\t" \
+ "umull r11, r14, r4, r6 \n\t" \
+ "adds r12, r11 \n\t" \
+ "adcs r9, r14 \n\t" \
+ "adc r10, #0 \n\t" \
+ "stmia r0!, {r12} \n\t" \
+ \
+ "mov r11, #0 \n\t" \
+ "umull r12, r14, r3, r8 \n\t" \
+ "adds r9, r12 \n\t" \
+ "adcs r10, r14 \n\t" \
+ "adc r11, #0 \n\t" \
+ "umull r12, r14, r4, r7 \n\t" \
+ "adds r9, r12 \n\t" \
+ "adcs r10, r14 \n\t" \
+ "adc r11, #0 \n\t" \
+ "umull r12, r14, r5, r6 \n\t" \
+ "adds r9, r12 \n\t" \
+ "adcs r10, r14 \n\t" \
+ "adc r11, #0 \n\t" \
+ "stmia r0!, {r9} \n\t" \
+ \
+ "ldmia r1!, {r3} \n\t" \
+ "mov r12, #0 \n\t" \
+ "umull r14, r9, r4, r8 \n\t" \
+ "adds r10, r14 \n\t" \
+ "adcs r11, r9 \n\t" \
+ "adc r12, #0 \n\t" \
+ "umull r14, r9, r5, r7 \n\t" \
+ "adds r10, r14 \n\t" \
+ "adcs r11, r9 \n\t" \
+ "adc r12, #0 \n\t" \
+ "umull r14, r9, r3, r6 \n\t" \
+ "adds r10, r14 \n\t" \
+ "adcs r11, r9 \n\t" \
+ "adc r12, #0 \n\t" \
+ "ldr r14, [r0] \n\t" \
+ "adds r10, r14 \n\t" \
+ "adcs r11, #0 \n\t" \
+ "adc r12, #0 \n\t" \
+ "stmia r0!, {r10} \n\t" \
+ \
+ "ldmia r1!, {r4} \n\t" \
+ "mov r14, #0 \n\t" \
+ "umull r9, r10, r5, r8 \n\t" \
+ "adds r11, r9 \n\t" \
+ "adcs r12, r10 \n\t" \
+ "adc r14, #0 \n\t" \
+ "umull r9, r10, r3, r7 \n\t" \
+ "adds r11, r9 \n\t" \
+ "adcs r12, r10 \n\t" \
+ "adc r14, #0 \n\t" \
+ "umull r9, r10, r4, r6 \n\t" \
+ "adds r11, r9 \n\t" \
+ "adcs r12, r10 \n\t" \
+ "adc r14, #0 \n\t" \
+ "ldr r9, [r0] \n\t" \
+ "adds r11, r9 \n\t" \
+ "adcs r12, #0 \n\t" \
+ "adc r14, #0 \n\t" \
+ "stmia r0!, {r11} \n\t" \
+ \
+ "ldmia r2!, {r6} \n\t" \
+ "mov r9, #0 \n\t" \
+ "umull r10, r11, r5, r6 \n\t" \
+ "adds r12, r10 \n\t" \
+ "adcs r14, r11 \n\t" \
+ "adc r9, #0 \n\t" \
+ "umull r10, r11, r3, r8 \n\t" \
+ "adds r12, r10 \n\t" \
+ "adcs r14, r11 \n\t" \
+ "adc r9, #0 \n\t" \
+ "umull r10, r11, r4, r7 \n\t" \
+ "adds r12, r10 \n\t" \
+ "adcs r14, r11 \n\t" \
+ "adc r9, #0 \n\t" \
+ "ldr r10, [r0] \n\t" \
+ "adds r12, r10 \n\t" \
+ "adcs r14, #0 \n\t" \
+ "adc r9, #0 \n\t" \
+ "stmia r0!, {r12} \n\t" \
+ \
+ "ldmia r2!, {r7} \n\t" \
+ "mov r10, #0 \n\t" \
+ "umull r11, r12, r5, r7 \n\t" \
+ "adds r14, r11 \n\t" \
+ "adcs r9, r12 \n\t" \
+ "adc r10, #0 \n\t" \
+ "umull r11, r12, r3, r6 \n\t" \
+ "adds r14, r11 \n\t" \
+ "adcs r9, r12 \n\t" \
+ "adc r10, #0 \n\t" \
+ "umull r11, r12, r4, r8 \n\t" \
+ "adds r14, r11 \n\t" \
+ "adcs r9, r12 \n\t" \
+ "adc r10, #0 \n\t" \
+ "ldr r11, [r0] \n\t" \
+ "adds r14, r11 \n\t" \
+ "adcs r9, #0 \n\t" \
+ "adc r10, #0 \n\t" \
+ "stmia r0!, {r14} \n\t" \
+ \
+ "mov r11, #0 \n\t" \
+ "umull r12, r14, r3, r7 \n\t" \
+ "adds r9, r12 \n\t" \
+ "adcs r10, r14 \n\t" \
+ "adc r11, #0 \n\t" \
+ "umull r12, r14, r4, r6 \n\t" \
+ "adds r9, r12 \n\t" \
+ "adcs r10, r14 \n\t" \
+ "adc r11, #0 \n\t" \
+ "stmia r0!, {r9} \n\t" \
+ \
+ "umull r14, r9, r4, r7 \n\t" \
+ "adds r10, r14 \n\t" \
+ "adc r11, r9 \n\t" \
+ "stmia r0!, {r10, r11} \n\t"
+
+#define FAST_MULT_ASM_6 \
+ "add r0, 12 \n\t" \
+ "add r2, 12 \n\t" \
+ "ldmia r1!, {r3,r4,r5} \n\t" \
+ "ldmia r2!, {r6,r7,r8} \n\t" \
+ \
+ "umull r11, r12, r3, r6 \n\t" \
+ "stmia r0!, {r11} \n\t" \
+ \
+ "mov r10, #0 \n\t" \
+ "umull r11, r9, r3, r7 \n\t" \
+ "adds r12, r11 \n\t" \
+ "adc r9, #0 \n\t" \
+ "umull r11, r14, r4, r6 \n\t" \
+ "adds r12, r11 \n\t" \
+ "adcs r9, r14 \n\t" \
+ "adc r10, #0 \n\t" \
+ "stmia r0!, {r12} \n\t" \
+ \
+ "mov r11, #0 \n\t" \
+ "umull r12, r14, r3, r8 \n\t" \
+ "adds r9, r12 \n\t" \
+ "adcs r10, r14 \n\t" \
+ "adc r11, #0 \n\t" \
+ "umull r12, r14, r4, r7 \n\t" \
+ "adds r9, r12 \n\t" \
+ "adcs r10, r14 \n\t" \
+ "adc r11, #0 \n\t" \
+ "umull r12, r14, r5, r6 \n\t" \
+ "adds r9, r12 \n\t" \
+ "adcs r10, r14 \n\t" \
+ "adc r11, #0 \n\t" \
+ "stmia r0!, {r9} \n\t" \
+ \
+ "mov r12, #0 \n\t" \
+ "umull r14, r9, r4, r8 \n\t" \
+ "adds r10, r14 \n\t" \
+ "adcs r11, r9 \n\t" \
+ "adc r12, #0 \n\t" \
+ "umull r14, r9, r5, r7 \n\t" \
+ "adds r10, r14 \n\t" \
+ "adcs r11, r9 \n\t" \
+ "adc r12, #0 \n\t" \
+ "stmia r0!, {r10} \n\t" \
+ \
+ "umull r9, r10, r5, r8 \n\t" \
+ "adds r11, r9 \n\t" \
+ "adc r12, r10 \n\t" \
+ "stmia r0!, {r11, r12} \n\t" \
+ \
+ "sub r0, 36 \n\t" \
+ "sub r2, 24 \n\t" \
+ "ldmia r2!, {r6,r7,r8} \n\t" \
+ \
+ "umull r11, r12, r3, r6 \n\t" \
+ "stmia r0!, {r11} \n\t" \
+ \
+ "mov r10, #0 \n\t" \
+ "umull r11, r9, r3, r7 \n\t" \
+ "adds r12, r11 \n\t" \
+ "adc r9, #0 \n\t" \
+ "umull r11, r14, r4, r6 \n\t" \
+ "adds r12, r11 \n\t" \
+ "adcs r9, r14 \n\t" \
+ "adc r10, #0 \n\t" \
+ "stmia r0!, {r12} \n\t" \
+ \
+ "mov r11, #0 \n\t" \
+ "umull r12, r14, r3, r8 \n\t" \
+ "adds r9, r12 \n\t" \
+ "adcs r10, r14 \n\t" \
+ "adc r11, #0 \n\t" \
+ "umull r12, r14, r4, r7 \n\t" \
+ "adds r9, r12 \n\t" \
+ "adcs r10, r14 \n\t" \
+ "adc r11, #0 \n\t" \
+ "umull r12, r14, r5, r6 \n\t" \
+ "adds r9, r12 \n\t" \
+ "adcs r10, r14 \n\t" \
+ "adc r11, #0 \n\t" \
+ "stmia r0!, {r9} \n\t" \
+ \
+ "ldmia r1!, {r3} \n\t" \
+ "mov r12, #0 \n\t" \
+ "umull r14, r9, r4, r8 \n\t" \
+ "adds r10, r14 \n\t" \
+ "adcs r11, r9 \n\t" \
+ "adc r12, #0 \n\t" \
+ "umull r14, r9, r5, r7 \n\t" \
+ "adds r10, r14 \n\t" \
+ "adcs r11, r9 \n\t" \
+ "adc r12, #0 \n\t" \
+ "umull r14, r9, r3, r6 \n\t" \
+ "adds r10, r14 \n\t" \
+ "adcs r11, r9 \n\t" \
+ "adc r12, #0 \n\t" \
+ "ldr r14, [r0] \n\t" \
+ "adds r10, r14 \n\t" \
+ "adcs r11, #0 \n\t" \
+ "adc r12, #0 \n\t" \
+ "stmia r0!, {r10} \n\t" \
+ \
+ "ldmia r1!, {r4} \n\t" \
+ "mov r14, #0 \n\t" \
+ "umull r9, r10, r5, r8 \n\t" \
+ "adds r11, r9 \n\t" \
+ "adcs r12, r10 \n\t" \
+ "adc r14, #0 \n\t" \
+ "umull r9, r10, r3, r7 \n\t" \
+ "adds r11, r9 \n\t" \
+ "adcs r12, r10 \n\t" \
+ "adc r14, #0 \n\t" \
+ "umull r9, r10, r4, r6 \n\t" \
+ "adds r11, r9 \n\t" \
+ "adcs r12, r10 \n\t" \
+ "adc r14, #0 \n\t" \
+ "ldr r9, [r0] \n\t" \
+ "adds r11, r9 \n\t" \
+ "adcs r12, #0 \n\t" \
+ "adc r14, #0 \n\t" \
+ "stmia r0!, {r11} \n\t" \
+ \
+ "ldmia r1!, {r5} \n\t" \
+ "mov r9, #0 \n\t" \
+ "umull r10, r11, r3, r8 \n\t" \
+ "adds r12, r10 \n\t" \
+ "adcs r14, r11 \n\t" \
+ "adc r9, #0 \n\t" \
+ "umull r10, r11, r4, r7 \n\t" \
+ "adds r12, r10 \n\t" \
+ "adcs r14, r11 \n\t" \
+ "adc r9, #0 \n\t" \
+ "umull r10, r11, r5, r6 \n\t" \
+ "adds r12, r10 \n\t" \
+ "adcs r14, r11 \n\t" \
+ "adc r9, #0 \n\t" \
+ "ldr r10, [r0] \n\t" \
+ "adds r12, r10 \n\t" \
+ "adcs r14, #0 \n\t" \
+ "adc r9, #0 \n\t" \
+ "stmia r0!, {r12} \n\t" \
+ \
+ "ldmia r2!, {r6} \n\t" \
+ "mov r10, #0 \n\t" \
+ "umull r11, r12, r3, r6 \n\t" \
+ "adds r14, r11 \n\t" \
+ "adcs r9, r12 \n\t" \
+ "adc r10, #0 \n\t" \
+ "umull r11, r12, r4, r8 \n\t" \
+ "adds r14, r11 \n\t" \
+ "adcs r9, r12 \n\t" \
+ "adc r10, #0 \n\t" \
+ "umull r11, r12, r5, r7 \n\t" \
+ "adds r14, r11 \n\t" \
+ "adcs r9, r12 \n\t" \
+ "adc r10, #0 \n\t" \
+ "ldr r11, [r0] \n\t" \
+ "adds r14, r11 \n\t" \
+ "adcs r9, #0 \n\t" \
+ "adc r10, #0 \n\t" \
+ "stmia r0!, {r14} \n\t" \
+ \
+ "ldmia r2!, {r7} \n\t" \
+ "mov r11, #0 \n\t" \
+ "umull r12, r14, r3, r7 \n\t" \
+ "adds r9, r12 \n\t" \
+ "adcs r10, r14 \n\t" \
+ "adc r11, #0 \n\t" \
+ "umull r12, r14, r4, r6 \n\t" \
+ "adds r9, r12 \n\t" \
+ "adcs r10, r14 \n\t" \
+ "adc r11, #0 \n\t" \
+ "umull r12, r14, r5, r8 \n\t" \
+ "adds r9, r12 \n\t" \
+ "adcs r10, r14 \n\t" \
+ "adc r11, #0 \n\t" \
+ "ldr r12, [r0] \n\t" \
+ "adds r9, r12 \n\t" \
+ "adcs r10, #0 \n\t" \
+ "adc r11, #0 \n\t" \
+ "stmia r0!, {r9} \n\t" \
+ \
+ "ldmia r2!, {r8} \n\t" \
+ "mov r12, #0 \n\t" \
+ "umull r14, r9, r3, r8 \n\t" \
+ "adds r10, r14 \n\t" \
+ "adcs r11, r9 \n\t" \
+ "adc r12, #0 \n\t" \
+ "umull r14, r9, r4, r7 \n\t" \
+ "adds r10, r14 \n\t" \
+ "adcs r11, r9 \n\t" \
+ "adc r12, #0 \n\t" \
+ "umull r14, r9, r5, r6 \n\t" \
+ "adds r10, r14 \n\t" \
+ "adcs r11, r9 \n\t" \
+ "adc r12, #0 \n\t" \
+ "ldr r14, [r0] \n\t" \
+ "adds r10, r14 \n\t" \
+ "adcs r11, #0 \n\t" \
+ "adc r12, #0 \n\t" \
+ "stmia r0!, {r10} \n\t" \
+ \
+ "mov r14, #0 \n\t" \
+ "umull r9, r10, r4, r8 \n\t" \
+ "adds r11, r9 \n\t" \
+ "adcs r12, r10 \n\t" \
+ "adc r14, #0 \n\t" \
+ "umull r9, r10, r5, r7 \n\t" \
+ "adds r11, r9 \n\t" \
+ "adcs r12, r10 \n\t" \
+ "adc r14, #0 \n\t" \
+ "stmia r0!, {r11} \n\t" \
+ \
+ "umull r10, r11, r5, r8 \n\t" \
+ "adds r12, r10 \n\t" \
+ "adc r14, r11 \n\t" \
+ "stmia r0!, {r12, r14} \n\t"
+
+#define FAST_MULT_ASM_7 \
+ "add r0, 24 \n\t" \
+ "add r2, 24 \n\t" \
+ "ldmia r1!, {r3} \n\t" \
+ "ldmia r2!, {r6} \n\t" \
+ \
+ "umull r9, r10, r3, r6 \n\t" \
+ "stmia r0!, {r9, r10} \n\t" \
+ \
+ "sub r0, 20 \n\t" \
+ "sub r2, 16 \n\t" \
+ "ldmia r2!, {r6, r7, r8} \n\t" \
+ "ldmia r1!, {r4, r5} \n\t" \
+ \
+ "umull r9, r10, r3, r6 \n\t" \
+ "stmia r0!, {r9} \n\t" \
+ \
+ "mov r14, #0 \n\t" \
+ "umull r9, r12, r3, r7 \n\t" \
+ "adds r10, r9 \n\t" \
+ "adc r12, #0 \n\t" \
+ "umull r9, r11, r4, r6 \n\t" \
+ "adds r10, r9 \n\t" \
+ "adcs r12, r11 \n\t" \
+ "adc r14, #0 \n\t" \
+ "stmia r0!, {r10} \n\t" \
+ \
+ "mov r9, #0 \n\t" \
+ "umull r10, r11, r3, r8 \n\t" \
+ "adds r12, r10 \n\t" \
+ "adcs r14, r11 \n\t" \
+ "adc r9, #0 \n\t" \
+ "umull r10, r11, r4, r7 \n\t" \
+ "adds r12, r10 \n\t" \
+ "adcs r14, r11 \n\t" \
+ "adc r9, #0 \n\t" \
+ "umull r10, r11, r5, r6 \n\t" \
+ "adds r12, r10 \n\t" \
+ "adcs r14, r11 \n\t" \
+ "adc r9, #0 \n\t" \
+ "stmia r0!, {r12} \n\t" \
+ \
+ "ldmia r1!, {r3} \n\t" \
+ "mov r10, #0 \n\t" \
+ "umull r11, r12, r4, r8 \n\t" \
+ "adds r14, r11 \n\t" \
+ "adcs r9, r12 \n\t" \
+ "adc r10, #0 \n\t" \
+ "umull r11, r12, r5, r7 \n\t" \
+ "adds r14, r11 \n\t" \
+ "adcs r9, r12 \n\t" \
+ "adc r10, #0 \n\t" \
+ "umull r11, r12, r3, r6 \n\t" \
+ "adds r14, r11 \n\t" \
+ "adcs r9, r12 \n\t" \
+ "adc r10, #0 \n\t" \
+ "ldr r11, [r0] \n\t" \
+ "adds r14, r11 \n\t" \
+ "adcs r9, #0 \n\t" \
+ "adc r10, #0 \n\t" \
+ "stmia r0!, {r14} \n\t" \
+ \
+ "ldmia r2!, {r6} \n\t" \
+ "mov r11, #0 \n\t" \
+ "umull r12, r14, r4, r6 \n\t" \
+ "adds r9, r12 \n\t" \
+ "adcs r10, r14 \n\t" \
+ "adc r11, #0 \n\t" \
+ "umull r12, r14, r5, r8 \n\t" \
+ "adds r9, r12 \n\t" \
+ "adcs r10, r14 \n\t" \
+ "adc r11, #0 \n\t" \
+ "umull r12, r14, r3, r7 \n\t" \
+ "adds r9, r12 \n\t" \
+ "adcs r10, r14 \n\t" \
+ "adc r11, #0 \n\t" \
+ "ldr r12, [r0] \n\t" \
+ "adds r9, r12 \n\t" \
+ "adcs r10, #0 \n\t" \
+ "adc r11, #0 \n\t" \
+ "stmia r0!, {r9} \n\t" \
+ \
+ "mov r12, #0 \n\t" \
+ "umull r14, r9, r5, r6 \n\t" \
+ "adds r10, r14 \n\t" \
+ "adcs r11, r9 \n\t" \
+ "adc r12, #0 \n\t" \
+ "umull r14, r9, r3, r8 \n\t" \
+ "adds r10, r14 \n\t" \
+ "adcs r11, r9 \n\t" \
+ "adc r12, #0 \n\t" \
+ "stmia r0!, {r10} \n\t" \
+ \
+ "umull r9, r10, r3, r6 \n\t" \
+ "adds r11, r9 \n\t" \
+ "adc r12, r10 \n\t" \
+ "stmia r0!, {r11, r12} \n\t" \
+ \
+ "sub r0, 44 \n\t" \
+ "sub r1, 16 \n\t" \
+ "sub r2, 28 \n\t" \
+ "ldmia r1!, {r3,r4,r5} \n\t" \
+ "ldmia r2!, {r6,r7,r8} \n\t" \
+ \
+ "umull r9, r10, r3, r6 \n\t" \
+ "stmia r0!, {r9} \n\t" \
+ \
+ "mov r14, #0 \n\t" \
+ "umull r9, r12, r3, r7 \n\t" \
+ "adds r10, r9 \n\t" \
+ "adc r12, #0 \n\t" \
+ "umull r9, r11, r4, r6 \n\t" \
+ "adds r10, r9 \n\t" \
+ "adcs r12, r11 \n\t" \
+ "adc r14, #0 \n\t" \
+ "stmia r0!, {r10} \n\t" \
+ \
+ "mov r9, #0 \n\t" \
+ "umull r10, r11, r3, r8 \n\t" \
+ "adds r12, r10 \n\t" \
+ "adcs r14, r11 \n\t" \
+ "adc r9, #0 \n\t" \
+ "umull r10, r11, r4, r7 \n\t" \
+ "adds r12, r10 \n\t" \
+ "adcs r14, r11 \n\t" \
+ "adc r9, #0 \n\t" \
+ "umull r10, r11, r5, r6 \n\t" \
+ "adds r12, r10 \n\t" \
+ "adcs r14, r11 \n\t" \
+ "adc r9, #0 \n\t" \
+ "stmia r0!, {r12} \n\t" \
+ \
+ "ldmia r1!, {r3} \n\t" \
+ "mov r10, #0 \n\t" \
+ "umull r11, r12, r4, r8 \n\t" \
+ "adds r14, r11 \n\t" \
+ "adcs r9, r12 \n\t" \
+ "adc r10, #0 \n\t" \
+ "umull r11, r12, r5, r7 \n\t" \
+ "adds r14, r11 \n\t" \
+ "adcs r9, r12 \n\t" \
+ "adc r10, #0 \n\t" \
+ "umull r11, r12, r3, r6 \n\t" \
+ "adds r14, r11 \n\t" \
+ "adcs r9, r12 \n\t" \
+ "adc r10, #0 \n\t" \
+ "ldr r11, [r0] \n\t" \
+ "adds r14, r11 \n\t" \
+ "adcs r9, #0 \n\t" \
+ "adc r10, #0 \n\t" \
+ "stmia r0!, {r14} \n\t" \
+ \
+ "ldmia r1!, {r4} \n\t" \
+ "mov r11, #0 \n\t" \
+ "umull r12, r14, r5, r8 \n\t" \
+ "adds r9, r12 \n\t" \
+ "adcs r10, r14 \n\t" \
+ "adc r11, #0 \n\t" \
+ "umull r12, r14, r3, r7 \n\t" \
+ "adds r9, r12 \n\t" \
+ "adcs r10, r14 \n\t" \
+ "adc r11, #0 \n\t" \
+ "umull r12, r14, r4, r6 \n\t" \
+ "adds r9, r12 \n\t" \
+ "adcs r10, r14 \n\t" \
+ "adc r11, #0 \n\t" \
+ "ldr r12, [r0] \n\t" \
+ "adds r9, r12 \n\t" \
+ "adcs r10, #0 \n\t" \
+ "adc r11, #0 \n\t" \
+ "stmia r0!, {r9} \n\t" \
+ \
+ "ldmia r1!, {r5} \n\t" \
+ "mov r12, #0 \n\t" \
+ "umull r14, r9, r3, r8 \n\t" \
+ "adds r10, r14 \n\t" \
+ "adcs r11, r9 \n\t" \
+ "adc r12, #0 \n\t" \
+ "umull r14, r9, r4, r7 \n\t" \
+ "adds r10, r14 \n\t" \
+ "adcs r11, r9 \n\t" \
+ "adc r12, #0 \n\t" \
+ "umull r14, r9, r5, r6 \n\t" \
+ "adds r10, r14 \n\t" \
+ "adcs r11, r9 \n\t" \
+ "adc r12, #0 \n\t" \
+ "ldr r14, [r0] \n\t" \
+ "adds r10, r14 \n\t" \
+ "adcs r11, #0 \n\t" \
+ "adc r12, #0 \n\t" \
+ "stmia r0!, {r10} \n\t" \
+ \
+ "ldmia r1!, {r3} \n\t" \
+ "mov r14, #0 \n\t" \
+ "umull r9, r10, r4, r8 \n\t" \
+ "adds r11, r9 \n\t" \
+ "adcs r12, r10 \n\t" \
+ "adc r14, #0 \n\t" \
+ "umull r9, r10, r5, r7 \n\t" \
+ "adds r11, r9 \n\t" \
+ "adcs r12, r10 \n\t" \
+ "adc r14, #0 \n\t" \
+ "umull r9, r10, r3, r6 \n\t" \
+ "adds r11, r9 \n\t" \
+ "adcs r12, r10 \n\t" \
+ "adc r14, #0 \n\t" \
+ "ldr r9, [r0] \n\t" \
+ "adds r11, r9 \n\t" \
+ "adcs r12, #0 \n\t" \
+ "adc r14, #0 \n\t" \
+ "stmia r0!, {r11} \n\t" \
+ \
+ "ldmia r2!, {r6} \n\t" \
+ "mov r9, #0 \n\t" \
+ "umull r10, r11, r4, r6 \n\t" \
+ "adds r12, r10 \n\t" \
+ "adcs r14, r11 \n\t" \
+ "adc r9, #0 \n\t" \
+ "umull r10, r11, r5, r8 \n\t" \
+ "adds r12, r10 \n\t" \
+ "adcs r14, r11 \n\t" \
+ "adc r9, #0 \n\t" \
+ "umull r10, r11, r3, r7 \n\t" \
+ "adds r12, r10 \n\t" \
+ "adcs r14, r11 \n\t" \
+ "adc r9, #0 \n\t" \
+ "ldr r10, [r0] \n\t" \
+ "adds r12, r10 \n\t" \
+ "adcs r14, #0 \n\t" \
+ "adc r9, #0 \n\t" \
+ "stmia r0!, {r12} \n\t" \
+ \
+ "ldmia r2!, {r7} \n\t" \
+ "mov r10, #0 \n\t" \
+ "umull r11, r12, r4, r7 \n\t" \
+ "adds r14, r11 \n\t" \
+ "adcs r9, r12 \n\t" \
+ "adc r10, #0 \n\t" \
+ "umull r11, r12, r5, r6 \n\t" \
+ "adds r14, r11 \n\t" \
+ "adcs r9, r12 \n\t" \
+ "adc r10, #0 \n\t" \
+ "umull r11, r12, r3, r8 \n\t" \
+ "adds r14, r11 \n\t" \
+ "adcs r9, r12 \n\t" \
+ "adc r10, #0 \n\t" \
+ "ldr r11, [r0] \n\t" \
+ "adds r14, r11 \n\t" \
+ "adcs r9, #0 \n\t" \
+ "adc r10, #0 \n\t" \
+ "stmia r0!, {r14} \n\t" \
+ \
+ "ldmia r2!, {r8} \n\t" \
+ "mov r11, #0 \n\t" \
+ "umull r12, r14, r4, r8 \n\t" \
+ "adds r9, r12 \n\t" \
+ "adcs r10, r14 \n\t" \
+ "adc r11, #0 \n\t" \
+ "umull r12, r14, r5, r7 \n\t" \
+ "adds r9, r12 \n\t" \
+ "adcs r10, r14 \n\t" \
+ "adc r11, #0 \n\t" \
+ "umull r12, r14, r3, r6 \n\t" \
+ "adds r9, r12 \n\t" \
+ "adcs r10, r14 \n\t" \
+ "adc r11, #0 \n\t" \
+ "ldr r12, [r0] \n\t" \
+ "adds r9, r12 \n\t" \
+ "adcs r10, #0 \n\t" \
+ "adc r11, #0 \n\t" \
+ "stmia r0!, {r9} \n\t" \
+ \
+ "ldmia r2!, {r6} \n\t" \
+ "mov r12, #0 \n\t" \
+ "umull r14, r9, r4, r6 \n\t" \
+ "adds r10, r14 \n\t" \
+ "adcs r11, r9 \n\t" \
+ "adc r12, #0 \n\t" \
+ "umull r14, r9, r5, r8 \n\t" \
+ "adds r10, r14 \n\t" \
+ "adcs r11, r9 \n\t" \
+ "adc r12, #0 \n\t" \
+ "umull r14, r9, r3, r7 \n\t" \
+ "adds r10, r14 \n\t" \
+ "adcs r11, r9 \n\t" \
+ "adc r12, #0 \n\t" \
+ "ldr r14, [r0] \n\t" \
+ "adds r10, r14 \n\t" \
+ "adcs r11, #0 \n\t" \
+ "adc r12, #0 \n\t" \
+ "stmia r0!, {r10} \n\t" \
+ \
+ "mov r14, #0 \n\t" \
+ "umull r9, r10, r5, r6 \n\t" \
+ "adds r11, r9 \n\t" \
+ "adcs r12, r10 \n\t" \
+ "adc r14, #0 \n\t" \
+ "umull r9, r10, r3, r8 \n\t" \
+ "adds r11, r9 \n\t" \
+ "adcs r12, r10 \n\t" \
+ "adc r14, #0 \n\t" \
+ "stmia r0!, {r11} \n\t" \
+ \
+ "umull r10, r11, r3, r6 \n\t" \
+ "adds r12, r10 \n\t" \
+ "adc r14, r11 \n\t" \
+ "stmia r0!, {r12, r14} \n\t"
+
+#define FAST_MULT_ASM_8 \
+ "add r0, 24 \n\t" \
+ "add r2, 24 \n\t" \
+ "ldmia r1!, {r3,r4} \n\t" \
+ "ldmia r2!, {r6,r7} \n\t" \
+ \
+ "umull r11, r12, r3, r6 \n\t" \
+ "stmia r0!, {r11} \n\t" \
+ \
+ "mov r10, #0 \n\t" \
+ "umull r11, r9, r3, r7 \n\t" \
+ "adds r12, r11 \n\t" \
+ "adc r9, #0 \n\t" \
+ "umull r11, r14, r4, r6 \n\t" \
+ "adds r12, r11 \n\t" \
+ "adcs r9, r14 \n\t" \
+ "adc r10, #0 \n\t" \
+ "stmia r0!, {r12} \n\t" \
+ \
+ "umull r12, r14, r4, r7 \n\t" \
+ "adds r9, r12 \n\t" \
+ "adc r10, r14 \n\t" \
+ "stmia r0!, {r9, r10} \n\t" \
+ \
+ "sub r0, 28 \n\t" \
+ "sub r2, 20 \n\t" \
+ "ldmia r2!, {r6,r7,r8} \n\t" \
+ "ldmia r1!, {r5} \n\t" \
+ \
+ "umull r11, r12, r3, r6 \n\t" \
+ "stmia r0!, {r11} \n\t" \
+ \
+ "mov r10, #0 \n\t" \
+ "umull r11, r9, r3, r7 \n\t" \
+ "adds r12, r11 \n\t" \
+ "adc r9, #0 \n\t" \
+ "umull r11, r14, r4, r6 \n\t" \
+ "adds r12, r11 \n\t" \
+ "adcs r9, r14 \n\t" \
+ "adc r10, #0 \n\t" \
+ "stmia r0!, {r12} \n\t" \
+ \
+ "mov r11, #0 \n\t" \
+ "umull r12, r14, r3, r8 \n\t" \
+ "adds r9, r12 \n\t" \
+ "adcs r10, r14 \n\t" \
+ "adc r11, #0 \n\t" \
+ "umull r12, r14, r4, r7 \n\t" \
+ "adds r9, r12 \n\t" \
+ "adcs r10, r14 \n\t" \
+ "adc r11, #0 \n\t" \
+ "umull r12, r14, r5, r6 \n\t" \
+ "adds r9, r12 \n\t" \
+ "adcs r10, r14 \n\t" \
+ "adc r11, #0 \n\t" \
+ "stmia r0!, {r9} \n\t" \
+ \
+ "ldmia r1!, {r3} \n\t" \
+ "mov r12, #0 \n\t" \
+ "umull r14, r9, r4, r8 \n\t" \
+ "adds r10, r14 \n\t" \
+ "adcs r11, r9 \n\t" \
+ "adc r12, #0 \n\t" \
+ "umull r14, r9, r5, r7 \n\t" \
+ "adds r10, r14 \n\t" \
+ "adcs r11, r9 \n\t" \
+ "adc r12, #0 \n\t" \
+ "umull r14, r9, r3, r6 \n\t" \
+ "adds r10, r14 \n\t" \
+ "adcs r11, r9 \n\t" \
+ "adc r12, #0 \n\t" \
+ "ldr r14, [r0] \n\t" \
+ "adds r10, r14 \n\t" \
+ "adcs r11, #0 \n\t" \
+ "adc r12, #0 \n\t" \
+ "stmia r0!, {r10} \n\t" \
+ \
+ "ldmia r1!, {r4} \n\t" \
+ "mov r14, #0 \n\t" \
+ "umull r9, r10, r5, r8 \n\t" \
+ "adds r11, r9 \n\t" \
+ "adcs r12, r10 \n\t" \
+ "adc r14, #0 \n\t" \
+ "umull r9, r10, r3, r7 \n\t" \
+ "adds r11, r9 \n\t" \
+ "adcs r12, r10 \n\t" \
+ "adc r14, #0 \n\t" \
+ "umull r9, r10, r4, r6 \n\t" \
+ "adds r11, r9 \n\t" \
+ "adcs r12, r10 \n\t" \
+ "adc r14, #0 \n\t" \
+ "ldr r9, [r0] \n\t" \
+ "adds r11, r9 \n\t" \
+ "adcs r12, #0 \n\t" \
+ "adc r14, #0 \n\t" \
+ "stmia r0!, {r11} \n\t" \
+ \
+ "ldmia r2!, {r6} \n\t" \
+ "mov r9, #0 \n\t" \
+ "umull r10, r11, r5, r6 \n\t" \
+ "adds r12, r10 \n\t" \
+ "adcs r14, r11 \n\t" \
+ "adc r9, #0 \n\t" \
+ "umull r10, r11, r3, r8 \n\t" \
+ "adds r12, r10 \n\t" \
+ "adcs r14, r11 \n\t" \
+ "adc r9, #0 \n\t" \
+ "umull r10, r11, r4, r7 \n\t" \
+ "adds r12, r10 \n\t" \
+ "adcs r14, r11 \n\t" \
+ "adc r9, #0 \n\t" \
+ "ldr r10, [r0] \n\t" \
+ "adds r12, r10 \n\t" \
+ "adcs r14, #0 \n\t" \
+ "adc r9, #0 \n\t" \
+ "stmia r0!, {r12} \n\t" \
+ \
+ "ldmia r2!, {r7} \n\t" \
+ "mov r10, #0 \n\t" \
+ "umull r11, r12, r5, r7 \n\t" \
+ "adds r14, r11 \n\t" \
+ "adcs r9, r12 \n\t" \
+ "adc r10, #0 \n\t" \
+ "umull r11, r12, r3, r6 \n\t" \
+ "adds r14, r11 \n\t" \
+ "adcs r9, r12 \n\t" \
+ "adc r10, #0 \n\t" \
+ "umull r11, r12, r4, r8 \n\t" \
+ "adds r14, r11 \n\t" \
+ "adcs r9, r12 \n\t" \
+ "adc r10, #0 \n\t" \
+ "ldr r11, [r0] \n\t" \
+ "adds r14, r11 \n\t" \
+ "adcs r9, #0 \n\t" \
+ "adc r10, #0 \n\t" \
+ "stmia r0!, {r14} \n\t" \
+ \
+ "mov r11, #0 \n\t" \
+ "umull r12, r14, r3, r7 \n\t" \
+ "adds r9, r12 \n\t" \
+ "adcs r10, r14 \n\t" \
+ "adc r11, #0 \n\t" \
+ "umull r12, r14, r4, r6 \n\t" \
+ "adds r9, r12 \n\t" \
+ "adcs r10, r14 \n\t" \
+ "adc r11, #0 \n\t" \
+ "stmia r0!, {r9} \n\t" \
+ \
+ "umull r14, r9, r4, r7 \n\t" \
+ "adds r10, r14 \n\t" \
+ "adc r11, r9 \n\t" \
+ "stmia r0!, {r10, r11} \n\t" \
+ \
+ "sub r0, 52 \n\t" \
+ "sub r1, 20 \n\t" \
+ "sub r2, 32 \n\t" \
+ "ldmia r1!, {r3,r4,r5} \n\t" \
+ "ldmia r2!, {r6,r7,r8} \n\t" \
+ \
+ "umull r11, r12, r3, r6 \n\t" \
+ "stmia r0!, {r11} \n\t" \
+ \
+ "mov r10, #0 \n\t" \
+ "umull r11, r9, r3, r7 \n\t" \
+ "adds r12, r11 \n\t" \
+ "adc r9, #0 \n\t" \
+ "umull r11, r14, r4, r6 \n\t" \
+ "adds r12, r11 \n\t" \
+ "adcs r9, r14 \n\t" \
+ "adc r10, #0 \n\t" \
+ "stmia r0!, {r12} \n\t" \
+ \
+ "mov r11, #0 \n\t" \
+ "umull r12, r14, r3, r8 \n\t" \
+ "adds r9, r12 \n\t" \
+ "adcs r10, r14 \n\t" \
+ "adc r11, #0 \n\t" \
+ "umull r12, r14, r4, r7 \n\t" \
+ "adds r9, r12 \n\t" \
+ "adcs r10, r14 \n\t" \
+ "adc r11, #0 \n\t" \
+ "umull r12, r14, r5, r6 \n\t" \
+ "adds r9, r12 \n\t" \
+ "adcs r10, r14 \n\t" \
+ "adc r11, #0 \n\t" \
+ "stmia r0!, {r9} \n\t" \
+ \
+ "ldmia r1!, {r3} \n\t" \
+ "mov r12, #0 \n\t" \
+ "umull r14, r9, r4, r8 \n\t" \
+ "adds r10, r14 \n\t" \
+ "adcs r11, r9 \n\t" \
+ "adc r12, #0 \n\t" \
+ "umull r14, r9, r5, r7 \n\t" \
+ "adds r10, r14 \n\t" \
+ "adcs r11, r9 \n\t" \
+ "adc r12, #0 \n\t" \
+ "umull r14, r9, r3, r6 \n\t" \
+ "adds r10, r14 \n\t" \
+ "adcs r11, r9 \n\t" \
+ "adc r12, #0 \n\t" \
+ "ldr r14, [r0] \n\t" \
+ "adds r10, r14 \n\t" \
+ "adcs r11, #0 \n\t" \
+ "adc r12, #0 \n\t" \
+ "stmia r0!, {r10} \n\t" \
+ \
+ "ldmia r1!, {r4} \n\t" \
+ "mov r14, #0 \n\t" \
+ "umull r9, r10, r5, r8 \n\t" \
+ "adds r11, r9 \n\t" \
+ "adcs r12, r10 \n\t" \
+ "adc r14, #0 \n\t" \
+ "umull r9, r10, r3, r7 \n\t" \
+ "adds r11, r9 \n\t" \
+ "adcs r12, r10 \n\t" \
+ "adc r14, #0 \n\t" \
+ "umull r9, r10, r4, r6 \n\t" \
+ "adds r11, r9 \n\t" \
+ "adcs r12, r10 \n\t" \
+ "adc r14, #0 \n\t" \
+ "ldr r9, [r0] \n\t" \
+ "adds r11, r9 \n\t" \
+ "adcs r12, #0 \n\t" \
+ "adc r14, #0 \n\t" \
+ "stmia r0!, {r11} \n\t" \
+ \
+ "ldmia r1!, {r5} \n\t" \
+ "mov r9, #0 \n\t" \
+ "umull r10, r11, r3, r8 \n\t" \
+ "adds r12, r10 \n\t" \
+ "adcs r14, r11 \n\t" \
+ "adc r9, #0 \n\t" \
+ "umull r10, r11, r4, r7 \n\t" \
+ "adds r12, r10 \n\t" \
+ "adcs r14, r11 \n\t" \
+ "adc r9, #0 \n\t" \
+ "umull r10, r11, r5, r6 \n\t" \
+ "adds r12, r10 \n\t" \
+ "adcs r14, r11 \n\t" \
+ "adc r9, #0 \n\t" \
+ "ldr r10, [r0] \n\t" \
+ "adds r12, r10 \n\t" \
+ "adcs r14, #0 \n\t" \
+ "adc r9, #0 \n\t" \
+ "stmia r0!, {r12} \n\t" \
+ \
+ "ldmia r1!, {r3} \n\t" \
+ "mov r10, #0 \n\t" \
+ "umull r11, r12, r4, r8 \n\t" \
+ "adds r14, r11 \n\t" \
+ "adcs r9, r12 \n\t" \
+ "adc r10, #0 \n\t" \
+ "umull r11, r12, r5, r7 \n\t" \
+ "adds r14, r11 \n\t" \
+ "adcs r9, r12 \n\t" \
+ "adc r10, #0 \n\t" \
+ "umull r11, r12, r3, r6 \n\t" \
+ "adds r14, r11 \n\t" \
+ "adcs r9, r12 \n\t" \
+ "adc r10, #0 \n\t" \
+ "ldr r11, [r0] \n\t" \
+ "adds r14, r11 \n\t" \
+ "adcs r9, #0 \n\t" \
+ "adc r10, #0 \n\t" \
+ "stmia r0!, {r14} \n\t" \
+ \
+ "ldmia r1!, {r4} \n\t" \
+ "mov r11, #0 \n\t" \
+ "umull r12, r14, r5, r8 \n\t" \
+ "adds r9, r12 \n\t" \
+ "adcs r10, r14 \n\t" \
+ "adc r11, #0 \n\t" \
+ "umull r12, r14, r3, r7 \n\t" \
+ "adds r9, r12 \n\t" \
+ "adcs r10, r14 \n\t" \
+ "adc r11, #0 \n\t" \
+ "umull r12, r14, r4, r6 \n\t" \
+ "adds r9, r12 \n\t" \
+ "adcs r10, r14 \n\t" \
+ "adc r11, #0 \n\t" \
+ "ldr r12, [r0] \n\t" \
+ "adds r9, r12 \n\t" \
+ "adcs r10, #0 \n\t" \
+ "adc r11, #0 \n\t" \
+ "stmia r0!, {r9} \n\t" \
+ \
+ "ldmia r2!, {r6} \n\t" \
+ "mov r12, #0 \n\t" \
+ "umull r14, r9, r5, r6 \n\t" \
+ "adds r10, r14 \n\t" \
+ "adcs r11, r9 \n\t" \
+ "adc r12, #0 \n\t" \
+ "umull r14, r9, r3, r8 \n\t" \
+ "adds r10, r14 \n\t" \
+ "adcs r11, r9 \n\t" \
+ "adc r12, #0 \n\t" \
+ "umull r14, r9, r4, r7 \n\t" \
+ "adds r10, r14 \n\t" \
+ "adcs r11, r9 \n\t" \
+ "adc r12, #0 \n\t" \
+ "ldr r14, [r0] \n\t" \
+ "adds r10, r14 \n\t" \
+ "adcs r11, #0 \n\t" \
+ "adc r12, #0 \n\t" \
+ "stmia r0!, {r10} \n\t" \
+ \
+ "ldmia r2!, {r7} \n\t" \
+ "mov r14, #0 \n\t" \
+ "umull r9, r10, r5, r7 \n\t" \
+ "adds r11, r9 \n\t" \
+ "adcs r12, r10 \n\t" \
+ "adc r14, #0 \n\t" \
+ "umull r9, r10, r3, r6 \n\t" \
+ "adds r11, r9 \n\t" \
+ "adcs r12, r10 \n\t" \
+ "adc r14, #0 \n\t" \
+ "umull r9, r10, r4, r8 \n\t" \
+ "adds r11, r9 \n\t" \
+ "adcs r12, r10 \n\t" \
+ "adc r14, #0 \n\t" \
+ "ldr r9, [r0] \n\t" \
+ "adds r11, r9 \n\t" \
+ "adcs r12, #0 \n\t" \
+ "adc r14, #0 \n\t" \
+ "stmia r0!, {r11} \n\t" \
+ \
+ "ldmia r2!, {r8} \n\t" \
+ "mov r9, #0 \n\t" \
+ "umull r10, r11, r5, r8 \n\t" \
+ "adds r12, r10 \n\t" \
+ "adcs r14, r11 \n\t" \
+ "adc r9, #0 \n\t" \
+ "umull r10, r11, r3, r7 \n\t" \
+ "adds r12, r10 \n\t" \
+ "adcs r14, r11 \n\t" \
+ "adc r9, #0 \n\t" \
+ "umull r10, r11, r4, r6 \n\t" \
+ "adds r12, r10 \n\t" \
+ "adcs r14, r11 \n\t" \
+ "adc r9, #0 \n\t" \
+ "ldr r10, [r0] \n\t" \
+ "adds r12, r10 \n\t" \
+ "adcs r14, #0 \n\t" \
+ "adc r9, #0 \n\t" \
+ "stmia r0!, {r12} \n\t" \
+ \
+ "ldmia r2!, {r6} \n\t" \
+ "mov r10, #0 \n\t" \
+ "umull r11, r12, r5, r6 \n\t" \
+ "adds r14, r11 \n\t" \
+ "adcs r9, r12 \n\t" \
+ "adc r10, #0 \n\t" \
+ "umull r11, r12, r3, r8 \n\t" \
+ "adds r14, r11 \n\t" \
+ "adcs r9, r12 \n\t" \
+ "adc r10, #0 \n\t" \
+ "umull r11, r12, r4, r7 \n\t" \
+ "adds r14, r11 \n\t" \
+ "adcs r9, r12 \n\t" \
+ "adc r10, #0 \n\t" \
+ "ldr r11, [r0] \n\t" \
+ "adds r14, r11 \n\t" \
+ "adcs r9, #0 \n\t" \
+ "adc r10, #0 \n\t" \
+ "stmia r0!, {r14} \n\t" \
+ \
+ "ldmia r2!, {r7} \n\t" \
+ "mov r11, #0 \n\t" \
+ "umull r12, r14, r5, r7 \n\t" \
+ "adds r9, r12 \n\t" \
+ "adcs r10, r14 \n\t" \
+ "adc r11, #0 \n\t" \
+ "umull r12, r14, r3, r6 \n\t" \
+ "adds r9, r12 \n\t" \
+ "adcs r10, r14 \n\t" \
+ "adc r11, #0 \n\t" \
+ "umull r12, r14, r4, r8 \n\t" \
+ "adds r9, r12 \n\t" \
+ "adcs r10, r14 \n\t" \
+ "adc r11, #0 \n\t" \
+ "ldr r12, [r0] \n\t" \
+ "adds r9, r12 \n\t" \
+ "adcs r10, #0 \n\t" \
+ "adc r11, #0 \n\t" \
+ "stmia r0!, {r9} \n\t" \
+ \
+ "mov r12, #0 \n\t" \
+ "umull r14, r9, r3, r7 \n\t" \
+ "adds r10, r14 \n\t" \
+ "adcs r11, r9 \n\t" \
+ "adc r12, #0 \n\t" \
+ "umull r14, r9, r4, r6 \n\t" \
+ "adds r10, r14 \n\t" \
+ "adcs r11, r9 \n\t" \
+ "adc r12, #0 \n\t" \
+ "stmia r0!, {r10} \n\t" \
+ \
+ "umull r9, r10, r4, r7 \n\t" \
+ "adds r11, r9 \n\t" \
+ "adc r12, r10 \n\t" \
+ "stmia r0!, {r11, r12} \n\t"
+
+#define FAST_SQUARE_ASM_5 \
+ "ldmia r1!, {r2,r3,r4,r5,r6} \n\t" \
+ \
+ "umull r11, r12, r2, r2 \n\t" \
+ "stmia r0!, {r11} \n\t" \
+ \
+ "mov r9, #0 \n\t" \
+ "umull r10, r11, r2, r3 \n\t" \
+ "adds r12, r10 \n\t" \
+ "adcs r8, r11, #0 \n\t" \
+ "adc r9, #0 \n\t" \
+ "adds r12, r10 \n\t" \
+ "adcs r8, r11 \n\t" \
+ "adc r9, #0 \n\t" \
+ "stmia r0!, {r12} \n\t" \
+ \
+ "mov r10, #0 \n\t" \
+ "umull r11, r12, r2, r4 \n\t" \
+ "adds r11, r11 \n\t" \
+ "adcs r12, r12 \n\t" \
+ "adc r10, #0 \n\t" \
+ "adds r8, r11 \n\t" \
+ "adcs r9, r12 \n\t" \
+ "adc r10, #0 \n\t" \
+ "umull r11, r12, r3, r3 \n\t" \
+ "adds r8, r11 \n\t" \
+ "adcs r9, r12 \n\t" \
+ "adc r10, #0 \n\t" \
+ "stmia r0!, {r8} \n\t" \
+ \
+ "mov r12, #0 \n\t" \
+ "umull r8, r11, r2, r5 \n\t" \
+ "umull r1, r14, r3, r4 \n\t" \
+ "adds r8, r1 \n\t" \
+ "adcs r11, r14 \n\t" \
+ "adc r12, #0 \n\t" \
+ "adds r8, r8 \n\t" \
+ "adcs r11, r11 \n\t" \
+ "adc r12, r12 \n\t" \
+ "adds r8, r9 \n\t" \
+ "adcs r11, r10 \n\t" \
+ "adc r12, #0 \n\t" \
+ "stmia r0!, {r8} \n\t" \
+ \
+ "mov r10, #0 \n\t" \
+ "umull r8, r9, r2, r6 \n\t" \
+ "umull r1, r14, r3, r5 \n\t" \
+ "adds r8, r1 \n\t" \
+ "adcs r9, r14 \n\t" \
+ "adc r10, #0 \n\t" \
+ "adds r8, r8 \n\t" \
+ "adcs r9, r9 \n\t" \
+ "adc r10, r10 \n\t" \
+ "umull r1, r14, r4, r4 \n\t" \
+ "adds r8, r1 \n\t" \
+ "adcs r9, r14 \n\t" \
+ "adc r10, #0 \n\t" \
+ "adds r8, r11 \n\t" \
+ "adcs r9, r12 \n\t" \
+ "adc r10, #0 \n\t" \
+ "stmia r0!, {r8} \n\t" \
+ \
+ "mov r12, #0 \n\t" \
+ "umull r8, r11, r3, r6 \n\t" \
+ "umull r1, r14, r4, r5 \n\t" \
+ "adds r8, r1 \n\t" \
+ "adcs r11, r14 \n\t" \
+ "adc r12, #0 \n\t" \
+ "adds r8, r8 \n\t" \
+ "adcs r11, r11 \n\t" \
+ "adc r12, r12 \n\t" \
+ "adds r8, r9 \n\t" \
+ "adcs r11, r10 \n\t" \
+ "adc r12, #0 \n\t" \
+ "stmia r0!, {r8} \n\t" \
+ \
+ "mov r8, #0 \n\t" \
+ "umull r1, r10, r4, r6 \n\t" \
+ "adds r1, r1 \n\t" \
+ "adcs r10, r10 \n\t" \
+ "adc r8, #0 \n\t" \
+ "adds r11, r1 \n\t" \
+ "adcs r12, r10 \n\t" \
+ "adc r8, #0 \n\t" \
+ "umull r1, r10, r5, r5 \n\t" \
+ "adds r11, r1 \n\t" \
+ "adcs r12, r10 \n\t" \
+ "adc r8, #0 \n\t" \
+ "stmia r0!, {r11} \n\t" \
+ \
+ "mov r11, #0 \n\t" \
+ "umull r1, r10, r5, r6 \n\t" \
+ "adds r1, r1 \n\t" \
+ "adcs r10, r10 \n\t" \
+ "adc r11, #0 \n\t" \
+ "adds r12, r1 \n\t" \
+ "adcs r8, r10 \n\t" \
+ "adc r11, #0 \n\t" \
+ "stmia r0!, {r12} \n\t" \
+ \
+ "umull r1, r10, r6, r6 \n\t" \
+ "adds r8, r1 \n\t" \
+ "adcs r11, r10 \n\t" \
+ "stmia r0!, {r8, r11} \n\t"
+
+#define FAST_SQUARE_ASM_6 \
+ "ldmia r1!, {r2,r3,r4,r5,r6,r7} \n\t" \
+ \
+ "umull r11, r12, r2, r2 \n\t" \
+ "stmia r0!, {r11} \n\t" \
+ \
+ "mov r9, #0 \n\t" \
+ "umull r10, r11, r2, r3 \n\t" \
+ "adds r12, r10 \n\t" \
+ "adcs r8, r11, #0 \n\t" \
+ "adc r9, #0 \n\t" \
+ "adds r12, r10 \n\t" \
+ "adcs r8, r11 \n\t" \
+ "adc r9, #0 \n\t" \
+ "stmia r0!, {r12} \n\t" \
+ \
+ "mov r10, #0 \n\t" \
+ "umull r11, r12, r2, r4 \n\t" \
+ "adds r11, r11 \n\t" \
+ "adcs r12, r12 \n\t" \
+ "adc r10, #0 \n\t" \
+ "adds r8, r11 \n\t" \
+ "adcs r9, r12 \n\t" \
+ "adc r10, #0 \n\t" \
+ "umull r11, r12, r3, r3 \n\t" \
+ "adds r8, r11 \n\t" \
+ "adcs r9, r12 \n\t" \
+ "adc r10, #0 \n\t" \
+ "stmia r0!, {r8} \n\t" \
+ \
+ "mov r12, #0 \n\t" \
+ "umull r8, r11, r2, r5 \n\t" \
+ "umull r1, r14, r3, r4 \n\t" \
+ "adds r8, r1 \n\t" \
+ "adcs r11, r14 \n\t" \
+ "adc r12, #0 \n\t" \
+ "adds r8, r8 \n\t" \
+ "adcs r11, r11 \n\t" \
+ "adc r12, r12 \n\t" \
+ "adds r8, r9 \n\t" \
+ "adcs r11, r10 \n\t" \
+ "adc r12, #0 \n\t" \
+ "stmia r0!, {r8} \n\t" \
+ \
+ "mov r10, #0 \n\t" \
+ "umull r8, r9, r2, r6 \n\t" \
+ "umull r1, r14, r3, r5 \n\t" \
+ "adds r8, r1 \n\t" \
+ "adcs r9, r14 \n\t" \
+ "adc r10, #0 \n\t" \
+ "adds r8, r8 \n\t" \
+ "adcs r9, r9 \n\t" \
+ "adc r10, r10 \n\t" \
+ "umull r1, r14, r4, r4 \n\t" \
+ "adds r8, r1 \n\t" \
+ "adcs r9, r14 \n\t" \
+ "adc r10, #0 \n\t" \
+ "adds r8, r11 \n\t" \
+ "adcs r9, r12 \n\t" \
+ "adc r10, #0 \n\t" \
+ "stmia r0!, {r8} \n\t" \
+ \
+ "mov r12, #0 \n\t" \
+ "umull r8, r11, r2, r7 \n\t" \
+ "umull r1, r14, r3, r6 \n\t" \
+ "adds r8, r1 \n\t" \
+ "adcs r11, r14 \n\t" \
+ "adc r12, #0 \n\t" \
+ "umull r1, r14, r4, r5 \n\t" \
+ "adds r8, r1 \n\t" \
+ "adcs r11, r14 \n\t" \
+ "adc r12, #0 \n\t" \
+ "adds r8, r8 \n\t" \
+ "adcs r11, r11 \n\t" \
+ "adc r12, r12 \n\t" \
+ "adds r8, r9 \n\t" \
+ "adcs r11, r10 \n\t" \
+ "adc r12, #0 \n\t" \
+ "stmia r0!, {r8} \n\t" \
+ \
+ "mov r10, #0 \n\t" \
+ "umull r8, r9, r3, r7 \n\t" \
+ "umull r1, r14, r4, r6 \n\t" \
+ "adds r8, r1 \n\t" \
+ "adcs r9, r14 \n\t" \
+ "adc r10, #0 \n\t" \
+ "adds r8, r8 \n\t" \
+ "adcs r9, r9 \n\t" \
+ "adc r10, r10 \n\t" \
+ "umull r1, r14, r5, r5 \n\t" \
+ "adds r8, r1 \n\t" \
+ "adcs r9, r14 \n\t" \
+ "adc r10, #0 \n\t" \
+ "adds r8, r11 \n\t" \
+ "adcs r9, r12 \n\t" \
+ "adc r10, #0 \n\t" \
+ "stmia r0!, {r8} \n\t" \
+ \
+ "mov r12, #0 \n\t" \
+ "umull r8, r11, r4, r7 \n\t" \
+ "umull r1, r14, r5, r6 \n\t" \
+ "adds r8, r1 \n\t" \
+ "adcs r11, r14 \n\t" \
+ "adc r12, #0 \n\t" \
+ "adds r8, r8 \n\t" \
+ "adcs r11, r11 \n\t" \
+ "adc r12, r12 \n\t" \
+ "adds r8, r9 \n\t" \
+ "adcs r11, r10 \n\t" \
+ "adc r12, #0 \n\t" \
+ "stmia r0!, {r8} \n\t" \
+ \
+ "mov r8, #0 \n\t" \
+ "umull r1, r10, r5, r7 \n\t" \
+ "adds r1, r1 \n\t" \
+ "adcs r10, r10 \n\t" \
+ "adc r8, #0 \n\t" \
+ "adds r11, r1 \n\t" \
+ "adcs r12, r10 \n\t" \
+ "adc r8, #0 \n\t" \
+ "umull r1, r10, r6, r6 \n\t" \
+ "adds r11, r1 \n\t" \
+ "adcs r12, r10 \n\t" \
+ "adc r8, #0 \n\t" \
+ "stmia r0!, {r11} \n\t" \
+ \
+ "mov r11, #0 \n\t" \
+ "umull r1, r10, r6, r7 \n\t" \
+ "adds r1, r1 \n\t" \
+ "adcs r10, r10 \n\t" \
+ "adc r11, #0 \n\t" \
+ "adds r12, r1 \n\t" \
+ "adcs r8, r10 \n\t" \
+ "adc r11, #0 \n\t" \
+ "stmia r0!, {r12} \n\t" \
+ \
+ "umull r1, r10, r7, r7 \n\t" \
+ "adds r8, r1 \n\t" \
+ "adcs r11, r10 \n\t" \
+ "stmia r0!, {r8, r11} \n\t"
+
+#define FAST_SQUARE_ASM_7 \
+ "ldmia r1!, {r2} \n\t" \
+ "add r1, 20 \n\t" \
+ "ldmia r1!, {r5} \n\t" \
+ "add r0, 24 \n\t" \
+ "umull r8, r9, r2, r5 \n\t" \
+ "stmia r0!, {r8, r9} \n\t" \
+ "sub r0, 32 \n\t" \
+ "sub r1, 28 \n\t" \
+ \
+ "ldmia r1!, {r2, r3, r4, r5, r6, r7} \n\t" \
+ \
+ "umull r11, r12, r2, r2 \n\t" \
+ "stmia r0!, {r11} \n\t" \
+ \
+ "mov r9, #0 \n\t" \
+ "umull r10, r11, r2, r3 \n\t" \
+ "adds r12, r10 \n\t" \
+ "adcs r8, r11, #0 \n\t" \
+ "adc r9, #0 \n\t" \
+ "adds r12, r10 \n\t" \
+ "adcs r8, r11 \n\t" \
+ "adc r9, #0 \n\t" \
+ "stmia r0!, {r12} \n\t" \
+ \
+ "mov r10, #0 \n\t" \
+ "umull r11, r12, r2, r4 \n\t" \
+ "adds r11, r11 \n\t" \
+ "adcs r12, r12 \n\t" \
+ "adc r10, #0 \n\t" \
+ "adds r8, r11 \n\t" \
+ "adcs r9, r12 \n\t" \
+ "adc r10, #0 \n\t" \
+ "umull r11, r12, r3, r3 \n\t" \
+ "adds r8, r11 \n\t" \
+ "adcs r9, r12 \n\t" \
+ "adc r10, #0 \n\t" \
+ "stmia r0!, {r8} \n\t" \
+ \
+ "mov r12, #0 \n\t" \
+ "umull r8, r11, r2, r5 \n\t" \
+ "mov r14, r11 \n\t" \
+ "umlal r8, r11, r3, r4 \n\t" \
+ "cmp r14, r11 \n\t" \
+ "it hi \n\t" \
+ "adchi r12, #0 \n\t" \
+ "adds r8, r8 \n\t" \
+ "adcs r11, r11 \n\t" \
+ "adc r12, r12 \n\t" \
+ "adds r8, r9 \n\t" \
+ "adcs r11, r10 \n\t" \
+ "adc r12, #0 \n\t" \
+ "stmia r0!, {r8} \n\t" \
+ \
+ "mov r10, #0 \n\t" \
+ "umull r8, r9, r2, r6 \n\t" \
+ "mov r14, r9 \n\t" \
+ "umlal r8, r9, r3, r5 \n\t" \
+ "cmp r14, r9 \n\t" \
+ "it hi \n\t" \
+ "adchi r10, #0 \n\t" \
+ "adds r8, r8 \n\t" \
+ "adcs r9, r9 \n\t" \
+ "adc r10, r10 \n\t" \
+ "mov r14, r9 \n\t" \
+ "umlal r8, r9, r4, r4 \n\t" \
+ "cmp r14, r9 \n\t" \
+ "it hi \n\t" \
+ "adchi r10, #0 \n\t" \
+ "adds r8, r11 \n\t" \
+ "adcs r9, r12 \n\t" \
+ "adc r10, #0 \n\t" \
+ "stmia r0!, {r8} \n\t" \
+ \
+ "mov r12, #0 \n\t" \
+ "umull r8, r11, r2, r7 \n\t" \
+ "mov r14, r11 \n\t" \
+ "umlal r8, r11, r3, r6 \n\t" \
+ "cmp r14, r11 \n\t" \
+ "it hi \n\t" \
+ "adchi r12, #0 \n\t" \
+ "mov r14, r11 \n\t" \
+ "umlal r8, r11, r4, r5 \n\t" \
+ "cmp r14, r11 \n\t" \
+ "it hi \n\t" \
+ "adchi r12, #0 \n\t" \
+ "adds r8, r8 \n\t" \
+ "adcs r11, r11 \n\t" \
+ "adc r12, r12 \n\t" \
+ "adds r8, r9 \n\t" \
+ "adcs r11, r10 \n\t" \
+ "adc r12, #0 \n\t" \
+ "stmia r0!, {r8} \n\t" \
+ \
+ "ldmia r1!, {r2} \n\t" \
+ "mov r10, #0 \n\t" \
+ "umull r8, r9, r3, r7 \n\t" \
+ "mov r14, r9 \n\t" \
+ "umlal r8, r9, r4, r6 \n\t" \
+ "cmp r14, r9 \n\t" \
+ "it hi \n\t" \
+ "adchi r10, #0 \n\t" \
+ "ldr r14, [r0] \n\t" \
+ "adds r8, r14 \n\t" \
+ "adcs r9, #0 \n\t" \
+ "adc r10, #0 \n\t" \
+ "adds r8, r8 \n\t" \
+ "adcs r9, r9 \n\t" \
+ "adc r10, r10 \n\t" \
+ "mov r14, r9 \n\t" \
+ "umlal r8, r9, r5, r5 \n\t" \
+ "cmp r14, r9 \n\t" \
+ "it hi \n\t" \
+ "adchi r10, #0 \n\t" \
+ "adds r8, r11 \n\t" \
+ "adcs r9, r12 \n\t" \
+ "adc r10, #0 \n\t" \
+ "stmia r0!, {r8} \n\t" \
+ \
+ "mov r12, #0 \n\t" \
+ "umull r8, r11, r3, r2 \n\t" \
+ "mov r14, r11 \n\t" \
+ "umlal r8, r11, r4, r7 \n\t" \
+ "cmp r14, r11 \n\t" \
+ "it hi \n\t" \
+ "adchi r12, #0 \n\t" \
+ "mov r14, r11 \n\t" \
+ "umlal r8, r11, r5, r6 \n\t" \
+ "cmp r14, r11 \n\t" \
+ "it hi \n\t" \
+ "adchi r12, #0 \n\t" \
+ "ldr r14, [r0] \n\t" \
+ "adds r8, r14 \n\t" \
+ "adcs r11, #0 \n\t" \
+ "adc r12, #0 \n\t" \
+ "adds r8, r8 \n\t" \
+ "adcs r11, r11 \n\t" \
+ "adc r12, r12 \n\t" \
+ "adds r8, r9 \n\t" \
+ "adcs r11, r10 \n\t" \
+ "adc r12, #0 \n\t" \
+ "stmia r0!, {r8} \n\t" \
+ \
+ "mov r10, #0 \n\t" \
+ "umull r8, r9, r4, r2 \n\t" \
+ "mov r14, r9 \n\t" \
+ "umlal r8, r9, r5, r7 \n\t" \
+ "cmp r14, r9 \n\t" \
+ "it hi \n\t" \
+ "adchi r10, #0 \n\t" \
+ "adds r8, r8 \n\t" \
+ "adcs r9, r9 \n\t" \
+ "adc r10, r10 \n\t" \
+ "mov r14, r9 \n\t" \
+ "umlal r8, r9, r6, r6 \n\t" \
+ "cmp r14, r9 \n\t" \
+ "it hi \n\t" \
+ "adchi r10, #0 \n\t" \
+ "adds r8, r11 \n\t" \
+ "adcs r9, r12 \n\t" \
+ "adc r10, #0 \n\t" \
+ "stmia r0!, {r8} \n\t" \
+ \
+ "mov r12, #0 \n\t" \
+ "umull r8, r11, r5, r2 \n\t" \
+ "mov r14, r11 \n\t" \
+ "umlal r8, r11, r6, r7 \n\t" \
+ "cmp r14, r11 \n\t" \
+ "it hi \n\t" \
+ "adchi r12, #0 \n\t" \
+ "adds r8, r8 \n\t" \
+ "adcs r11, r11 \n\t" \
+ "adc r12, r12 \n\t" \
+ "adds r8, r9 \n\t" \
+ "adcs r11, r10 \n\t" \
+ "adc r12, #0 \n\t" \
+ "stmia r0!, {r8} \n\t" \
+ \
+ "mov r8, #0 \n\t" \
+ "umull r1, r10, r6, r2 \n\t" \
+ "adds r1, r1 \n\t" \
+ "adcs r10, r10 \n\t" \
+ "adc r8, #0 \n\t" \
+ "adds r11, r1 \n\t" \
+ "adcs r12, r10 \n\t" \
+ "adc r8, #0 \n\t" \
+ "umull r1, r10, r7, r7 \n\t" \
+ "adds r11, r1 \n\t" \
+ "adcs r12, r10 \n\t" \
+ "adc r8, #0 \n\t" \
+ "stmia r0!, {r11} \n\t" \
+ \
+ "mov r11, #0 \n\t" \
+ "umull r1, r10, r7, r2 \n\t" \
+ "adds r1, r1 \n\t" \
+ "adcs r10, r10 \n\t" \
+ "adc r11, #0 \n\t" \
+ "adds r12, r1 \n\t" \
+ "adcs r8, r10 \n\t" \
+ "adc r11, #0 \n\t" \
+ "stmia r0!, {r12} \n\t" \
+ \
+ "umull r1, r10, r2, r2 \n\t" \
+ "adds r8, r1 \n\t" \
+ "adcs r11, r10 \n\t" \
+ "stmia r0!, {r8, r11} \n\t"
+
+#define FAST_SQUARE_ASM_8 \
+ "ldmia r1!, {r2, r3} \n\t" \
+ "add r1, 16 \n\t" \
+ "ldmia r1!, {r5, r6} \n\t" \
+ "add r0, 24 \n\t" \
+ \
+ "umull r8, r9, r2, r5 \n\t" \
+ "stmia r0!, {r8} \n\t" \
+ \
+ "umull r12, r10, r2, r6 \n\t" \
+ "adds r9, r12 \n\t" \
+ "adc r10, #0 \n\t" \
+ "stmia r0!, {r9} \n\t" \
+ \
+ "umull r8, r9, r3, r6 \n\t" \
+ "adds r10, r8 \n\t" \
+ "adc r11, r9, #0 \n\t" \
+ "stmia r0!, {r10, r11} \n\t" \
+ \
+ "sub r0, 40 \n\t" \
+ "sub r1, 32 \n\t" \
+ "ldmia r1!, {r2,r3,r4,r5,r6,r7} \n\t" \
+ \
+ "umull r11, r12, r2, r2 \n\t" \
+ "stmia r0!, {r11} \n\t" \
+ \
+ "mov r9, #0 \n\t" \
+ "umull r10, r11, r2, r3 \n\t" \
+ "adds r12, r10 \n\t" \
+ "adcs r8, r11, #0 \n\t" \
+ "adc r9, #0 \n\t" \
+ "adds r12, r10 \n\t" \
+ "adcs r8, r11 \n\t" \
+ "adc r9, #0 \n\t" \
+ "stmia r0!, {r12} \n\t" \
+ \
+ "mov r10, #0 \n\t" \
+ "umull r11, r12, r2, r4 \n\t" \
+ "adds r11, r11 \n\t" \
+ "adcs r12, r12 \n\t" \
+ "adc r10, #0 \n\t" \
+ "adds r8, r11 \n\t" \
+ "adcs r9, r12 \n\t" \
+ "adc r10, #0 \n\t" \
+ "umull r11, r12, r3, r3 \n\t" \
+ "adds r8, r11 \n\t" \
+ "adcs r9, r12 \n\t" \
+ "adc r10, #0 \n\t" \
+ "stmia r0!, {r8} \n\t" \
+ \
+ "mov r12, #0 \n\t" \
+ "umull r8, r11, r2, r5 \n\t" \
+ "mov r14, r11 \n\t" \
+ "umlal r8, r11, r3, r4 \n\t" \
+ "cmp r14, r11 \n\t" \
+ "it hi \n\t" \
+ "adchi r12, #0 \n\t" \
+ "adds r8, r8 \n\t" \
+ "adcs r11, r11 \n\t" \
+ "adc r12, r12 \n\t" \
+ "adds r8, r9 \n\t" \
+ "adcs r11, r10 \n\t" \
+ "adc r12, #0 \n\t" \
+ "stmia r0!, {r8} \n\t" \
+ \
+ "mov r10, #0 \n\t" \
+ "umull r8, r9, r2, r6 \n\t" \
+ "mov r14, r9 \n\t" \
+ "umlal r8, r9, r3, r5 \n\t" \
+ "cmp r14, r9 \n\t" \
+ "it hi \n\t" \
+ "adchi r10, #0 \n\t" \
+ "adds r8, r8 \n\t" \
+ "adcs r9, r9 \n\t" \
+ "adc r10, r10 \n\t" \
+ "mov r14, r9 \n\t" \
+ "umlal r8, r9, r4, r4 \n\t" \
+ "cmp r14, r9 \n\t" \
+ "it hi \n\t" \
+ "adchi r10, #0 \n\t" \
+ "adds r8, r11 \n\t" \
+ "adcs r9, r12 \n\t" \
+ "adc r10, #0 \n\t" \
+ "stmia r0!, {r8} \n\t" \
+ \
+ "mov r12, #0 \n\t" \
+ "umull r8, r11, r2, r7 \n\t" \
+ "mov r14, r11 \n\t" \
+ "umlal r8, r11, r3, r6 \n\t" \
+ "cmp r14, r11 \n\t" \
+ "it hi \n\t" \
+ "adchi r12, #0 \n\t" \
+ "mov r14, r11 \n\t" \
+ "umlal r8, r11, r4, r5 \n\t" \
+ "cmp r14, r11 \n\t" \
+ "it hi \n\t" \
+ "adchi r12, #0 \n\t" \
+ "adds r8, r8 \n\t" \
+ "adcs r11, r11 \n\t" \
+ "adc r12, r12 \n\t" \
+ "adds r8, r9 \n\t" \
+ "adcs r11, r10 \n\t" \
+ "adc r12, #0 \n\t" \
+ "stmia r0!, {r8} \n\t" \
+ \
+ "ldmia r1!, {r2} \n\t" \
+ "mov r10, #0 \n\t" \
+ "umull r8, r9, r3, r7 \n\t" \
+ "mov r14, r9 \n\t" \
+ "umlal r8, r9, r4, r6 \n\t" \
+ "cmp r14, r9 \n\t" \
+ "it hi \n\t" \
+ "adchi r10, #0 \n\t" \
+ "ldr r14, [r0] \n\t" \
+ "adds r8, r14 \n\t" \
+ "adcs r9, #0 \n\t" \
+ "adc r10, #0 \n\t" \
+ "adds r8, r8 \n\t" \
+ "adcs r9, r9 \n\t" \
+ "adc r10, r10 \n\t" \
+ "mov r14, r9 \n\t" \
+ "umlal r8, r9, r5, r5 \n\t" \
+ "cmp r14, r9 \n\t" \
+ "it hi \n\t" \
+ "adchi r10, #0 \n\t" \
+ "adds r8, r11 \n\t" \
+ "adcs r9, r12 \n\t" \
+ "adc r10, #0 \n\t" \
+ "stmia r0!, {r8} \n\t" \
+ \
+ "mov r12, #0 \n\t" \
+ "umull r8, r11, r3, r2 \n\t" \
+ "mov r14, r11 \n\t" \
+ "umlal r8, r11, r4, r7 \n\t" \
+ "cmp r14, r11 \n\t" \
+ "it hi \n\t" \
+ "adchi r12, #0 \n\t" \
+ "mov r14, r11 \n\t" \
+ "umlal r8, r11, r5, r6 \n\t" \
+ "cmp r14, r11 \n\t" \
+ "it hi \n\t" \
+ "adchi r12, #0 \n\t" \
+ "ldr r14, [r0] \n\t" \
+ "adds r8, r14 \n\t" \
+ "adcs r11, #0 \n\t" \
+ "adc r12, #0 \n\t" \
+ "adds r8, r8 \n\t" \
+ "adcs r11, r11 \n\t" \
+ "adc r12, r12 \n\t" \
+ "adds r8, r9 \n\t" \
+ "adcs r11, r10 \n\t" \
+ "adc r12, #0 \n\t" \
+ "stmia r0!, {r8} \n\t" \
+ \
+ "ldmia r1!, {r3} \n\t" \
+ "mov r10, #0 \n\t" \
+ "umull r8, r9, r4, r2 \n\t" \
+ "mov r14, r9 \n\t" \
+ "umlal r8, r9, r5, r7 \n\t" \
+ "cmp r14, r9 \n\t" \
+ "it hi \n\t" \
+ "adchi r10, #0 \n\t" \
+ "ldr r14, [r0] \n\t" \
+ "adds r8, r14 \n\t" \
+ "adcs r9, #0 \n\t" \
+ "adc r10, #0 \n\t" \
+ "adds r8, r8 \n\t" \
+ "adcs r9, r9 \n\t" \
+ "adc r10, r10 \n\t" \
+ "mov r14, r9 \n\t" \
+ "umlal r8, r9, r6, r6 \n\t" \
+ "cmp r14, r9 \n\t" \
+ "it hi \n\t" \
+ "adchi r10, #0 \n\t" \
+ "adds r8, r11 \n\t" \
+ "adcs r9, r12 \n\t" \
+ "adc r10, #0 \n\t" \
+ "stmia r0!, {r8} \n\t" \
+ \
+ "mov r12, #0 \n\t" \
+ "umull r8, r11, r4, r3 \n\t" \
+ "mov r14, r11 \n\t" \
+ "umlal r8, r11, r5, r2 \n\t" \
+ "cmp r14, r11 \n\t" \
+ "it hi \n\t" \
+ "adchi r12, #0 \n\t" \
+ "mov r14, r11 \n\t" \
+ "umlal r8, r11, r6, r7 \n\t" \
+ "cmp r14, r11 \n\t" \
+ "it hi \n\t" \
+ "adchi r12, #0 \n\t" \
+ "ldr r14, [r0] \n\t" \
+ "adds r8, r14 \n\t" \
+ "adcs r11, #0 \n\t" \
+ "adc r12, #0 \n\t" \
+ "adds r8, r8 \n\t" \
+ "adcs r11, r11 \n\t" \
+ "adc r12, r12 \n\t" \
+ "adds r8, r9 \n\t" \
+ "adcs r11, r10 \n\t" \
+ "adc r12, #0 \n\t" \
+ "stmia r0!, {r8} \n\t" \
+ \
+ "mov r10, #0 \n\t" \
+ "umull r8, r9, r5, r3 \n\t" \
+ "mov r14, r9 \n\t" \
+ "umlal r8, r9, r6, r2 \n\t" \
+ "cmp r14, r9 \n\t" \
+ "it hi \n\t" \
+ "adchi r10, #0 \n\t" \
+ "adds r8, r8 \n\t" \
+ "adcs r9, r9 \n\t" \
+ "adc r10, r10 \n\t" \
+ "mov r14, r9 \n\t" \
+ "umlal r8, r9, r7, r7 \n\t" \
+ "cmp r14, r9 \n\t" \
+ "it hi \n\t" \
+ "adchi r10, #0 \n\t" \
+ "adds r8, r11 \n\t" \
+ "adcs r9, r12 \n\t" \
+ "adc r10, #0 \n\t" \
+ "stmia r0!, {r8} \n\t" \
+ \
+ "mov r12, #0 \n\t" \
+ "umull r8, r11, r6, r3 \n\t" \
+ "mov r14, r11 \n\t" \
+ "umlal r8, r11, r7, r2 \n\t" \
+ "cmp r14, r11 \n\t" \
+ "it hi \n\t" \
+ "adchi r12, #0 \n\t" \
+ "adds r8, r8 \n\t" \
+ "adcs r11, r11 \n\t" \
+ "adc r12, r12 \n\t" \
+ "adds r8, r9 \n\t" \
+ "adcs r11, r10 \n\t" \
+ "adc r12, #0 \n\t" \
+ "stmia r0!, {r8} \n\t" \
+ \
+ "mov r8, #0 \n\t" \
+ "umull r1, r10, r7, r3 \n\t" \
+ "adds r1, r1 \n\t" \
+ "adcs r10, r10 \n\t" \
+ "adc r8, #0 \n\t" \
+ "adds r11, r1 \n\t" \
+ "adcs r12, r10 \n\t" \
+ "adc r8, #0 \n\t" \
+ "umull r1, r10, r2, r2 \n\t" \
+ "adds r11, r1 \n\t" \
+ "adcs r12, r10 \n\t" \
+ "adc r8, #0 \n\t" \
+ "stmia r0!, {r11} \n\t" \
+ \
+ "mov r11, #0 \n\t" \
+ "umull r1, r10, r2, r3 \n\t" \
+ "adds r1, r1 \n\t" \
+ "adcs r10, r10 \n\t" \
+ "adc r11, #0 \n\t" \
+ "adds r12, r1 \n\t" \
+ "adcs r8, r10 \n\t" \
+ "adc r11, #0 \n\t" \
+ "stmia r0!, {r12} \n\t" \
+ \
+ "umull r1, r10, r3, r3 \n\t" \
+ "adds r8, r1 \n\t" \
+ "adcs r11, r10 \n\t" \
+ "stmia r0!, {r8, r11} \n\t"
+
+#endif /* _UECC_ASM_ARM_MULT_SQUARE_H_ */
diff --git a/asm_arm_small.inc b/asm_arm_small.inc
index 2b1b0a7..8e285fa 100644
--- a/asm_arm_small.inc
+++ b/asm_arm_small.inc
@@ -1,3 +1,8 @@
+/* Copyright 2015, Kenneth MacKay. Licensed under the BSD 2-clause license. */
+
+#ifndef _UECC_ASM_ARM_SMALL_H_
+#define _UECC_ASM_ARM_SMALL_H_
+
#if (uECC_PLATFORM == uECC_arm_thumb)
#define REG_RW "+l"
#define REG_WRITE "=l"
@@ -12,6 +17,7 @@
#define RESUME_SYNTAX ".syntax divided \n\t"
#endif
+#if !asm_add
static uECC_word_t vli_add(uECC_word_t *result,
const uECC_word_t *left,
const uECC_word_t *right,
@@ -41,11 +47,13 @@
return carry;
}
#define asm_add 1
+#endif
-static uint32_t vli_sub(uECC_word_t *result,
- const uECC_word_t *left,
- const uECC_word_t *right,
- wordcount_t num_words) {
+#if !asm_sub
+static uECC_word_t vli_sub(uECC_word_t *result,
+ const uECC_word_t *left,
+ const uECC_word_t *right,
+ wordcount_t num_words) {
uint32_t carry = 1; /* carry = 1 initially (means don't borrow) */
uint32_t left_word;
uint32_t right_word;
@@ -71,7 +79,9 @@
return !carry;
}
#define asm_sub 1
+#endif
+#if !asm_mult
static void vli_mult(uECC_word_t *result,
const uECC_word_t *left,
const uECC_word_t *right,
@@ -229,8 +239,10 @@
#endif
}
#define asm_mult 1
+#endif
#if uECC_SQUARE_FUNC
+#if !asm_square
static void vli_square(uECC_word_t *result, const uECC_word_t *left, wordcount_t num_words) {
#if (uECC_PLATFORM != uECC_arm_thumb)
uint32_t c0 = 0;
@@ -404,4 +416,7 @@
#endif
}
#define asm_square 1
+#endif
#endif /* uECC_SQUARE_FUNC */
+
+#endif /* _UECC_ASM_ARM_SMALL_H_ */
diff --git a/curve-specific.inc b/curve-specific.inc
index 57e528d..23a2395 100644
--- a/curve-specific.inc
+++ b/curve-specific.inc
@@ -1,3 +1,8 @@
+/* Copyright 2015, Kenneth MacKay. Licensed under the BSD 2-clause license. */
+
+#ifndef _UECC_CURVE_SPECIFIC_H_
+#define _UECC_CURVE_SPECIFIC_H_
+
#define num_bytes_secp160r1 20
#define num_bytes_secp192r1 24
#define num_bytes_secp224r1 28
@@ -46,26 +51,85 @@
#endif /* uECC_WORD_SIZE */
-static void double_jacobian_secp256k1(uECC_word_t * X1,
- uECC_word_t * Y1,
- uECC_word_t * Z1,
- uECC_Curve curve);
static void double_jacobian_default(uECC_word_t * X1,
uECC_word_t * Y1,
uECC_word_t * Z1,
- uECC_Curve curve);
+ uECC_Curve curve) {
+ /* t1 = X, t2 = Y, t3 = Z */
+ uECC_word_t t4[uECC_MAX_WORDS];
+ uECC_word_t t5[uECC_MAX_WORDS];
-static void mod_sqrt_default(uECC_word_t *a, uECC_Curve curve);
-static void mod_sqrt_secp224r1(uECC_word_t *a, uECC_Curve curve);
+ if (vli_isZero(Z1, curve->num_words)) {
+ return;
+ }
-static void x_side_default(uECC_word_t *result, const uECC_word_t *x, uECC_Curve curve);
-static void x_side_secp256k1(uECC_word_t *result, const uECC_word_t *x, uECC_Curve curve);
+ vli_modSquare_fast(t4, Y1, curve); /* t4 = y1^2 */
+ vli_modMult_fast(t5, X1, t4, curve); /* t5 = x1*y1^2 = A */
+ vli_modSquare_fast(t4, t4, curve); /* t4 = y1^4 */
+ vli_modMult_fast(Y1, Y1, Z1, curve); /* t2 = y1*z1 = z3 */
+ vli_modSquare_fast(Z1, Z1, curve); /* t3 = z1^2 */
+ vli_modAdd(X1, X1, Z1, curve->p, curve->num_words); /* t1 = x1 + z1^2 */
+ vli_modAdd(Z1, Z1, Z1, curve->p, curve->num_words); /* t3 = 2*z1^2 */
+ vli_modSub(Z1, X1, Z1, curve->p, curve->num_words); /* t3 = x1 - z1^2 */
+ vli_modMult_fast(X1, X1, Z1, curve); /* t1 = x1^2 - z1^4 */
+
+ vli_modAdd(Z1, X1, X1, curve->p, curve->num_words); /* t3 = 2*(x1^2 - z1^4) */
+ vli_modAdd(X1, X1, Z1, curve->p, curve->num_words); /* t1 = 3*(x1^2 - z1^4) */
+ if (vli_testBit(X1, 0)) {
+ uECC_word_t l_carry = vli_add(X1, X1, curve->p, curve->num_words);
+ vli_rshift1(X1, curve->num_words);
+ X1[curve->num_words - 1] |= l_carry << (uECC_WORD_BITS - 1);
+ } else {
+ vli_rshift1(X1, curve->num_words);
+ }
+ /* t1 = 3/2*(x1^2 - z1^4) = B */
+
+ vli_modSquare_fast(Z1, X1, curve); /* t3 = B^2 */
+ vli_modSub(Z1, Z1, t5, curve->p, curve->num_words); /* t3 = B^2 - A */
+ vli_modSub(Z1, Z1, t5, curve->p, curve->num_words); /* t3 = B^2 - 2A = x3 */
+ vli_modSub(t5, t5, Z1, curve->p, curve->num_words); /* t5 = A - x3 */
+ vli_modMult_fast(X1, X1, t5, curve); /* t1 = B * (A - x3) */
+ vli_modSub(t4, X1, t4, curve->p, curve->num_words); /* t4 = B * (A - x3) - y1^4 = y3 */
+
+ vli_set(X1, Z1, curve->num_words);
+ vli_set(Z1, Y1, curve->num_words);
+ vli_set(Y1, t4, curve->num_words);
+}
+
+/* Compute a = sqrt(a) (mod curve_p). */
+static void mod_sqrt_default(uECC_word_t *a, uECC_Curve curve) {
+ bitcount_t i;
+ uECC_word_t p1[uECC_MAX_WORDS] = {1};
+ uECC_word_t l_result[uECC_MAX_WORDS] = {1};
+
+ /* When curve->p == 3 (mod 4), we can compute
+ sqrt(a) = a^((curve->p + 1) / 4) (mod curve->p). */
+ vli_add(p1, curve->p, p1, curve->num_words); /* p1 = curve_p + 1 */
+ for (i = vli_numBits(p1, curve->num_words) - 1; i > 1; --i) {
+ vli_modSquare_fast(l_result, l_result, curve);
+ if (vli_testBit(p1, i)) {
+ vli_modMult_fast(l_result, l_result, a, curve);
+ }
+ }
+ vli_set(a, l_result, curve->num_words);
+}
+
+/* Computes result = x^3 + ax + b. result must not overlap x. */
+static void x_side_default(uECC_word_t *result, const uECC_word_t *x, uECC_Curve curve) {
+ uECC_word_t _3[uECC_MAX_WORDS] = {3}; /* -a = 3 */
+
+ vli_modSquare_fast(result, x, curve); /* r = x^2 */
+ vli_modSub(result, result, _3, curve->p, curve->num_words); /* r = x^2 - 3 */
+ vli_modMult_fast(result, result, x, curve); /* r = x^3 - 3x */
+ vli_modAdd(result, result, curve->b, curve->p, curve->num_words); /* r = x^3 - 3x + b */
+}
+
+#if uECC_SUPPORTS_secp160r1
+
+#if (uECC_OPTIMIZATION_LEVEL > 0)
static void vli_mmod_fast_secp160r1(uECC_word_t *result, uECC_word_t *product);
-static void vli_mmod_fast_secp192r1(uECC_word_t *result, uECC_word_t *product);
-static void vli_mmod_fast_secp224r1(uECC_word_t *result, uECC_word_t *product);
-static void vli_mmod_fast_secp256r1(uECC_word_t *result, uECC_word_t *product);
-static void vli_mmod_fast_secp256k1(uECC_word_t *result, uECC_word_t *product);
+#endif
static const struct uECC_Curve_t curve_secp160r1 = {
num_words_secp160r1,
@@ -90,9 +154,126 @@
&double_jacobian_default,
&mod_sqrt_default,
&x_side_default,
+#if (uECC_OPTIMIZATION_LEVEL > 0)
&vli_mmod_fast_secp160r1
+#endif
};
+uECC_Curve uECC_secp160r1(void) { return &curve_secp160r1; }
+
+#if (uECC_OPTIMIZATION_LEVEL > 0)
+/* Computes result = product % curve_p
+ see http://www.isys.uni-klu.ac.at/PDF/2001-0126-MT.pdf page 354
+
+ Note that this only works if log2(omega) < log2(p) / 2 */
+static void omega_mult_secp160r1(uECC_word_t *result, const uECC_word_t *right);
+#if uECC_WORD_SIZE == 8
+static void vli_mmod_fast_secp160r1(uECC_word_t *result, uECC_word_t *product) {
+ uECC_word_t tmp[2 * num_words_secp160r1];
+ uECC_word_t copy;
+
+ vli_clear(tmp, num_words_secp160r1);
+ vli_clear(tmp + num_words_secp160r1, num_words_secp160r1);
+
+ omega_mult_secp160r1(tmp, product + num_words_secp160r1 - 1); /* (Rq, q) = q * c */
+
+ product[num_words_secp160r1 - 1] &= 0xffffffff;
+ copy = tmp[num_words_secp160r1 - 1];
+ tmp[num_words_secp160r1 - 1] &= 0xffffffff;
+ vli_add(result, product, tmp, num_words_secp160r1); /* (C, r) = r + q */
+ vli_clear(product, num_words_secp160r1);
+ tmp[num_words_secp160r1 - 1] = copy;
+ omega_mult_secp160r1(product, tmp + num_words_secp160r1 - 1); /* Rq*c */
+ vli_add(result, result, product, num_words_secp160r1); /* (C1, r) = r + Rq*c */
+
+ while (vli_cmp(result, curve_secp160r1.p, num_words_secp160r1) > 0) {
+ vli_sub(result, result, curve_secp160r1.p, num_words_secp160r1);
+ }
+}
+
+static void omega_mult_secp160r1(uint64_t *result, const uint64_t *right) {
+ uint32_t carry;
+ unsigned i;
+
+ /* Multiply by (2^31 + 1). */
+ carry = 0;
+ for (i = 0; i < num_words_secp160r1; ++i) {
+ uint64_t tmp = (right[i] >> 32) | (right[i + 1] << 32);
+ result[i] = (tmp << 31) + tmp + carry;
+ carry = (tmp >> 33) + (result[i] < tmp || (carry && result[i] == tmp));
+ }
+ result[i] = carry;
+}
+#else
+static void vli_mmod_fast_secp160r1(uECC_word_t *result, uECC_word_t *product) {
+ uECC_word_t tmp[2 * num_words_secp160r1];
+ uECC_word_t carry;
+
+ vli_clear(tmp, num_words_secp160r1);
+ vli_clear(tmp + num_words_secp160r1, num_words_secp160r1);
+
+ omega_mult_secp160r1(tmp, product + num_words_secp160r1); /* (Rq, q) = q * c */
+
+ carry = vli_add(result, product, tmp, num_words_secp160r1); /* (C, r) = r + q */
+ vli_clear(product, num_words_secp160r1);
+ omega_mult_secp160r1(product, tmp + num_words_secp160r1); /* Rq*c */
+ carry += vli_add(result, result, product, num_words_secp160r1); /* (C1, r) = r + Rq*c */
+
+ while (carry > 0) {
+ --carry;
+ vli_sub(result, result, curve_secp160r1.p, num_words_secp160r1);
+ }
+ if (vli_cmp(result, curve_secp160r1.p, num_words_secp160r1) > 0) {
+ vli_sub(result, result, curve_secp160r1.p, num_words_secp160r1);
+ }
+}
+#endif
+
+#if uECC_WORD_SIZE == 1
+static void omega_mult_secp160r1(uint8_t *result, const uint8_t *right) {
+ uint8_t carry;
+ uint8_t i;
+
+ /* Multiply by (2^31 + 1). */
+ vli_set(result + 4, right, num_words_secp160r1); /* 2^32 */
+ vli_rshift1(result + 4, num_words_secp160r1); /* 2^31 */
+ result[3] = right[0] << 7; /* get last bit from shift */
+
+ carry = vli_add(result, result, right, num_words_secp160r1); /* 2^31 + 1 */
+ for (i = num_words_secp160r1; carry; ++i) {
+ uint16_t sum = (uint16_t)result[i] + carry;
+ result[i] = (uint8_t)sum;
+ carry = sum >> 8;
+ }
+}
+#elif uECC_WORD_SIZE == 4
+static void omega_mult_secp160r1(uint32_t *result, const uint32_t *right) {
+ uint32_t carry;
+ unsigned i;
+
+ /* Multiply by (2^31 + 1). */
+ vli_set(result + 1, right, num_words_secp160r1); /* 2^32 */
+ vli_rshift1(result + 1, num_words_secp160r1); /* 2^31 */
+ result[0] = right[0] << 31; /* get last bit from shift */
+
+ carry = vli_add(result, result, right, num_words_secp160r1); /* 2^31 + 1 */
+ for (i = num_words_secp160r1; carry; ++i) {
+ uint64_t sum = (uint64_t)result[i] + carry;
+ result[i] = (uint32_t)sum;
+ carry = sum >> 32;
+ }
+}
+#endif /* uECC_WORD_SIZE */
+#endif /* (uECC_OPTIMIZATION_LEVEL > 0) */
+
+#endif /* uECC_SUPPORTS_secp160r1 */
+
+#if uECC_SUPPORTS_secp192r1
+
+#if (uECC_OPTIMIZATION_LEVEL > 0)
+static void vli_mmod_fast_secp192r1(uECC_word_t *result, uECC_word_t *product);
+#endif
+
static const struct uECC_Curve_t curve_secp192r1 = {
num_words_secp192r1,
num_words_secp192r1,
@@ -116,9 +297,109 @@
&double_jacobian_default,
&mod_sqrt_default,
&x_side_default,
+#if (uECC_OPTIMIZATION_LEVEL > 0)
&vli_mmod_fast_secp192r1
+#endif
};
+uECC_Curve uECC_secp192r1(void) { return &curve_secp192r1; }
+
+#if (uECC_OPTIMIZATION_LEVEL > 0)
+/* Computes result = product % curve_p.
+ See algorithm 5 and 6 from http://www.isys.uni-klu.ac.at/PDF/2001-0126-MT.pdf */
+#if uECC_WORD_SIZE == 1
+static void vli_mmod_fast_secp192r1(uint8_t *result, uint8_t *product) {
+ uint8_t tmp[num_words_secp192r1];
+ uint8_t carry;
+
+ vli_set(result, product, num_words_secp192r1);
+
+ vli_set(tmp, &product[24], num_words_secp192r1);
+ carry = vli_add(result, result, tmp, num_words_secp192r1);
+
+ tmp[0] = tmp[1] = tmp[2] = tmp[3] = tmp[4] = tmp[5] = tmp[6] = tmp[7] = 0;
+ tmp[8] = product[24]; tmp[9] = product[25]; tmp[10] = product[26]; tmp[11] = product[27];
+ tmp[12] = product[28]; tmp[13] = product[29]; tmp[14] = product[30]; tmp[15] = product[31];
+ tmp[16] = product[32]; tmp[17] = product[33]; tmp[18] = product[34]; tmp[19] = product[35];
+ tmp[20] = product[36]; tmp[21] = product[37]; tmp[22] = product[38]; tmp[23] = product[39];
+ carry += vli_add(result, result, tmp, num_words_secp192r1);
+
+ tmp[0] = tmp[8] = product[40];
+ tmp[1] = tmp[9] = product[41];
+ tmp[2] = tmp[10] = product[42];
+ tmp[3] = tmp[11] = product[43];
+ tmp[4] = tmp[12] = product[44];
+ tmp[5] = tmp[13] = product[45];
+ tmp[6] = tmp[14] = product[46];
+ tmp[7] = tmp[15] = product[47];
+ tmp[16] = tmp[17] = tmp[18] = tmp[19] = tmp[20] = tmp[21] = tmp[22] = tmp[23] = 0;
+ carry += vli_add(result, result, tmp, num_words_secp192r1);
+
+ while (carry || vli_cmp(curve_secp192r1.p, result, num_words_secp192r1) != 1) {
+ carry -= vli_sub(result, result, curve_secp192r1.p, num_words_secp192r1);
+ }
+}
+#elif uECC_WORD_SIZE == 4
+static void vli_mmod_fast_secp192r1(uint32_t *result, uint32_t *product) {
+ uint32_t tmp[num_words_secp192r1];
+ int carry;
+
+ vli_set(result, product, num_words_secp192r1);
+
+ vli_set(tmp, &product[6], num_words_secp192r1);
+ carry = vli_add(result, result, tmp, num_words_secp192r1);
+
+ tmp[0] = tmp[1] = 0;
+ tmp[2] = product[6];
+ tmp[3] = product[7];
+ tmp[4] = product[8];
+ tmp[5] = product[9];
+ carry += vli_add(result, result, tmp, num_words_secp192r1);
+
+ tmp[0] = tmp[2] = product[10];
+ tmp[1] = tmp[3] = product[11];
+ tmp[4] = tmp[5] = 0;
+ carry += vli_add(result, result, tmp, num_words_secp192r1);
+
+ while (carry || vli_cmp(curve_secp192r1.p, result, num_words_secp192r1) != 1) {
+ carry -= vli_sub(result, result, curve_secp192r1.p, num_words_secp192r1);
+ }
+}
+#else
+static void vli_mmod_fast_secp192r1(uint64_t *result, uint64_t *product) {
+ uint64_t tmp[num_words_secp192r1];
+ int carry;
+
+ vli_set(result, product, num_words_secp192r1);
+
+ vli_set(tmp, &product[3], num_words_secp192r1);
+ carry = vli_add(result, result, tmp, num_words_secp192r1);
+
+ tmp[0] = 0;
+ tmp[1] = product[3];
+ tmp[2] = product[4];
+ carry += vli_add(result, result, tmp, num_words_secp192r1);
+
+ tmp[0] = tmp[1] = product[5];
+ tmp[2] = 0;
+ carry += vli_add(result, result, tmp, num_words_secp192r1);
+
+ while (carry || vli_cmp(curve_secp192r1.p, result, num_words_secp192r1) != 1) {
+ carry -= vli_sub(result, result, curve_secp192r1.p, num_words_secp192r1);
+ }
+}
+#endif /* uECC_WORD_SIZE */
+#endif /* (uECC_OPTIMIZATION_LEVEL > 0) */
+
+#endif /* uECC_SUPPORTS_secp192r1 */
+
+#if uECC_SUPPORTS_secp224r1
+
+static void mod_sqrt_secp224r1(uECC_word_t *a, uECC_Curve curve);
+#if (uECC_OPTIMIZATION_LEVEL > 0)
+static void vli_mmod_fast_secp224r1(uECC_word_t *result, uECC_word_t *product);
+#endif
+
static const struct uECC_Curve_t curve_secp224r1 = {
num_words_secp224r1,
num_words_secp224r1,
@@ -147,179 +428,13 @@
&double_jacobian_default,
&mod_sqrt_secp224r1,
&x_side_default,
+#if (uECC_OPTIMIZATION_LEVEL > 0)
&vli_mmod_fast_secp224r1
+#endif
};
-static const struct uECC_Curve_t curve_secp256r1 = {
- num_words_secp256r1,
- num_words_secp256r1,
- num_bytes_secp256r1,
- { BYTES_TO_WORDS_8(FF, FF, FF, FF, FF, FF, FF, FF),
- BYTES_TO_WORDS_8(FF, FF, FF, FF, 00, 00, 00, 00),
- BYTES_TO_WORDS_8(00, 00, 00, 00, 00, 00, 00, 00),
- BYTES_TO_WORDS_8(01, 00, 00, 00, FF, FF, FF, FF) },
- { BYTES_TO_WORDS_8(51, 25, 63, FC, C2, CA, B9, F3),
- BYTES_TO_WORDS_8(84, 9E, 17, A7, AD, FA, E6, BC),
- BYTES_TO_WORDS_8(FF, FF, FF, FF, FF, FF, FF, FF),
- BYTES_TO_WORDS_8(00, 00, 00, 00, FF, FF, FF, FF) },
- { BYTES_TO_WORDS_8(96, C2, 98, D8, 45, 39, A1, F4),
- BYTES_TO_WORDS_8(A0, 33, EB, 2D, 81, 7D, 03, 77),
- BYTES_TO_WORDS_8(F2, 40, A4, 63, E5, E6, BC, F8),
- BYTES_TO_WORDS_8(47, 42, 2C, E1, F2, D1, 17, 6B),
-
- BYTES_TO_WORDS_8(F5, 51, BF, 37, 68, 40, B6, CB),
- BYTES_TO_WORDS_8(CE, 5E, 31, 6B, 57, 33, CE, 2B),
- BYTES_TO_WORDS_8(16, 9E, 0F, 7C, 4A, EB, E7, 8E),
- BYTES_TO_WORDS_8(9B, 7F, 1A, FE, E2, 42, E3, 4F) },
- { BYTES_TO_WORDS_8(4B, 60, D2, 27, 3E, 3C, CE, 3B),
- BYTES_TO_WORDS_8(F6, B0, 53, CC, B0, 06, 1D, 65),
- BYTES_TO_WORDS_8(BC, 86, 98, 76, 55, BD, EB, B3),
- BYTES_TO_WORDS_8(E7, 93, 3A, AA, D8, 35, C6, 5A) },
- &double_jacobian_default,
- &mod_sqrt_default,
- &x_side_default,
- &vli_mmod_fast_secp256r1
-};
-
-static const struct uECC_Curve_t curve_secp256k1 = {
- num_words_secp256k1,
- num_words_secp256k1,
- num_bytes_secp256k1,
- { BYTES_TO_WORDS_8(2F, FC, FF, FF, FE, FF, FF, FF),
- BYTES_TO_WORDS_8(FF, FF, FF, FF, FF, FF, FF, FF),
- BYTES_TO_WORDS_8(FF, FF, FF, FF, FF, FF, FF, FF),
- BYTES_TO_WORDS_8(FF, FF, FF, FF, FF, FF, FF, FF) },
- { BYTES_TO_WORDS_8(41, 41, 36, D0, 8C, 5E, D2, BF),
- BYTES_TO_WORDS_8(3B, A0, 48, AF, E6, DC, AE, BA),
- BYTES_TO_WORDS_8(FE, FF, FF, FF, FF, FF, FF, FF),
- BYTES_TO_WORDS_8(FF, FF, FF, FF, FF, FF, FF, FF) },
- { BYTES_TO_WORDS_8(98, 17, F8, 16, 5B, 81, F2, 59),
- BYTES_TO_WORDS_8(D9, 28, CE, 2D, DB, FC, 9B, 02),
- BYTES_TO_WORDS_8(07, 0B, 87, CE, 95, 62, A0, 55),
- BYTES_TO_WORDS_8(AC, BB, DC, F9, 7E, 66, BE, 79),
-
- BYTES_TO_WORDS_8(B8, D4, 10, FB, 8F, D0, 47, 9C),
- BYTES_TO_WORDS_8(19, 54, 85, A6, 48, B4, 17, FD),
- BYTES_TO_WORDS_8(A8, 08, 11, 0E, FC, FB, A4, 5D),
- BYTES_TO_WORDS_8(65, C4, A3, 26, 77, DA, 3A, 48) },
- { BYTES_TO_WORDS_8(07, 00, 00, 00, 00, 00, 00, 00),
- BYTES_TO_WORDS_8(00, 00, 00, 00, 00, 00, 00, 00),
- BYTES_TO_WORDS_8(00, 00, 00, 00, 00, 00, 00, 00),
- BYTES_TO_WORDS_8(00, 00, 00, 00, 00, 00, 00, 00) },
- &double_jacobian_secp256k1,
- &mod_sqrt_default,
- &x_side_secp256k1,
- &vli_mmod_fast_secp256k1
-};
-
-uECC_Curve uECC_secp160r1(void) { return &curve_secp160r1; }
-uECC_Curve uECC_secp192r1(void) { return &curve_secp192r1; }
uECC_Curve uECC_secp224r1(void) { return &curve_secp224r1; }
-uECC_Curve uECC_secp256r1(void) { return &curve_secp256r1; }
-uECC_Curve uECC_secp256k1(void) { return &curve_secp256k1; }
-/* Double in place */
-static void double_jacobian_secp256k1(uECC_word_t * X1,
- uECC_word_t * Y1,
- uECC_word_t * Z1,
- uECC_Curve curve) {
- /* t1 = X, t2 = Y, t3 = Z */
- uECC_word_t t4[num_words_secp256k1];
- uECC_word_t t5[num_words_secp256k1];
-
- if (vli_isZero(Z1, num_words_secp256k1)) {
- return;
- }
-
- vli_modSquare_fast(t5, Y1, curve); /* t5 = y1^2 */
- vli_modMult_fast(t4, X1, t5, curve); /* t4 = x1*y1^2 = A */
- vli_modSquare_fast(X1, X1, curve); /* t1 = x1^2 */
- vli_modSquare_fast(t5, t5, curve); /* t5 = y1^4 */
- vli_modMult_fast(Z1, Y1, Z1, curve); /* t3 = y1*z1 = z3 */
-
- vli_modAdd(Y1, X1, X1, curve->p, num_words_secp256k1); /* t2 = 2*x1^2 */
- vli_modAdd(Y1, Y1, X1, curve->p, num_words_secp256k1); /* t2 = 3*x1^2 */
- if (vli_testBit(Y1, 0)) {
- uECC_word_t carry = vli_add(Y1, Y1, curve->p, num_words_secp256k1);
- vli_rshift1(Y1, num_words_secp256k1);
- Y1[num_words_secp256k1 - 1] |= carry << (uECC_WORD_BITS - 1);
- } else {
- vli_rshift1(Y1, num_words_secp256k1);
- }
- /* t2 = 3/2*(x1^2) = B */
-
- vli_modSquare_fast(X1, Y1, curve); /* t1 = B^2 */
- vli_modSub(X1, X1, t4, curve->p, num_words_secp256k1); /* t1 = B^2 - A */
- vli_modSub(X1, X1, t4, curve->p, num_words_secp256k1); /* t1 = B^2 - 2A = x3 */
-
- vli_modSub(t4, t4, X1, curve->p, num_words_secp256k1); /* t4 = A - x3 */
- vli_modMult_fast(Y1, Y1, t4, curve); /* t2 = B * (A - x3) */
- vli_modSub(Y1, Y1, t5, curve->p, num_words_secp256k1); /* t2 = B * (A - x3) - y1^4 = y3 */
-}
-
-static void double_jacobian_default(uECC_word_t * X1,
- uECC_word_t * Y1,
- uECC_word_t * Z1,
- uECC_Curve curve) {
- /* t1 = X, t2 = Y, t3 = Z */
- uECC_word_t t4[uECC_MAX_WORDS];
- uECC_word_t t5[uECC_MAX_WORDS];
-
- if (vli_isZero(Z1, curve->num_words)) {
- return;
- }
-
- vli_modSquare_fast(t4, Y1, curve); /* t4 = y1^2 */
- vli_modMult_fast(t5, X1, t4, curve); /* t5 = x1*y1^2 = A */
- vli_modSquare_fast(t4, t4, curve); /* t4 = y1^4 */
- vli_modMult_fast(Y1, Y1, Z1, curve); /* t2 = y1*z1 = z3 */
- vli_modSquare_fast(Z1, Z1, curve); /* t3 = z1^2 */
-
- vli_modAdd(X1, X1, Z1, curve->p, curve->num_words); /* t1 = x1 + z1^2 */
- vli_modAdd(Z1, Z1, Z1, curve->p, curve->num_words); /* t3 = 2*z1^2 */
- vli_modSub(Z1, X1, Z1, curve->p, curve->num_words); /* t3 = x1 - z1^2 */
- vli_modMult_fast(X1, X1, Z1, curve); /* t1 = x1^2 - z1^4 */
-
- vli_modAdd(Z1, X1, X1, curve->p, curve->num_words); /* t3 = 2*(x1^2 - z1^4) */
- vli_modAdd(X1, X1, Z1, curve->p, curve->num_words); /* t1 = 3*(x1^2 - z1^4) */
- if (vli_testBit(X1, 0)) {
- uECC_word_t l_carry = vli_add(X1, X1, curve->p, curve->num_words);
- vli_rshift1(X1, curve->num_words);
- X1[curve->num_words - 1] |= l_carry << (uECC_WORD_BITS - 1);
- } else {
- vli_rshift1(X1, curve->num_words);
- }
- /* t1 = 3/2*(x1^2 - z1^4) = B */
-
- vli_modSquare_fast(Z1, X1, curve); /* t3 = B^2 */
- vli_modSub(Z1, Z1, t5, curve->p, curve->num_words); /* t3 = B^2 - A */
- vli_modSub(Z1, Z1, t5, curve->p, curve->num_words); /* t3 = B^2 - 2A = x3 */
- vli_modSub(t5, t5, Z1, curve->p, curve->num_words); /* t5 = A - x3 */
- vli_modMult_fast(X1, X1, t5, curve); /* t1 = B * (A - x3) */
- vli_modSub(t4, X1, t4, curve->p, curve->num_words); /* t4 = B * (A - x3) - y1^4 = y3 */
-
- vli_set(X1, Z1, curve->num_words);
- vli_set(Z1, Y1, curve->num_words);
- vli_set(Y1, t4, curve->num_words);
-}
-
-/* Compute a = sqrt(a) (mod curve_p). */
-static void mod_sqrt_default(uECC_word_t *a, uECC_Curve curve) {
- bitcount_t i;
- uECC_word_t p1[uECC_MAX_WORDS] = {1};
- uECC_word_t l_result[uECC_MAX_WORDS] = {1};
-
- /* When curve->p == 3 (mod 4), we can compute
- sqrt(a) = a^((curve->p + 1) / 4) (mod curve->p). */
- vli_add(p1, curve->p, p1, curve->num_words); /* p1 = curve_p + 1 */
- for (i = vli_numBits(p1, curve->num_words) - 1; i > 1; --i) {
- vli_modSquare_fast(l_result, l_result, curve);
- if (vli_testBit(p1, i)) {
- vli_modMult_fast(l_result, l_result, a, curve);
- }
- }
- vli_set(a, l_result, curve->num_words);
-}
/* Routine 3.2.4 RS; from http://www.nsa.gov/ia/_files/nist-routines.pdf */
static void mod_sqrt_secp224r1_rs(uECC_word_t *d1,
@@ -437,213 +552,11 @@
vli_modMult_fast(a, d0, f1, &curve_secp224r1); /* a <-- d0 / e0 */
}
-/* Computes result = x^3 + ax + b. result must not overlap x. */
-static void x_side_default(uECC_word_t *result, const uECC_word_t *x, uECC_Curve curve) {
- uECC_word_t _3[uECC_MAX_WORDS] = {3}; /* -a = 3 */
-
- vli_modSquare_fast(result, x, curve); /* r = x^2 */
- vli_modSub(result, result, _3, curve->p, curve->num_words); /* r = x^2 - 3 */
- vli_modMult_fast(result, result, x, curve); /* r = x^3 - 3x */
- vli_modAdd(result, result, curve->b, curve->p, curve->num_words); /* r = x^3 - 3x + b */
-}
-
-/* Computes result = x^3 + b. result must not overlap x. */
-static void x_side_secp256k1(uECC_word_t *result, const uECC_word_t *x, uECC_Curve curve) {
- vli_modSquare_fast(result, x, curve); /* r = x^2 */
- vli_modMult_fast(result, result, x, curve); /* r = x^3 */
- vli_modAdd(result, result, curve->b, curve->p, num_words_secp256k1); /* r = x^3 + b */
-}
-
-/* Computes result = product % curve_p
- see http://www.isys.uni-klu.ac.at/PDF/2001-0126-MT.pdf page 354
-
- Note that this only works if log2(omega) < log2(p) / 2 */
-static void omega_mult_secp160r1(uECC_word_t *result, const uECC_word_t *right);
-#if uECC_WORD_SIZE == 8
-static void vli_mmod_fast_secp160r1(uECC_word_t *result, uECC_word_t *product) {
- uECC_word_t tmp[2 * num_words_secp160r1];
- uECC_word_t copy;
-
- vli_clear(tmp, 2 * num_words_secp160r1);
-
- omega_mult_secp160r1(tmp, product + num_words_secp160r1 - 1); /* (Rq, q) = q * c */
-
- product[num_words_secp160r1 - 1] &= 0xffffffff;
- copy = tmp[num_words_secp160r1 - 1];
- tmp[num_words_secp160r1 - 1] &= 0xffffffff;
- vli_add(result, product, tmp, num_words_secp160r1); /* (C, r) = r + q */
- vli_clear(product, num_words_secp160r1);
- tmp[num_words_secp160r1 - 1] = copy;
- omega_mult_secp160r1(product, tmp + num_words_secp160r1 - 1); /* Rq*c */
- vli_add(result, result, product, num_words_secp160r1); /* (C1, r) = r + Rq*c */
-
- while (vli_cmp(result, curve_secp160r1.p, num_words_secp160r1) > 0) {
- vli_sub(result, result, curve_secp160r1.p, num_words_secp160r1);
- }
-}
-
-static void omega_mult_secp160r1(uint64_t *result, const uint64_t *right) {
- uint32_t carry;
- unsigned i;
-
- /* Multiply by (2^31 + 1). */
- carry = 0;
- for (i = 0; i < num_words_secp160r1; ++i) {
- uint64_t tmp = (right[i] >> 32) | (right[i + 1] << 32);
- result[i] = (tmp << 31) + tmp + carry;
- carry = (tmp >> 33) + (result[i] < tmp || (carry && result[i] == tmp));
- }
- result[i] = carry;
-}
-#else
-static void vli_mmod_fast_secp160r1(uECC_word_t *result, uECC_word_t *product) {
- uECC_word_t tmp[2 * num_words_secp160r1];
- uECC_word_t carry;
-
- vli_clear(tmp, 2 * num_words_secp160r1);
-
- omega_mult_secp160r1(tmp, product + num_words_secp160r1); /* (Rq, q) = q * c */
-
- carry = vli_add(result, product, tmp, num_words_secp160r1); /* (C, r) = r + q */
- vli_clear(product, num_words_secp160r1);
- omega_mult_secp160r1(product, tmp + num_words_secp160r1); /* Rq*c */
- carry += vli_add(result, result, product, num_words_secp160r1); /* (C1, r) = r + Rq*c */
-
- while (carry > 0) {
- --carry;
- vli_sub(result, result, curve_secp160r1.p, num_words_secp160r1);
- }
- if (vli_cmp(result, curve_secp160r1.p, num_words_secp160r1) > 0) {
- vli_sub(result, result, curve_secp160r1.p, num_words_secp160r1);
- }
-}
-#endif
-
-#if uECC_WORD_SIZE == 1
-static void omega_mult_secp160r1(uint8_t *result, const uint8_t *right) {
- uint8_t carry;
- uint8_t i;
-
- /* Multiply by (2^31 + 1). */
- vli_set(result + 4, right, num_words_secp160r1); /* 2^32 */
- vli_rshift1(result + 4, num_words_secp160r1); /* 2^31 */
- result[3] = right[0] << 7; /* get last bit from shift */
-
- carry = vli_add(result, result, right, num_words_secp160r1); /* 2^31 + 1 */
- for (i = num_words_secp160r1; carry; ++i) {
- uint16_t sum = (uint16_t)result[i] + carry;
- result[i] = (uint8_t)sum;
- carry = sum >> 8;
- }
-}
-#elif uECC_WORD_SIZE == 4
-static void omega_mult_secp160r1(uint32_t *result, const uint32_t *right) {
- uint32_t carry;
- unsigned i;
-
- /* Multiply by (2^31 + 1). */
- vli_set(result + 1, right, num_words_secp160r1); /* 2^32 */
- vli_rshift1(result + 1, num_words_secp160r1); /* 2^31 */
- result[0] = right[0] << 31; /* get last bit from shift */
-
- carry = vli_add(result, result, right, num_words_secp160r1); /* 2^31 + 1 */
- for (i = num_words_secp160r1; carry; ++i) {
- uint64_t sum = (uint64_t)result[i] + carry;
- result[i] = (uint32_t)sum;
- carry = sum >> 32;
- }
-}
-#endif /* uECC_WORD_SIZE */
-
-/* Computes result = product % curve_p.
- See algorithm 5 and 6 from http://www.isys.uni-klu.ac.at/PDF/2001-0126-MT.pdf */
-#if uECC_WORD_SIZE == 1
-static void vli_mmod_fast_secp192r1(uint8_t *result, uint8_t *product) {
- uint8_t tmp[num_words_secp192r1];
- uint8_t carry;
-
- vli_set(result, product, num_words_secp192r1);
-
- vli_set(tmp, &product[24], num_words_secp192r1);
- carry = vli_add(result, result, tmp, num_words_secp192r1);
-
- tmp[0] = tmp[1] = tmp[2] = tmp[3] = tmp[4] = tmp[5] = tmp[6] = tmp[7] = 0;
- tmp[8] = product[24]; tmp[9] = product[25]; tmp[10] = product[26]; tmp[11] = product[27];
- tmp[12] = product[28]; tmp[13] = product[29]; tmp[14] = product[30]; tmp[15] = product[31];
- tmp[16] = product[32]; tmp[17] = product[33]; tmp[18] = product[34]; tmp[19] = product[35];
- tmp[20] = product[36]; tmp[21] = product[37]; tmp[22] = product[38]; tmp[23] = product[39];
- carry += vli_add(result, result, tmp, num_words_secp192r1);
-
- tmp[0] = tmp[8] = product[40];
- tmp[1] = tmp[9] = product[41];
- tmp[2] = tmp[10] = product[42];
- tmp[3] = tmp[11] = product[43];
- tmp[4] = tmp[12] = product[44];
- tmp[5] = tmp[13] = product[45];
- tmp[6] = tmp[14] = product[46];
- tmp[7] = tmp[15] = product[47];
- tmp[16] = tmp[17] = tmp[18] = tmp[19] = tmp[20] = tmp[21] = tmp[22] = tmp[23] = 0;
- carry += vli_add(result, result, tmp, num_words_secp192r1);
-
- while (carry || vli_cmp(curve_secp192r1.p, result, num_words_secp192r1) != 1) {
- carry -= vli_sub(result, result, curve_secp192r1.p, num_words_secp192r1);
- }
-}
-#elif uECC_WORD_SIZE == 4
-static void vli_mmod_fast_secp192r1(uint32_t *result, uint32_t *product) {
- uint32_t tmp[num_words_secp192r1];
- int carry;
-
- vli_set(result, product, num_words_secp192r1);
-
- vli_set(tmp, &product[6], num_words_secp192r1);
- carry = vli_add(result, result, tmp, num_words_secp192r1);
-
- tmp[0] = tmp[1] = 0;
- tmp[2] = product[6];
- tmp[3] = product[7];
- tmp[4] = product[8];
- tmp[5] = product[9];
- carry += vli_add(result, result, tmp, num_words_secp192r1);
-
- tmp[0] = tmp[2] = product[10];
- tmp[1] = tmp[3] = product[11];
- tmp[4] = tmp[5] = 0;
- carry += vli_add(result, result, tmp, num_words_secp192r1);
-
- while (carry || vli_cmp(curve_secp192r1.p, result, num_words_secp192r1) != 1) {
- carry -= vli_sub(result, result, curve_secp192r1.p, num_words_secp192r1);
- }
-}
-#else
-static void vli_mmod_fast_secp192r1(uint64_t *result, uint64_t *product) {
- uint64_t tmp[num_words_secp192r1];
- int carry;
-
- vli_set(result, product, num_words_secp192r1);
-
- vli_set(tmp, &product[3], num_words_secp192r1);
- carry = vli_add(result, result, tmp, num_words_secp192r1);
-
- tmp[0] = 0;
- tmp[1] = product[3];
- tmp[2] = product[4];
- carry += vli_add(result, result, tmp, num_words_secp192r1);
-
- tmp[0] = tmp[1] = product[5];
- tmp[2] = 0;
- carry += vli_add(result, result, tmp, num_words_secp192r1);
-
- while (carry || vli_cmp(curve_secp192r1.p, result, num_words_secp192r1) != 1) {
- carry -= vli_sub(result, result, curve_secp192r1.p, num_words_secp192r1);
- }
-}
-#endif /* uECC_WORD_SIZE */
-
+#if (uECC_OPTIMIZATION_LEVEL > 0)
/* Computes result = product % curve_p
from http://www.nsa.gov/ia/_files/nist-routines.pdf */
#if uECC_WORD_SIZE == 1
-void vli_mmod_fast_secp224r1(uint8_t *result, uint8_t *product) {
+static void vli_mmod_fast_secp224r1(uint8_t *result, uint8_t *product) {
uint8_t tmp[num_words_secp224r1];
int8_t carry;
@@ -698,7 +611,7 @@
}
}
#elif uECC_WORD_SIZE == 4
-void vli_mmod_fast_secp224r1(uint32_t *result, uint32_t *product)
+static void vli_mmod_fast_secp224r1(uint32_t *result, uint32_t *product)
{
uint32_t tmp[num_words_secp224r1];
int carry;
@@ -749,7 +662,7 @@
}
}
#else
-void vli_mmod_fast_secp224r1(uint64_t *result, uint64_t *product)
+static void vli_mmod_fast_secp224r1(uint64_t *result, uint64_t *product)
{
uint64_t tmp[num_words_secp224r1];
int carry = 0;
@@ -795,7 +708,53 @@
}
}
#endif /* uECC_WORD_SIZE */
+#endif /* (uECC_OPTIMIZATION_LEVEL > 0) */
+#endif /* uECC_SUPPORTS_secp224r1 */
+
+#if uECC_SUPPORTS_secp256r1
+
+#if (uECC_OPTIMIZATION_LEVEL > 0)
+static void vli_mmod_fast_secp256r1(uECC_word_t *result, uECC_word_t *product);
+#endif
+
+static const struct uECC_Curve_t curve_secp256r1 = {
+ num_words_secp256r1,
+ num_words_secp256r1,
+ num_bytes_secp256r1,
+ { BYTES_TO_WORDS_8(FF, FF, FF, FF, FF, FF, FF, FF),
+ BYTES_TO_WORDS_8(FF, FF, FF, FF, 00, 00, 00, 00),
+ BYTES_TO_WORDS_8(00, 00, 00, 00, 00, 00, 00, 00),
+ BYTES_TO_WORDS_8(01, 00, 00, 00, FF, FF, FF, FF) },
+ { BYTES_TO_WORDS_8(51, 25, 63, FC, C2, CA, B9, F3),
+ BYTES_TO_WORDS_8(84, 9E, 17, A7, AD, FA, E6, BC),
+ BYTES_TO_WORDS_8(FF, FF, FF, FF, FF, FF, FF, FF),
+ BYTES_TO_WORDS_8(00, 00, 00, 00, FF, FF, FF, FF) },
+ { BYTES_TO_WORDS_8(96, C2, 98, D8, 45, 39, A1, F4),
+ BYTES_TO_WORDS_8(A0, 33, EB, 2D, 81, 7D, 03, 77),
+ BYTES_TO_WORDS_8(F2, 40, A4, 63, E5, E6, BC, F8),
+ BYTES_TO_WORDS_8(47, 42, 2C, E1, F2, D1, 17, 6B),
+
+ BYTES_TO_WORDS_8(F5, 51, BF, 37, 68, 40, B6, CB),
+ BYTES_TO_WORDS_8(CE, 5E, 31, 6B, 57, 33, CE, 2B),
+ BYTES_TO_WORDS_8(16, 9E, 0F, 7C, 4A, EB, E7, 8E),
+ BYTES_TO_WORDS_8(9B, 7F, 1A, FE, E2, 42, E3, 4F) },
+ { BYTES_TO_WORDS_8(4B, 60, D2, 27, 3E, 3C, CE, 3B),
+ BYTES_TO_WORDS_8(F6, B0, 53, CC, B0, 06, 1D, 65),
+ BYTES_TO_WORDS_8(BC, 86, 98, 76, 55, BD, EB, B3),
+ BYTES_TO_WORDS_8(E7, 93, 3A, AA, D8, 35, C6, 5A) },
+ &double_jacobian_default,
+ &mod_sqrt_default,
+ &x_side_default,
+#if (uECC_OPTIMIZATION_LEVEL > 0)
+ &vli_mmod_fast_secp256r1
+#endif
+};
+
+uECC_Curve uECC_secp256r1(void) { return &curve_secp256r1; }
+
+
+#if (uECC_OPTIMIZATION_LEVEL > 0)
/* Computes result = product % curve_p
from http://www.nsa.gov/ia/_files/nist-routines.pdf */
#if uECC_WORD_SIZE == 1
@@ -1077,13 +1036,111 @@
}
}
#endif /* uECC_WORD_SIZE */
+#endif /* (uECC_OPTIMIZATION_LEVEL > 0) */
+#endif /* uECC_SUPPORTS_secp256r1 */
+
+#if uECC_SUPPORTS_secp256k1
+
+static void double_jacobian_secp256k1(uECC_word_t * X1,
+ uECC_word_t * Y1,
+ uECC_word_t * Z1,
+ uECC_Curve curve);
+static void x_side_secp256k1(uECC_word_t *result, const uECC_word_t *x, uECC_Curve curve);
+#if (uECC_OPTIMIZATION_LEVEL > 0)
+static void vli_mmod_fast_secp256k1(uECC_word_t *result, uECC_word_t *product);
+#endif
+
+static const struct uECC_Curve_t curve_secp256k1 = {
+ num_words_secp256k1,
+ num_words_secp256k1,
+ num_bytes_secp256k1,
+ { BYTES_TO_WORDS_8(2F, FC, FF, FF, FE, FF, FF, FF),
+ BYTES_TO_WORDS_8(FF, FF, FF, FF, FF, FF, FF, FF),
+ BYTES_TO_WORDS_8(FF, FF, FF, FF, FF, FF, FF, FF),
+ BYTES_TO_WORDS_8(FF, FF, FF, FF, FF, FF, FF, FF) },
+ { BYTES_TO_WORDS_8(41, 41, 36, D0, 8C, 5E, D2, BF),
+ BYTES_TO_WORDS_8(3B, A0, 48, AF, E6, DC, AE, BA),
+ BYTES_TO_WORDS_8(FE, FF, FF, FF, FF, FF, FF, FF),
+ BYTES_TO_WORDS_8(FF, FF, FF, FF, FF, FF, FF, FF) },
+ { BYTES_TO_WORDS_8(98, 17, F8, 16, 5B, 81, F2, 59),
+ BYTES_TO_WORDS_8(D9, 28, CE, 2D, DB, FC, 9B, 02),
+ BYTES_TO_WORDS_8(07, 0B, 87, CE, 95, 62, A0, 55),
+ BYTES_TO_WORDS_8(AC, BB, DC, F9, 7E, 66, BE, 79),
+
+ BYTES_TO_WORDS_8(B8, D4, 10, FB, 8F, D0, 47, 9C),
+ BYTES_TO_WORDS_8(19, 54, 85, A6, 48, B4, 17, FD),
+ BYTES_TO_WORDS_8(A8, 08, 11, 0E, FC, FB, A4, 5D),
+ BYTES_TO_WORDS_8(65, C4, A3, 26, 77, DA, 3A, 48) },
+ { BYTES_TO_WORDS_8(07, 00, 00, 00, 00, 00, 00, 00),
+ BYTES_TO_WORDS_8(00, 00, 00, 00, 00, 00, 00, 00),
+ BYTES_TO_WORDS_8(00, 00, 00, 00, 00, 00, 00, 00),
+ BYTES_TO_WORDS_8(00, 00, 00, 00, 00, 00, 00, 00) },
+ &double_jacobian_secp256k1,
+ &mod_sqrt_default,
+ &x_side_secp256k1,
+#if (uECC_OPTIMIZATION_LEVEL > 0)
+ &vli_mmod_fast_secp256k1
+#endif
+};
+
+uECC_Curve uECC_secp256k1(void) { return &curve_secp256k1; }
+
+
+/* Double in place */
+static void double_jacobian_secp256k1(uECC_word_t * X1,
+ uECC_word_t * Y1,
+ uECC_word_t * Z1,
+ uECC_Curve curve) {
+ /* t1 = X, t2 = Y, t3 = Z */
+ uECC_word_t t4[num_words_secp256k1];
+ uECC_word_t t5[num_words_secp256k1];
+
+ if (vli_isZero(Z1, num_words_secp256k1)) {
+ return;
+ }
+
+ vli_modSquare_fast(t5, Y1, curve); /* t5 = y1^2 */
+ vli_modMult_fast(t4, X1, t5, curve); /* t4 = x1*y1^2 = A */
+ vli_modSquare_fast(X1, X1, curve); /* t1 = x1^2 */
+ vli_modSquare_fast(t5, t5, curve); /* t5 = y1^4 */
+ vli_modMult_fast(Z1, Y1, Z1, curve); /* t3 = y1*z1 = z3 */
+
+ vli_modAdd(Y1, X1, X1, curve->p, num_words_secp256k1); /* t2 = 2*x1^2 */
+ vli_modAdd(Y1, Y1, X1, curve->p, num_words_secp256k1); /* t2 = 3*x1^2 */
+ if (vli_testBit(Y1, 0)) {
+ uECC_word_t carry = vli_add(Y1, Y1, curve->p, num_words_secp256k1);
+ vli_rshift1(Y1, num_words_secp256k1);
+ Y1[num_words_secp256k1 - 1] |= carry << (uECC_WORD_BITS - 1);
+ } else {
+ vli_rshift1(Y1, num_words_secp256k1);
+ }
+ /* t2 = 3/2*(x1^2) = B */
+
+ vli_modSquare_fast(X1, Y1, curve); /* t1 = B^2 */
+ vli_modSub(X1, X1, t4, curve->p, num_words_secp256k1); /* t1 = B^2 - A */
+ vli_modSub(X1, X1, t4, curve->p, num_words_secp256k1); /* t1 = B^2 - 2A = x3 */
+
+ vli_modSub(t4, t4, X1, curve->p, num_words_secp256k1); /* t4 = A - x3 */
+ vli_modMult_fast(Y1, Y1, t4, curve); /* t2 = B * (A - x3) */
+ vli_modSub(Y1, Y1, t5, curve->p, num_words_secp256k1); /* t2 = B * (A - x3) - y1^4 = y3 */
+}
+
+/* Computes result = x^3 + b. result must not overlap x. */
+static void x_side_secp256k1(uECC_word_t *result, const uECC_word_t *x, uECC_Curve curve) {
+ vli_modSquare_fast(result, x, curve); /* r = x^2 */
+ vli_modMult_fast(result, result, x, curve); /* r = x^3 */
+ vli_modAdd(result, result, curve->b, curve->p, num_words_secp256k1); /* r = x^3 + b */
+}
+
+#if (uECC_OPTIMIZATION_LEVEL > 0)
static void omega_mult_secp256k1(uECC_word_t *result, const uECC_word_t *right);
static void vli_mmod_fast_secp256k1(uECC_word_t *result, uECC_word_t *product) {
uECC_word_t tmp[2 * num_words_secp256k1];
uECC_word_t carry;
- vli_clear(tmp, 2 * num_words_secp256k1);
+ vli_clear(tmp, num_words_secp256k1);
+ vli_clear(tmp + num_words_secp256k1, num_words_secp256k1);
omega_mult_secp256k1(tmp, product + num_words_secp256k1); /* (Rq, q) = q * c */
@@ -1163,3 +1220,8 @@
result[num_words_secp256k1] = r0;
}
#endif /* uECC_WORD_SIZE */
+#endif /* (uECC_OPTIMIZATION_LEVEL > 0) */
+
+#endif /* uECC_SUPPORTS_secp256k1 */
+
+#endif /* _UECC_CURVE_SPECIFIC_H_ */
diff --git a/platform-specific.inc b/platform-specific.inc
index bf15ce2..207ac8b 100644
--- a/platform-specific.inc
+++ b/platform-specific.inc
@@ -1,3 +1,8 @@
+/* Copyright 2015, Kenneth MacKay. Licensed under the BSD 2-clause license. */
+
+#ifndef _UECC_PLATFORM_SPECIFIC_H_
+#define _UECC_PLATFORM_SPECIFIC_H_
+
#ifndef uECC_PLATFORM
#if __AVR__
#define uECC_PLATFORM uECC_avr
@@ -32,29 +37,16 @@
#error "Unsupported value for uECC_WORD_SIZE"
#endif
-/* Optimization options... */
-#define uECC_asm_none 0
-#define uECC_asm_small 1
-#define uECC_asm_fast 2
-#ifndef uECC_ASM
- #define uECC_ASM uECC_asm_small
-#endif
-
-#ifndef uECC_SQUARE_FUNC
- #define uECC_SQUARE_FUNC 0
-#endif
-/* --- end --- */
-
-#if (uECC_ASM && (uECC_PLATFORM == uECC_avr) && (uECC_WORD_SIZE != 1))
- #pragma message ("uECC_WORD_SIZE must be 1 when using AVR asm")
+#if ((uECC_PLATFORM == uECC_avr) && (uECC_WORD_SIZE != 1))
+ #pragma message ("uECC_WORD_SIZE must be 1 for AVR")
#undef uECC_WORD_SIZE
#define uECC_WORD_SIZE 1
#endif
-#if (uECC_ASM && \
- (uECC_PLATFORM == uECC_arm || uECC_PLATFORM == uECC_arm_thumb) && \
+#if ((uECC_PLATFORM == uECC_arm || uECC_PLATFORM == uECC_arm_thumb || \
+ uECC_PLATFORM == uECC_arm_thumb2) && \
(uECC_WORD_SIZE != 4))
- #pragma message ("uECC_WORD_SIZE must be 4 when using ARM asm")
+ #pragma message ("uECC_WORD_SIZE must be 4 for ARM")
#undef uECC_WORD_SIZE
#define uECC_WORD_SIZE 4
#endif
@@ -172,15 +164,6 @@
return 0;
}
-#endif
+#endif /* platform */
-// #ifdef __GNUC__ /* Only support GCC inline asm for now */
-// #if (uECC_ASM && (uECC_PLATFORM == uECC_avr))
-// #include "asm_avr.inc"
-// #endif
-//
-// #if (uECC_ASM && (uECC_PLATFORM == uECC_arm || uECC_PLATFORM == uECC_arm_thumb || \
-// uECC_PLATFORM == uECC_arm_thumb2))
-// #include "asm_arm.inc"
-// #endif
-// #endif
+#endif /* _UECC_PLATFORM_SPECIFIC_H_ */
diff --git a/uECC.c b/uECC.c
index 2820f6c..4c32e54 100644
--- a/uECC.c
+++ b/uECC.c
@@ -4,14 +4,30 @@
#define MAX_TRIES 64
+#if uECC_SUPPORTS_secp160r1
+ #define uECC_MAX_BYTES 21 /* Due to the size of curve_n. */
+#endif
+#if uECC_SUPPORTS_secp192r1
+ #undef uECC_MAX_BYTES
+ #define uECC_MAX_BYTES 24
+#endif
+#if uECC_SUPPORTS_secp224r1
+ #undef uECC_MAX_BYTES
+ #define uECC_MAX_BYTES 28
+#endif
+#if (uECC_SUPPORTS_secp256r1 || uECC_SUPPORTS_secp256k1)
+ #undef uECC_MAX_BYTES
+ #define uECC_MAX_BYTES 32
+#endif
+
#include "platform-specific.inc"
#if (uECC_WORD_SIZE == 1)
- #define uECC_MAX_WORDS 32
+ #define uECC_MAX_WORDS uECC_MAX_BYTES
#elif (uECC_WORD_SIZE == 4)
- #define uECC_MAX_WORDS 8
+ #define uECC_MAX_WORDS ((uECC_MAX_BYTES + 3) / 4)
#elif (uECC_WORD_SIZE == 8)
- #define uECC_MAX_WORDS 4
+ #define uECC_MAX_WORDS ((uECC_MAX_BYTES + 7) / 8)
#endif /* uECC_WORD_SIZE */
struct uECC_Curve_t {
@@ -28,14 +44,24 @@
uECC_Curve curve);
void (*mod_sqrt)(uECC_word_t *a, uECC_Curve curve);
void (*x_side)(uECC_word_t *result, const uECC_word_t *x, uECC_Curve curve);
+#if (uECC_OPTIMIZATION_LEVEL > 0)
void (*mmod_fast)(uECC_word_t *result, uECC_word_t *product);
+#endif
};
-#if (uECC_ASM == uECC_asm_small)
- #if (uECC_PLATFORM == uECC_arm || uECC_PLATFORM == uECC_arm_thumb || \
- uECC_PLATFORM == uECC_arm_thumb2)
- #include "asm_arm_small.inc"
+#if (uECC_OPTIMIZATION_LEVEL > 0) && (uECC_OPTIMIZATION_LEVEL % 2 == 0)
+ #define uECC_SQUARE_FUNC 1
+#else
+ #define uECC_SQUARE_FUNC 0
+#endif
+
+#if (uECC_PLATFORM == uECC_arm || uECC_PLATFORM == uECC_arm_thumb || \
+ uECC_PLATFORM == uECC_arm_thumb2)
+ #if (uECC_OPTIMIZATION_LEVEL > 2)
+ #include "asm_arm_mult_square.inc"
+ #include "asm_arm_fast.inc"
#endif
+ #include "asm_arm_small.inc"
#endif
static uECC_RNG_Function g_rng_function = &default_RNG;
@@ -190,7 +216,7 @@
i2 += (i0 >> 32);
i2 += i1;
- if (i2 < i1) { // overflow
+ if (i2 < i1) { /* overflow */
i3 += 0x100000000ull;
}
@@ -266,7 +292,7 @@
i2 += (i0 >> 32);
i2 += i1;
if (i2 < i1)
- { // overflow
+ { /* overflow */
i3 += 0x100000000ull;
}
@@ -365,7 +391,7 @@
uECC_word_t *v[2] = {tmp, product};
uECC_word_t index;
- // Shift mod so its highest set bit is at the maximum position.
+ /* Shift mod so its highest set bit is at the maximum position. */
bitcount_t shift = (num_words * 2 * uECC_WORD_BITS) - vli_numBits(mod, num_words);
wordcount_t word_shift = shift / uECC_WORD_BITS;
wordcount_t bit_shift = shift % uECC_WORD_BITS;
@@ -381,9 +407,19 @@
}
for (index = 1; shift >= 0; --shift) {
- uECC_word_t borrow = vli_sub(v[1 - index], v[index], mod_multiple, num_words * 2);
+ uECC_word_t borrow = 0;
+ wordcount_t i;
+ for (i = 0; i < num_words * 2; ++i) {
+ uECC_word_t diff = v[index][i] - mod_multiple[i] - borrow;
+ if (diff != v[index][i]) {
+ borrow = (diff > v[index][i]);
+ }
+ v[1 - index][i] = diff;
+ }
index = !(index ^ borrow); /* Swap the index if there was no borrow */
- vli_rshift1(mod_multiple, num_words * 2);
+ vli_rshift1(mod_multiple, num_words);
+ mod_multiple[num_words - 1] |= mod_multiple[num_words] << (uECC_WORD_BITS - 1);
+ vli_rshift1(mod_multiple + num_words, num_words);
}
vli_set(result, v[index], num_words);
}
@@ -399,6 +435,8 @@
vli_mmod(result, product, mod, num_words);
}
+#if (uECC_OPTIMIZATION_LEVEL > 0)
+
static void vli_modMult_fast(uECC_word_t *result,
const uECC_word_t *left,
const uECC_word_t *right,
@@ -408,6 +446,13 @@
curve->mmod_fast(result, product);
}
+#else /* (uECC_OPTIMIZATION_LEVEL > 0) */
+
+#define vli_modMult_fast(result, left, right, curve) \
+ vli_modMult((result), (left), (right), (curve)->p, (curve)->num_words)
+
+#endif /* (uECC_OPTIMIZATION_LEVEL > 0) */
+
#if uECC_SQUARE_FUNC
/* Computes result = left^2 % mod. */
@@ -420,6 +465,8 @@
vli_mmod(result, product, mod, num_words);
}
+#if (uECC_OPTIMIZATION_LEVEL > 0)
+
static void vli_modSquare_fast(uECC_word_t *result,
const uECC_word_t *left,
uECC_Curve curve) {
@@ -428,6 +475,13 @@
curve->mmod_fast(result, product);
}
+#else /* (uECC_OPTIMIZATION_LEVEL > 0) */
+
+#define vli_modSquare_fast(result, left, curve) \
+ vli_modSquare((result), (left), (curve)->p, (curve)->num_words)
+
+#endif /* (uECC_OPTIMIZATION_LEVEL > 0) */
+
#else /* uECC_SQUARE_FUNC */
#define vli_modSquare(result, left, mod, num_words) \
@@ -508,7 +562,8 @@
/* Returns 1 if 'point' is the point at infinity, 0 otherwise. */
static cmpresult_t EccPoint_isZero(const uECC_word_t *point, uECC_Curve curve) {
- return vli_isZero(point, curve->num_words * 2);
+ return vli_isZero(point, curve->num_words) &&
+ vli_isZero(point + curve->num_words, curve->num_words);
}
/* Point multiplication algorithm using Montgomery's ladder with co-Z coordinates.
@@ -695,8 +750,8 @@
return 0;
}
- // Regularize the bitcount for the private key so that attackers cannot use a side channel
- // attack to learn the number of leading zeros.
+ /* Regularize the bitcount for the private key so that attackers cannot use a side channel
+ attack to learn the number of leading zeros. */
carry = regularize_k(private, tmp1, tmp2, curve);
EccPoint_mult(result, curve->G, p2[!carry], 0,
@@ -822,12 +877,12 @@
vli_bytesToNative(public, public_key, curve);
vli_bytesToNative(public + curve->num_words, public_key + curve->num_bytes, curve);
- // Regularize the bitcount for the private key so that attackers cannot use a side channel
- // attack to learn the number of leading zeros.
+ /* Regularize the bitcount for the private key so that attackers cannot use a side channel
+ attack to learn the number of leading zeros. */
carry = regularize_k(private, private, tmp, curve);
- // If an RNG function was specified, try to get a random initial Z value to improve
- // protection against side-channel attacks.
+ /* If an RNG function was specified, try to get a random initial Z value to improve
+ protection against side-channel attacks. */
if (g_rng_function != &default_RNG) {
for (tries = 0; tries < MAX_TRIES; ++tries) {
if (!generate_random_int(p2[carry], curve->num_words, curve->num_bytes * 8)) {
@@ -880,12 +935,12 @@
vli_bytesToNative(public, public_key, curve);
vli_bytesToNative(public + curve->num_words, public_key + curve->num_bytes, curve);
- // The point at infinity is invalid.
+ /* The point at infinity is invalid. */
if (EccPoint_isZero(public, curve)) {
return 0;
}
-
- // x and y must be smaller than p.
+
+ /* x and y must be smaller than p. */
if (vli_cmp(curve->p, public, curve->num_words) != 1 ||
vli_cmp(curve->p, public + curve->num_words, curve->num_words) != 1) {
return 0;
@@ -1065,8 +1120,8 @@
V[i] = 0x01;
K[i] = 0;
}
-
- // K = HMAC_K(V || 0x00 || int2octets(x) || h(m))
+
+ /* K = HMAC_K(V || 0x00 || int2octets(x) || h(m)) */
HMAC_init(hash_context, K);
V[hash_context->result_size] = 0x00;
HMAC_update(hash_context, V, hash_context->result_size + 1);
@@ -1075,8 +1130,8 @@
HMAC_finish(hash_context, K, K);
update_V(hash_context, K, V);
-
- // K = HMAC_K(V || 0x01 || int2octets(x) || h(m))
+
+ /* K = HMAC_K(V || 0x01 || int2octets(x) || h(m)) */
HMAC_init(hash_context, K);
V[hash_context->result_size] = 0x01;
HMAC_update(hash_context, V, hash_context->result_size + 1);
@@ -1106,7 +1161,7 @@
return 1;
}
- // K = HMAC_K(V || 0x00)
+ /* K = HMAC_K(V || 0x00) */
HMAC_init(hash_context, K);
V[hash_context->result_size] = 0x00;
HMAC_update(hash_context, V, hash_context->result_size + 1);
diff --git a/uECC.h b/uECC.h
index 6e416c3..0c4b092 100644
--- a/uECC.h
+++ b/uECC.h
@@ -1,7 +1,7 @@
/* Copyright 2014, Kenneth MacKay. Licensed under the BSD 2-clause license. */
-#ifndef _MICRO_ECC_H_
-#define _MICRO_ECC_H_
+#ifndef _UECC_H_
+#define _UECC_H_
#include <stdint.h>
@@ -17,11 +17,34 @@
#define uECC_arm64 6
#define uECC_avr 7
-
/* If desired, you can define uECC_WORD_SIZE as appropriate for your platform (1, 4, or 8 bytes).
If uECC_WORD_SIZE is not explicitly defined then it will be automatically set based on your
platform. */
+/* Optimization level; trade speed for code size.
+ Larger values produce code that is faster but larger.
+ Currently supported values are 0 - 4; 0 is unusably slow for most applications. */
+#ifndef uECC_OPTIMIZATION_LEVEL
+ #define uECC_OPTIMIZATION_LEVEL 1
+#endif
+
+/* Curve support selection. Set to 0 to remove that curve. */
+#ifndef uECC_SUPPORTS_secp160r1
+ #define uECC_SUPPORTS_secp160r1 1
+#endif
+#ifndef uECC_SUPPORTS_secp192r1
+ #define uECC_SUPPORTS_secp192r1 1
+#endif
+#ifndef uECC_SUPPORTS_secp224r1
+ #define uECC_SUPPORTS_secp224r1 1
+#endif
+#ifndef uECC_SUPPORTS_secp256r1
+ #define uECC_SUPPORTS_secp256r1 1
+#endif
+#ifndef uECC_SUPPORTS_secp256k1
+ #define uECC_SUPPORTS_secp256k1 1
+#endif
+
struct uECC_Curve_t;
typedef const struct uECC_Curve_t * uECC_Curve;
@@ -30,11 +53,21 @@
{
#endif
+#if uECC_SUPPORTS_secp160r1
uECC_Curve uECC_secp160r1(void);
+#endif
+#if uECC_SUPPORTS_secp192r1
uECC_Curve uECC_secp192r1(void);
+#endif
+#if uECC_SUPPORTS_secp224r1
uECC_Curve uECC_secp224r1(void);
+#endif
+#if uECC_SUPPORTS_secp256r1
uECC_Curve uECC_secp256r1(void);
+#endif
+#if uECC_SUPPORTS_secp256k1
uECC_Curve uECC_secp256k1(void);
+#endif
/* uECC_RNG_Function type
The RNG function should fill 'size' random bytes into 'dest'. It should return 1 if
@@ -261,4 +294,4 @@
} /* end of extern "C" */
#endif
-#endif /* _MICRO_ECC_H_ */
+#endif /* _UECC_H_ */