/*
 * linux/arch/arm/lib/memset.S
 *
 * Copyright (C) 1995-2000 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * ASM optimised string functions
 */
#include <linux/linkage.h>
#include <asm/assembler.h>

        .text
        .align  5
        .syntax unified
#if defined(CONFIG_SYS_THUMB_BUILD) && !defined(MEMSET_NO_THUMB_BUILD)
        .thumb
        .thumb_func
#endif
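/*
 * void *memset(void *s, int c, size_t n)
 * Entry: r0 = destination, r1 = fill byte, r2 = byte count.
 * r0 must be returned unchanged, so ip is used as the working pointer.
 */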
ENTRY(memset)
        ands    r3, r0, #3              @ 1 unaligned?
        mov     ip, r0                  @ preserve r0 as return value
        bne     6f                      @ 1
/*
 * we know that the pointer in ip is aligned to a word boundary.
 */
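/*
 * Replicate the fill byte across all four bytes of r1, then copy it
 * into r3 so the store-multiple instructions below write the pattern.
 */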
1:      orr     r1, r1, r1, lsl #8
        orr     r1, r1, r1, lsl #16
        mov     r3, r1
        cmp     r2, #16
        blt     4f

#if ! CALGN(1)+0
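/*
 * CALGN() (from asm/assembler.h) expands to its argument only on
 * configurations where cache-line aligning the destination is judged
 * worthwhile; otherwise it expands to nothing, the test above is
 * true, and this simpler variant is assembled.
 */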
/*
 * We need 2 extra registers for this loop - use r8 and the LR
 */
        stmfd   sp!, {r8, lr}
        mov     r8, r1
        mov     lr, r1
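/*
 * Main loop: four 4-register stores write 64 bytes per iteration.
 * subs sets the flags, so the "ge" stores are skipped once fewer
 * than 64 bytes remain.
 */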
2:      subs    r2, r2, #64
        stmiage ip!, {r1, r3, r8, lr}   @ 64 bytes at a time.
        stmiage ip!, {r1, r3, r8, lr}
        stmiage ip!, {r1, r3, r8, lr}
        stmiage ip!, {r1, r3, r8, lr}
        bgt     2b
        ldmfdeq sp!, {r8, pc}           @ Now <64 bytes to go.
/*
 * No need to correct the count; we're only testing bits from now on
 */
        tst     r2, #32
        stmiane ip!, {r1, r3, r8, lr}
        stmiane ip!, {r1, r3, r8, lr}
        tst     r2, #16
        stmiane ip!, {r1, r3, r8, lr}
        ldmfd   sp!, {r8, lr}
#else

/*
 * This version aligns the destination pointer in order to write
 * whole cache lines at once.
 */
        stmfd   sp!, {r4-r8, lr}
        mov     r4, r1
        mov     r5, r1
        mov     r6, r1
        mov     r7, r1
        mov     r8, r1
        mov     lr, r1

        cmp     r2, #96
        tstgt   ip, #31
        ble     3f
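/*
 * More than 96 bytes and ip not 32-byte aligned: r8 = bytes needed
 * to reach the next 32-byte boundary (a multiple of 4, since ip is
 * already word aligned).  Shifting r8 left by 28 puts bit 4 in the
 * carry flag and bit 3 in the sign flag, so the conditional stores
 * below write the 16-, 8- and 4-byte pieces of the head.
 */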
        and     r8, ip, #31
        rsb     r8, r8, #32
        sub     r2, r2, r8
        movs    r8, r8, lsl #(32 - 4)
        stmiacs ip!, {r4, r5, r6, r7}
        stmiami ip!, {r4, r5}
        tst     r8, #(1 << 30)
        mov     r8, r1
        strne   r1, [ip], #4
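/*
 * Cache-aligned main loop: two 8-register stores write 64 bytes
 * (two full 32-byte cache lines) per iteration.
 */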
3:      subs    r2, r2, #64
        stmiage ip!, {r1, r3-r8, lr}
        stmiage ip!, {r1, r3-r8, lr}
        bgt     3b
        ldmfdeq sp!, {r4-r8, pc}

        tst     r2, #32
        stmiane ip!, {r1, r3-r8, lr}
        tst     r2, #16
        stmiane ip!, {r4-r7}
        ldmfd   sp!, {r4-r8, lr}

#endif
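/*
 * Fewer than 16 bytes remain; bits 3 and 2 of the count select an
 * 8-byte and a 4-byte store.
 */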
4:      tst     r2, #8
        stmiane ip!, {r1, r3}
        tst     r2, #4
        strne   r1, [ip], #4
/*
 * When we get here, we've got less than 4 bytes to set.  We may
 * have an unaligned pointer as well.
 */
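/*
 * Note: when entered from 6f below, r2 holds count - 4 (negative),
 * but its low two bits still equal those of the original count, so
 * the byte tests here remain correct.
 */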
5:      tst     r2, #2
        strbne  r1, [ip], #1
        strbne  r1, [ip], #1
        tst     r2, #1
        strbne  r1, [ip], #1
        ret     lr
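/*
 * Unaligned start: r3 = dest & 3 (1..3).  Store 4 - r3 bytes to
 * reach word alignment, adjust the count, and rejoin the aligned
 * path at 1b; if fewer than 4 bytes were requested, finish at 5b.
 */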
6:      subs    r2, r2, #4              @ 1 do we have enough
        blt     5b                      @ 1 bytes to align with?
        cmp     r3, #2                  @ 1
        strblt  r1, [ip], #1            @ 1
        strble  r1, [ip], #1            @ 1
        strb    r1, [ip], #1            @ 1
        add     r2, r2, r3              @ 1 (r2 = r2 - (4 - r3))
        b       1b
ENDPROC(memset)