3 * Copyright (c) 2002 Sergey Chaban <serge@wildwestsoftware.com>
6 #include "arm-codegen.h"
/*
 * Emit a standard function prologue at emission pointer p, reserving
 * local_size bytes of stack for locals.  Returns the advanced pointer.
 * NOTE(review): several original lines appear elided in this view
 * (the argument-register push list is cut off mid-expression).
 */
9 arminstr_t* arm_emit_std_prologue(arminstr_t* p, unsigned int local_size) {
/* remember the caller's SP in IP before pushing anything */
10 ARM_MOV_REG_REG(p, ARMREG_IP, ARMREG_SP);
/* push the argument registers (starting at A1; rest of the mask not
   visible in this view) so they are addressable on the stack */
13 ARM_PUSH(p, (1 << ARMREG_A1)
/* push the saved SP (in IP) and the return address */
18 ARM_PUSH(p, (1U << ARMREG_IP) | (1U << ARMREG_LR));
20 if (local_size != 0) {
/* small frame: local_size fits an 8-bit immediate, subtract directly */
21 if ((local_size & (~0xFF)) == 0) {
22 ARM_SUB_REG_IMM8(p, ARMREG_SP, ARMREG_SP, local_size);
/* large frame: synthesize the constant in IP, then subtract it
   (presumably the else-arm of the check above — confirm in full source) */
25 p = arm_mov_reg_imm32(p, ARMREG_IP, local_size);
26 ARM_SUB_REG_REG(p, ARMREG_SP, ARMREG_SP, ARMREG_IP);
/* reload the saved old-SP into IP: it now lies local_size +
   sizeof(armword_t) bytes above the adjusted SP, and IP still holds
   local_size, so bump IP by one word and load [SP, IP] */
27 ARM_ADD_REG_IMM8(p, ARMREG_IP, ARMREG_IP, sizeof(armword_t));
28 ARM_LDR_REG_REG(p, ARMREG_IP, ARMREG_SP, ARMREG_IP);
/*
 * Emit a standard function epilogue: release local_size bytes of locals,
 * then pop the registers selected by pop_regs (low 10 bits) together with
 * SP and PC, restoring the caller's stack pointer and returning in a
 * single LDM.  Returns the advanced emission pointer.
 */
35 arminstr_t* arm_emit_std_epilogue(arminstr_t* p, unsigned int local_size, int pop_regs) {
36 if (local_size != 0) {
/* frame size fits an 8-bit immediate: single ADD */
37 if ((local_size & (~0xFF)) == 0) {
38 ARM_ADD_REG_IMM8(p, ARMREG_SP, ARMREG_SP, local_size);
/* otherwise materialize the constant in IP first
   (presumably the else-arm — intervening lines elided in this view) */
41 p = arm_mov_reg_imm32(p, ARMREG_IP, local_size);
42 ARM_ADD_REG_REG(p, ARMREG_SP, ARMREG_SP, ARMREG_IP);
/* no write-back: SP itself is reloaded from the saved frame, and PC in
   the register list makes this pop double as the function return */
46 ARM_POP_NWB(p, (1 << ARMREG_SP) | (1 << ARMREG_PC) | (pop_regs & 0x3FF));
/*
 * Lean variant of the standard prologue: saves only IP (old SP), LR and
 * the callee registers requested in push_regs (low 10 bits), without
 * spilling the argument registers.  Returns the advanced pointer.
 */
52 /* do not push A1-A4 */
53 arminstr_t* arm_emit_lean_prologue(arminstr_t* p, unsigned int local_size, int push_regs) {
/* remember the caller's SP in IP before pushing */
54 ARM_MOV_REG_REG(p, ARMREG_IP, ARMREG_SP);
55 /* push_regs upto R10 will be saved */
56 ARM_PUSH(p, (1U << ARMREG_IP) | (1U << ARMREG_LR) | (push_regs & 0x3FF));
58 if (local_size != 0) {
/* small frame: subtract the 8-bit immediate directly */
59 if ((local_size & (~0xFF)) == 0) {
60 ARM_SUB_REG_IMM8(p, ARMREG_SP, ARMREG_SP, local_size);
/* large frame: build the constant in IP, then subtract
   (presumably the else-arm — intervening lines elided in this view) */
63 p = arm_mov_reg_imm32(p, ARMREG_IP, local_size);
64 ARM_SUB_REG_REG(p, ARMREG_SP, ARMREG_SP, ARMREG_IP);
65 /* restore IP from stack */
/* IP still holds local_size; the saved old-SP sits one word above the
   locals, hence the +sizeof(armword_t) before the indexed load */
66 ARM_ADD_REG_IMM8(p, ARMREG_IP, ARMREG_IP, sizeof(armword_t));
67 ARM_LDR_REG_REG(p, ARMREG_IP, ARMREG_SP, ARMREG_IP);
74 /* Bit scan forward. */
/*
 * Returns 0 when val is 0; otherwise scans from the least significant
 * bit upward.  The loop leaves i as the 1-based index of the lowest set
 * bit (declarations of i/mask and the return are elided in this view —
 * presumably `return i;` follows; confirm in full source).
 */
75 int arm_bsf(armword_t val) {
79 if (val == 0) return 0;
/* empty-body loop: advances i and mask until mask hits a set bit */
80 for (i=1, mask=1; (i <= 8 * sizeof(armword_t)) && ((val & mask) == 0); ++i, mask<<=1);
/*
 * Nonzero iff val has at most one bit set.
 * NOTE(review): this also returns true for val == 0, which is not a
 * power of two; callers (e.g. is_arm_const) appear to rely on that,
 * since 0 is trivially encodable — do not "fix" without auditing them.
 */
86 int arm_is_power_of_2(armword_t val) {
87 return ((val & (val-1)) == 0);
/*
 * Classify imm32 against ARM's rotated-8-bit data-processing immediate
 * encoding (an 8-bit value rotated right by an even amount).
 */
93 * 1 - unable to represent
94 * positive even number - MOV-representable
95 * negative even number - MVN-representable
97 int calc_arm_mov_const_shift(armword_t val) {
/* try every even rotation; mask is the 8-bit window at that rotation */
101 for (shift=0; shift < 32; shift+=2) {
102 mask = ARM_SCALE(0xFF, shift);
/* val fits entirely inside the window: MOV-encodable at this shift
   (the returns inside these branches are elided in this view) */
103 if ((val & (~mask)) == 0) {
/* complement fits instead: MVN-encodable (presumably returns a
   negative encoding of the shift — confirm in full source) */
107 if (((~val) & (~mask)) == 0) {
/*
 * Nonzero iff val can be loaded with a single MOV immediate.
 * NOTE(review): lines between these statements are elided in this view;
 * presumably the shift-based check only runs when the power-of-two test
 * fails — confirm in full source.
 */
117 int is_arm_const(armword_t val) {
/* fast path: 0 or a single set bit is always encodable */
119 res = arm_is_power_of_2(val);
/* full check against the rotated-immediate encoding; shift == 1 means
   "unable", negative means MVN-only — both rejected here */
121 res = calc_arm_mov_const_shift(val);
122 res = !(res < 0 || res == 1);
/*
 * Count how many MOV/ORR instructions are needed to synthesize val,
 * peeling one aligned 8-bit chunk per step (the loop header and return
 * of `steps` are elided in this view — confirm in full source).
 */
128 int arm_const_steps(armword_t val) {
129 int shift, steps = 0;
/* even-aligned shift of the lowest set bit (bsf is 1-based, hence -1) */
132 shift = (arm_bsf(val) - 1) & (~1);
/* clear the 8-bit chunk just consumed.
   NOTE(review): `0xFF << shift` is an int shift; for shift >= 24 this
   overflows/loses bits on 32-bit int — 0xFFU would be safer; verify. */
133 val &= ~(0xFF << shift);
141 * ARM cannot load arbitrary 32-bit constants directly into registers;
142 * widely used work-around for this is to store constants into a
143 * PC-addressable pool and use LDR instruction with PC-relative address
144 * to load constant into register. Easiest way to implement this is to
145 * embed constant inside a function with unconditional branch around it.
146 * The above method is not used at the moment.
147 * This routine always emits sequence of instructions to generate
148 * requested constant. In the worst case it takes 4 instructions to
149 * synthesize a constant - 1 MOV and 3 subsequent ORRs.
151 arminstr_t* arm_mov_reg_imm32_cond(arminstr_t* p, int reg, armword_t imm32, int cond) {
/* classify imm32: even >= 0 => MOV-encodable shift, negative even =>
   MVN-encodable, 1 => neither (see calc_arm_mov_const_shift) */
155 int shift = calc_arm_mov_const_shift(imm32);
/* mask 0x80000001 isolates sign bit and bit 0: true unless shift == 1,
   i.e. the constant is a single MOV or MVN away */
157 if ((shift & 0x80000001) != 1) {
/* single MOV with rotated immediate (shift >= 0 case; the branch
   between these two emits is elided in this view) */
159 ARM_MOV_REG_IMM_COND(p, reg, imm32 >> ((32 - shift) & 31), shift, cond);
/* negative shift: emit MVN of the complemented, re-rotated value */
161 ARM_MVN_REG_IMM_COND(p, reg, (imm32 ^ (~0)) >> ((32 + 2 + shift) & 31), (-shift - 2), cond);
/* general case: pick whichever of imm32 / ~imm32 needs fewer 8-bit
   chunks (the op selection and complementing are elided in this view;
   presumably MOV+ORR vs MVN+BIC — confirm in full source) */
167 if (arm_const_steps(imm32) > arm_const_steps(~imm32)) {
/* first chunk: MOV/MVN of the lowest aligned 8 bits.
   NOTE(review): `0xFF << shift` here has the same potential int-shift
   overflow as in arm_const_steps for shift >= 24 — verify. */
173 shift = (arm_bsf(imm32) - 1) & (~1);
174 snip = imm32 & (0xFF << shift);
/* (32 - shift) >> 1 converts a left-shift amount into the 4-bit
   rotate-right field of the DPI immediate encoding */
175 ARM_EMIT(p, ARM_DEF_DPI_IMM_COND((unsigned)snip >> shift, (32 - shift) >> 1, reg, 0, 0, mov_op, cond));
/* remaining chunks: one ORR/BIC each until nothing is left */
177 while ((imm32 ^= snip) != 0) {
178 shift = (arm_bsf(imm32) - 1) & (~1);
179 snip = imm32 & (0xFF << shift);
180 ARM_EMIT(p, ARM_DEF_DPI_IMM_COND((unsigned)snip >> shift, (32 - shift) >> 1, reg, reg, 0, step_op, cond));
/*
 * Convenience wrapper: load imm32 into reg unconditionally
 * (condition code AL).  Returns the advanced emission pointer.
 */
188 arminstr_t* arm_mov_reg_imm32(arminstr_t* p, int reg, armword_t imm32) {
189 return arm_mov_reg_imm32_cond(p, reg, imm32, ARMCOND_AL);