1%def unused():
2    bkpt
3
4%def op_const():
5    /* const vAA, #+BBBBbbbb */
6    mov     r3, rINST, lsr #8           @ r3<- AA
7    FETCH r0, 1                         @ r0<- bbbb (low)
8    FETCH r1, 2                         @ r1<- BBBB (high)
9    FETCH_ADVANCE_INST 3                @ advance rPC, load rINST
10    orr     r0, r0, r1, lsl #16         @ r0<- BBBBbbbb
11    GET_INST_OPCODE ip                  @ extract opcode from rINST
12    SET_VREG r0, r3                     @ vAA<- r0
13    GOTO_OPCODE ip                      @ jump to next instruction
14
15%def op_const_16():
16    /* const/16 vAA, #+BBBB */
17    FETCH_S r0, 1                       @ r0<- ssssBBBB (sign-extended)
18    mov     r3, rINST, lsr #8           @ r3<- AA
19    FETCH_ADVANCE_INST 2                @ advance rPC, load rINST
20    SET_VREG r0, r3                     @ vAA<- r0
21    GET_INST_OPCODE ip                  @ extract opcode from rINST
22    GOTO_OPCODE ip                      @ jump to next instruction
23
24%def op_const_4():
25    /* const/4 vA, #+B */
26    sbfx    r1, rINST, #12, #4          @ r1<- sssssssB (sign-extended)
27    ubfx    r0, rINST, #8, #4           @ r0<- A
28    FETCH_ADVANCE_INST 1                @ advance rPC, load rINST
29    GET_INST_OPCODE ip                  @ ip<- opcode from rINST
30    SET_VREG r1, r0                     @ fp[A]<- r1
31    GOTO_OPCODE ip                      @ execute next instruction
32
33%def op_const_high16():
34    /* const/high16 vAA, #+BBBB0000 */
35    FETCH r0, 1                         @ r0<- 0000BBBB (zero-extended)
36    mov     r3, rINST, lsr #8           @ r3<- AA
37    mov     r0, r0, lsl #16             @ r0<- BBBB0000
38    FETCH_ADVANCE_INST 2                @ advance rPC, load rINST
39    SET_VREG r0, r3                     @ vAA<- r0
40    GET_INST_OPCODE ip                  @ extract opcode from rINST
41    GOTO_OPCODE ip                      @ jump to next instruction
42
43%def op_const_object(jumbo="0", helper="nterp_load_object"):
44   // Fast-path which gets the object from thread-local cache.
45%  fetch_from_thread_cache("r0", miss_label="2f")
46   cmp rMR, #0
47   bne 3f
481:
49   mov     r1, rINST, lsr #8           @ r1<- AA
50   .if $jumbo
51   FETCH_ADVANCE_INST 3                // advance rPC, load rINST
52   .else
53   FETCH_ADVANCE_INST 2                // advance rPC, load rINST
54   .endif
55   GET_INST_OPCODE ip                  // extract opcode from rINST
56   SET_VREG_OBJECT r0, r1              // vAA <- value
57   GOTO_OPCODE ip                      // jump to next instruction
582:
59   EXPORT_PC
60   mov r0, rSELF
61   ldr r1, [sp]
62   mov r2, rPC
63   bl $helper
64   b 1b
653:
66   bl art_quick_read_barrier_mark_reg00
67   b 1b
68
69%def op_const_class():
70%  op_const_object(jumbo="0", helper="nterp_get_class")
71
72%def op_const_method_handle():
73%  op_const_object(jumbo="0")
74
75%def op_const_method_type():
76%  op_const_object(jumbo="0")
77
78%def op_const_string():
79   /* const/string vAA, String@BBBB */
80%  op_const_object(jumbo="0")
81
82%def op_const_string_jumbo():
83   /* const/string vAA, String@BBBBBBBB */
84%  op_const_object(jumbo="1")
85
86%def op_const_wide():
87    /* const-wide vAA, #+HHHHhhhhBBBBbbbb */
88    FETCH r0, 1                         @ r0<- bbbb (low)
89    FETCH r1, 2                         @ r1<- BBBB (low middle)
90    FETCH r2, 3                         @ r2<- hhhh (high middle)
91    orr     r0, r0, r1, lsl #16         @ r0<- BBBBbbbb (low word)
92    FETCH r3, 4                         @ r3<- HHHH (high)
93    mov     r4, rINST, lsr #8           @ r4<- AA
94    orr     r1, r2, r3, lsl #16         @ r1<- HHHHhhhh (high word)
95    CLEAR_SHADOW_PAIR r4, r2, r3        @ Zero out the shadow regs
96    FETCH_ADVANCE_INST 5                @ advance rPC, load rINST
97    VREG_INDEX_TO_ADDR r4, r4           @ r4<- &fp[AA]
98    GET_INST_OPCODE ip                  @ extract opcode from rINST
99    SET_VREG_WIDE_BY_ADDR r0, r1, r4    @ vAA<- r0/r1
100    GOTO_OPCODE ip                      @ jump to next instruction
101
102%def op_const_wide_16():
103    /* const-wide/16 vAA, #+BBBB */
104    FETCH_S r0, 1                       @ r0<- ssssBBBB (sign-extended)
105    mov     r3, rINST, lsr #8           @ r3<- AA
106    mov     r1, r0, asr #31             @ r1<- ssssssss
107    FETCH_ADVANCE_INST 2                @ advance rPC, load rINST
108    CLEAR_SHADOW_PAIR r3, r2, lr        @ Zero out the shadow regs
109    VREG_INDEX_TO_ADDR r3, r3           @ r3<- &fp[AA]
110    GET_INST_OPCODE ip                  @ extract opcode from rINST
111    SET_VREG_WIDE_BY_ADDR r0, r1, r3    @ vAA<- r0/r1
112    GOTO_OPCODE ip                      @ jump to next instruction
113
114%def op_const_wide_32():
115    /* const-wide/32 vAA, #+BBBBbbbb */
116    FETCH r0, 1                         @ r0<- 0000bbbb (low)
117    mov     r3, rINST, lsr #8           @ r3<- AA
118    FETCH_S r2, 2                       @ r2<- ssssBBBB (high)
119    FETCH_ADVANCE_INST 3                @ advance rPC, load rINST
120    orr     r0, r0, r2, lsl #16         @ r0<- BBBBbbbb
121    CLEAR_SHADOW_PAIR r3, r2, lr        @ Zero out the shadow regs
122    VREG_INDEX_TO_ADDR r3, r3           @ r3<- &fp[AA]
123    mov     r1, r0, asr #31             @ r1<- ssssssss
124    GET_INST_OPCODE ip                  @ extract opcode from rINST
125    SET_VREG_WIDE_BY_ADDR r0, r1, r3    @ vAA<- r0/r1
126    GOTO_OPCODE ip                      @ jump to next instruction
127
128%def op_const_wide_high16():
129    /* const-wide/high16 vAA, #+BBBB000000000000 */
130    FETCH r1, 1                         @ r1<- 0000BBBB (zero-extended)
131    mov     r3, rINST, lsr #8           @ r3<- AA
132    mov     r0, #0                      @ r0<- 00000000
133    mov     r1, r1, lsl #16             @ r1<- BBBB0000
134    FETCH_ADVANCE_INST 2                @ advance rPC, load rINST
135    CLEAR_SHADOW_PAIR r3, r0, r2        @ Zero shadow regs
136    VREG_INDEX_TO_ADDR r3, r3           @ r3<- &fp[AA]
137    GET_INST_OPCODE ip                  @ extract opcode from rINST
138    SET_VREG_WIDE_BY_ADDR r0, r1, r3    @ vAA<- r0/r1
139    GOTO_OPCODE ip                      @ jump to next instruction
140
141%def op_monitor_enter():
142    /*
143     * Synchronize on an object.
144     */
145    /* monitor-enter vAA */
146    EXPORT_PC
147    mov      r2, rINST, lsr #8           @ r2<- AA
148    GET_VREG r0, r2                      @ r0<- vAA (object)
149    bl       art_quick_lock_object
150    FETCH_ADVANCE_INST 1
151    GET_INST_OPCODE ip                   @ extract opcode from rINST
152    GOTO_OPCODE ip                       @ jump to next instruction
153
154%def op_monitor_exit():
155    /*
156     * Unlock an object.
157     *
158     * Exceptions that occur when unlocking a monitor need to appear as
159     * if they happened at the following instruction.  See the Dalvik
160     * instruction spec.
161     */
162    /* monitor-exit vAA */
163    EXPORT_PC
164    mov      r2, rINST, lsr #8          @ r2<- AA
165    GET_VREG r0, r2                     @ r0<- vAA (object)
166    bl       art_quick_unlock_object
167    FETCH_ADVANCE_INST 1                @ before throw: advance rPC, load rINST
168    GET_INST_OPCODE ip                  @ extract opcode from rINST
169    GOTO_OPCODE ip                      @ jump to next instruction
170
171%def op_move(is_object="0"):
172    /* for move, move-object, long-to-int */
173    /* op vA, vB */
174    mov     r1, rINST, lsr #12          @ r1<- B from 15:12
175    ubfx    r0, rINST, #8, #4           @ r0<- A from 11:8
176    FETCH_ADVANCE_INST 1                @ advance rPC, load rINST
177    GET_VREG r2, r1                     @ r2<- fp[B]
178    GET_INST_OPCODE ip                  @ ip<- opcode from rINST
179    .if $is_object
180    SET_VREG_OBJECT r2, r0              @ fp[A]<- r2
181    .else
182    SET_VREG r2, r0                     @ fp[A]<- r2
183    .endif
184    GOTO_OPCODE ip                      @ execute next instruction
185
186%def op_move_16(is_object="0"):
187    /* for: move/16, move-object/16 */
188    /* op vAAAA, vBBBB */
189    FETCH r1, 2                         @ r1<- BBBB
190    FETCH r0, 1                         @ r0<- AAAA
191    FETCH_ADVANCE_INST 3                @ advance rPC, load rINST
192    GET_VREG r2, r1                     @ r2<- fp[BBBB]
193    GET_INST_OPCODE ip                  @ extract opcode from rINST
194    .if $is_object
195    SET_VREG_OBJECT r2, r0              @ fp[AAAA]<- r2
196    .else
197    SET_VREG r2, r0                     @ fp[AAAA]<- r2
198    .endif
199    GOTO_OPCODE ip                      @ jump to next instruction
200
201%def op_move_exception():
202    /* move-exception vAA */
203    mov     r2, rINST, lsr #8           @ r2<- AA
204    ldr     r3, [rSELF, #THREAD_EXCEPTION_OFFSET]
205    mov     r1, #0                      @ r1<- 0
206    FETCH_ADVANCE_INST 1                @ advance rPC, load rINST
207    SET_VREG_OBJECT r3, r2              @ fp[AA]<- exception obj
208    GET_INST_OPCODE ip                  @ extract opcode from rINST
209    str     r1, [rSELF, #THREAD_EXCEPTION_OFFSET]  @ clear exception
210    GOTO_OPCODE ip                      @ jump to next instruction
211
212%def op_move_from16(is_object="0"):
213    /* for: move/from16, move-object/from16 */
214    /* op vAA, vBBBB */
215    FETCH r1, 1                         @ r1<- BBBB
216    mov     r0, rINST, lsr #8           @ r0<- AA
217    FETCH_ADVANCE_INST 2                @ advance rPC, load rINST
218    GET_VREG r2, r1                     @ r2<- fp[BBBB]
219    GET_INST_OPCODE ip                  @ extract opcode from rINST
220    .if $is_object
221    SET_VREG_OBJECT r2, r0              @ fp[AA]<- r2
222    .else
223    SET_VREG r2, r0                     @ fp[AA]<- r2
224    .endif
225    GOTO_OPCODE ip                      @ jump to next instruction
226
227%def op_move_object():
228%  op_move(is_object="1")
229
230%def op_move_object_16():
231%  op_move_16(is_object="1")
232
233%def op_move_object_from16():
234%  op_move_from16(is_object="1")
235
236%def op_move_result(is_object="0"):
237    /* for: move-result, move-result-object */
238    /* op vAA */
239    mov     r2, rINST, lsr #8           @ r2<- AA
240    FETCH_ADVANCE_INST 1                @ advance rPC, load rINST
241    GET_INST_OPCODE ip                  @ extract opcode from rINST
242    .if $is_object
243    SET_VREG_OBJECT r0, r2              @ fp[AA]<- r0
244    .else
245    SET_VREG r0, r2                     @ fp[AA]<- r0
246    .endif
247    GOTO_OPCODE ip                      @ jump to next instruction
248
249%def op_move_result_object():
250%  op_move_result(is_object="1")
251
252%def op_move_result_wide():
253    /* move-result-wide vAA */
254    mov     rINST, rINST, lsr #8        @ rINST<- AA
255    VREG_INDEX_TO_ADDR r2, rINST        @ r2<- &fp[AA]
256    CLEAR_SHADOW_PAIR rINST, ip, lr     @ Zero out the shadow regs
257    FETCH_ADVANCE_INST 1                @ advance rPC, load rINST
258    SET_VREG_WIDE_BY_ADDR r0, r1, r2    @ fp[AA]<- r0/r1
259    GET_INST_OPCODE ip                  @ extract opcode from rINST
260    GOTO_OPCODE ip                      @ jump to next instruction
261
262%def op_move_wide():
263    /* move-wide vA, vB */
264    /* NOTE: regs can overlap, e.g. "move v6,v7" or "move v7,v6" */
265    mov     r3, rINST, lsr #12          @ r3<- B
266    ubfx    rINST, rINST, #8, #4        @ rINST<- A
267    VREG_INDEX_TO_ADDR r3, r3           @ r3<- &fp[B]
268    VREG_INDEX_TO_ADDR r2, rINST        @ r2<- &fp[A]
269    GET_VREG_WIDE_BY_ADDR r0, r1, r3    @ r0/r1<- fp[B]
270    CLEAR_SHADOW_PAIR rINST, ip, lr     @ Zero out the shadow regs
271    FETCH_ADVANCE_INST 1                @ advance rPC, load rINST
272    GET_INST_OPCODE ip                  @ extract opcode from rINST
273    SET_VREG_WIDE_BY_ADDR r0, r1, r2    @ fp[A]<- r0/r1
274    GOTO_OPCODE ip                      @ jump to next instruction
275
276%def op_move_wide_16():
277    /* move-wide/16 vAAAA, vBBBB */
278    /* NOTE: regs can overlap, e.g. "move v6,v7" or "move v7,v6" */
279    FETCH r3, 2                         @ r3<- BBBB
280    FETCH r2, 1                         @ r2<- AAAA
281    VREG_INDEX_TO_ADDR r3, r3           @ r3<- &fp[BBBB]
282    VREG_INDEX_TO_ADDR lr, r2           @ r2<- &fp[AAAA]
283    GET_VREG_WIDE_BY_ADDR r0, r1, r3    @ r0/r1<- fp[BBBB]
284    FETCH_ADVANCE_INST 3                @ advance rPC, load rINST
285    CLEAR_SHADOW_PAIR r2, r3, ip        @ Zero out the shadow regs
286    SET_VREG_WIDE_BY_ADDR r0, r1, lr    @ fp[AAAA]<- r0/r1
287    GET_INST_OPCODE ip                  @ extract opcode from rINST
288    GOTO_OPCODE ip                      @ jump to next instruction
289
290%def op_move_wide_from16():
291    /* move-wide/from16 vAA, vBBBB */
292    /* NOTE: regs can overlap, e.g. "move v6,v7" or "move v7,v6" */
293    FETCH r3, 1                         @ r3<- BBBB
294    mov     rINST, rINST, lsr #8        @ rINST<- AA
295    VREG_INDEX_TO_ADDR r3, r3           @ r3<- &fp[BBBB]
296    VREG_INDEX_TO_ADDR r2, rINST        @ r2<- &fp[AA]
297    GET_VREG_WIDE_BY_ADDR r0, r1, r3    @ r0/r1<- fp[BBBB]
298    CLEAR_SHADOW_PAIR rINST, ip, lr     @ Zero out the shadow regs
299    FETCH_ADVANCE_INST 2                @ advance rPC, load rINST
300    GET_INST_OPCODE ip                  @ extract opcode from rINST
301    SET_VREG_WIDE_BY_ADDR r0, r1, r2    @ fp[AA]<- r0/r1
302    GOTO_OPCODE ip                      @ jump to next instruction
303
304%def op_nop():
305    FETCH_ADVANCE_INST 1                @ advance to next instr, load rINST
306    GET_INST_OPCODE ip                  @ ip<- opcode from rINST
307    GOTO_OPCODE ip                      @ execute it
308
309%def op_unused_3e():
310%  unused()
311
312%def op_unused_3f():
313%  unused()
314
315%def op_unused_40():
316%  unused()
317
318%def op_unused_41():
319%  unused()
320
321%def op_unused_42():
322%  unused()
323
324%def op_unused_43():
325%  unused()
326
327%def op_unused_73():
328%  unused()
329
330%def op_unused_79():
331%  unused()
332
333%def op_unused_7a():
334%  unused()
335
336%def op_unused_e3():
337%  unused()
338
339%def op_unused_e4():
340%  unused()
341
342%def op_unused_e5():
343%  unused()
344
345%def op_unused_e6():
346%  unused()
347
348%def op_unused_e7():
349%  unused()
350
351%def op_unused_e8():
352%  unused()
353
354%def op_unused_e9():
355%  unused()
356
357%def op_unused_ea():
358%  unused()
359
360%def op_unused_eb():
361%  unused()
362
363%def op_unused_ec():
364%  unused()
365
366%def op_unused_ed():
367%  unused()
368
369%def op_unused_ee():
370%  unused()
371
372%def op_unused_ef():
373%  unused()
374
375%def op_unused_f0():
376%  unused()
377
378%def op_unused_f1():
379%  unused()
380
381%def op_unused_f2():
382%  unused()
383
384%def op_unused_f3():
385%  unused()
386
387%def op_unused_f4():
388%  unused()
389
390%def op_unused_f5():
391%  unused()
392
393%def op_unused_f6():
394%  unused()
395
396%def op_unused_f7():
397%  unused()
398
399%def op_unused_f8():
400%  unused()
401
402%def op_unused_f9():
403%  unused()
404
405%def op_unused_fc():
406%  unused()
407
408%def op_unused_fd():
409%  unused()
410