valgrind.h
1/* -*- c -*-
2 ----------------------------------------------------------------
3
4 Notice that the following BSD-style license applies to this one
5 file (valgrind.h) only. The rest of Valgrind is licensed under the
6 terms of the GNU General Public License, version 2, unless
7 otherwise indicated. See the COPYING file in the source
8 distribution for details.
9
10 ----------------------------------------------------------------
11
12 This file is part of Valgrind, a dynamic binary instrumentation
13 framework.
14
15 Copyright (C) 2000-2012 Julian Seward. All rights reserved.
16
17 Redistribution and use in source and binary forms, with or without
18 modification, are permitted provided that the following conditions
19 are met:
20
21 1. Redistributions of source code must retain the above copyright
22 notice, this list of conditions and the following disclaimer.
23
24 2. The origin of this software must not be misrepresented; you must
25 not claim that you wrote the original software. If you use this
26 software in a product, an acknowledgment in the product
27 documentation would be appreciated but is not required.
28
29 3. Altered source versions must be plainly marked as such, and must
30 not be misrepresented as being the original software.
31
32 4. The name of the author may not be used to endorse or promote
33 products derived from this software without specific prior written
34 permission.
35
36 THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
37 OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
38 WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
39 ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
40 DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
41 DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
42 GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
43 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
44 WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
45 NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
46 SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
47
48 ----------------------------------------------------------------
49
50 Notice that the above BSD-style license applies to this one file
51 (valgrind.h) only. The entire rest of Valgrind is licensed under
52 the terms of the GNU General Public License, version 2. See the
53 COPYING file in the source distribution for details.
54
55 ----------------------------------------------------------------
56*/
57
58
59/* This file is for inclusion into client (your!) code.
60
61 You can use these macros to manipulate and query Valgrind's
62 execution inside your own programs.
63
64 The resulting executables will still run without Valgrind, just a
65 little bit more slowly than they otherwise would, but otherwise
 66 unchanged. When not running on Valgrind, each client request
 67 consumes very few (e.g. 7) instructions, so the resulting performance
68 loss is negligible unless you plan to execute client requests
69 millions of times per second. Nevertheless, if that is still a
70 problem, you can compile with the NVALGRIND symbol defined (gcc
71 -DNVALGRIND) so that client requests are not even compiled in. */
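
/* Editor's illustration (not part of the original header): a minimal use
   of a client request from client code. RUNNING_ON_VALGRIND is defined
   later in the full header (beyond this excerpt) and expands to such a
   request:

       #include "valgrind.h"

       long scale = RUNNING_ON_VALGRIND ? 10 : 1;

   scale can then be used to shrink a timing-sensitive workload when the
   program runs under Valgrind. Natively, or when compiled with
   -DNVALGRIND, RUNNING_ON_VALGRIND evaluates to 0 and the program
   behaves as usual. */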
72
73#ifndef __VALGRIND_H
74#define __VALGRIND_H
75
76
77/* ------------------------------------------------------------------ */
78/* VERSION NUMBER OF VALGRIND */
79/* ------------------------------------------------------------------ */
80
81/* Specify Valgrind's version number, so that user code can
82 conditionally compile based on our version number. Note that these
83 were introduced at version 3.6 and so do not exist in version 3.5
84 or earlier. The recommended way to use them to check for "version
 85 X.Y or later" is (e.g.)
86
87#if defined(__VALGRIND_MAJOR__) && defined(__VALGRIND_MINOR__) \
88 && (__VALGRIND_MAJOR__ > 3 \
89 || (__VALGRIND_MAJOR__ == 3 && __VALGRIND_MINOR__ >= 6))
90*/
91#define __VALGRIND_MAJOR__ 3
92#define __VALGRIND_MINOR__ 8
93
94
95#include <stdarg.h>
96
97/* Nb: this file might be included in a file compiled with -ansi. So
98 we can't use C++ style "//" comments nor the "asm" keyword (instead
99 use "__asm__"). */
100
101/* Derive some tags indicating what the target platform is. Note
102 that in this file we're using the compiler's CPP symbols for
103 identifying architectures, which are different to the ones we use
104 within the rest of Valgrind. Note, __powerpc__ is active for both
105 32 and 64-bit PPC, whereas __powerpc64__ is only active for the
106 latter (on Linux, that is).
107
108 Misc note: how to find out what's predefined in gcc by default:
109 gcc -Wp,-dM somefile.c
110*/
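
/* Editor's note (addition): an equivalent way to dump gcc's default
   predefines is

       gcc -dM -E - < /dev/null
*/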
111#undef PLAT_x86_darwin
112#undef PLAT_amd64_darwin
113#undef PLAT_x86_win32
114#undef PLAT_x86_linux
115#undef PLAT_amd64_linux
116#undef PLAT_ppc32_linux
117#undef PLAT_ppc64_linux
118#undef PLAT_arm_linux
119#undef PLAT_s390x_linux
120#undef PLAT_mips32_linux
121
122
123#if defined(__APPLE__) && defined(__i386__)
124#define PLAT_x86_darwin 1
125#elif defined(__APPLE__) && defined(__x86_64__)
126#define PLAT_amd64_darwin 1
127#elif defined(__MINGW32__) || defined(__CYGWIN32__) || (defined(_WIN32) && defined(_M_IX86))
128#define PLAT_x86_win32 1
129#elif defined(__linux__) && defined(__i386__)
130#define PLAT_x86_linux 1
131#elif defined(__linux__) && defined(__x86_64__)
132#define PLAT_amd64_linux 1
133#elif defined(__linux__) && defined(__powerpc__) && !defined(__powerpc64__)
134#define PLAT_ppc32_linux 1
135#elif defined(__linux__) && defined(__powerpc__) && defined(__powerpc64__)
136#define PLAT_ppc64_linux 1
137#elif defined(__linux__) && defined(__arm__)
138#define PLAT_arm_linux 1
139#elif defined(__linux__) && defined(__s390__) && defined(__s390x__)
140#define PLAT_s390x_linux 1
141#elif defined(__linux__) && defined(__mips__)
142#define PLAT_mips32_linux 1
143#else
144/* If we're not compiling for our target platform, don't generate
145 any inline asms. */
146#if !defined(NVALGRIND)
147#define NVALGRIND 1
148#endif
149#endif
150
151
152/* ------------------------------------------------------------------ */
153/* ARCHITECTURE SPECIFICS for SPECIAL INSTRUCTIONS. There is nothing */
154/* in here of use to end-users -- skip to the next section. */
155/* ------------------------------------------------------------------ */
156
157/*
158 * VALGRIND_DO_CLIENT_REQUEST(): a statement that invokes a Valgrind client
159 * request. Accepts both pointers and integers as arguments.
160 *
161 * VALGRIND_DO_CLIENT_REQUEST_STMT(): a statement that invokes a Valgrind
162 * client request that does not return a value.
 163 *
164 * VALGRIND_DO_CLIENT_REQUEST_EXPR(): a C expression that invokes a Valgrind
165 * client request and whose value equals the client request result. Accepts
166 * both pointers and integers as arguments. Note that such calls are not
167 * necessarily pure functions -- they may have side effects.
168 */
169
170#define VALGRIND_DO_CLIENT_REQUEST(_zzq_rlval, _zzq_default, _zzq_request, _zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, \
171 _zzq_arg5) \
172 do { \
173 (_zzq_rlval) = VALGRIND_DO_CLIENT_REQUEST_EXPR((_zzq_default), (_zzq_request), (_zzq_arg1), (_zzq_arg2), \
174 (_zzq_arg3), (_zzq_arg4), (_zzq_arg5)); \
175 } while (0)
176
177#define VALGRIND_DO_CLIENT_REQUEST_STMT(_zzq_request, _zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5) \
178 do { \
179 (void)VALGRIND_DO_CLIENT_REQUEST_EXPR(0, (_zzq_request), (_zzq_arg1), (_zzq_arg2), (_zzq_arg3), (_zzq_arg4), \
180 (_zzq_arg5)); \
181 } while (0)
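
/* Editor's illustration (REQUEST is a hypothetical request code; a1 and
   a2 stand for arbitrary arguments):

       result = VALGRIND_DO_CLIENT_REQUEST_EXPR(0, REQUEST, a1, a2, 0, 0, 0);
       VALGRIND_DO_CLIENT_REQUEST_STMT(REQUEST, a1, a2, 0, 0, 0);

   When not running under Valgrind, the EXPR form evaluates to its
   default argument (0 above) and the STMT form does nothing. */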
182
183#if defined(NVALGRIND)
184
185/* Define NVALGRIND to completely remove the Valgrind magic sequence
186 from the compiled code (analogous to NDEBUG's effects on
187 assert()) */
188#define VALGRIND_DO_CLIENT_REQUEST_EXPR(_zzq_default, _zzq_request, _zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, \
189 _zzq_arg5) \
190 (_zzq_default)
191
192#else /* ! NVALGRIND */
193
194/* The following defines the magic code sequences which the JITter
195 spots and handles magically. Don't look too closely at them as
196 they will rot your brain.
197
 198 The assembly code sequences for all architectures are in this one
199 file. This is because this file must be stand-alone, and we don't
200 want to have multiple files.
201
202 For VALGRIND_DO_CLIENT_REQUEST, we must ensure that the default
203 value gets put in the return slot, so that everything works when
204 this is executed not under Valgrind. Args are passed in a memory
205 block, and so there's no intrinsic limit to the number that could
206 be passed, but it's currently five.
207
208 The macro args are:
209 _zzq_rlval result lvalue
210 _zzq_default default value (result returned when running on real CPU)
211 _zzq_request request code
212 _zzq_arg1..5 request params
213
214 The other two macros are used to support function wrapping, and are
215 a lot simpler. VALGRIND_GET_NR_CONTEXT returns the value of the
216 guest's NRADDR pseudo-register and whatever other information is
 217 needed to safely call the original from the wrapper: on
218 ppc64-linux, the R2 value at the divert point is also needed. This
219 information is abstracted into a user-visible type, OrigFn.
220
221 VALGRIND_CALL_NOREDIR_* behaves the same as the following on the
222 guest, but guarantees that the branch instruction will not be
223 redirected: x86: call *%eax, amd64: call *%rax, ppc32/ppc64:
224 branch-and-link-to-r11. VALGRIND_CALL_NOREDIR is just text, not a
225 complete inline asm, since it needs to be combined with more magic
226 inline asm stuff to be useful.
227*/
228
229/* ------------------------- x86-{linux,darwin} ---------------- */
230
231#if defined(PLAT_x86_linux) || defined(PLAT_x86_darwin) || (defined(PLAT_x86_win32) && defined(__GNUC__))
232
233typedef struct {
234 unsigned int nraddr; /* where's the code? */
235} OrigFn;
236
237#define __SPECIAL_INSTRUCTION_PREAMBLE \
238 "roll $3, %%edi ; roll $13, %%edi\n\t" \
239 "roll $29, %%edi ; roll $19, %%edi\n\t"
240
241#define VALGRIND_DO_CLIENT_REQUEST_EXPR(_zzq_default, _zzq_request, _zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, \
242 _zzq_arg5) \
243 __extension__({ \
244 volatile unsigned int _zzq_args[6]; \
245 volatile unsigned int _zzq_result; \
246 _zzq_args[0] = (unsigned int)(_zzq_request); \
247 _zzq_args[1] = (unsigned int)(_zzq_arg1); \
248 _zzq_args[2] = (unsigned int)(_zzq_arg2); \
249 _zzq_args[3] = (unsigned int)(_zzq_arg3); \
250 _zzq_args[4] = (unsigned int)(_zzq_arg4); \
251 _zzq_args[5] = (unsigned int)(_zzq_arg5); \
252 __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE /* %EDX = client_request ( %EAX ) */ \
253 "xchgl %%ebx,%%ebx" \
254 : "=d"(_zzq_result) \
255 : "a"(&_zzq_args[0]), "0"(_zzq_default) \
256 : "cc", "memory"); \
257 _zzq_result; \
258 })
259
260#define VALGRIND_GET_NR_CONTEXT(_zzq_rlval) \
261 { \
262 volatile OrigFn *_zzq_orig = &(_zzq_rlval); \
263 volatile unsigned int __addr; \
264 __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE /* %EAX = guest_NRADDR */ \
265 "xchgl %%ecx,%%ecx" \
266 : "=a"(__addr) \
267 : \
268 : "cc", "memory"); \
269 _zzq_orig->nraddr = __addr; \
270 }
271
272#define VALGRIND_CALL_NOREDIR_EAX \
273 __SPECIAL_INSTRUCTION_PREAMBLE \
274 /* call-noredir *%EAX */ \
275 "xchgl %%edx,%%edx\n\t"
276
277#define VALGRIND_VEX_INJECT_IR() \
278 do { \
279 __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE "xchgl %%edi,%%edi\n\t" : : : "cc", "memory"); \
280 } while (0)
281
282#endif /* PLAT_x86_linux || PLAT_x86_darwin || (PLAT_x86_win32 && __GNUC__) */
283
284/* ------------------------- x86-Win32 ------------------------- */
285
286#if defined(PLAT_x86_win32) && !defined(__GNUC__)
287
288typedef struct {
289 unsigned int nraddr; /* where's the code? */
290} OrigFn;
291
292#if defined(_MSC_VER)
293
294#define __SPECIAL_INSTRUCTION_PREAMBLE __asm rol edi, 3 __asm rol edi, 13 __asm rol edi, 29 __asm rol edi, 19
295
296#define VALGRIND_DO_CLIENT_REQUEST_EXPR(_zzq_default, _zzq_request, _zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, \
297 _zzq_arg5) \
298 valgrind_do_client_request_expr((uintptr_t)(_zzq_default), (uintptr_t)(_zzq_request), (uintptr_t)(_zzq_arg1), \
299 (uintptr_t)(_zzq_arg2), (uintptr_t)(_zzq_arg3), (uintptr_t)(_zzq_arg4), \
300 (uintptr_t)(_zzq_arg5))
301
302static __inline uintptr_t valgrind_do_client_request_expr(uintptr_t _zzq_default, uintptr_t _zzq_request,
303 uintptr_t _zzq_arg1, uintptr_t _zzq_arg2, uintptr_t _zzq_arg3,
304 uintptr_t _zzq_arg4, uintptr_t _zzq_arg5)
305{
306 volatile uintptr_t _zzq_args[6];
307 volatile unsigned int _zzq_result;
308 _zzq_args[0] = (uintptr_t)(_zzq_request);
309 _zzq_args[1] = (uintptr_t)(_zzq_arg1);
310 _zzq_args[2] = (uintptr_t)(_zzq_arg2);
311 _zzq_args[3] = (uintptr_t)(_zzq_arg3);
312 _zzq_args[4] = (uintptr_t)(_zzq_arg4);
313 _zzq_args[5] = (uintptr_t)(_zzq_arg5);
314 __asm { __asm lea eax, _zzq_args __asm mov edx, _zzq_default
315 __SPECIAL_INSTRUCTION_PREAMBLE
316 /* %EDX = client_request ( %EAX ) */
317 __asm xchg ebx,ebx
318 __asm mov _zzq_result, edx
319 }
320 return _zzq_result;
321}
322
323#define VALGRIND_GET_NR_CONTEXT(_zzq_rlval) \
324 { \
325 volatile OrigFn *_zzq_orig = &(_zzq_rlval); \
326 volatile unsigned int __addr; \
327 __asm { __SPECIAL_INSTRUCTION_PREAMBLE /* %EAX = guest_NRADDR */ \
328 __asm xchg ecx,ecx \
329 __asm mov __addr, eax} \
330 _zzq_orig->nraddr = __addr; \
331 }
332
333#define VALGRIND_CALL_NOREDIR_EAX ERROR
334
335#define VALGRIND_VEX_INJECT_IR() \
336 do { \
337 __asm { __SPECIAL_INSTRUCTION_PREAMBLE \
338 __asm xchg edi,edi} \
339 } while (0)
340
341#else
342#error Unsupported compiler.
343#endif
344
345#endif /* PLAT_x86_win32 */
346
347/* ------------------------ amd64-{linux,darwin} --------------- */
348
349#if defined(PLAT_amd64_linux) || defined(PLAT_amd64_darwin)
350
351typedef struct {
352 unsigned long long int nraddr; /* where's the code? */
353} OrigFn;
354
355#define __SPECIAL_INSTRUCTION_PREAMBLE \
356 "rolq $3, %%rdi ; rolq $13, %%rdi\n\t" \
357 "rolq $61, %%rdi ; rolq $51, %%rdi\n\t"
358
359#define VALGRIND_DO_CLIENT_REQUEST_EXPR(_zzq_default, _zzq_request, _zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, \
360 _zzq_arg5) \
361 __extension__({ \
362 volatile unsigned long long int _zzq_args[6]; \
363 volatile unsigned long long int _zzq_result; \
364 _zzq_args[0] = (unsigned long long int)(_zzq_request); \
365 _zzq_args[1] = (unsigned long long int)(_zzq_arg1); \
366 _zzq_args[2] = (unsigned long long int)(_zzq_arg2); \
367 _zzq_args[3] = (unsigned long long int)(_zzq_arg3); \
368 _zzq_args[4] = (unsigned long long int)(_zzq_arg4); \
369 _zzq_args[5] = (unsigned long long int)(_zzq_arg5); \
370 __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE /* %RDX = client_request ( %RAX ) */ \
371 "xchgq %%rbx,%%rbx" \
372 : "=d"(_zzq_result) \
373 : "a"(&_zzq_args[0]), "0"(_zzq_default) \
374 : "cc", "memory"); \
375 _zzq_result; \
376 })
377
378#define VALGRIND_GET_NR_CONTEXT(_zzq_rlval) \
379 { \
380 volatile OrigFn *_zzq_orig = &(_zzq_rlval); \
381 volatile unsigned long long int __addr; \
382 __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE /* %RAX = guest_NRADDR */ \
383 "xchgq %%rcx,%%rcx" \
384 : "=a"(__addr) \
385 : \
386 : "cc", "memory"); \
387 _zzq_orig->nraddr = __addr; \
388 }
389
390#define VALGRIND_CALL_NOREDIR_RAX \
391 __SPECIAL_INSTRUCTION_PREAMBLE \
392 /* call-noredir *%RAX */ \
393 "xchgq %%rdx,%%rdx\n\t"
394
395#define VALGRIND_VEX_INJECT_IR() \
396 do { \
397 __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE "xchgq %%rdi,%%rdi\n\t" : : : "cc", "memory"); \
398 } while (0)
399
400#endif /* PLAT_amd64_linux || PLAT_amd64_darwin */
401
402/* ------------------------ ppc32-linux ------------------------ */
403
404#if defined(PLAT_ppc32_linux)
405
406typedef struct {
407 unsigned int nraddr; /* where's the code? */
408} OrigFn;
409
410#define __SPECIAL_INSTRUCTION_PREAMBLE \
411 "rlwinm 0,0,3,0,0 ; rlwinm 0,0,13,0,0\n\t" \
412 "rlwinm 0,0,29,0,0 ; rlwinm 0,0,19,0,0\n\t"
413
414#define VALGRIND_DO_CLIENT_REQUEST_EXPR(_zzq_default, _zzq_request, _zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, \
415 _zzq_arg5) \
416\
417 __extension__({ \
418 unsigned int _zzq_args[6]; \
419 unsigned int _zzq_result; \
420 unsigned int *_zzq_ptr; \
421 _zzq_args[0] = (unsigned int)(_zzq_request); \
422 _zzq_args[1] = (unsigned int)(_zzq_arg1); \
423 _zzq_args[2] = (unsigned int)(_zzq_arg2); \
424 _zzq_args[3] = (unsigned int)(_zzq_arg3); \
425 _zzq_args[4] = (unsigned int)(_zzq_arg4); \
426 _zzq_args[5] = (unsigned int)(_zzq_arg5); \
427 _zzq_ptr = _zzq_args; \
428 __asm__ volatile("mr 3,%1\n\t" /*default*/ \
429 "mr 4,%2\n\t" /*ptr*/ \
430 __SPECIAL_INSTRUCTION_PREAMBLE /* %R3 = client_request ( %R4 ) */ \
431 "or 1,1,1\n\t" \
432 "mr %0,3" /*result*/ \
433 : "=b"(_zzq_result) \
434 : "b"(_zzq_default), "b"(_zzq_ptr) \
435 : "cc", "memory", "r3", "r4"); \
436 _zzq_result; \
437 })
438
439#define VALGRIND_GET_NR_CONTEXT(_zzq_rlval) \
440 { \
441 volatile OrigFn *_zzq_orig = &(_zzq_rlval); \
442 unsigned int __addr; \
443 __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE /* %R3 = guest_NRADDR */ \
444 "or 2,2,2\n\t" \
445 "mr %0,3" \
446 : "=b"(__addr) \
447 : \
448 : "cc", "memory", "r3"); \
449 _zzq_orig->nraddr = __addr; \
450 }
451
452#define VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
453 __SPECIAL_INSTRUCTION_PREAMBLE \
454 /* branch-and-link-to-noredir *%R11 */ \
455 "or 3,3,3\n\t"
456
457#define VALGRIND_VEX_INJECT_IR() \
458 do { \
459 __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE "or 5,5,5\n\t"); \
460 } while (0)
461
462#endif /* PLAT_ppc32_linux */
463
464/* ------------------------ ppc64-linux ------------------------ */
465
466#if defined(PLAT_ppc64_linux)
467
468typedef struct {
469 unsigned long long int nraddr; /* where's the code? */
470 unsigned long long int r2; /* what tocptr do we need? */
471} OrigFn;
472
473#define __SPECIAL_INSTRUCTION_PREAMBLE \
474 "rotldi 0,0,3 ; rotldi 0,0,13\n\t" \
475 "rotldi 0,0,61 ; rotldi 0,0,51\n\t"
476
477#define VALGRIND_DO_CLIENT_REQUEST_EXPR(_zzq_default, _zzq_request, _zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, \
478 _zzq_arg5) \
479\
480 __extension__({ \
481 unsigned long long int _zzq_args[6]; \
482 unsigned long long int _zzq_result; \
483 unsigned long long int *_zzq_ptr; \
484 _zzq_args[0] = (unsigned long long int)(_zzq_request); \
485 _zzq_args[1] = (unsigned long long int)(_zzq_arg1); \
486 _zzq_args[2] = (unsigned long long int)(_zzq_arg2); \
487 _zzq_args[3] = (unsigned long long int)(_zzq_arg3); \
488 _zzq_args[4] = (unsigned long long int)(_zzq_arg4); \
489 _zzq_args[5] = (unsigned long long int)(_zzq_arg5); \
490 _zzq_ptr = _zzq_args; \
491 __asm__ volatile("mr 3,%1\n\t" /*default*/ \
492 "mr 4,%2\n\t" /*ptr*/ \
493 __SPECIAL_INSTRUCTION_PREAMBLE /* %R3 = client_request ( %R4 ) */ \
494 "or 1,1,1\n\t" \
495 "mr %0,3" /*result*/ \
496 : "=b"(_zzq_result) \
497 : "b"(_zzq_default), "b"(_zzq_ptr) \
498 : "cc", "memory", "r3", "r4"); \
499 _zzq_result; \
500 })
501
502#define VALGRIND_GET_NR_CONTEXT(_zzq_rlval) \
503 { \
504 volatile OrigFn *_zzq_orig = &(_zzq_rlval); \
505 unsigned long long int __addr; \
506 __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE /* %R3 = guest_NRADDR */ \
507 "or 2,2,2\n\t" \
508 "mr %0,3" \
509 : "=b"(__addr) \
510 : \
511 : "cc", "memory", "r3"); \
512 _zzq_orig->nraddr = __addr; \
513 __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE /* %R3 = guest_NRADDR_GPR2 */ \
514 "or 4,4,4\n\t" \
515 "mr %0,3" \
516 : "=b"(__addr) \
517 : \
518 : "cc", "memory", "r3"); \
519 _zzq_orig->r2 = __addr; \
520 }
521
522#define VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
523 __SPECIAL_INSTRUCTION_PREAMBLE \
524 /* branch-and-link-to-noredir *%R11 */ \
525 "or 3,3,3\n\t"
526
527#define VALGRIND_VEX_INJECT_IR() \
528 do { \
529 __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE "or 5,5,5\n\t"); \
530 } while (0)
531
532#endif /* PLAT_ppc64_linux */
533
534/* ------------------------- arm-linux ------------------------- */
535
536#if defined(PLAT_arm_linux)
537
538typedef struct {
539 unsigned int nraddr; /* where's the code? */
540} OrigFn;
541
542#define __SPECIAL_INSTRUCTION_PREAMBLE \
543 "mov r12, r12, ror #3 ; mov r12, r12, ror #13 \n\t" \
544 "mov r12, r12, ror #29 ; mov r12, r12, ror #19 \n\t"
545
546#define VALGRIND_DO_CLIENT_REQUEST_EXPR(_zzq_default, _zzq_request, _zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, \
547 _zzq_arg5) \
548\
549 __extension__({ \
550 volatile unsigned int _zzq_args[6]; \
551 volatile unsigned int _zzq_result; \
552 _zzq_args[0] = (unsigned int)(_zzq_request); \
553 _zzq_args[1] = (unsigned int)(_zzq_arg1); \
554 _zzq_args[2] = (unsigned int)(_zzq_arg2); \
555 _zzq_args[3] = (unsigned int)(_zzq_arg3); \
556 _zzq_args[4] = (unsigned int)(_zzq_arg4); \
557 _zzq_args[5] = (unsigned int)(_zzq_arg5); \
558 __asm__ volatile("mov r3, %1\n\t" /*default*/ \
559 "mov r4, %2\n\t" /*ptr*/ \
560 __SPECIAL_INSTRUCTION_PREAMBLE /* R3 = client_request ( R4 ) */ \
561 "orr r10, r10, r10\n\t" \
562 "mov %0, r3" /*result*/ \
563 : "=r"(_zzq_result) \
564 : "r"(_zzq_default), "r"(&_zzq_args[0]) \
565 : "cc", "memory", "r3", "r4"); \
566 _zzq_result; \
567 })
568
569#define VALGRIND_GET_NR_CONTEXT(_zzq_rlval) \
570 { \
571 volatile OrigFn *_zzq_orig = &(_zzq_rlval); \
572 unsigned int __addr; \
573 __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE /* R3 = guest_NRADDR */ \
574 "orr r11, r11, r11\n\t" \
575 "mov %0, r3" \
576 : "=r"(__addr) \
577 : \
578 : "cc", "memory", "r3"); \
579 _zzq_orig->nraddr = __addr; \
580 }
581
582#define VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4 \
583 __SPECIAL_INSTRUCTION_PREAMBLE \
584 /* branch-and-link-to-noredir *%R4 */ \
585 "orr r12, r12, r12\n\t"
586
587#define VALGRIND_VEX_INJECT_IR() \
588 do { \
589 __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE "orr r9, r9, r9\n\t" : : : "cc", "memory"); \
590 } while (0)
591
592#endif /* PLAT_arm_linux */
593
594/* ------------------------ s390x-linux ------------------------ */
595
596#if defined(PLAT_s390x_linux)
597
598typedef struct {
599 unsigned long long int nraddr; /* where's the code? */
600} OrigFn;
601
602/* __SPECIAL_INSTRUCTION_PREAMBLE will be used to identify Valgrind specific
603 * code. This detection is implemented in platform specific toIR.c
604 * (e.g. VEX/priv/guest_s390_decoder.c).
605 */
606#define __SPECIAL_INSTRUCTION_PREAMBLE \
607 "lr 15,15\n\t" \
608 "lr 1,1\n\t" \
609 "lr 2,2\n\t" \
610 "lr 3,3\n\t"
611
612#define __CLIENT_REQUEST_CODE "lr 2,2\n\t"
613#define __GET_NR_CONTEXT_CODE "lr 3,3\n\t"
614#define __CALL_NO_REDIR_CODE "lr 4,4\n\t"
615#define __VEX_INJECT_IR_CODE "lr 5,5\n\t"
616
617#define VALGRIND_DO_CLIENT_REQUEST_EXPR(_zzq_default, _zzq_request, _zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, \
618 _zzq_arg5) \
619 __extension__({ \
620 volatile unsigned long long int _zzq_args[6]; \
621 volatile unsigned long long int _zzq_result; \
622 _zzq_args[0] = (unsigned long long int)(_zzq_request); \
623 _zzq_args[1] = (unsigned long long int)(_zzq_arg1); \
624 _zzq_args[2] = (unsigned long long int)(_zzq_arg2); \
625 _zzq_args[3] = (unsigned long long int)(_zzq_arg3); \
626 _zzq_args[4] = (unsigned long long int)(_zzq_arg4); \
627 _zzq_args[5] = (unsigned long long int)(_zzq_arg5); \
628 __asm__ volatile(/* r2 = args */ \
629 "lgr 2,%1\n\t" /* r3 = default */ \
630 "lgr 3,%2\n\t" __SPECIAL_INSTRUCTION_PREAMBLE __CLIENT_REQUEST_CODE /* results = r3 */ \
631 "lgr %0, 3\n\t" \
632 : "=d"(_zzq_result) \
633 : "a"(&_zzq_args[0]), "0"(_zzq_default) \
634 : "cc", "2", "3", "memory"); \
635 _zzq_result; \
636 })
637
638#define VALGRIND_GET_NR_CONTEXT(_zzq_rlval) \
639 { \
640 volatile OrigFn *_zzq_orig = &(_zzq_rlval); \
641 volatile unsigned long long int __addr; \
642 __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE __GET_NR_CONTEXT_CODE "lgr %0, 3\n\t" \
643 : "=a"(__addr) \
644 : \
645 : "cc", "3", "memory"); \
646 _zzq_orig->nraddr = __addr; \
647 }
648
649#define VALGRIND_CALL_NOREDIR_R1 \
650 __SPECIAL_INSTRUCTION_PREAMBLE \
651 __CALL_NO_REDIR_CODE
652
653#define VALGRIND_VEX_INJECT_IR() \
654 do { \
655 __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE __VEX_INJECT_IR_CODE); \
656 } while (0)
657
658#endif /* PLAT_s390x_linux */
659
660/* ------------------------- mips32-linux ---------------- */
661
662#if defined(PLAT_mips32_linux)
663
664typedef struct {
665 unsigned int nraddr; /* where's the code? */
666} OrigFn;
667
668/* .word 0x342
669 * .word 0x742
670 * .word 0xC2
671 * .word 0x4C2*/
672#define __SPECIAL_INSTRUCTION_PREAMBLE \
673 "srl $0, $0, 13\n\t" \
674 "srl $0, $0, 29\n\t" \
675 "srl $0, $0, 3\n\t" \
676 "srl $0, $0, 19\n\t"
677
678#define VALGRIND_DO_CLIENT_REQUEST_EXPR(_zzq_default, _zzq_request, _zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, \
679 _zzq_arg5) \
680 __extension__({ \
681 volatile unsigned int _zzq_args[6]; \
682 volatile unsigned int _zzq_result; \
683 _zzq_args[0] = (unsigned int)(_zzq_request); \
684 _zzq_args[1] = (unsigned int)(_zzq_arg1); \
685 _zzq_args[2] = (unsigned int)(_zzq_arg2); \
686 _zzq_args[3] = (unsigned int)(_zzq_arg3); \
687 _zzq_args[4] = (unsigned int)(_zzq_arg4); \
688 _zzq_args[5] = (unsigned int)(_zzq_arg5); \
689 __asm__ volatile("move $11, %1\n\t" /*default*/ \
690 "move $12, %2\n\t" /*ptr*/ \
691 __SPECIAL_INSTRUCTION_PREAMBLE /* T3 = client_request ( T4 ) */ \
692 "or $13, $13, $13\n\t" \
693 "move %0, $11\n\t" /*result*/ \
694 : "=r"(_zzq_result) \
695 : "r"(_zzq_default), "r"(&_zzq_args[0]) \
696 : "cc", "memory", "t3", "t4"); \
697 _zzq_result; \
698 })
699
700#define VALGRIND_GET_NR_CONTEXT(_zzq_rlval) \
701 { \
702 volatile OrigFn *_zzq_orig = &(_zzq_rlval); \
703 volatile unsigned int __addr; \
704 __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE /* %t9 = guest_NRADDR */ \
705 "or $14, $14, $14\n\t" \
706 "move %0, $11" /*result*/ \
707 : "=r"(__addr) \
708 : \
709 : "cc", "memory", "t3"); \
710 _zzq_orig->nraddr = __addr; \
711 }
712
713#define VALGRIND_CALL_NOREDIR_T9 \
714 __SPECIAL_INSTRUCTION_PREAMBLE \
715 /* call-noredir *%t9 */ \
716 "or $15, $15, $15\n\t"
717
718#define VALGRIND_VEX_INJECT_IR() \
719 do { \
720 __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE "or $11, $11, $11\n\t"); \
721 } while (0)
722
723
724#endif /* PLAT_mips32_linux */
725
726/* Insert assembly code for other platforms here... */
727
728#endif /* NVALGRIND */
729
730
731/* ------------------------------------------------------------------ */
732/* PLATFORM SPECIFICS for FUNCTION WRAPPING. This is all very */
733/* ugly. It's the least-worst tradeoff I can think of. */
734/* ------------------------------------------------------------------ */
735
 736/* This section defines magic (a.k.a. appalling-hack) macros for doing
 737 guaranteed-no-redirection calls, so as to get from function
738 wrappers to the functions they are wrapping. The whole point is to
739 construct standard call sequences, but to do the call itself with a
740 special no-redirect call pseudo-instruction that the JIT
741 understands and handles specially. This section is long and
742 repetitious, and I can't see a way to make it shorter.
743
744 The naming scheme is as follows:
745
746 CALL_FN_{W,v}_{v,W,WW,WWW,WWWW,5W,6W,7W,etc}
747
748 'W' stands for "word" and 'v' for "void". Hence there are
749 different macros for calling arity 0, 1, 2, 3, 4, etc, functions,
750 and for each, the possibility of returning a word-typed result, or
751 no result.
752*/
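
/* Editor's note (addition): for example, CALL_FN_W_WW(lval, fn, a1, a2)
   calls the two-argument, word-returning function described by fn and
   stores the result in lval, while CALL_FN_v_W(fn, a1) calls a
   one-argument function returning void. */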
753
754/* Use these to write the name of your wrapper. NOTE: duplicates
755 VG_WRAP_FUNCTION_Z{U,Z} in pub_tool_redir.h. NOTE also: inserts
756 the default behaviour equivalence class tag "0000" into the name.
757 See pub_tool_redir.h for details -- normally you don't need to
758 think about this, though. */
759
760/* Use an extra level of macroisation so as to ensure the soname/fnname
761 args are fully macro-expanded before pasting them together. */
762#define VG_CONCAT4(_aa, _bb, _cc, _dd) _aa##_bb##_cc##_dd
763
764#define I_WRAP_SONAME_FNNAME_ZU(soname, fnname) VG_CONCAT4(_vgw00000ZU_, soname, _, fnname)
765
766#define I_WRAP_SONAME_FNNAME_ZZ(soname, fnname) VG_CONCAT4(_vgw00000ZZ_, soname, _, fnname)
767
768/* Use this macro from within a wrapper function to collect the
769 context (address and possibly other info) of the original function.
770 Once you have that you can then use it in one of the CALL_FN_
771 macros. The type of the argument _lval is OrigFn. */
772#define VALGRIND_GET_ORIG_FN(_lval) VALGRIND_GET_NR_CONTEXT(_lval)
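
/* Editor's sketch of the intended wrapping pattern (adapted from the
   Valgrind manual; foo and the soname are placeholders): to wrap
   int foo(int) found in libc.so.*, write

       int I_WRAP_SONAME_FNNAME_ZU(libcZdsoZa, foo)(int x)
       {
           int    r;
           OrigFn fn;
           VALGRIND_GET_ORIG_FN(fn);
           CALL_FN_W_W(r, fn, x);
           return r;
       }

   Zd and Za are the z-encodings of '.' and '*' respectively (see
   pub_tool_redir.h). */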
773
774/* Also provide end-user facilities for function replacement, rather
775 than wrapping. A replacement function differs from a wrapper in
776 that it has no way to get hold of the original function being
777 called, and hence no way to call onwards to it. In a replacement
778 function, VALGRIND_GET_ORIG_FN always returns zero. */
779
780#define I_REPLACE_SONAME_FNNAME_ZU(soname, fnname) VG_CONCAT4(_vgr00000ZU_, soname, _, fnname)
781
782#define I_REPLACE_SONAME_FNNAME_ZZ(soname, fnname) VG_CONCAT4(_vgr00000ZZ_, soname, _, fnname)
783
784/* Derivatives of the main macros below, for calling functions
785 returning void. */
786
787#define CALL_FN_v_v(fnptr) \
788 do { \
789 volatile unsigned long _junk; \
790 CALL_FN_W_v(_junk, fnptr); \
791 } while (0)
792
793#define CALL_FN_v_W(fnptr, arg1) \
794 do { \
795 volatile unsigned long _junk; \
796 CALL_FN_W_W(_junk, fnptr, arg1); \
797 } while (0)
798
799#define CALL_FN_v_WW(fnptr, arg1, arg2) \
800 do { \
801 volatile unsigned long _junk; \
802 CALL_FN_W_WW(_junk, fnptr, arg1, arg2); \
803 } while (0)
804
805#define CALL_FN_v_WWW(fnptr, arg1, arg2, arg3) \
806 do { \
807 volatile unsigned long _junk; \
808 CALL_FN_W_WWW(_junk, fnptr, arg1, arg2, arg3); \
809 } while (0)
810
811#define CALL_FN_v_WWWW(fnptr, arg1, arg2, arg3, arg4) \
812 do { \
813 volatile unsigned long _junk; \
814 CALL_FN_W_WWWW(_junk, fnptr, arg1, arg2, arg3, arg4); \
815 } while (0)
816
817#define CALL_FN_v_5W(fnptr, arg1, arg2, arg3, arg4, arg5) \
818 do { \
819 volatile unsigned long _junk; \
820 CALL_FN_W_5W(_junk, fnptr, arg1, arg2, arg3, arg4, arg5); \
821 } while (0)
822
823#define CALL_FN_v_6W(fnptr, arg1, arg2, arg3, arg4, arg5, arg6) \
824 do { \
825 volatile unsigned long _junk; \
826 CALL_FN_W_6W(_junk, fnptr, arg1, arg2, arg3, arg4, arg5, arg6); \
827 } while (0)
828
829#define CALL_FN_v_7W(fnptr, arg1, arg2, arg3, arg4, arg5, arg6, arg7) \
830 do { \
831 volatile unsigned long _junk; \
832 CALL_FN_W_7W(_junk, fnptr, arg1, arg2, arg3, arg4, arg5, arg6, arg7); \
833 } while (0)
834
835/* ------------------------- x86-{linux,darwin} ---------------- */
836
837#if defined(PLAT_x86_linux) || defined(PLAT_x86_darwin)
838
839/* These regs are trashed by the hidden call. No need to mention eax
 840 as gcc can already see that; mentioning it also makes gcc bomb. */
841#define __CALLER_SAVED_REGS /*"eax"*/ "ecx", "edx"
842
843/* Macros to save and align the stack before making a function
844 call and restore it afterwards as gcc may not keep the stack
845 pointer aligned if it doesn't realise calls are being made
846 to other functions. */
847
848#define VALGRIND_ALIGN_STACK \
849 "movl %%esp,%%edi\n\t" \
850 "andl $0xfffffff0,%%esp\n\t"
851#define VALGRIND_RESTORE_STACK "movl %%edi,%%esp\n\t"
852
853/* These CALL_FN_ macros assume that on x86-linux, sizeof(unsigned
854 long) == 4. */
855
856#define CALL_FN_W_v(lval, orig) \
857 do { \
858 volatile OrigFn _orig = (orig); \
859 volatile unsigned long _argvec[1]; \
860 volatile unsigned long _res; \
861 _argvec[0] = (unsigned long)_orig.nraddr; \
862 __asm__ volatile(VALGRIND_ALIGN_STACK "movl (%%eax), %%eax\n\t" /* target->%eax */ \
863 VALGRIND_CALL_NOREDIR_EAX VALGRIND_RESTORE_STACK \
864 : /*out*/ "=a"(_res) \
865 : /*in*/ "a"(&_argvec[0]) \
866 : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "edi"); \
867 lval = (__typeof__(lval))_res; \
868 } while (0)
869
870#define CALL_FN_W_W(lval, orig, arg1) \
871 do { \
872 volatile OrigFn _orig = (orig); \
873 volatile unsigned long _argvec[2]; \
874 volatile unsigned long _res; \
875 _argvec[0] = (unsigned long)_orig.nraddr; \
876 _argvec[1] = (unsigned long)(arg1); \
877 __asm__ volatile(VALGRIND_ALIGN_STACK "subl $12, %%esp\n\t" \
878 "pushl 4(%%eax)\n\t" \
879 "movl (%%eax), %%eax\n\t" /* target->%eax */ \
880 VALGRIND_CALL_NOREDIR_EAX VALGRIND_RESTORE_STACK \
881 : /*out*/ "=a"(_res) \
882 : /*in*/ "a"(&_argvec[0]) \
883 : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "edi"); \
884 lval = (__typeof__(lval))_res; \
885 } while (0)
886
887#define CALL_FN_W_WW(lval, orig, arg1, arg2) \
888 do { \
889 volatile OrigFn _orig = (orig); \
890 volatile unsigned long _argvec[3]; \
891 volatile unsigned long _res; \
892 _argvec[0] = (unsigned long)_orig.nraddr; \
893 _argvec[1] = (unsigned long)(arg1); \
894 _argvec[2] = (unsigned long)(arg2); \
895 __asm__ volatile(VALGRIND_ALIGN_STACK "subl $8, %%esp\n\t" \
896 "pushl 8(%%eax)\n\t" \
897 "pushl 4(%%eax)\n\t" \
898 "movl (%%eax), %%eax\n\t" /* target->%eax */ \
899 VALGRIND_CALL_NOREDIR_EAX VALGRIND_RESTORE_STACK \
900 : /*out*/ "=a"(_res) \
901 : /*in*/ "a"(&_argvec[0]) \
902 : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "edi"); \
903 lval = (__typeof__(lval))_res; \
904 } while (0)
905
906#define CALL_FN_W_WWW(lval, orig, arg1, arg2, arg3) \
907 do { \
908 volatile OrigFn _orig = (orig); \
909 volatile unsigned long _argvec[4]; \
910 volatile unsigned long _res; \
911 _argvec[0] = (unsigned long)_orig.nraddr; \
912 _argvec[1] = (unsigned long)(arg1); \
913 _argvec[2] = (unsigned long)(arg2); \
914 _argvec[3] = (unsigned long)(arg3); \
915 __asm__ volatile(VALGRIND_ALIGN_STACK "subl $4, %%esp\n\t" \
916 "pushl 12(%%eax)\n\t" \
917 "pushl 8(%%eax)\n\t" \
918 "pushl 4(%%eax)\n\t" \
919 "movl (%%eax), %%eax\n\t" /* target->%eax */ \
920 VALGRIND_CALL_NOREDIR_EAX VALGRIND_RESTORE_STACK \
921 : /*out*/ "=a"(_res) \
922 : /*in*/ "a"(&_argvec[0]) \
923 : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "edi"); \
924 lval = (__typeof__(lval))_res; \
925 } while (0)
926
927#define CALL_FN_W_WWWW(lval, orig, arg1, arg2, arg3, arg4) \
928 do { \
929 volatile OrigFn _orig = (orig); \
930 volatile unsigned long _argvec[5]; \
931 volatile unsigned long _res; \
932 _argvec[0] = (unsigned long)_orig.nraddr; \
933 _argvec[1] = (unsigned long)(arg1); \
934 _argvec[2] = (unsigned long)(arg2); \
935 _argvec[3] = (unsigned long)(arg3); \
936 _argvec[4] = (unsigned long)(arg4); \
937 __asm__ volatile(VALGRIND_ALIGN_STACK "pushl 16(%%eax)\n\t" \
938 "pushl 12(%%eax)\n\t" \
939 "pushl 8(%%eax)\n\t" \
940 "pushl 4(%%eax)\n\t" \
941 "movl (%%eax), %%eax\n\t" /* target->%eax */ \
942 VALGRIND_CALL_NOREDIR_EAX VALGRIND_RESTORE_STACK \
943 : /*out*/ "=a"(_res) \
944 : /*in*/ "a"(&_argvec[0]) \
945 : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "edi"); \
946 lval = (__typeof__(lval))_res; \
947 } while (0)
948
949#define CALL_FN_W_5W(lval, orig, arg1, arg2, arg3, arg4, arg5) \
950 do { \
951 volatile OrigFn _orig = (orig); \
952 volatile unsigned long _argvec[6]; \
953 volatile unsigned long _res; \
954 _argvec[0] = (unsigned long)_orig.nraddr; \
955 _argvec[1] = (unsigned long)(arg1); \
956 _argvec[2] = (unsigned long)(arg2); \
957 _argvec[3] = (unsigned long)(arg3); \
958 _argvec[4] = (unsigned long)(arg4); \
959 _argvec[5] = (unsigned long)(arg5); \
960 __asm__ volatile(VALGRIND_ALIGN_STACK "subl $12, %%esp\n\t" \
961 "pushl 20(%%eax)\n\t" \
962 "pushl 16(%%eax)\n\t" \
963 "pushl 12(%%eax)\n\t" \
964 "pushl 8(%%eax)\n\t" \
965 "pushl 4(%%eax)\n\t" \
966 "movl (%%eax), %%eax\n\t" /* target->%eax */ \
967 VALGRIND_CALL_NOREDIR_EAX VALGRIND_RESTORE_STACK \
968 : /*out*/ "=a"(_res) \
969 : /*in*/ "a"(&_argvec[0]) \
970 : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "edi"); \
971 lval = (__typeof__(lval))_res; \
972 } while (0)
973
974#define CALL_FN_W_6W(lval, orig, arg1, arg2, arg3, arg4, arg5, arg6) \
975 do { \
976 volatile OrigFn _orig = (orig); \
977 volatile unsigned long _argvec[7]; \
978 volatile unsigned long _res; \
979 _argvec[0] = (unsigned long)_orig.nraddr; \
980 _argvec[1] = (unsigned long)(arg1); \
981 _argvec[2] = (unsigned long)(arg2); \
982 _argvec[3] = (unsigned long)(arg3); \
983 _argvec[4] = (unsigned long)(arg4); \
984 _argvec[5] = (unsigned long)(arg5); \
985 _argvec[6] = (unsigned long)(arg6); \
986 __asm__ volatile(VALGRIND_ALIGN_STACK "subl $8, %%esp\n\t" \
987 "pushl 24(%%eax)\n\t" \
988 "pushl 20(%%eax)\n\t" \
989 "pushl 16(%%eax)\n\t" \
990 "pushl 12(%%eax)\n\t" \
991 "pushl 8(%%eax)\n\t" \
992 "pushl 4(%%eax)\n\t" \
993 "movl (%%eax), %%eax\n\t" /* target->%eax */ \
994 VALGRIND_CALL_NOREDIR_EAX VALGRIND_RESTORE_STACK \
995 : /*out*/ "=a"(_res) \
996 : /*in*/ "a"(&_argvec[0]) \
997 : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "edi"); \
998 lval = (__typeof__(lval))_res; \
999 } while (0)
1000
1001#define CALL_FN_W_7W(lval, orig, arg1, arg2, arg3, arg4, arg5, arg6, arg7) \
1002 do { \
1003 volatile OrigFn _orig = (orig); \
1004 volatile unsigned long _argvec[8]; \
1005 volatile unsigned long _res; \
1006 _argvec[0] = (unsigned long)_orig.nraddr; \
1007 _argvec[1] = (unsigned long)(arg1); \
1008 _argvec[2] = (unsigned long)(arg2); \
1009 _argvec[3] = (unsigned long)(arg3); \
1010 _argvec[4] = (unsigned long)(arg4); \
1011 _argvec[5] = (unsigned long)(arg5); \
1012 _argvec[6] = (unsigned long)(arg6); \
1013 _argvec[7] = (unsigned long)(arg7); \
1014 __asm__ volatile(VALGRIND_ALIGN_STACK "subl $4, %%esp\n\t" \
1015 "pushl 28(%%eax)\n\t" \
1016 "pushl 24(%%eax)\n\t" \
1017 "pushl 20(%%eax)\n\t" \
1018 "pushl 16(%%eax)\n\t" \
1019 "pushl 12(%%eax)\n\t" \
1020 "pushl 8(%%eax)\n\t" \
1021 "pushl 4(%%eax)\n\t" \
1022 "movl (%%eax), %%eax\n\t" /* target->%eax */ \
1023 VALGRIND_CALL_NOREDIR_EAX VALGRIND_RESTORE_STACK \
1024 : /*out*/ "=a"(_res) \
1025 : /*in*/ "a"(&_argvec[0]) \
1026 : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "edi"); \
1027 lval = (__typeof__(lval))_res; \
1028 } while (0)
1029
1030#define CALL_FN_W_8W(lval, orig, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8) \
1031 do { \
1032 volatile OrigFn _orig = (orig); \
1033 volatile unsigned long _argvec[9]; \
1034 volatile unsigned long _res; \
1035 _argvec[0] = (unsigned long)_orig.nraddr; \
1036 _argvec[1] = (unsigned long)(arg1); \
1037 _argvec[2] = (unsigned long)(arg2); \
1038 _argvec[3] = (unsigned long)(arg3); \
1039 _argvec[4] = (unsigned long)(arg4); \
1040 _argvec[5] = (unsigned long)(arg5); \
1041 _argvec[6] = (unsigned long)(arg6); \
1042 _argvec[7] = (unsigned long)(arg7); \
1043 _argvec[8] = (unsigned long)(arg8); \
1044 __asm__ volatile(VALGRIND_ALIGN_STACK "pushl 32(%%eax)\n\t" \
1045 "pushl 28(%%eax)\n\t" \
1046 "pushl 24(%%eax)\n\t" \
1047 "pushl 20(%%eax)\n\t" \
1048 "pushl 16(%%eax)\n\t" \
1049 "pushl 12(%%eax)\n\t" \
1050 "pushl 8(%%eax)\n\t" \
1051 "pushl 4(%%eax)\n\t" \
1052 "movl (%%eax), %%eax\n\t" /* target->%eax */ \
1053 VALGRIND_CALL_NOREDIR_EAX VALGRIND_RESTORE_STACK \
1054 : /*out*/ "=a"(_res) \
1055 : /*in*/ "a"(&_argvec[0]) \
1056 : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "edi"); \
1057 lval = (__typeof__(lval))_res; \
1058 } while (0)
1059
1060#define CALL_FN_W_9W(lval, orig, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8, arg9) \
1061 do { \
1062 volatile OrigFn _orig = (orig); \
1063 volatile unsigned long _argvec[10]; \
1064 volatile unsigned long _res; \
1065 _argvec[0] = (unsigned long)_orig.nraddr; \
1066 _argvec[1] = (unsigned long)(arg1); \
1067 _argvec[2] = (unsigned long)(arg2); \
1068 _argvec[3] = (unsigned long)(arg3); \
1069 _argvec[4] = (unsigned long)(arg4); \
1070 _argvec[5] = (unsigned long)(arg5); \
1071 _argvec[6] = (unsigned long)(arg6); \
1072 _argvec[7] = (unsigned long)(arg7); \
1073 _argvec[8] = (unsigned long)(arg8); \
1074 _argvec[9] = (unsigned long)(arg9); \
1075 __asm__ volatile(VALGRIND_ALIGN_STACK "subl $12, %%esp\n\t" \
1076 "pushl 36(%%eax)\n\t" \
1077 "pushl 32(%%eax)\n\t" \
1078 "pushl 28(%%eax)\n\t" \
1079 "pushl 24(%%eax)\n\t" \
1080 "pushl 20(%%eax)\n\t" \
1081 "pushl 16(%%eax)\n\t" \
1082 "pushl 12(%%eax)\n\t" \
1083 "pushl 8(%%eax)\n\t" \
1084 "pushl 4(%%eax)\n\t" \
1085 "movl (%%eax), %%eax\n\t" /* target->%eax */ \
1086 VALGRIND_CALL_NOREDIR_EAX VALGRIND_RESTORE_STACK \
1087 : /*out*/ "=a"(_res) \
1088 : /*in*/ "a"(&_argvec[0]) \
1089 : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "edi"); \
1090 lval = (__typeof__(lval))_res; \
1091 } while (0)
1092
1093#define CALL_FN_W_10W(lval, orig, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8, arg9, arg10) \
1094 do { \
1095 volatile OrigFn _orig = (orig); \
1096 volatile unsigned long _argvec[11]; \
1097 volatile unsigned long _res; \
1098 _argvec[0] = (unsigned long)_orig.nraddr; \
1099 _argvec[1] = (unsigned long)(arg1); \
1100 _argvec[2] = (unsigned long)(arg2); \
1101 _argvec[3] = (unsigned long)(arg3); \
1102 _argvec[4] = (unsigned long)(arg4); \
1103 _argvec[5] = (unsigned long)(arg5); \
1104 _argvec[6] = (unsigned long)(arg6); \
1105 _argvec[7] = (unsigned long)(arg7); \
1106 _argvec[8] = (unsigned long)(arg8); \
1107 _argvec[9] = (unsigned long)(arg9); \
1108 _argvec[10] = (unsigned long)(arg10); \
1109 __asm__ volatile(VALGRIND_ALIGN_STACK "subl $8, %%esp\n\t" \
1110 "pushl 40(%%eax)\n\t" \
1111 "pushl 36(%%eax)\n\t" \
1112 "pushl 32(%%eax)\n\t" \
1113 "pushl 28(%%eax)\n\t" \
1114 "pushl 24(%%eax)\n\t" \
1115 "pushl 20(%%eax)\n\t" \
1116 "pushl 16(%%eax)\n\t" \
1117 "pushl 12(%%eax)\n\t" \
1118 "pushl 8(%%eax)\n\t" \
1119 "pushl 4(%%eax)\n\t" \
1120 "movl (%%eax), %%eax\n\t" /* target->%eax */ \
1121 VALGRIND_CALL_NOREDIR_EAX VALGRIND_RESTORE_STACK \
1122 : /*out*/ "=a"(_res) \
1123 : /*in*/ "a"(&_argvec[0]) \
1124 : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "edi"); \
1125 lval = (__typeof__(lval))_res; \
1126 } while (0)
1127
1128#define CALL_FN_W_11W(lval, orig, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8, arg9, arg10, arg11) \
1129 do { \
1130 volatile OrigFn _orig = (orig); \
1131 volatile unsigned long _argvec[12]; \
1132 volatile unsigned long _res; \
1133 _argvec[0] = (unsigned long)_orig.nraddr; \
1134 _argvec[1] = (unsigned long)(arg1); \
1135 _argvec[2] = (unsigned long)(arg2); \
1136 _argvec[3] = (unsigned long)(arg3); \
1137 _argvec[4] = (unsigned long)(arg4); \
1138 _argvec[5] = (unsigned long)(arg5); \
1139 _argvec[6] = (unsigned long)(arg6); \
1140 _argvec[7] = (unsigned long)(arg7); \
1141 _argvec[8] = (unsigned long)(arg8); \
1142 _argvec[9] = (unsigned long)(arg9); \
1143 _argvec[10] = (unsigned long)(arg10); \
1144 _argvec[11] = (unsigned long)(arg11); \
1145 __asm__ volatile(VALGRIND_ALIGN_STACK "subl $4, %%esp\n\t" \
1146 "pushl 44(%%eax)\n\t" \
1147 "pushl 40(%%eax)\n\t" \
1148 "pushl 36(%%eax)\n\t" \
1149 "pushl 32(%%eax)\n\t" \
1150 "pushl 28(%%eax)\n\t" \
1151 "pushl 24(%%eax)\n\t" \
1152 "pushl 20(%%eax)\n\t" \
1153 "pushl 16(%%eax)\n\t" \
1154 "pushl 12(%%eax)\n\t" \
1155 "pushl 8(%%eax)\n\t" \
1156 "pushl 4(%%eax)\n\t" \
1157 "movl (%%eax), %%eax\n\t" /* target->%eax */ \
1158 VALGRIND_CALL_NOREDIR_EAX VALGRIND_RESTORE_STACK \
1159 : /*out*/ "=a"(_res) \
1160 : /*in*/ "a"(&_argvec[0]) \
1161 : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "edi"); \
1162 lval = (__typeof__(lval))_res; \
1163 } while (0)
1164
1165#define CALL_FN_W_12W(lval, orig, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8, arg9, arg10, arg11, arg12) \
1166 do { \
1167 volatile OrigFn _orig = (orig); \
1168 volatile unsigned long _argvec[13]; \
1169 volatile unsigned long _res; \
1170 _argvec[0] = (unsigned long)_orig.nraddr; \
1171 _argvec[1] = (unsigned long)(arg1); \
1172 _argvec[2] = (unsigned long)(arg2); \
1173 _argvec[3] = (unsigned long)(arg3); \
1174 _argvec[4] = (unsigned long)(arg4); \
1175 _argvec[5] = (unsigned long)(arg5); \
1176 _argvec[6] = (unsigned long)(arg6); \
1177 _argvec[7] = (unsigned long)(arg7); \
1178 _argvec[8] = (unsigned long)(arg8); \
1179 _argvec[9] = (unsigned long)(arg9); \
1180 _argvec[10] = (unsigned long)(arg10); \
1181 _argvec[11] = (unsigned long)(arg11); \
1182 _argvec[12] = (unsigned long)(arg12); \
1183 __asm__ volatile(VALGRIND_ALIGN_STACK "pushl 48(%%eax)\n\t" \
1184 "pushl 44(%%eax)\n\t" \
1185 "pushl 40(%%eax)\n\t" \
1186 "pushl 36(%%eax)\n\t" \
1187 "pushl 32(%%eax)\n\t" \
1188 "pushl 28(%%eax)\n\t" \
1189 "pushl 24(%%eax)\n\t" \
1190 "pushl 20(%%eax)\n\t" \
1191 "pushl 16(%%eax)\n\t" \
1192 "pushl 12(%%eax)\n\t" \
1193 "pushl 8(%%eax)\n\t" \
1194 "pushl 4(%%eax)\n\t" \
1195 "movl (%%eax), %%eax\n\t" /* target->%eax */ \
1196 VALGRIND_CALL_NOREDIR_EAX VALGRIND_RESTORE_STACK \
1197 : /*out*/ "=a"(_res) \
1198 : /*in*/ "a"(&_argvec[0]) \
1199 : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "edi"); \
1200 lval = (__typeof__(lval))_res; \
1201 } while (0)
1202
1203#endif /* PLAT_x86_linux || PLAT_x86_darwin */
1204
1205/* ------------------------ amd64-{linux,darwin} --------------- */
1206
1207#if defined(PLAT_amd64_linux) || defined(PLAT_amd64_darwin)
1208
1209/* ARGREGS: rdi rsi rdx rcx r8 r9 (the rest on stack in R-to-L order) */
1210
1211/* These regs are trashed by the hidden call. */
1212#define __CALLER_SAVED_REGS /*"rax",*/ "rcx", "rdx", "rsi", "rdi", "r8", "r9", "r10", "r11"
1213
1214/* This is all pretty complex. It's so as to make stack unwinding
1215 work reliably. See bug 243270. The basic problem is the sub and
1216 add of 128 of %rsp in all of the following macros. If gcc believes
1217 the CFA is in %rsp, then unwinding may fail, because what's at the
1218 CFA is not what gcc "expected" when it constructs the CFIs for the
1219 places where the macros are instantiated.
1220
1221 But we can't just add a CFI annotation to increase the CFA offset
1222 by 128, to match the sub of 128 from %rsp, because we don't know
1223 whether gcc has chosen %rsp as the CFA at that point, or whether it
1224 has chosen some other register (eg, %rbp). In the latter case,
1225 adding a CFI annotation to change the CFA offset is simply wrong.
1226
1227 So the solution is to get hold of the CFA using
1228 __builtin_dwarf_cfa(), put it in a known register, and add a
1229 CFI annotation to say what the register is. We choose %rbp for
1230 this (perhaps perversely), because:
1231
1232 (1) %rbp is already subject to unwinding. If a new register was
1233 chosen then the unwinder would have to unwind it in all stack
1234 traces, which is expensive, and
1235
1236 (2) %rbp is already subject to precise exception updates in the
1237 JIT. If a new register was chosen, we'd have to have precise
1238 exceptions for it too, which reduces performance of the
1239 generated code.
1240
1241 However .. one extra complication. We can't just whack the result
1242 of __builtin_dwarf_cfa() into %rbp and then add %rbp to the
1243 list of trashed registers at the end of the inline assembly
1244 fragments; gcc won't allow %rbp to appear in that list. Hence
1245 instead we need to stash %rbp in %r15 for the duration of the asm,
1246 and say that %r15 is trashed instead. gcc seems happy to go with
1247 that.
1248
1249 Oh .. and this all needs to be conditionalised so that it is
1250 unchanged from before this commit, when compiled with older gccs
1251 that don't support __builtin_dwarf_cfa. Furthermore, since
1252 this header file is freestanding, it has to be independent of
1253 config.h, and so the following conditionalisation cannot depend on
1254 configure time checks.
1255
1256 Although it's not clear from
1257 'defined(__GNUC__) && defined(__GCC_HAVE_DWARF2_CFI_ASM)',
1258 this expression excludes Darwin.
1259 .cfi directives in Darwin assembly appear to be completely
1260 different and I haven't investigated how they work.
1261
1262 For even more entertainment value, note we have to use the
1263 completely undocumented __builtin_dwarf_cfa(), which appears to
1264 really compute the CFA, whereas __builtin_frame_address(0) claims
1265 to but actually doesn't. See
1266 https://bugs.kde.org/show_bug.cgi?id=243270#c47
1267*/
1268#if defined(__GNUC__) && defined(__GCC_HAVE_DWARF2_CFI_ASM)
1269#define __FRAME_POINTER , "r"(__builtin_dwarf_cfa())
1270#define VALGRIND_CFI_PROLOGUE \
1271 "movq %%rbp, %%r15\n\t" \
1272 "movq %2, %%rbp\n\t" \
1273 ".cfi_remember_state\n\t" \
1274 ".cfi_def_cfa rbp, 0\n\t"
1275#define VALGRIND_CFI_EPILOGUE \
1276 "movq %%r15, %%rbp\n\t" \
1277 ".cfi_restore_state\n\t"
1278#else
1279#define __FRAME_POINTER
1280#define VALGRIND_CFI_PROLOGUE
1281#define VALGRIND_CFI_EPILOGUE
1282#endif
1283
1284/* Macros to save and align the stack before making a function
1285 call and restore it afterwards as gcc may not keep the stack
1286 pointer aligned if it doesn't realise calls are being made
1287 to other functions. */
1288
1289#define VALGRIND_ALIGN_STACK \
1290 "movq %%rsp,%%r14\n\t" \
1291 "andq $0xfffffffffffffff0,%%rsp\n\t"
1292#define VALGRIND_RESTORE_STACK "movq %%r14,%%rsp\n\t"
1293
1294/* These CALL_FN_ macros assume that on amd64-linux, sizeof(unsigned
1295 long) == 8. */
1296
1297/* NB 9 Sept 07. There is a nasty kludge here in all these CALL_FN_
1298 macros. In order not to trash the stack redzone, we need to drop
1299 %rsp by 128 before the hidden call, and restore afterwards. The
1300 nastiness is that it is only by luck that the stack still appears
1301 to be unwindable during the hidden call - since then the behaviour
1302 of any routine using this macro does not match what the CFI data
1303 says. Sigh.
1304
1305 Why is this important? Imagine that a wrapper has a stack
1306 allocated local, and passes to the hidden call, a pointer to it.
1307 Because gcc does not know about the hidden call, it may allocate
1308 that local in the redzone. Unfortunately the hidden call may then
1309 trash it before it comes to use it. So we must step clear of the
1310 redzone, for the duration of the hidden call, to make it safe.
1311
1312 Probably the same problem afflicts the other redzone-style ABIs too
1313 (ppc64-linux); but for those, the stack is
1314 self describing (none of this CFI nonsense) so at least messing
1315 with the stack pointer doesn't give a danger of non-unwindable
1316 stack. */
1317
1318#define CALL_FN_W_v(lval, orig) \
1319 do { \
1320 volatile OrigFn _orig = (orig); \
1321 volatile unsigned long _argvec[1]; \
1322 volatile unsigned long _res; \
1323 _argvec[0] = (unsigned long)_orig.nraddr; \
1324 __asm__ volatile(VALGRIND_CFI_PROLOGUE VALGRIND_ALIGN_STACK "subq $128,%%rsp\n\t" \
1325 "movq (%%rax), %%rax\n\t" /* target->%rax */ \
1326 VALGRIND_CALL_NOREDIR_RAX VALGRIND_RESTORE_STACK VALGRIND_CFI_EPILOGUE \
1327 : /*out*/ "=a"(_res) \
1328 : /*in*/ "a"(&_argvec[0])__FRAME_POINTER \
1329 : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r14", "r15"); \
1330 lval = (__typeof__(lval))_res; \
1331 } while (0)
1332
1333#define CALL_FN_W_W(lval, orig, arg1) \
1334 do { \
1335 volatile OrigFn _orig = (orig); \
1336 volatile unsigned long _argvec[2]; \
1337 volatile unsigned long _res; \
1338 _argvec[0] = (unsigned long)_orig.nraddr; \
1339 _argvec[1] = (unsigned long)(arg1); \
1340 __asm__ volatile(VALGRIND_CFI_PROLOGUE VALGRIND_ALIGN_STACK "subq $128,%%rsp\n\t" \
1341 "movq 8(%%rax), %%rdi\n\t" \
1342 "movq (%%rax), %%rax\n\t" /* target->%rax */ \
1343 VALGRIND_CALL_NOREDIR_RAX VALGRIND_RESTORE_STACK VALGRIND_CFI_EPILOGUE \
1344 : /*out*/ "=a"(_res) \
1345 : /*in*/ "a"(&_argvec[0])__FRAME_POINTER \
1346 : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r14", "r15"); \
1347 lval = (__typeof__(lval))_res; \
1348 } while (0)
1349
1350#define CALL_FN_W_WW(lval, orig, arg1, arg2) \
1351 do { \
1352 volatile OrigFn _orig = (orig); \
1353 volatile unsigned long _argvec[3]; \
1354 volatile unsigned long _res; \
1355 _argvec[0] = (unsigned long)_orig.nraddr; \
1356 _argvec[1] = (unsigned long)(arg1); \
1357 _argvec[2] = (unsigned long)(arg2); \
1358 __asm__ volatile(VALGRIND_CFI_PROLOGUE VALGRIND_ALIGN_STACK "subq $128,%%rsp\n\t" \
1359 "movq 16(%%rax), %%rsi\n\t" \
1360 "movq 8(%%rax), %%rdi\n\t" \
1361 "movq (%%rax), %%rax\n\t" /* target->%rax */ \
1362 VALGRIND_CALL_NOREDIR_RAX VALGRIND_RESTORE_STACK VALGRIND_CFI_EPILOGUE \
1363 : /*out*/ "=a"(_res) \
1364 : /*in*/ "a"(&_argvec[0])__FRAME_POINTER \
1365 : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r14", "r15"); \
1366 lval = (__typeof__(lval))_res; \
1367 } while (0)
1368
1369#define CALL_FN_W_WWW(lval, orig, arg1, arg2, arg3) \
1370 do { \
1371 volatile OrigFn _orig = (orig); \
1372 volatile unsigned long _argvec[4]; \
1373 volatile unsigned long _res; \
1374 _argvec[0] = (unsigned long)_orig.nraddr; \
1375 _argvec[1] = (unsigned long)(arg1); \
1376 _argvec[2] = (unsigned long)(arg2); \
1377 _argvec[3] = (unsigned long)(arg3); \
1378 __asm__ volatile(VALGRIND_CFI_PROLOGUE VALGRIND_ALIGN_STACK "subq $128,%%rsp\n\t" \
1379 "movq 24(%%rax), %%rdx\n\t" \
1380 "movq 16(%%rax), %%rsi\n\t" \
1381 "movq 8(%%rax), %%rdi\n\t" \
1382 "movq (%%rax), %%rax\n\t" /* target->%rax */ \
1383 VALGRIND_CALL_NOREDIR_RAX VALGRIND_RESTORE_STACK VALGRIND_CFI_EPILOGUE \
1384 : /*out*/ "=a"(_res) \
1385 : /*in*/ "a"(&_argvec[0])__FRAME_POINTER \
1386 : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r14", "r15"); \
1387 lval = (__typeof__(lval))_res; \
1388 } while (0)
1389
1390#define CALL_FN_W_WWWW(lval, orig, arg1, arg2, arg3, arg4) \
1391 do { \
1392 volatile OrigFn _orig = (orig); \
1393 volatile unsigned long _argvec[5]; \
1394 volatile unsigned long _res; \
1395 _argvec[0] = (unsigned long)_orig.nraddr; \
1396 _argvec[1] = (unsigned long)(arg1); \
1397 _argvec[2] = (unsigned long)(arg2); \
1398 _argvec[3] = (unsigned long)(arg3); \
1399 _argvec[4] = (unsigned long)(arg4); \
1400 __asm__ volatile(VALGRIND_CFI_PROLOGUE VALGRIND_ALIGN_STACK "subq $128,%%rsp\n\t" \
1401 "movq 32(%%rax), %%rcx\n\t" \
1402 "movq 24(%%rax), %%rdx\n\t" \
1403 "movq 16(%%rax), %%rsi\n\t" \
1404 "movq 8(%%rax), %%rdi\n\t" \
1405 "movq (%%rax), %%rax\n\t" /* target->%rax */ \
1406 VALGRIND_CALL_NOREDIR_RAX VALGRIND_RESTORE_STACK VALGRIND_CFI_EPILOGUE \
1407 : /*out*/ "=a"(_res) \
1408 : /*in*/ "a"(&_argvec[0])__FRAME_POINTER \
1409 : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r14", "r15"); \
1410 lval = (__typeof__(lval))_res; \
1411 } while (0)
1412
1413#define CALL_FN_W_5W(lval, orig, arg1, arg2, arg3, arg4, arg5) \
1414 do { \
1415 volatile OrigFn _orig = (orig); \
1416 volatile unsigned long _argvec[6]; \
1417 volatile unsigned long _res; \
1418 _argvec[0] = (unsigned long)_orig.nraddr; \
1419 _argvec[1] = (unsigned long)(arg1); \
1420 _argvec[2] = (unsigned long)(arg2); \
1421 _argvec[3] = (unsigned long)(arg3); \
1422 _argvec[4] = (unsigned long)(arg4); \
1423 _argvec[5] = (unsigned long)(arg5); \
1424 __asm__ volatile(VALGRIND_CFI_PROLOGUE VALGRIND_ALIGN_STACK "subq $128,%%rsp\n\t" \
1425 "movq 40(%%rax), %%r8\n\t" \
1426 "movq 32(%%rax), %%rcx\n\t" \
1427 "movq 24(%%rax), %%rdx\n\t" \
1428 "movq 16(%%rax), %%rsi\n\t" \
1429 "movq 8(%%rax), %%rdi\n\t" \
1430 "movq (%%rax), %%rax\n\t" /* target->%rax */ \
1431 VALGRIND_CALL_NOREDIR_RAX VALGRIND_RESTORE_STACK VALGRIND_CFI_EPILOGUE \
1432 : /*out*/ "=a"(_res) \
1433 : /*in*/ "a"(&_argvec[0])__FRAME_POINTER \
1434 : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r14", "r15"); \
1435 lval = (__typeof__(lval))_res; \
1436 } while (0)
1437
1438#define CALL_FN_W_6W(lval, orig, arg1, arg2, arg3, arg4, arg5, arg6) \
1439 do { \
1440 volatile OrigFn _orig = (orig); \
1441 volatile unsigned long _argvec[7]; \
1442 volatile unsigned long _res; \
1443 _argvec[0] = (unsigned long)_orig.nraddr; \
1444 _argvec[1] = (unsigned long)(arg1); \
1445 _argvec[2] = (unsigned long)(arg2); \
1446 _argvec[3] = (unsigned long)(arg3); \
1447 _argvec[4] = (unsigned long)(arg4); \
1448 _argvec[5] = (unsigned long)(arg5); \
1449 _argvec[6] = (unsigned long)(arg6); \
1450 __asm__ volatile(VALGRIND_CFI_PROLOGUE VALGRIND_ALIGN_STACK "subq $128,%%rsp\n\t" \
1451 "movq 48(%%rax), %%r9\n\t" \
1452 "movq 40(%%rax), %%r8\n\t" \
1453 "movq 32(%%rax), %%rcx\n\t" \
1454 "movq 24(%%rax), %%rdx\n\t" \
1455 "movq 16(%%rax), %%rsi\n\t" \
1456 "movq 8(%%rax), %%rdi\n\t" \
1457 "movq (%%rax), %%rax\n\t" /* target->%rax */ \
1458 VALGRIND_CALL_NOREDIR_RAX VALGRIND_RESTORE_STACK VALGRIND_CFI_EPILOGUE \
1459 : /*out*/ "=a"(_res) \
1460 : /*in*/ "a"(&_argvec[0])__FRAME_POINTER \
1461 : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r14", "r15"); \
1462 lval = (__typeof__(lval))_res; \
1463 } while (0)
1464
1465#define CALL_FN_W_7W(lval, orig, arg1, arg2, arg3, arg4, arg5, arg6, arg7) \
1466 do { \
1467 volatile OrigFn _orig = (orig); \
1468 volatile unsigned long _argvec[8]; \
1469 volatile unsigned long _res; \
1470 _argvec[0] = (unsigned long)_orig.nraddr; \
1471 _argvec[1] = (unsigned long)(arg1); \
1472 _argvec[2] = (unsigned long)(arg2); \
1473 _argvec[3] = (unsigned long)(arg3); \
1474 _argvec[4] = (unsigned long)(arg4); \
1475 _argvec[5] = (unsigned long)(arg5); \
1476 _argvec[6] = (unsigned long)(arg6); \
1477 _argvec[7] = (unsigned long)(arg7); \
1478 __asm__ volatile(VALGRIND_CFI_PROLOGUE VALGRIND_ALIGN_STACK "subq $136,%%rsp\n\t" \
1479 "pushq 56(%%rax)\n\t" \
1480 "movq 48(%%rax), %%r9\n\t" \
1481 "movq 40(%%rax), %%r8\n\t" \
1482 "movq 32(%%rax), %%rcx\n\t" \
1483 "movq 24(%%rax), %%rdx\n\t" \
1484 "movq 16(%%rax), %%rsi\n\t" \
1485 "movq 8(%%rax), %%rdi\n\t" \
1486 "movq (%%rax), %%rax\n\t" /* target->%rax */ \
1487 VALGRIND_CALL_NOREDIR_RAX VALGRIND_RESTORE_STACK VALGRIND_CFI_EPILOGUE \
1488 : /*out*/ "=a"(_res) \
1489 : /*in*/ "a"(&_argvec[0])__FRAME_POINTER \
1490 : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r14", "r15"); \
1491 lval = (__typeof__(lval))_res; \
1492 } while (0)
1493
1494#define CALL_FN_W_8W(lval, orig, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8) \
1495 do { \
1496 volatile OrigFn _orig = (orig); \
1497 volatile unsigned long _argvec[9]; \
1498 volatile unsigned long _res; \
1499 _argvec[0] = (unsigned long)_orig.nraddr; \
1500 _argvec[1] = (unsigned long)(arg1); \
1501 _argvec[2] = (unsigned long)(arg2); \
1502 _argvec[3] = (unsigned long)(arg3); \
1503 _argvec[4] = (unsigned long)(arg4); \
1504 _argvec[5] = (unsigned long)(arg5); \
1505 _argvec[6] = (unsigned long)(arg6); \
1506 _argvec[7] = (unsigned long)(arg7); \
1507 _argvec[8] = (unsigned long)(arg8); \
1508 __asm__ volatile(VALGRIND_CFI_PROLOGUE VALGRIND_ALIGN_STACK "subq $128,%%rsp\n\t" \
1509 "pushq 64(%%rax)\n\t" \
1510 "pushq 56(%%rax)\n\t" \
1511 "movq 48(%%rax), %%r9\n\t" \
1512 "movq 40(%%rax), %%r8\n\t" \
1513 "movq 32(%%rax), %%rcx\n\t" \
1514 "movq 24(%%rax), %%rdx\n\t" \
1515 "movq 16(%%rax), %%rsi\n\t" \
1516 "movq 8(%%rax), %%rdi\n\t" \
1517 "movq (%%rax), %%rax\n\t" /* target->%rax */ \
1518 VALGRIND_CALL_NOREDIR_RAX VALGRIND_RESTORE_STACK VALGRIND_CFI_EPILOGUE \
1519 : /*out*/ "=a"(_res) \
1520 : /*in*/ "a"(&_argvec[0])__FRAME_POINTER \
1521 : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r14", "r15"); \
1522 lval = (__typeof__(lval))_res; \
1523 } while (0)
1524
1525#define CALL_FN_W_9W(lval, orig, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8, arg9) \
1526 do { \
1527 volatile OrigFn _orig = (orig); \
1528 volatile unsigned long _argvec[10]; \
1529 volatile unsigned long _res; \
1530 _argvec[0] = (unsigned long)_orig.nraddr; \
1531 _argvec[1] = (unsigned long)(arg1); \
1532 _argvec[2] = (unsigned long)(arg2); \
1533 _argvec[3] = (unsigned long)(arg3); \
1534 _argvec[4] = (unsigned long)(arg4); \
1535 _argvec[5] = (unsigned long)(arg5); \
1536 _argvec[6] = (unsigned long)(arg6); \
1537 _argvec[7] = (unsigned long)(arg7); \
1538 _argvec[8] = (unsigned long)(arg8); \
1539 _argvec[9] = (unsigned long)(arg9); \
1540 __asm__ volatile(VALGRIND_CFI_PROLOGUE VALGRIND_ALIGN_STACK "subq $136,%%rsp\n\t" \
1541 "pushq 72(%%rax)\n\t" \
1542 "pushq 64(%%rax)\n\t" \
1543 "pushq 56(%%rax)\n\t" \
1544 "movq 48(%%rax), %%r9\n\t" \
1545 "movq 40(%%rax), %%r8\n\t" \
1546 "movq 32(%%rax), %%rcx\n\t" \
1547 "movq 24(%%rax), %%rdx\n\t" \
1548 "movq 16(%%rax), %%rsi\n\t" \
1549 "movq 8(%%rax), %%rdi\n\t" \
1550 "movq (%%rax), %%rax\n\t" /* target->%rax */ \
1551 VALGRIND_CALL_NOREDIR_RAX VALGRIND_RESTORE_STACK VALGRIND_CFI_EPILOGUE \
1552 : /*out*/ "=a"(_res) \
1553 : /*in*/ "a"(&_argvec[0])__FRAME_POINTER \
1554 : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r14", "r15"); \
1555 lval = (__typeof__(lval))_res; \
1556 } while (0)
1557
1558#define CALL_FN_W_10W(lval, orig, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8, arg9, arg10) \
1559 do { \
1560 volatile OrigFn _orig = (orig); \
1561 volatile unsigned long _argvec[11]; \
1562 volatile unsigned long _res; \
1563 _argvec[0] = (unsigned long)_orig.nraddr; \
1564 _argvec[1] = (unsigned long)(arg1); \
1565 _argvec[2] = (unsigned long)(arg2); \
1566 _argvec[3] = (unsigned long)(arg3); \
1567 _argvec[4] = (unsigned long)(arg4); \
1568 _argvec[5] = (unsigned long)(arg5); \
1569 _argvec[6] = (unsigned long)(arg6); \
1570 _argvec[7] = (unsigned long)(arg7); \
1571 _argvec[8] = (unsigned long)(arg8); \
1572 _argvec[9] = (unsigned long)(arg9); \
1573 _argvec[10] = (unsigned long)(arg10); \
1574 __asm__ volatile(VALGRIND_CFI_PROLOGUE VALGRIND_ALIGN_STACK "subq $128,%%rsp\n\t" \
1575 "pushq 80(%%rax)\n\t" \
1576 "pushq 72(%%rax)\n\t" \
1577 "pushq 64(%%rax)\n\t" \
1578 "pushq 56(%%rax)\n\t" \
1579 "movq 48(%%rax), %%r9\n\t" \
1580 "movq 40(%%rax), %%r8\n\t" \
1581 "movq 32(%%rax), %%rcx\n\t" \
1582 "movq 24(%%rax), %%rdx\n\t" \
1583 "movq 16(%%rax), %%rsi\n\t" \
1584 "movq 8(%%rax), %%rdi\n\t" \
1585 "movq (%%rax), %%rax\n\t" /* target->%rax */ \
1586 VALGRIND_CALL_NOREDIR_RAX VALGRIND_RESTORE_STACK VALGRIND_CFI_EPILOGUE \
1587 : /*out*/ "=a"(_res) \
1588 : /*in*/ "a"(&_argvec[0])__FRAME_POINTER \
1589 : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r14", "r15"); \
1590 lval = (__typeof__(lval))_res; \
1591 } while (0)
1592
1593#define CALL_FN_W_11W(lval, orig, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8, arg9, arg10, arg11) \
1594 do { \
1595 volatile OrigFn _orig = (orig); \
1596 volatile unsigned long _argvec[12]; \
1597 volatile unsigned long _res; \
1598 _argvec[0] = (unsigned long)_orig.nraddr; \
1599 _argvec[1] = (unsigned long)(arg1); \
1600 _argvec[2] = (unsigned long)(arg2); \
1601 _argvec[3] = (unsigned long)(arg3); \
1602 _argvec[4] = (unsigned long)(arg4); \
1603 _argvec[5] = (unsigned long)(arg5); \
1604 _argvec[6] = (unsigned long)(arg6); \
1605 _argvec[7] = (unsigned long)(arg7); \
1606 _argvec[8] = (unsigned long)(arg8); \
1607 _argvec[9] = (unsigned long)(arg9); \
1608 _argvec[10] = (unsigned long)(arg10); \
1609 _argvec[11] = (unsigned long)(arg11); \
1610 __asm__ volatile(VALGRIND_CFI_PROLOGUE VALGRIND_ALIGN_STACK "subq $136,%%rsp\n\t" \
1611 "pushq 88(%%rax)\n\t" \
1612 "pushq 80(%%rax)\n\t" \
1613 "pushq 72(%%rax)\n\t" \
1614 "pushq 64(%%rax)\n\t" \
1615 "pushq 56(%%rax)\n\t" \
1616 "movq 48(%%rax), %%r9\n\t" \
1617 "movq 40(%%rax), %%r8\n\t" \
1618 "movq 32(%%rax), %%rcx\n\t" \
1619 "movq 24(%%rax), %%rdx\n\t" \
1620 "movq 16(%%rax), %%rsi\n\t" \
1621 "movq 8(%%rax), %%rdi\n\t" \
1622 "movq (%%rax), %%rax\n\t" /* target->%rax */ \
1623 VALGRIND_CALL_NOREDIR_RAX VALGRIND_RESTORE_STACK VALGRIND_CFI_EPILOGUE \
1624 : /*out*/ "=a"(_res) \
1625 : /*in*/ "a"(&_argvec[0])__FRAME_POINTER \
1626 : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r14", "r15"); \
1627 lval = (__typeof__(lval))_res; \
1628 } while (0)
1629
1630#define CALL_FN_W_12W(lval, orig, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8, arg9, arg10, arg11, arg12) \
1631 do { \
1632 volatile OrigFn _orig = (orig); \
1633 volatile unsigned long _argvec[13]; \
1634 volatile unsigned long _res; \
1635 _argvec[0] = (unsigned long)_orig.nraddr; \
1636 _argvec[1] = (unsigned long)(arg1); \
1637 _argvec[2] = (unsigned long)(arg2); \
1638 _argvec[3] = (unsigned long)(arg3); \
1639 _argvec[4] = (unsigned long)(arg4); \
1640 _argvec[5] = (unsigned long)(arg5); \
1641 _argvec[6] = (unsigned long)(arg6); \
1642 _argvec[7] = (unsigned long)(arg7); \
1643 _argvec[8] = (unsigned long)(arg8); \
1644 _argvec[9] = (unsigned long)(arg9); \
1645 _argvec[10] = (unsigned long)(arg10); \
1646 _argvec[11] = (unsigned long)(arg11); \
1647 _argvec[12] = (unsigned long)(arg12); \
1648 __asm__ volatile(VALGRIND_CFI_PROLOGUE VALGRIND_ALIGN_STACK "subq $128,%%rsp\n\t" \
1649 "pushq 96(%%rax)\n\t" \
1650 "pushq 88(%%rax)\n\t" \
1651 "pushq 80(%%rax)\n\t" \
1652 "pushq 72(%%rax)\n\t" \
1653 "pushq 64(%%rax)\n\t" \
1654 "pushq 56(%%rax)\n\t" \
1655 "movq 48(%%rax), %%r9\n\t" \
1656 "movq 40(%%rax), %%r8\n\t" \
1657 "movq 32(%%rax), %%rcx\n\t" \
1658 "movq 24(%%rax), %%rdx\n\t" \
1659 "movq 16(%%rax), %%rsi\n\t" \
1660 "movq 8(%%rax), %%rdi\n\t" \
1661 "movq (%%rax), %%rax\n\t" /* target->%rax */ \
1662 VALGRIND_CALL_NOREDIR_RAX VALGRIND_RESTORE_STACK VALGRIND_CFI_EPILOGUE \
1663 : /*out*/ "=a"(_res) \
1664 : /*in*/ "a"(&_argvec[0])__FRAME_POINTER \
1665 : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r14", "r15"); \
1666 lval = (__typeof__(lval))_res; \
1667 } while (0)
1668
1669#endif /* PLAT_amd64_linux || PLAT_amd64_darwin */
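
/* Usage sketch (added for clarity; not in upstream valgrind.h): the
   CALL_FN_W_* macros are meant to be used inside Valgrind function
   wrappers, together with OrigFn and VALGRIND_GET_ORIG_FN defined earlier
   in this header. A minimal wrapper for a hypothetical two-argument
   function "foo" in an object with an empty soname might look like:

   int I_WRAP_SONAME_FN_ZU(NONE, foo)(int x, int y)
   {
      int    result;
      OrigFn fn;
      VALGRIND_GET_ORIG_FN(fn);       // fetch the address of the real foo
      CALL_FN_W_WW(result, fn, x, y); // call it, bypassing redirection
      return result;
   }
*/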
1670
1671/* ------------------------ ppc32-linux ------------------------ */
1672
1673#if defined(PLAT_ppc32_linux)
1674
1675/* This is useful for finding out how arguments beyond the first eight
      are passed on the stack:
1676
1677 extern int f9 ( int,int,int,int,int,int,int,int,int );
1678 extern int f10 ( int,int,int,int,int,int,int,int,int,int );
1679 extern int f11 ( int,int,int,int,int,int,int,int,int,int,int );
1680 extern int f12 ( int,int,int,int,int,int,int,int,int,int,int,int );
1681
1682 int g9 ( void ) {
1683 return f9(11,22,33,44,55,66,77,88,99);
1684 }
1685 int g10 ( void ) {
1686 return f10(11,22,33,44,55,66,77,88,99,110);
1687 }
1688 int g11 ( void ) {
1689 return f11(11,22,33,44,55,66,77,88,99,110,121);
1690 }
1691 int g12 ( void ) {
1692 return f12(11,22,33,44,55,66,77,88,99,110,121,132);
1693 }
1694*/
1695
1696/* ARGREGS: r3 r4 r5 r6 r7 r8 r9 r10 (the rest are passed on the stack) */
1697
1698/* These regs are trashed by the hidden call. */
1699#define __CALLER_SAVED_REGS \
1700 "lr", "ctr", "xer", "cr0", "cr1", "cr2", "cr3", "cr4", "cr5", "cr6", "cr7", "r0", "r2", "r3", "r4", "r5", "r6", \
1701 "r7", "r8", "r9", "r10", "r11", "r12", "r13"
1702
1703/* Macros to save and align the stack before making a function
1704 call, and to restore it afterwards, as gcc may not keep the
1705 stack pointer aligned if it doesn't realise that calls are
1706 being made to other functions. */
1707
1708#define VALGRIND_ALIGN_STACK \
1709 "mr 28,1\n\t" \
1710 "rlwinm 1,1,0,0,27\n\t"
1711#define VALGRIND_RESTORE_STACK "mr 1,28\n\t"
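
/* Note (added for clarity): "rlwinm 1,1,0,0,27" keeps bits 0..27 of r1
   (big-endian bit numbering) and clears bits 28..31, i.e. it rounds the
   stack pointer down to a 16-byte boundary, the equivalent of
   r1 &= ~0xFUL; the original value is preserved in r28. */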
1712
1713/* These CALL_FN_ macros assume that on ppc32-linux,
1714 sizeof(unsigned long) == 4. */
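
/* Layout note (added for clarity): for nine or more arguments the macros
   below grow the stack frame and store the excess arguments starting at
   8(r1), which is where the SysV ppc32 ABI places the parameter save
   area (0(r1) holds the back chain and 4(r1) the LR save word). */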
1715
1716#define CALL_FN_W_v(lval, orig) \
1717 do { \
1718 volatile OrigFn _orig = (orig); \
1719 volatile unsigned long _argvec[1]; \
1720 volatile unsigned long _res; \
1721 _argvec[0] = (unsigned long)_orig.nraddr; \
1722 __asm__ volatile(VALGRIND_ALIGN_STACK "mr 11,%1\n\t" \
1723 "lwz 11,0(11)\n\t" /* target->r11 */ \
1724 VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 VALGRIND_RESTORE_STACK "mr %0,3" \
1725 : /*out*/ "=r"(_res) \
1726 : /*in*/ "r"(&_argvec[0]) \
1727 : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28"); \
1728 lval = (__typeof__(lval))_res; \
1729 } while (0)
1730
1731#define CALL_FN_W_W(lval, orig, arg1) \
1732 do { \
1733 volatile OrigFn _orig = (orig); \
1734 volatile unsigned long _argvec[2]; \
1735 volatile unsigned long _res; \
1736 _argvec[0] = (unsigned long)_orig.nraddr; \
1737 _argvec[1] = (unsigned long)arg1; \
1738 __asm__ volatile(VALGRIND_ALIGN_STACK "mr 11,%1\n\t" \
1739 "lwz 3,4(11)\n\t" /* arg1->r3 */ \
1740 "lwz 11,0(11)\n\t" /* target->r11 */ \
1741 VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 VALGRIND_RESTORE_STACK "mr %0,3" \
1742 : /*out*/ "=r"(_res) \
1743 : /*in*/ "r"(&_argvec[0]) \
1744 : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28"); \
1745 lval = (__typeof__(lval))_res; \
1746 } while (0)
1747
1748#define CALL_FN_W_WW(lval, orig, arg1, arg2) \
1749 do { \
1750 volatile OrigFn _orig = (orig); \
1751 volatile unsigned long _argvec[3]; \
1752 volatile unsigned long _res; \
1753 _argvec[0] = (unsigned long)_orig.nraddr; \
1754 _argvec[1] = (unsigned long)arg1; \
1755 _argvec[2] = (unsigned long)arg2; \
1756 __asm__ volatile(VALGRIND_ALIGN_STACK "mr 11,%1\n\t" \
1757 "lwz 3,4(11)\n\t" /* arg1->r3 */ \
1758 "lwz 4,8(11)\n\t" \
1759 "lwz 11,0(11)\n\t" /* target->r11 */ \
1760 VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 VALGRIND_RESTORE_STACK "mr %0,3" \
1761 : /*out*/ "=r"(_res) \
1762 : /*in*/ "r"(&_argvec[0]) \
1763 : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28"); \
1764 lval = (__typeof__(lval))_res; \
1765 } while (0)
1766
1767#define CALL_FN_W_WWW(lval, orig, arg1, arg2, arg3) \
1768 do { \
1769 volatile OrigFn _orig = (orig); \
1770 volatile unsigned long _argvec[4]; \
1771 volatile unsigned long _res; \
1772 _argvec[0] = (unsigned long)_orig.nraddr; \
1773 _argvec[1] = (unsigned long)arg1; \
1774 _argvec[2] = (unsigned long)arg2; \
1775 _argvec[3] = (unsigned long)arg3; \
1776 __asm__ volatile(VALGRIND_ALIGN_STACK "mr 11,%1\n\t" \
1777 "lwz 3,4(11)\n\t" /* arg1->r3 */ \
1778 "lwz 4,8(11)\n\t" \
1779 "lwz 5,12(11)\n\t" \
1780 "lwz 11,0(11)\n\t" /* target->r11 */ \
1781 VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 VALGRIND_RESTORE_STACK "mr %0,3" \
1782 : /*out*/ "=r"(_res) \
1783 : /*in*/ "r"(&_argvec[0]) \
1784 : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28"); \
1785 lval = (__typeof__(lval))_res; \
1786 } while (0)
1787
1788#define CALL_FN_W_WWWW(lval, orig, arg1, arg2, arg3, arg4) \
1789 do { \
1790 volatile OrigFn _orig = (orig); \
1791 volatile unsigned long _argvec[5]; \
1792 volatile unsigned long _res; \
1793 _argvec[0] = (unsigned long)_orig.nraddr; \
1794 _argvec[1] = (unsigned long)arg1; \
1795 _argvec[2] = (unsigned long)arg2; \
1796 _argvec[3] = (unsigned long)arg3; \
1797 _argvec[4] = (unsigned long)arg4; \
1798 __asm__ volatile(VALGRIND_ALIGN_STACK "mr 11,%1\n\t" \
1799 "lwz 3,4(11)\n\t" /* arg1->r3 */ \
1800 "lwz 4,8(11)\n\t" \
1801 "lwz 5,12(11)\n\t" \
1802 "lwz 6,16(11)\n\t" /* arg4->r6 */ \
1803 "lwz 11,0(11)\n\t" /* target->r11 */ \
1804 VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 VALGRIND_RESTORE_STACK "mr %0,3" \
1805 : /*out*/ "=r"(_res) \
1806 : /*in*/ "r"(&_argvec[0]) \
1807 : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28"); \
1808 lval = (__typeof__(lval))_res; \
1809 } while (0)
1810
1811#define CALL_FN_W_5W(lval, orig, arg1, arg2, arg3, arg4, arg5) \
1812 do { \
1813 volatile OrigFn _orig = (orig); \
1814 volatile unsigned long _argvec[6]; \
1815 volatile unsigned long _res; \
1816 _argvec[0] = (unsigned long)_orig.nraddr; \
1817 _argvec[1] = (unsigned long)arg1; \
1818 _argvec[2] = (unsigned long)arg2; \
1819 _argvec[3] = (unsigned long)arg3; \
1820 _argvec[4] = (unsigned long)arg4; \
1821 _argvec[5] = (unsigned long)arg5; \
1822 __asm__ volatile(VALGRIND_ALIGN_STACK "mr 11,%1\n\t" \
1823 "lwz 3,4(11)\n\t" /* arg1->r3 */ \
1824 "lwz 4,8(11)\n\t" \
1825 "lwz 5,12(11)\n\t" \
1826 "lwz 6,16(11)\n\t" /* arg4->r6 */ \
1827 "lwz 7,20(11)\n\t" \
1828 "lwz 11,0(11)\n\t" /* target->r11 */ \
1829 VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 VALGRIND_RESTORE_STACK "mr %0,3" \
1830 : /*out*/ "=r"(_res) \
1831 : /*in*/ "r"(&_argvec[0]) \
1832 : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28"); \
1833 lval = (__typeof__(lval))_res; \
1834 } while (0)
1835
1836#define CALL_FN_W_6W(lval, orig, arg1, arg2, arg3, arg4, arg5, arg6) \
1837 do { \
1838 volatile OrigFn _orig = (orig); \
1839 volatile unsigned long _argvec[7]; \
1840 volatile unsigned long _res; \
1841 _argvec[0] = (unsigned long)_orig.nraddr; \
1842 _argvec[1] = (unsigned long)arg1; \
1843 _argvec[2] = (unsigned long)arg2; \
1844 _argvec[3] = (unsigned long)arg3; \
1845 _argvec[4] = (unsigned long)arg4; \
1846 _argvec[5] = (unsigned long)arg5; \
1847 _argvec[6] = (unsigned long)arg6; \
1848 __asm__ volatile(VALGRIND_ALIGN_STACK "mr 11,%1\n\t" \
1849 "lwz 3,4(11)\n\t" /* arg1->r3 */ \
1850 "lwz 4,8(11)\n\t" \
1851 "lwz 5,12(11)\n\t" \
1852 "lwz 6,16(11)\n\t" /* arg4->r6 */ \
1853 "lwz 7,20(11)\n\t" \
1854 "lwz 8,24(11)\n\t" \
1855 "lwz 11,0(11)\n\t" /* target->r11 */ \
1856 VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 VALGRIND_RESTORE_STACK "mr %0,3" \
1857 : /*out*/ "=r"(_res) \
1858 : /*in*/ "r"(&_argvec[0]) \
1859 : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28"); \
1860 lval = (__typeof__(lval))_res; \
1861 } while (0)
1862
1863#define CALL_FN_W_7W(lval, orig, arg1, arg2, arg3, arg4, arg5, arg6, arg7) \
1864 do { \
1865 volatile OrigFn _orig = (orig); \
1866 volatile unsigned long _argvec[8]; \
1867 volatile unsigned long _res; \
1868 _argvec[0] = (unsigned long)_orig.nraddr; \
1869 _argvec[1] = (unsigned long)arg1; \
1870 _argvec[2] = (unsigned long)arg2; \
1871 _argvec[3] = (unsigned long)arg3; \
1872 _argvec[4] = (unsigned long)arg4; \
1873 _argvec[5] = (unsigned long)arg5; \
1874 _argvec[6] = (unsigned long)arg6; \
1875 _argvec[7] = (unsigned long)arg7; \
1876 __asm__ volatile(VALGRIND_ALIGN_STACK "mr 11,%1\n\t" \
1877 "lwz 3,4(11)\n\t" /* arg1->r3 */ \
1878 "lwz 4,8(11)\n\t" \
1879 "lwz 5,12(11)\n\t" \
1880 "lwz 6,16(11)\n\t" /* arg4->r6 */ \
1881 "lwz 7,20(11)\n\t" \
1882 "lwz 8,24(11)\n\t" \
1883 "lwz 9,28(11)\n\t" \
1884 "lwz 11,0(11)\n\t" /* target->r11 */ \
1885 VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 VALGRIND_RESTORE_STACK "mr %0,3" \
1886 : /*out*/ "=r"(_res) \
1887 : /*in*/ "r"(&_argvec[0]) \
1888 : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28"); \
1889 lval = (__typeof__(lval))_res; \
1890 } while (0)
1891
1892#define CALL_FN_W_8W(lval, orig, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8) \
1893 do { \
1894 volatile OrigFn _orig = (orig); \
1895 volatile unsigned long _argvec[9]; \
1896 volatile unsigned long _res; \
1897 _argvec[0] = (unsigned long)_orig.nraddr; \
1898 _argvec[1] = (unsigned long)arg1; \
1899 _argvec[2] = (unsigned long)arg2; \
1900 _argvec[3] = (unsigned long)arg3; \
1901 _argvec[4] = (unsigned long)arg4; \
1902 _argvec[5] = (unsigned long)arg5; \
1903 _argvec[6] = (unsigned long)arg6; \
1904 _argvec[7] = (unsigned long)arg7; \
1905 _argvec[8] = (unsigned long)arg8; \
1906 __asm__ volatile(VALGRIND_ALIGN_STACK "mr 11,%1\n\t" \
1907 "lwz 3,4(11)\n\t" /* arg1->r3 */ \
1908 "lwz 4,8(11)\n\t" \
1909 "lwz 5,12(11)\n\t" \
1910 "lwz 6,16(11)\n\t" /* arg4->r6 */ \
1911 "lwz 7,20(11)\n\t" \
1912 "lwz 8,24(11)\n\t" \
1913 "lwz 9,28(11)\n\t" \
1914 "lwz 10,32(11)\n\t" /* arg8->r10 */ \
1915 "lwz 11,0(11)\n\t" /* target->r11 */ \
1916 VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 VALGRIND_RESTORE_STACK "mr %0,3" \
1917 : /*out*/ "=r"(_res) \
1918 : /*in*/ "r"(&_argvec[0]) \
1919 : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28"); \
1920 lval = (__typeof__(lval))_res; \
1921 } while (0)
1922
1923#define CALL_FN_W_9W(lval, orig, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8, arg9) \
1924 do { \
1925 volatile OrigFn _orig = (orig); \
1926 volatile unsigned long _argvec[10]; \
1927 volatile unsigned long _res; \
1928 _argvec[0] = (unsigned long)_orig.nraddr; \
1929 _argvec[1] = (unsigned long)arg1; \
1930 _argvec[2] = (unsigned long)arg2; \
1931 _argvec[3] = (unsigned long)arg3; \
1932 _argvec[4] = (unsigned long)arg4; \
1933 _argvec[5] = (unsigned long)arg5; \
1934 _argvec[6] = (unsigned long)arg6; \
1935 _argvec[7] = (unsigned long)arg7; \
1936 _argvec[8] = (unsigned long)arg8; \
1937 _argvec[9] = (unsigned long)arg9; \
1938 __asm__ volatile(VALGRIND_ALIGN_STACK "mr 11,%1\n\t" \
1939 "addi 1,1,-16\n\t" /* arg9 */ \
1940 "lwz 3,36(11)\n\t" \
1941 "stw 3,8(1)\n\t" /* args1-8 */ \
1942 "lwz 3,4(11)\n\t" /* arg1->r3 */ \
1943 "lwz 4,8(11)\n\t" \
1944 "lwz 5,12(11)\n\t" \
1945 "lwz 6,16(11)\n\t" /* arg4->r6 */ \
1946 "lwz 7,20(11)\n\t" \
1947 "lwz 8,24(11)\n\t" \
1948 "lwz 9,28(11)\n\t" \
1949 "lwz 10,32(11)\n\t" /* arg8->r10 */ \
1950 "lwz 11,0(11)\n\t" /* target->r11 */ \
1951 VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 VALGRIND_RESTORE_STACK "mr %0,3" \
1952 : /*out*/ "=r"(_res) \
1953 : /*in*/ "r"(&_argvec[0]) \
1954 : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28"); \
1955 lval = (__typeof__(lval))_res; \
1956 } while (0)
1957
1958#define CALL_FN_W_10W(lval, orig, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8, arg9, arg10) \
1959 do { \
1960 volatile OrigFn _orig = (orig); \
1961 volatile unsigned long _argvec[11]; \
1962 volatile unsigned long _res; \
1963 _argvec[0] = (unsigned long)_orig.nraddr; \
1964 _argvec[1] = (unsigned long)arg1; \
1965 _argvec[2] = (unsigned long)arg2; \
1966 _argvec[3] = (unsigned long)arg3; \
1967 _argvec[4] = (unsigned long)arg4; \
1968 _argvec[5] = (unsigned long)arg5; \
1969 _argvec[6] = (unsigned long)arg6; \
1970 _argvec[7] = (unsigned long)arg7; \
1971 _argvec[8] = (unsigned long)arg8; \
1972 _argvec[9] = (unsigned long)arg9; \
1973 _argvec[10] = (unsigned long)arg10; \
1974 __asm__ volatile(VALGRIND_ALIGN_STACK "mr 11,%1\n\t" \
1975 "addi 1,1,-16\n\t" /* arg10 */ \
1976 "lwz 3,40(11)\n\t" \
1977 "stw 3,12(1)\n\t" /* arg9 */ \
1978 "lwz 3,36(11)\n\t" \
1979 "stw 3,8(1)\n\t" /* args1-8 */ \
1980 "lwz 3,4(11)\n\t" /* arg1->r3 */ \
1981 "lwz 4,8(11)\n\t" \
1982 "lwz 5,12(11)\n\t" \
1983 "lwz 6,16(11)\n\t" /* arg4->r6 */ \
1984 "lwz 7,20(11)\n\t" \
1985 "lwz 8,24(11)\n\t" \
1986 "lwz 9,28(11)\n\t" \
1987 "lwz 10,32(11)\n\t" /* arg8->r10 */ \
1988 "lwz 11,0(11)\n\t" /* target->r11 */ \
1989 VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 VALGRIND_RESTORE_STACK "mr %0,3" \
1990 : /*out*/ "=r"(_res) \
1991 : /*in*/ "r"(&_argvec[0]) \
1992 : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28"); \
1993 lval = (__typeof__(lval))_res; \
1994 } while (0)
1995
1996#define CALL_FN_W_11W(lval, orig, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8, arg9, arg10, arg11) \
1997 do { \
1998 volatile OrigFn _orig = (orig); \
1999 volatile unsigned long _argvec[12]; \
2000 volatile unsigned long _res; \
2001 _argvec[0] = (unsigned long)_orig.nraddr; \
2002 _argvec[1] = (unsigned long)arg1; \
2003 _argvec[2] = (unsigned long)arg2; \
2004 _argvec[3] = (unsigned long)arg3; \
2005 _argvec[4] = (unsigned long)arg4; \
2006 _argvec[5] = (unsigned long)arg5; \
2007 _argvec[6] = (unsigned long)arg6; \
2008 _argvec[7] = (unsigned long)arg7; \
2009 _argvec[8] = (unsigned long)arg8; \
2010 _argvec[9] = (unsigned long)arg9; \
2011 _argvec[10] = (unsigned long)arg10; \
2012 _argvec[11] = (unsigned long)arg11; \
2013 __asm__ volatile(VALGRIND_ALIGN_STACK "mr 11,%1\n\t" \
2014 "addi 1,1,-32\n\t" /* arg11 */ \
2015 "lwz 3,44(11)\n\t" \
2016 "stw 3,16(1)\n\t" /* arg10 */ \
2017 "lwz 3,40(11)\n\t" \
2018 "stw 3,12(1)\n\t" /* arg9 */ \
2019 "lwz 3,36(11)\n\t" \
2020 "stw 3,8(1)\n\t" /* args1-8 */ \
2021 "lwz 3,4(11)\n\t" /* arg1->r3 */ \
2022 "lwz 4,8(11)\n\t" \
2023 "lwz 5,12(11)\n\t" \
2024 "lwz 6,16(11)\n\t" /* arg4->r6 */ \
2025 "lwz 7,20(11)\n\t" \
2026 "lwz 8,24(11)\n\t" \
2027 "lwz 9,28(11)\n\t" \
2028 "lwz 10,32(11)\n\t" /* arg8->r10 */ \
2029 "lwz 11,0(11)\n\t" /* target->r11 */ \
2030 VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 VALGRIND_RESTORE_STACK "mr %0,3" \
2031 : /*out*/ "=r"(_res) \
2032 : /*in*/ "r"(&_argvec[0]) \
2033 : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28"); \
2034 lval = (__typeof__(lval))_res; \
2035 } while (0)
2036
2037#define CALL_FN_W_12W(lval, orig, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8, arg9, arg10, arg11, arg12) \
2038 do { \
2039 volatile OrigFn _orig = (orig); \
2040 volatile unsigned long _argvec[13]; \
2041 volatile unsigned long _res; \
2042 _argvec[0] = (unsigned long)_orig.nraddr; \
2043 _argvec[1] = (unsigned long)arg1; \
2044 _argvec[2] = (unsigned long)arg2; \
2045 _argvec[3] = (unsigned long)arg3; \
2046 _argvec[4] = (unsigned long)arg4; \
2047 _argvec[5] = (unsigned long)arg5; \
2048 _argvec[6] = (unsigned long)arg6; \
2049 _argvec[7] = (unsigned long)arg7; \
2050 _argvec[8] = (unsigned long)arg8; \
2051 _argvec[9] = (unsigned long)arg9; \
2052 _argvec[10] = (unsigned long)arg10; \
2053 _argvec[11] = (unsigned long)arg11; \
2054 _argvec[12] = (unsigned long)arg12; \
2055 __asm__ volatile(VALGRIND_ALIGN_STACK "mr 11,%1\n\t" \
2056 "addi 1,1,-32\n\t" /* arg12 */ \
2057 "lwz 3,48(11)\n\t" \
2058 "stw 3,20(1)\n\t" /* arg11 */ \
2059 "lwz 3,44(11)\n\t" \
2060 "stw 3,16(1)\n\t" /* arg10 */ \
2061 "lwz 3,40(11)\n\t" \
2062 "stw 3,12(1)\n\t" /* arg9 */ \
2063 "lwz 3,36(11)\n\t" \
2064 "stw 3,8(1)\n\t" /* args1-8 */ \
2065 "lwz 3,4(11)\n\t" /* arg1->r3 */ \
2066 "lwz 4,8(11)\n\t" \
2067 "lwz 5,12(11)\n\t" \
2068 "lwz 6,16(11)\n\t" /* arg4->r6 */ \
2069 "lwz 7,20(11)\n\t" \
2070 "lwz 8,24(11)\n\t" \
2071 "lwz 9,28(11)\n\t" \
2072 "lwz 10,32(11)\n\t" /* arg8->r10 */ \
2073 "lwz 11,0(11)\n\t" /* target->r11 */ \
2074 VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 VALGRIND_RESTORE_STACK "mr %0,3" \
2075 : /*out*/ "=r"(_res) \
2076 : /*in*/ "r"(&_argvec[0]) \
2077 : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28"); \
2078 lval = (__typeof__(lval))_res; \
2079 } while (0)
2080
2081#endif /* PLAT_ppc32_linux */
2082
2083/* ------------------------ ppc64-linux ------------------------ */
2084
2085#if defined(PLAT_ppc64_linux)
2086
2087/* ARGREGS: r3 r4 r5 r6 r7 r8 r9 r10 (the rest are passed on the stack) */
2088
2089/* These regs are trashed by the hidden call. */
2090#define __CALLER_SAVED_REGS \
2091 "lr", "ctr", "xer", "cr0", "cr1", "cr2", "cr3", "cr4", "cr5", "cr6", "cr7", "r0", "r2", "r3", "r4", "r5", "r6", \
2092 "r7", "r8", "r9", "r10", "r11", "r12", "r13"
2093
2094/* Macros to save and align the stack before making a function
2095 call, and to restore it afterwards, as gcc may not keep the
2096 stack pointer aligned if it doesn't realise that calls are
2097 being made to other functions. */
2098
2099#define VALGRIND_ALIGN_STACK \
2100 "mr 28,1\n\t" \
2101 "rldicr 1,1,0,59\n\t"
2102#define VALGRIND_RESTORE_STACK "mr 1,28\n\t"
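
/* Note (added for clarity): "rldicr 1,1,0,59" keeps bits 0..59 of r1 and
   clears bits 60..63, rounding the stack pointer down to a 16-byte
   boundary (r1 &= ~0xFUL); the original value is preserved in r28. */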
2103
2104/* These CALL_FN_ macros assume that on ppc64-linux, sizeof(unsigned
2105 long) == 8. */
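
/* Layout note (added for clarity): each macro below builds _argvec as
   [0] = scratch slot for the caller's r2 (TOC pointer), [1] = the
   callee's r2, [2] = the callee's entry address, [3..] = the arguments,
   and hands &_argvec[2] to the asm. Hence "std 2,-16(11)" saves the
   current TOC into slot 0 and "ld 2,-8(11)" loads the callee's TOC from
   slot 1 before the call. For nine or more arguments the frame is grown
   and the excess arguments are stored from 112(r1) upwards: the ELFv1
   parameter save area starts 48 bytes above the stack pointer, and
   48 + 8*8 = 112. */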
2106
2107#define CALL_FN_W_v(lval, orig) \
2108 do { \
2109 volatile OrigFn _orig = (orig); \
2110 volatile unsigned long _argvec[3 + 0]; \
2111 volatile unsigned long _res; \
2112 /* _argvec[0] holds current r2 across the call */ \
2113 _argvec[1] = (unsigned long)_orig.r2; \
2114 _argvec[2] = (unsigned long)_orig.nraddr; \
2115 __asm__ volatile(VALGRIND_ALIGN_STACK "mr 11,%1\n\t" \
2116 "std 2,-16(11)\n\t" /* save tocptr */ \
2117 "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
2118 "ld 11, 0(11)\n\t" /* target->r11 */ \
2119 VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 "mr 11,%1\n\t" \
2120 "mr %0,3\n\t" \
2121 "ld 2,-16(11)\n\t" /* restore tocptr */ \
2122 VALGRIND_RESTORE_STACK \
2123 : /*out*/ "=r"(_res) \
2124 : /*in*/ "r"(&_argvec[2]) \
2125 : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28"); \
2126 lval = (__typeof__(lval))_res; \
2127 } while (0)
2128
2129#define CALL_FN_W_W(lval, orig, arg1) \
2130 do { \
2131 volatile OrigFn _orig = (orig); \
2132 volatile unsigned long _argvec[3 + 1]; \
2133 volatile unsigned long _res; \
2134 /* _argvec[0] holds current r2 across the call */ \
2135 _argvec[1] = (unsigned long)_orig.r2; \
2136 _argvec[2] = (unsigned long)_orig.nraddr; \
2137 _argvec[2 + 1] = (unsigned long)arg1; \
2138 __asm__ volatile(VALGRIND_ALIGN_STACK "mr 11,%1\n\t" \
2139 "std 2,-16(11)\n\t" /* save tocptr */ \
2140 "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
2141 "ld 3, 8(11)\n\t" /* arg1->r3 */ \
2142 "ld 11, 0(11)\n\t" /* target->r11 */ \
2143 VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 "mr 11,%1\n\t" \
2144 "mr %0,3\n\t" \
2145 "ld 2,-16(11)\n\t" /* restore tocptr */ \
2146 VALGRIND_RESTORE_STACK \
2147 : /*out*/ "=r"(_res) \
2148 : /*in*/ "r"(&_argvec[2]) \
2149 : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28"); \
2150 lval = (__typeof__(lval))_res; \
2151 } while (0)
2152
2153#define CALL_FN_W_WW(lval, orig, arg1, arg2) \
2154 do { \
2155 volatile OrigFn _orig = (orig); \
2156 volatile unsigned long _argvec[3 + 2]; \
2157 volatile unsigned long _res; \
2158 /* _argvec[0] holds current r2 across the call */ \
2159 _argvec[1] = (unsigned long)_orig.r2; \
2160 _argvec[2] = (unsigned long)_orig.nraddr; \
2161 _argvec[2 + 1] = (unsigned long)arg1; \
2162 _argvec[2 + 2] = (unsigned long)arg2; \
2163 __asm__ volatile(VALGRIND_ALIGN_STACK "mr 11,%1\n\t" \
2164 "std 2,-16(11)\n\t" /* save tocptr */ \
2165 "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
2166 "ld 3, 8(11)\n\t" /* arg1->r3 */ \
2167 "ld 4, 16(11)\n\t" /* arg2->r4 */ \
2168 "ld 11, 0(11)\n\t" /* target->r11 */ \
2169 VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 "mr 11,%1\n\t" \
2170 "mr %0,3\n\t" \
2171 "ld 2,-16(11)\n\t" /* restore tocptr */ \
2172 VALGRIND_RESTORE_STACK \
2173 : /*out*/ "=r"(_res) \
2174 : /*in*/ "r"(&_argvec[2]) \
2175 : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28"); \
2176 lval = (__typeof__(lval))_res; \
2177 } while (0)
2178
2179#define CALL_FN_W_WWW(lval, orig, arg1, arg2, arg3) \
2180 do { \
2181 volatile OrigFn _orig = (orig); \
2182 volatile unsigned long _argvec[3 + 3]; \
2183 volatile unsigned long _res; \
2184 /* _argvec[0] holds current r2 across the call */ \
2185 _argvec[1] = (unsigned long)_orig.r2; \
2186 _argvec[2] = (unsigned long)_orig.nraddr; \
2187 _argvec[2 + 1] = (unsigned long)arg1; \
2188 _argvec[2 + 2] = (unsigned long)arg2; \
2189 _argvec[2 + 3] = (unsigned long)arg3; \
2190 __asm__ volatile(VALGRIND_ALIGN_STACK "mr 11,%1\n\t" \
2191 "std 2,-16(11)\n\t" /* save tocptr */ \
2192 "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
2193 "ld 3, 8(11)\n\t" /* arg1->r3 */ \
2194 "ld 4, 16(11)\n\t" /* arg2->r4 */ \
2195 "ld 5, 24(11)\n\t" /* arg3->r5 */ \
2196 "ld 11, 0(11)\n\t" /* target->r11 */ \
2197 VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 "mr 11,%1\n\t" \
2198 "mr %0,3\n\t" \
2199 "ld 2,-16(11)\n\t" /* restore tocptr */ \
2200 VALGRIND_RESTORE_STACK \
2201 : /*out*/ "=r"(_res) \
2202 : /*in*/ "r"(&_argvec[2]) \
2203 : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28"); \
2204 lval = (__typeof__(lval))_res; \
2205 } while (0)
2206
2207#define CALL_FN_W_WWWW(lval, orig, arg1, arg2, arg3, arg4) \
2208 do { \
2209 volatile OrigFn _orig = (orig); \
2210 volatile unsigned long _argvec[3 + 4]; \
2211 volatile unsigned long _res; \
2212 /* _argvec[0] holds current r2 across the call */ \
2213 _argvec[1] = (unsigned long)_orig.r2; \
2214 _argvec[2] = (unsigned long)_orig.nraddr; \
2215 _argvec[2 + 1] = (unsigned long)arg1; \
2216 _argvec[2 + 2] = (unsigned long)arg2; \
2217 _argvec[2 + 3] = (unsigned long)arg3; \
2218 _argvec[2 + 4] = (unsigned long)arg4; \
2219 __asm__ volatile(VALGRIND_ALIGN_STACK "mr 11,%1\n\t" \
2220 "std 2,-16(11)\n\t" /* save tocptr */ \
2221 "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
2222 "ld 3, 8(11)\n\t" /* arg1->r3 */ \
2223 "ld 4, 16(11)\n\t" /* arg2->r4 */ \
2224 "ld 5, 24(11)\n\t" /* arg3->r5 */ \
2225 "ld 6, 32(11)\n\t" /* arg4->r6 */ \
2226 "ld 11, 0(11)\n\t" /* target->r11 */ \
2227 VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 "mr 11,%1\n\t" \
2228 "mr %0,3\n\t" \
2229 "ld 2,-16(11)\n\t" /* restore tocptr */ \
2230 VALGRIND_RESTORE_STACK \
2231 : /*out*/ "=r"(_res) \
2232 : /*in*/ "r"(&_argvec[2]) \
2233 : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28"); \
2234 lval = (__typeof__(lval))_res; \
2235 } while (0)
2236
2237#define CALL_FN_W_5W(lval, orig, arg1, arg2, arg3, arg4, arg5) \
2238 do { \
2239 volatile OrigFn _orig = (orig); \
2240 volatile unsigned long _argvec[3 + 5]; \
2241 volatile unsigned long _res; \
2242 /* _argvec[0] holds current r2 across the call */ \
2243 _argvec[1] = (unsigned long)_orig.r2; \
2244 _argvec[2] = (unsigned long)_orig.nraddr; \
2245 _argvec[2 + 1] = (unsigned long)arg1; \
2246 _argvec[2 + 2] = (unsigned long)arg2; \
2247 _argvec[2 + 3] = (unsigned long)arg3; \
2248 _argvec[2 + 4] = (unsigned long)arg4; \
2249 _argvec[2 + 5] = (unsigned long)arg5; \
2250 __asm__ volatile(VALGRIND_ALIGN_STACK "mr 11,%1\n\t" \
2251 "std 2,-16(11)\n\t" /* save tocptr */ \
2252 "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
2253 "ld 3, 8(11)\n\t" /* arg1->r3 */ \
2254 "ld 4, 16(11)\n\t" /* arg2->r4 */ \
2255 "ld 5, 24(11)\n\t" /* arg3->r5 */ \
2256 "ld 6, 32(11)\n\t" /* arg4->r6 */ \
2257 "ld 7, 40(11)\n\t" /* arg5->r7 */ \
2258 "ld 11, 0(11)\n\t" /* target->r11 */ \
2259 VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 "mr 11,%1\n\t" \
2260 "mr %0,3\n\t" \
2261 "ld 2,-16(11)\n\t" /* restore tocptr */ \
2262 VALGRIND_RESTORE_STACK \
2263 : /*out*/ "=r"(_res) \
2264 : /*in*/ "r"(&_argvec[2]) \
2265 : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28"); \
2266 lval = (__typeof__(lval))_res; \
2267 } while (0)
2268
2269#define CALL_FN_W_6W(lval, orig, arg1, arg2, arg3, arg4, arg5, arg6) \
2270 do { \
2271 volatile OrigFn _orig = (orig); \
2272 volatile unsigned long _argvec[3 + 6]; \
2273 volatile unsigned long _res; \
2274 /* _argvec[0] holds current r2 across the call */ \
2275 _argvec[1] = (unsigned long)_orig.r2; \
2276 _argvec[2] = (unsigned long)_orig.nraddr; \
2277 _argvec[2 + 1] = (unsigned long)arg1; \
2278 _argvec[2 + 2] = (unsigned long)arg2; \
2279 _argvec[2 + 3] = (unsigned long)arg3; \
2280 _argvec[2 + 4] = (unsigned long)arg4; \
2281 _argvec[2 + 5] = (unsigned long)arg5; \
2282 _argvec[2 + 6] = (unsigned long)arg6; \
2283 __asm__ volatile(VALGRIND_ALIGN_STACK "mr 11,%1\n\t" \
2284 "std 2,-16(11)\n\t" /* save tocptr */ \
2285 "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
2286 "ld 3, 8(11)\n\t" /* arg1->r3 */ \
2287 "ld 4, 16(11)\n\t" /* arg2->r4 */ \
2288 "ld 5, 24(11)\n\t" /* arg3->r5 */ \
2289 "ld 6, 32(11)\n\t" /* arg4->r6 */ \
2290 "ld 7, 40(11)\n\t" /* arg5->r7 */ \
2291 "ld 8, 48(11)\n\t" /* arg6->r8 */ \
2292 "ld 11, 0(11)\n\t" /* target->r11 */ \
2293 VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 "mr 11,%1\n\t" \
2294 "mr %0,3\n\t" \
2295 "ld 2,-16(11)\n\t" /* restore tocptr */ \
2296 VALGRIND_RESTORE_STACK \
2297 : /*out*/ "=r"(_res) \
2298 : /*in*/ "r"(&_argvec[2]) \
2299 : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28"); \
2300 lval = (__typeof__(lval))_res; \
2301 } while (0)
2302
2303#define CALL_FN_W_7W(lval, orig, arg1, arg2, arg3, arg4, arg5, arg6, arg7) \
2304 do { \
2305 volatile OrigFn _orig = (orig); \
2306 volatile unsigned long _argvec[3 + 7]; \
2307 volatile unsigned long _res; \
2308 /* _argvec[0] holds current r2 across the call */ \
2309 _argvec[1] = (unsigned long)_orig.r2; \
2310 _argvec[2] = (unsigned long)_orig.nraddr; \
2311 _argvec[2 + 1] = (unsigned long)arg1; \
2312 _argvec[2 + 2] = (unsigned long)arg2; \
2313 _argvec[2 + 3] = (unsigned long)arg3; \
2314 _argvec[2 + 4] = (unsigned long)arg4; \
2315 _argvec[2 + 5] = (unsigned long)arg5; \
2316 _argvec[2 + 6] = (unsigned long)arg6; \
2317 _argvec[2 + 7] = (unsigned long)arg7; \
2318 __asm__ volatile(VALGRIND_ALIGN_STACK "mr 11,%1\n\t" \
2319 "std 2,-16(11)\n\t" /* save tocptr */ \
2320 "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
2321 "ld 3, 8(11)\n\t" /* arg1->r3 */ \
2322 "ld 4, 16(11)\n\t" /* arg2->r4 */ \
2323 "ld 5, 24(11)\n\t" /* arg3->r5 */ \
2324 "ld 6, 32(11)\n\t" /* arg4->r6 */ \
2325 "ld 7, 40(11)\n\t" /* arg5->r7 */ \
2326 "ld 8, 48(11)\n\t" /* arg6->r8 */ \
2327 "ld 9, 56(11)\n\t" /* arg7->r9 */ \
2328 "ld 11, 0(11)\n\t" /* target->r11 */ \
2329 VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 "mr 11,%1\n\t" \
2330 "mr %0,3\n\t" \
2331 "ld 2,-16(11)\n\t" /* restore tocptr */ \
2332 VALGRIND_RESTORE_STACK \
2333 : /*out*/ "=r"(_res) \
2334 : /*in*/ "r"(&_argvec[2]) \
2335 : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28"); \
2336 lval = (__typeof__(lval))_res; \
2337 } while (0)
2338
2339#define CALL_FN_W_8W(lval, orig, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8) \
2340 do { \
2341 volatile OrigFn _orig = (orig); \
2342 volatile unsigned long _argvec[3 + 8]; \
2343 volatile unsigned long _res; \
2344 /* _argvec[0] holds current r2 across the call */ \
2345 _argvec[1] = (unsigned long)_orig.r2; \
2346 _argvec[2] = (unsigned long)_orig.nraddr; \
2347 _argvec[2 + 1] = (unsigned long)arg1; \
2348 _argvec[2 + 2] = (unsigned long)arg2; \
2349 _argvec[2 + 3] = (unsigned long)arg3; \
2350 _argvec[2 + 4] = (unsigned long)arg4; \
2351 _argvec[2 + 5] = (unsigned long)arg5; \
2352 _argvec[2 + 6] = (unsigned long)arg6; \
2353 _argvec[2 + 7] = (unsigned long)arg7; \
2354 _argvec[2 + 8] = (unsigned long)arg8; \
2355 __asm__ volatile(VALGRIND_ALIGN_STACK "mr 11,%1\n\t" \
2356 "std 2,-16(11)\n\t" /* save tocptr */ \
2357 "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
2358 "ld 3, 8(11)\n\t" /* arg1->r3 */ \
2359 "ld 4, 16(11)\n\t" /* arg2->r4 */ \
2360 "ld 5, 24(11)\n\t" /* arg3->r5 */ \
2361 "ld 6, 32(11)\n\t" /* arg4->r6 */ \
2362 "ld 7, 40(11)\n\t" /* arg5->r7 */ \
2363 "ld 8, 48(11)\n\t" /* arg6->r8 */ \
2364 "ld 9, 56(11)\n\t" /* arg7->r9 */ \
2365 "ld 10, 64(11)\n\t" /* arg8->r10 */ \
2366 "ld 11, 0(11)\n\t" /* target->r11 */ \
2367 VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 "mr 11,%1\n\t" \
2368 "mr %0,3\n\t" \
2369 "ld 2,-16(11)\n\t" /* restore tocptr */ \
2370 VALGRIND_RESTORE_STACK \
2371 : /*out*/ "=r"(_res) \
2372 : /*in*/ "r"(&_argvec[2]) \
2373 : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28"); \
2374 lval = (__typeof__(lval))_res; \
2375 } while (0)
2376
2377#define CALL_FN_W_9W(lval, orig, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8, arg9) \
2378 do { \
2379 volatile OrigFn _orig = (orig); \
2380 volatile unsigned long _argvec[3 + 9]; \
2381 volatile unsigned long _res; \
2382 /* _argvec[0] holds current r2 across the call */ \
2383 _argvec[1] = (unsigned long)_orig.r2; \
2384 _argvec[2] = (unsigned long)_orig.nraddr; \
2385 _argvec[2 + 1] = (unsigned long)arg1; \
2386 _argvec[2 + 2] = (unsigned long)arg2; \
2387 _argvec[2 + 3] = (unsigned long)arg3; \
2388 _argvec[2 + 4] = (unsigned long)arg4; \
2389 _argvec[2 + 5] = (unsigned long)arg5; \
2390 _argvec[2 + 6] = (unsigned long)arg6; \
2391 _argvec[2 + 7] = (unsigned long)arg7; \
2392 _argvec[2 + 8] = (unsigned long)arg8; \
2393 _argvec[2 + 9] = (unsigned long)arg9; \
2394 __asm__ volatile(VALGRIND_ALIGN_STACK "mr 11,%1\n\t" \
2395 "std 2,-16(11)\n\t" /* save tocptr */ \
2396 "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
2397 "addi 1,1,-128\n\t" /* expand stack frame */ /* arg9 */ \
2398 "ld 3,72(11)\n\t" \
2399 "std 3,112(1)\n\t" /* args1-8 */ \
2400 "ld 3, 8(11)\n\t" /* arg1->r3 */ \
2401 "ld 4, 16(11)\n\t" /* arg2->r4 */ \
2402 "ld 5, 24(11)\n\t" /* arg3->r5 */ \
2403 "ld 6, 32(11)\n\t" /* arg4->r6 */ \
2404 "ld 7, 40(11)\n\t" /* arg5->r7 */ \
2405 "ld 8, 48(11)\n\t" /* arg6->r8 */ \
2406 "ld 9, 56(11)\n\t" /* arg7->r9 */ \
2407 "ld 10, 64(11)\n\t" /* arg8->r10 */ \
2408 "ld 11, 0(11)\n\t" /* target->r11 */ \
2409 VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 "mr 11,%1\n\t" \
2410 "mr %0,3\n\t" \
2411 "ld 2,-16(11)\n\t" /* restore tocptr */ \
2412 VALGRIND_RESTORE_STACK \
2413 : /*out*/ "=r"(_res) \
2414 : /*in*/ "r"(&_argvec[2]) \
2415 : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28"); \
2416 lval = (__typeof__(lval))_res; \
2417 } while (0)
2418
2419#define CALL_FN_W_10W(lval, orig, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8, arg9, arg10) \
2420 do { \
2421 volatile OrigFn _orig = (orig); \
2422 volatile unsigned long _argvec[3 + 10]; \
2423 volatile unsigned long _res; \
2424 /* _argvec[0] holds current r2 across the call */ \
2425 _argvec[1] = (unsigned long)_orig.r2; \
2426 _argvec[2] = (unsigned long)_orig.nraddr; \
2427 _argvec[2 + 1] = (unsigned long)arg1; \
2428 _argvec[2 + 2] = (unsigned long)arg2; \
2429 _argvec[2 + 3] = (unsigned long)arg3; \
2430 _argvec[2 + 4] = (unsigned long)arg4; \
2431 _argvec[2 + 5] = (unsigned long)arg5; \
2432 _argvec[2 + 6] = (unsigned long)arg6; \
2433 _argvec[2 + 7] = (unsigned long)arg7; \
2434 _argvec[2 + 8] = (unsigned long)arg8; \
2435 _argvec[2 + 9] = (unsigned long)arg9; \
2436 _argvec[2 + 10] = (unsigned long)arg10; \
2437 __asm__ volatile(VALGRIND_ALIGN_STACK "mr 11,%1\n\t" \
2438 "std 2,-16(11)\n\t" /* save tocptr */ \
2439 "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
2440 "addi 1,1,-128\n\t" /* expand stack frame */ /* arg10 */ \
2441 "ld 3,80(11)\n\t" \
2442 "std 3,120(1)\n\t" /* arg9 */ \
2443 "ld 3,72(11)\n\t" \
2444 "std 3,112(1)\n\t" /* args1-8 */ \
2445 "ld 3, 8(11)\n\t" /* arg1->r3 */ \
2446 "ld 4, 16(11)\n\t" /* arg2->r4 */ \
2447 "ld 5, 24(11)\n\t" /* arg3->r5 */ \
2448 "ld 6, 32(11)\n\t" /* arg4->r6 */ \
2449 "ld 7, 40(11)\n\t" /* arg5->r7 */ \
2450 "ld 8, 48(11)\n\t" /* arg6->r8 */ \
2451 "ld 9, 56(11)\n\t" /* arg7->r9 */ \
2452 "ld 10, 64(11)\n\t" /* arg8->r10 */ \
2453 "ld 11, 0(11)\n\t" /* target->r11 */ \
2454 VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 "mr 11,%1\n\t" \
2455 "mr %0,3\n\t" \
2456 "ld 2,-16(11)\n\t" /* restore tocptr */ \
2457 VALGRIND_RESTORE_STACK \
2458 : /*out*/ "=r"(_res) \
2459 : /*in*/ "r"(&_argvec[2]) \
2460 : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28"); \
2461 lval = (__typeof__(lval))_res; \
2462 } while (0)
2463
2464#define CALL_FN_W_11W(lval, orig, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8, arg9, arg10, arg11) \
2465 do { \
2466 volatile OrigFn _orig = (orig); \
2467 volatile unsigned long _argvec[3 + 11]; \
2468 volatile unsigned long _res; \
2469 /* _argvec[0] holds current r2 across the call */ \
2470 _argvec[1] = (unsigned long)_orig.r2; \
2471 _argvec[2] = (unsigned long)_orig.nraddr; \
2472 _argvec[2 + 1] = (unsigned long)arg1; \
2473 _argvec[2 + 2] = (unsigned long)arg2; \
2474 _argvec[2 + 3] = (unsigned long)arg3; \
2475 _argvec[2 + 4] = (unsigned long)arg4; \
2476 _argvec[2 + 5] = (unsigned long)arg5; \
2477 _argvec[2 + 6] = (unsigned long)arg6; \
2478 _argvec[2 + 7] = (unsigned long)arg7; \
2479 _argvec[2 + 8] = (unsigned long)arg8; \
2480 _argvec[2 + 9] = (unsigned long)arg9; \
2481 _argvec[2 + 10] = (unsigned long)arg10; \
2482 _argvec[2 + 11] = (unsigned long)arg11; \
2483 __asm__ volatile(VALGRIND_ALIGN_STACK "mr 11,%1\n\t" \
2484 "std 2,-16(11)\n\t" /* save tocptr */ \
2485 "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
2486 "addi 1,1,-144\n\t" /* expand stack frame */ /* arg11 */ \
2487 "ld 3,88(11)\n\t" \
2488 "std 3,128(1)\n\t" /* arg10 */ \
2489 "ld 3,80(11)\n\t" \
2490 "std 3,120(1)\n\t" /* arg9 */ \
2491 "ld 3,72(11)\n\t" \
2492 "std 3,112(1)\n\t" /* args1-8 */ \
2493 "ld 3, 8(11)\n\t" /* arg1->r3 */ \
2494 "ld 4, 16(11)\n\t" /* arg2->r4 */ \
2495 "ld 5, 24(11)\n\t" /* arg3->r5 */ \
2496 "ld 6, 32(11)\n\t" /* arg4->r6 */ \
2497 "ld 7, 40(11)\n\t" /* arg5->r7 */ \
2498 "ld 8, 48(11)\n\t" /* arg6->r8 */ \
2499 "ld 9, 56(11)\n\t" /* arg7->r9 */ \
2500 "ld 10, 64(11)\n\t" /* arg8->r10 */ \
2501 "ld 11, 0(11)\n\t" /* target->r11 */ \
2502 VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 "mr 11,%1\n\t" \
2503 "mr %0,3\n\t" \
2504 "ld 2,-16(11)\n\t" /* restore tocptr */ \
2505 VALGRIND_RESTORE_STACK \
2506 : /*out*/ "=r"(_res) \
2507 : /*in*/ "r"(&_argvec[2]) \
2508 : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28"); \
2509 lval = (__typeof__(lval))_res; \
2510 } while (0)
2511
2512#define CALL_FN_W_12W(lval, orig, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8, arg9, arg10, arg11, arg12) \
2513 do { \
2514 volatile OrigFn _orig = (orig); \
2515 volatile unsigned long _argvec[3 + 12]; \
2516 volatile unsigned long _res; \
2517 /* _argvec[0] holds current r2 across the call */ \
2518 _argvec[1] = (unsigned long)_orig.r2; \
2519 _argvec[2] = (unsigned long)_orig.nraddr; \
2520 _argvec[2 + 1] = (unsigned long)arg1; \
2521 _argvec[2 + 2] = (unsigned long)arg2; \
2522 _argvec[2 + 3] = (unsigned long)arg3; \
2523 _argvec[2 + 4] = (unsigned long)arg4; \
2524 _argvec[2 + 5] = (unsigned long)arg5; \
2525 _argvec[2 + 6] = (unsigned long)arg6; \
2526 _argvec[2 + 7] = (unsigned long)arg7; \
2527 _argvec[2 + 8] = (unsigned long)arg8; \
2528 _argvec[2 + 9] = (unsigned long)arg9; \
2529 _argvec[2 + 10] = (unsigned long)arg10; \
2530 _argvec[2 + 11] = (unsigned long)arg11; \
2531 _argvec[2 + 12] = (unsigned long)arg12; \
2532 __asm__ volatile(VALGRIND_ALIGN_STACK "mr 11,%1\n\t" \
2533 "std 2,-16(11)\n\t" /* save tocptr */ \
2534 "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
2535 "addi 1,1,-144\n\t" /* expand stack frame */ /* arg12 */ \
2536 "ld 3,96(11)\n\t" \
2537 "std 3,136(1)\n\t" /* arg11 */ \
2538 "ld 3,88(11)\n\t" \
2539 "std 3,128(1)\n\t" /* arg10 */ \
2540 "ld 3,80(11)\n\t" \
2541 "std 3,120(1)\n\t" /* arg9 */ \
2542 "ld 3,72(11)\n\t" \
2543 "std 3,112(1)\n\t" /* args1-8 */ \
2544 "ld 3, 8(11)\n\t" /* arg1->r3 */ \
2545 "ld 4, 16(11)\n\t" /* arg2->r4 */ \
2546 "ld 5, 24(11)\n\t" /* arg3->r5 */ \
2547 "ld 6, 32(11)\n\t" /* arg4->r6 */ \
2548 "ld 7, 40(11)\n\t" /* arg5->r7 */ \
2549 "ld 8, 48(11)\n\t" /* arg6->r8 */ \
2550 "ld 9, 56(11)\n\t" /* arg7->r9 */ \
2551 "ld 10, 64(11)\n\t" /* arg8->r10 */ \
2552 "ld 11, 0(11)\n\t" /* target->r11 */ \
2553 VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 "mr 11,%1\n\t" \
2554 "mr %0,3\n\t" \
2555 "ld 2,-16(11)\n\t" /* restore tocptr */ \
2556 VALGRIND_RESTORE_STACK \
2557 : /*out*/ "=r"(_res) \
2558 : /*in*/ "r"(&_argvec[2]) \
2559 : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28"); \
2560 lval = (__typeof__(lval))_res; \
2561 } while (0)
2562
2563#endif /* PLAT_ppc64_linux */
2564
2565/* ------------------------- arm-linux ------------------------- */
2566
2567#if defined(PLAT_arm_linux)
2568
2569/* These regs are trashed by the hidden call. */
2570#define __CALLER_SAVED_REGS "r0", "r1", "r2", "r3", "r4", "r14"
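
/* Note (added for clarity): r14 is the link register (lr); r4 is listed
   because VALGRIND_ALIGN_STACK below uses it as a scratch register. */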
2571
2572/* Macros to save and align the stack before making a function
2573 call, and to restore it afterwards, as gcc may not keep the
2574 stack pointer aligned if it doesn't realise that calls are
2575 being made to other functions. */
2576
2577/* This is a bit tricky. We store the original stack pointer in r10
2578 as it is callee-saved. gcc doesn't allow the use of r11 for some
2579 reason. Also, we can't directly "bic" the stack pointer in thumb
2580 mode since r13 isn't an allowed register number in that context.
2581 So we use r4 as a temporary, since it is about to get trashed
2582 anyway, just after each use of this macro. The side effect is that
2583 we need to be very careful about any future changes, since
2584 VALGRIND_ALIGN_STACK simply assumes r4 is usable. */
2585#define VALGRIND_ALIGN_STACK \
2586 "mov r10, sp\n\t" \
2587 "mov r4, sp\n\t" \
2588 "bic r4, r4, #7\n\t" \
2589 "mov sp, r4\n\t"
2590#define VALGRIND_RESTORE_STACK "mov sp, r10\n\t"
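
/* Note (added for clarity): "bic r4, r4, #7" clears the low three bits,
   rounding sp down to the 8-byte boundary the AAPCS requires at public
   interfaces; the original sp is preserved in r10. */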
2591
2592/* These CALL_FN_ macros assume that on arm-linux, sizeof(unsigned
2593 long) == 4. */
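
/* Alignment note (added for clarity): since the AAPCS keeps sp 8-byte
   aligned, the macros that push an odd number of stack-passed words
   (CALL_FN_W_5W, _7W, _9W and _11W below) first do "sub sp, sp, #4" so
   that the pushes leave sp a multiple of 8 at the call. */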
2594
2595#define CALL_FN_W_v(lval, orig) \
2596 do { \
2597 volatile OrigFn _orig = (orig); \
2598 volatile unsigned long _argvec[1]; \
2599 volatile unsigned long _res; \
2600 _argvec[0] = (unsigned long)_orig.nraddr; \
2601 __asm__ volatile(VALGRIND_ALIGN_STACK "ldr r4, [%1] \n\t" /* target->r4 */ \
2602 VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4 VALGRIND_RESTORE_STACK "mov %0, r0\n" \
2603 : /*out*/ "=r"(_res) \
2604 : /*in*/ "0"(&_argvec[0]) \
2605 : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r10"); \
2606 lval = (__typeof__(lval))_res; \
2607 } while (0)
2608
2609#define CALL_FN_W_W(lval, orig, arg1) \
2610 do { \
2611 volatile OrigFn _orig = (orig); \
2612 volatile unsigned long _argvec[2]; \
2613 volatile unsigned long _res; \
2614 _argvec[0] = (unsigned long)_orig.nraddr; \
2615 _argvec[1] = (unsigned long)(arg1); \
2616 __asm__ volatile(VALGRIND_ALIGN_STACK "ldr r0, [%1, #4] \n\t" \
2617 "ldr r4, [%1] \n\t" /* target->r4 */ \
2618 VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4 VALGRIND_RESTORE_STACK "mov %0, r0\n" \
2619 : /*out*/ "=r"(_res) \
2620 : /*in*/ "0"(&_argvec[0]) \
2621 : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r10"); \
2622 lval = (__typeof__(lval))_res; \
2623 } while (0)
2624
2625#define CALL_FN_W_WW(lval, orig, arg1, arg2) \
2626 do { \
2627 volatile OrigFn _orig = (orig); \
2628 volatile unsigned long _argvec[3]; \
2629 volatile unsigned long _res; \
2630 _argvec[0] = (unsigned long)_orig.nraddr; \
2631 _argvec[1] = (unsigned long)(arg1); \
2632 _argvec[2] = (unsigned long)(arg2); \
2633 __asm__ volatile(VALGRIND_ALIGN_STACK "ldr r0, [%1, #4] \n\t" \
2634 "ldr r1, [%1, #8] \n\t" \
2635 "ldr r4, [%1] \n\t" /* target->r4 */ \
2636 VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4 VALGRIND_RESTORE_STACK "mov %0, r0\n" \
2637 : /*out*/ "=r"(_res) \
2638 : /*in*/ "0"(&_argvec[0]) \
2639 : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r10"); \
2640 lval = (__typeof__(lval))_res; \
2641 } while (0)
2642
2643#define CALL_FN_W_WWW(lval, orig, arg1, arg2, arg3) \
2644 do { \
2645 volatile OrigFn _orig = (orig); \
2646 volatile unsigned long _argvec[4]; \
2647 volatile unsigned long _res; \
2648 _argvec[0] = (unsigned long)_orig.nraddr; \
2649 _argvec[1] = (unsigned long)(arg1); \
2650 _argvec[2] = (unsigned long)(arg2); \
2651 _argvec[3] = (unsigned long)(arg3); \
2652 __asm__ volatile(VALGRIND_ALIGN_STACK "ldr r0, [%1, #4] \n\t" \
2653 "ldr r1, [%1, #8] \n\t" \
2654 "ldr r2, [%1, #12] \n\t" \
2655 "ldr r4, [%1] \n\t" /* target->r4 */ \
2656 VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4 VALGRIND_RESTORE_STACK "mov %0, r0\n" \
2657 : /*out*/ "=r"(_res) \
2658 : /*in*/ "0"(&_argvec[0]) \
2659 : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r10"); \
2660 lval = (__typeof__(lval))_res; \
2661 } while (0)
2662
2663#define CALL_FN_W_WWWW(lval, orig, arg1, arg2, arg3, arg4) \
2664 do { \
2665 volatile OrigFn _orig = (orig); \
2666 volatile unsigned long _argvec[5]; \
2667 volatile unsigned long _res; \
2668 _argvec[0] = (unsigned long)_orig.nraddr; \
2669 _argvec[1] = (unsigned long)(arg1); \
2670 _argvec[2] = (unsigned long)(arg2); \
2671 _argvec[3] = (unsigned long)(arg3); \
2672 _argvec[4] = (unsigned long)(arg4); \
2673 __asm__ volatile(VALGRIND_ALIGN_STACK "ldr r0, [%1, #4] \n\t" \
2674 "ldr r1, [%1, #8] \n\t" \
2675 "ldr r2, [%1, #12] \n\t" \
2676 "ldr r3, [%1, #16] \n\t" \
2677 "ldr r4, [%1] \n\t" /* target->r4 */ \
2678 VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4 VALGRIND_RESTORE_STACK "mov %0, r0" \
2679 : /*out*/ "=r"(_res) \
2680 : /*in*/ "0"(&_argvec[0]) \
2681 : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r10"); \
2682 lval = (__typeof__(lval))_res; \
2683 } while (0)
2684
2685#define CALL_FN_W_5W(lval, orig, arg1, arg2, arg3, arg4, arg5) \
2686 do { \
2687 volatile OrigFn _orig = (orig); \
2688 volatile unsigned long _argvec[6]; \
2689 volatile unsigned long _res; \
2690 _argvec[0] = (unsigned long)_orig.nraddr; \
2691 _argvec[1] = (unsigned long)(arg1); \
2692 _argvec[2] = (unsigned long)(arg2); \
2693 _argvec[3] = (unsigned long)(arg3); \
2694 _argvec[4] = (unsigned long)(arg4); \
2695 _argvec[5] = (unsigned long)(arg5); \
2696 __asm__ volatile(VALGRIND_ALIGN_STACK "sub sp, sp, #4 \n\t" \
2697 "ldr r0, [%1, #20] \n\t" \
2698 "push {r0} \n\t" \
2699 "ldr r0, [%1, #4] \n\t" \
2700 "ldr r1, [%1, #8] \n\t" \
2701 "ldr r2, [%1, #12] \n\t" \
2702 "ldr r3, [%1, #16] \n\t" \
2703 "ldr r4, [%1] \n\t" /* target->r4 */ \
2704 VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4 VALGRIND_RESTORE_STACK "mov %0, r0" \
2705 : /*out*/ "=r"(_res) \
2706 : /*in*/ "0"(&_argvec[0]) \
2707 : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r10"); \
2708 lval = (__typeof__(lval))_res; \
2709 } while (0)
2710
2711#define CALL_FN_W_6W(lval, orig, arg1, arg2, arg3, arg4, arg5, arg6) \
2712 do { \
2713 volatile OrigFn _orig = (orig); \
2714 volatile unsigned long _argvec[7]; \
2715 volatile unsigned long _res; \
2716 _argvec[0] = (unsigned long)_orig.nraddr; \
2717 _argvec[1] = (unsigned long)(arg1); \
2718 _argvec[2] = (unsigned long)(arg2); \
2719 _argvec[3] = (unsigned long)(arg3); \
2720 _argvec[4] = (unsigned long)(arg4); \
2721 _argvec[5] = (unsigned long)(arg5); \
2722 _argvec[6] = (unsigned long)(arg6); \
2723 __asm__ volatile(VALGRIND_ALIGN_STACK "ldr r0, [%1, #20] \n\t" \
2724 "ldr r1, [%1, #24] \n\t" \
2725 "push {r0, r1} \n\t" \
2726 "ldr r0, [%1, #4] \n\t" \
2727 "ldr r1, [%1, #8] \n\t" \
2728 "ldr r2, [%1, #12] \n\t" \
2729 "ldr r3, [%1, #16] \n\t" \
2730 "ldr r4, [%1] \n\t" /* target->r4 */ \
2731 VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4 VALGRIND_RESTORE_STACK "mov %0, r0" \
2732 : /*out*/ "=r"(_res) \
2733 : /*in*/ "0"(&_argvec[0]) \
2734 : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r10"); \
2735 lval = (__typeof__(lval))_res; \
2736 } while (0)
2737
2738#define CALL_FN_W_7W(lval, orig, arg1, arg2, arg3, arg4, arg5, arg6, arg7) \
2739 do { \
2740 volatile OrigFn _orig = (orig); \
2741 volatile unsigned long _argvec[8]; \
2742 volatile unsigned long _res; \
2743 _argvec[0] = (unsigned long)_orig.nraddr; \
2744 _argvec[1] = (unsigned long)(arg1); \
2745 _argvec[2] = (unsigned long)(arg2); \
2746 _argvec[3] = (unsigned long)(arg3); \
2747 _argvec[4] = (unsigned long)(arg4); \
2748 _argvec[5] = (unsigned long)(arg5); \
2749 _argvec[6] = (unsigned long)(arg6); \
2750 _argvec[7] = (unsigned long)(arg7); \
2751 __asm__ volatile(VALGRIND_ALIGN_STACK "sub sp, sp, #4 \n\t" \
2752 "ldr r0, [%1, #20] \n\t" \
2753 "ldr r1, [%1, #24] \n\t" \
2754 "ldr r2, [%1, #28] \n\t" \
2755 "push {r0, r1, r2} \n\t" \
2756 "ldr r0, [%1, #4] \n\t" \
2757 "ldr r1, [%1, #8] \n\t" \
2758 "ldr r2, [%1, #12] \n\t" \
2759 "ldr r3, [%1, #16] \n\t" \
2760 "ldr r4, [%1] \n\t" /* target->r4 */ \
2761 VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4 VALGRIND_RESTORE_STACK "mov %0, r0" \
2762 : /*out*/ "=r"(_res) \
2763 : /*in*/ "0"(&_argvec[0]) \
2764 : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r10"); \
2765 lval = (__typeof__(lval))_res; \
2766 } while (0)
2767
2768#define CALL_FN_W_8W(lval, orig, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8) \
2769 do { \
2770 volatile OrigFn _orig = (orig); \
2771 volatile unsigned long _argvec[9]; \
2772 volatile unsigned long _res; \
2773 _argvec[0] = (unsigned long)_orig.nraddr; \
2774 _argvec[1] = (unsigned long)(arg1); \
2775 _argvec[2] = (unsigned long)(arg2); \
2776 _argvec[3] = (unsigned long)(arg3); \
2777 _argvec[4] = (unsigned long)(arg4); \
2778 _argvec[5] = (unsigned long)(arg5); \
2779 _argvec[6] = (unsigned long)(arg6); \
2780 _argvec[7] = (unsigned long)(arg7); \
2781 _argvec[8] = (unsigned long)(arg8); \
2782 __asm__ volatile(VALGRIND_ALIGN_STACK "ldr r0, [%1, #20] \n\t" \
2783 "ldr r1, [%1, #24] \n\t" \
2784 "ldr r2, [%1, #28] \n\t" \
2785 "ldr r3, [%1, #32] \n\t" \
2786 "push {r0, r1, r2, r3} \n\t" \
2787 "ldr r0, [%1, #4] \n\t" \
2788 "ldr r1, [%1, #8] \n\t" \
2789 "ldr r2, [%1, #12] \n\t" \
2790 "ldr r3, [%1, #16] \n\t" \
2791 "ldr r4, [%1] \n\t" /* target->r4 */ \
2792 VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4 VALGRIND_RESTORE_STACK "mov %0, r0" \
2793 : /*out*/ "=r"(_res) \
2794 : /*in*/ "0"(&_argvec[0]) \
2795 : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r10"); \
2796 lval = (__typeof__(lval))_res; \
2797 } while (0)
2798
2799#define CALL_FN_W_9W(lval, orig, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8, arg9) \
2800 do { \
2801 volatile OrigFn _orig = (orig); \
2802 volatile unsigned long _argvec[10]; \
2803 volatile unsigned long _res; \
2804 _argvec[0] = (unsigned long)_orig.nraddr; \
2805 _argvec[1] = (unsigned long)(arg1); \
2806 _argvec[2] = (unsigned long)(arg2); \
2807 _argvec[3] = (unsigned long)(arg3); \
2808 _argvec[4] = (unsigned long)(arg4); \
2809 _argvec[5] = (unsigned long)(arg5); \
2810 _argvec[6] = (unsigned long)(arg6); \
2811 _argvec[7] = (unsigned long)(arg7); \
2812 _argvec[8] = (unsigned long)(arg8); \
2813 _argvec[9] = (unsigned long)(arg9); \
2814 __asm__ volatile(VALGRIND_ALIGN_STACK "sub sp, sp, #4 \n\t" \
2815 "ldr r0, [%1, #20] \n\t" \
2816 "ldr r1, [%1, #24] \n\t" \
2817 "ldr r2, [%1, #28] \n\t" \
2818 "ldr r3, [%1, #32] \n\t" \
2819 "ldr r4, [%1, #36] \n\t" \
2820 "push {r0, r1, r2, r3, r4} \n\t" \
2821 "ldr r0, [%1, #4] \n\t" \
2822 "ldr r1, [%1, #8] \n\t" \
2823 "ldr r2, [%1, #12] \n\t" \
2824 "ldr r3, [%1, #16] \n\t" \
2825 "ldr r4, [%1] \n\t" /* target->r4 */ \
2826 VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4 VALGRIND_RESTORE_STACK "mov %0, r0" \
2827 : /*out*/ "=r"(_res) \
2828 : /*in*/ "0"(&_argvec[0]) \
2829 : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r10"); \
2830 lval = (__typeof__(lval))_res; \
2831 } while (0)
2832
2833#define CALL_FN_W_10W(lval, orig, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8, arg9, arg10) \
2834 do { \
2835 volatile OrigFn _orig = (orig); \
2836 volatile unsigned long _argvec[11]; \
2837 volatile unsigned long _res; \
2838 _argvec[0] = (unsigned long)_orig.nraddr; \
2839 _argvec[1] = (unsigned long)(arg1); \
2840 _argvec[2] = (unsigned long)(arg2); \
2841 _argvec[3] = (unsigned long)(arg3); \
2842 _argvec[4] = (unsigned long)(arg4); \
2843 _argvec[5] = (unsigned long)(arg5); \
2844 _argvec[6] = (unsigned long)(arg6); \
2845 _argvec[7] = (unsigned long)(arg7); \
2846 _argvec[8] = (unsigned long)(arg8); \
2847 _argvec[9] = (unsigned long)(arg9); \
2848 _argvec[10] = (unsigned long)(arg10); \
2849 __asm__ volatile(VALGRIND_ALIGN_STACK "ldr r0, [%1, #40] \n\t" \
2850 "push {r0} \n\t" \
2851 "ldr r0, [%1, #20] \n\t" \
2852 "ldr r1, [%1, #24] \n\t" \
2853 "ldr r2, [%1, #28] \n\t" \
2854 "ldr r3, [%1, #32] \n\t" \
2855 "ldr r4, [%1, #36] \n\t" \
2856 "push {r0, r1, r2, r3, r4} \n\t" \
2857 "ldr r0, [%1, #4] \n\t" \
2858 "ldr r1, [%1, #8] \n\t" \
2859 "ldr r2, [%1, #12] \n\t" \
2860 "ldr r3, [%1, #16] \n\t" \
2861 "ldr r4, [%1] \n\t" /* target->r4 */ \
2862 VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4 VALGRIND_RESTORE_STACK "mov %0, r0" \
2863 : /*out*/ "=r"(_res) \
2864 : /*in*/ "0"(&_argvec[0]) \
2865 : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r10"); \
2866 lval = (__typeof__(lval))_res; \
2867 } while (0)
2868
2869#define CALL_FN_W_11W(lval, orig, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8, arg9, arg10, arg11) \
2870 do { \
2871 volatile OrigFn _orig = (orig); \
2872 volatile unsigned long _argvec[12]; \
2873 volatile unsigned long _res; \
2874 _argvec[0] = (unsigned long)_orig.nraddr; \
2875 _argvec[1] = (unsigned long)(arg1); \
2876 _argvec[2] = (unsigned long)(arg2); \
2877 _argvec[3] = (unsigned long)(arg3); \
2878 _argvec[4] = (unsigned long)(arg4); \
2879 _argvec[5] = (unsigned long)(arg5); \
2880 _argvec[6] = (unsigned long)(arg6); \
2881 _argvec[7] = (unsigned long)(arg7); \
2882 _argvec[8] = (unsigned long)(arg8); \
2883 _argvec[9] = (unsigned long)(arg9); \
2884 _argvec[10] = (unsigned long)(arg10); \
2885 _argvec[11] = (unsigned long)(arg11); \
2886 __asm__ volatile(VALGRIND_ALIGN_STACK "sub sp, sp, #4 \n\t" \
2887 "ldr r0, [%1, #40] \n\t" \
2888 "ldr r1, [%1, #44] \n\t" \
2889 "push {r0, r1} \n\t" \
2890 "ldr r0, [%1, #20] \n\t" \
2891 "ldr r1, [%1, #24] \n\t" \
2892 "ldr r2, [%1, #28] \n\t" \
2893 "ldr r3, [%1, #32] \n\t" \
2894 "ldr r4, [%1, #36] \n\t" \
2895 "push {r0, r1, r2, r3, r4} \n\t" \
2896 "ldr r0, [%1, #4] \n\t" \
2897 "ldr r1, [%1, #8] \n\t" \
2898 "ldr r2, [%1, #12] \n\t" \
2899 "ldr r3, [%1, #16] \n\t" \
2900 "ldr r4, [%1] \n\t" /* target->r4 */ \
2901 VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4 VALGRIND_RESTORE_STACK "mov %0, r0" \
2902 : /*out*/ "=r"(_res) \
2903 : /*in*/ "0"(&_argvec[0]) \
2904 : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r10"); \
2905 lval = (__typeof__(lval))_res; \
2906 } while (0)
2907
2908#define CALL_FN_W_12W(lval, orig, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8, arg9, arg10, arg11, arg12) \
2909 do { \
2910 volatile OrigFn _orig = (orig); \
2911 volatile unsigned long _argvec[13]; \
2912 volatile unsigned long _res; \
2913 _argvec[0] = (unsigned long)_orig.nraddr; \
2914 _argvec[1] = (unsigned long)(arg1); \
2915 _argvec[2] = (unsigned long)(arg2); \
2916 _argvec[3] = (unsigned long)(arg3); \
2917 _argvec[4] = (unsigned long)(arg4); \
2918 _argvec[5] = (unsigned long)(arg5); \
2919 _argvec[6] = (unsigned long)(arg6); \
2920 _argvec[7] = (unsigned long)(arg7); \
2921 _argvec[8] = (unsigned long)(arg8); \
2922 _argvec[9] = (unsigned long)(arg9); \
2923 _argvec[10] = (unsigned long)(arg10); \
2924 _argvec[11] = (unsigned long)(arg11); \
2925 _argvec[12] = (unsigned long)(arg12); \
2926 __asm__ volatile(VALGRIND_ALIGN_STACK "ldr r0, [%1, #40] \n\t" \
2927 "ldr r1, [%1, #44] \n\t" \
2928 "ldr r2, [%1, #48] \n\t" \
2929 "push {r0, r1, r2} \n\t" \
2930 "ldr r0, [%1, #20] \n\t" \
2931 "ldr r1, [%1, #24] \n\t" \
2932 "ldr r2, [%1, #28] \n\t" \
2933 "ldr r3, [%1, #32] \n\t" \
2934 "ldr r4, [%1, #36] \n\t" \
2935 "push {r0, r1, r2, r3, r4} \n\t" \
2936 "ldr r0, [%1, #4] \n\t" \
2937 "ldr r1, [%1, #8] \n\t" \
2938 "ldr r2, [%1, #12] \n\t" \
2939 "ldr r3, [%1, #16] \n\t" \
2940 "ldr r4, [%1] \n\t" /* target->r4 */ \
2941 VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4 VALGRIND_RESTORE_STACK "mov %0, r0" \
2942 : /*out*/ "=r"(_res) \
2943 : /*in*/ "0"(&_argvec[0]) \
2944 : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r10"); \
2945 lval = (__typeof__(lval))_res; \
2946 } while (0)
2947
2948#endif /* PLAT_arm_linux */
2949
2950/* ------------------------- s390x-linux ------------------------- */
2951
2952#if defined(PLAT_s390x_linux)
2953
2954/* Similar workaround to the one for amd64 (see above), but we use r11
2955 as the frame pointer and save the old r11 in r7. r11 might be used
2956 for argvec, therefore we copy argvec into r1, since r1 is clobbered
2957 after the call anyway. */
2958#if defined(__GNUC__) && defined(__GCC_HAVE_DWARF2_CFI_ASM)
2959#define __FRAME_POINTER , "d"(__builtin_dwarf_cfa())
2960#define VALGRIND_CFI_PROLOGUE \
2961 ".cfi_remember_state\n\t" \
2962 "lgr 1,%1\n\t" /* copy the argvec pointer in r1 */ \
2963 "lgr 7,11\n\t" \
2964 "lgr 11,%2\n\t" \
2965 ".cfi_def_cfa r11, 0\n\t"
2966#define VALGRIND_CFI_EPILOGUE \
2967 "lgr 11, 7\n\t" \
2968 ".cfi_restore_state\n\t"
2969#else
2970#define __FRAME_POINTER
2971#define VALGRIND_CFI_PROLOGUE "lgr 1,%1\n\t"
2972#define VALGRIND_CFI_EPILOGUE
2973#endif
2974
2975/* Nb: On s390 the stack pointer is properly aligned *at all times*
2976 according to the s390 GCC maintainer. (The ABI specification is not
2977 precise in this regard.) Therefore, VALGRIND_ALIGN_STACK and
2978 VALGRIND_RESTORE_STACK are not defined here. */
2979
2980/* These regs are trashed by the hidden call. Note that we overwrite
2981 r14 in s390_irgen_noredir (VEX/priv/guest_s390_irgen.c) to give the
2982 function a proper return address. All others are ABI defined call
2983 clobbers. */
2984#define __CALLER_SAVED_REGS "0", "1", "2", "3", "4", "5", "14", "f0", "f1", "f2", "f3", "f4", "f5", "f6", "f7"
2985
2986/* Nb: Although r11 is modified in the asm snippets below (inside
2987 VALGRIND_CFI_PROLOGUE) it is not listed in the clobber section, for
2988 two reasons:
2989 (1) r11 is restored in VALGRIND_CFI_EPILOGUE, so effectively it is not
2990 modified
2991 (2) GCC will complain that r11 cannot appear inside a clobber section,
2992 when compiled with -O -fno-omit-frame-pointer
2993 */
2994
2995#define CALL_FN_W_v(lval, orig) \
2996 do { \
2997 volatile OrigFn _orig = (orig); \
2998 volatile unsigned long _argvec[1]; \
2999 volatile unsigned long _res; \
3000 _argvec[0] = (unsigned long)_orig.nraddr; \
3001 __asm__ volatile(VALGRIND_CFI_PROLOGUE "aghi 15,-160\n\t" \
3002 "lg 1, 0(1)\n\t" /* target->r1 */ \
3003 VALGRIND_CALL_NOREDIR_R1 "lgr %0, 2\n\t" \
3004 "aghi 15,160\n\t" VALGRIND_CFI_EPILOGUE \
3005 : /*out*/ "=d"(_res) \
3006 : /*in*/ "d"(&_argvec[0])__FRAME_POINTER \
3007 : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "7"); \
3008 lval = (__typeof__(lval))_res; \
3009 } while (0)
3010
3011/* The s390x call ABI passes the first five arguments in r2-r6; further arguments go on the stack. */
3012#define CALL_FN_W_W(lval, orig, arg1) \
3013 do { \
3014 volatile OrigFn _orig = (orig); \
3015 volatile unsigned long _argvec[2]; \
3016 volatile unsigned long _res; \
3017 _argvec[0] = (unsigned long)_orig.nraddr; \
3018 _argvec[1] = (unsigned long)arg1; \
3019 __asm__ volatile(VALGRIND_CFI_PROLOGUE "aghi 15,-160\n\t" \
3020 "lg 2, 8(1)\n\t" \
3021 "lg 1, 0(1)\n\t" VALGRIND_CALL_NOREDIR_R1 "lgr %0, 2\n\t" \
3022 "aghi 15,160\n\t" VALGRIND_CFI_EPILOGUE \
3023 : /*out*/ "=d"(_res) \
3024 : /*in*/ "a"(&_argvec[0])__FRAME_POINTER \
3025 : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "7"); \
3026 lval = (__typeof__(lval))_res; \
3027 } while (0)
3028
3029#define CALL_FN_W_WW(lval, orig, arg1, arg2) \
3030 do { \
3031 volatile OrigFn _orig = (orig); \
3032 volatile unsigned long _argvec[3]; \
3033 volatile unsigned long _res; \
3034 _argvec[0] = (unsigned long)_orig.nraddr; \
3035 _argvec[1] = (unsigned long)arg1; \
3036 _argvec[2] = (unsigned long)arg2; \
3037 __asm__ volatile(VALGRIND_CFI_PROLOGUE "aghi 15,-160\n\t" \
3038 "lg 2, 8(1)\n\t" \
3039 "lg 3,16(1)\n\t" \
3040 "lg 1, 0(1)\n\t" VALGRIND_CALL_NOREDIR_R1 "lgr %0, 2\n\t" \
3041 "aghi 15,160\n\t" VALGRIND_CFI_EPILOGUE \
3042 : /*out*/ "=d"(_res) \
3043 : /*in*/ "a"(&_argvec[0])__FRAME_POINTER \
3044 : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "7"); \
3045 lval = (__typeof__(lval))_res; \
3046 } while (0)
3047
3048#define CALL_FN_W_WWW(lval, orig, arg1, arg2, arg3) \
3049 do { \
3050 volatile OrigFn _orig = (orig); \
3051 volatile unsigned long _argvec[4]; \
3052 volatile unsigned long _res; \
3053 _argvec[0] = (unsigned long)_orig.nraddr; \
3054 _argvec[1] = (unsigned long)arg1; \
3055 _argvec[2] = (unsigned long)arg2; \
3056 _argvec[3] = (unsigned long)arg3; \
3057 __asm__ volatile(VALGRIND_CFI_PROLOGUE "aghi 15,-160\n\t" \
3058 "lg 2, 8(1)\n\t" \
3059 "lg 3,16(1)\n\t" \
3060 "lg 4,24(1)\n\t" \
3061 "lg 1, 0(1)\n\t" VALGRIND_CALL_NOREDIR_R1 "lgr %0, 2\n\t" \
3062 "aghi 15,160\n\t" VALGRIND_CFI_EPILOGUE \
3063 : /*out*/ "=d"(_res) \
3064 : /*in*/ "a"(&_argvec[0])__FRAME_POINTER \
3065 : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "7"); \
3066 lval = (__typeof__(lval))_res; \
3067 } while (0)
3068
3069#define CALL_FN_W_WWWW(lval, orig, arg1, arg2, arg3, arg4) \
3070 do { \
3071 volatile OrigFn _orig = (orig); \
3072 volatile unsigned long _argvec[5]; \
3073 volatile unsigned long _res; \
3074 _argvec[0] = (unsigned long)_orig.nraddr; \
3075 _argvec[1] = (unsigned long)arg1; \
3076 _argvec[2] = (unsigned long)arg2; \
3077 _argvec[3] = (unsigned long)arg3; \
3078 _argvec[4] = (unsigned long)arg4; \
3079 __asm__ volatile(VALGRIND_CFI_PROLOGUE "aghi 15,-160\n\t" \
3080 "lg 2, 8(1)\n\t" \
3081 "lg 3,16(1)\n\t" \
3082 "lg 4,24(1)\n\t" \
3083 "lg 5,32(1)\n\t" \
3084 "lg 1, 0(1)\n\t" VALGRIND_CALL_NOREDIR_R1 "lgr %0, 2\n\t" \
3085 "aghi 15,160\n\t" VALGRIND_CFI_EPILOGUE \
3086 : /*out*/ "=d"(_res) \
3087 : /*in*/ "a"(&_argvec[0])__FRAME_POINTER \
3088 : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "7"); \
3089 lval = (__typeof__(lval))_res; \
3090 } while (0)
3091
3092#define CALL_FN_W_5W(lval, orig, arg1, arg2, arg3, arg4, arg5) \
3093 do { \
3094 volatile OrigFn _orig = (orig); \
3095 volatile unsigned long _argvec[6]; \
3096 volatile unsigned long _res; \
3097 _argvec[0] = (unsigned long)_orig.nraddr; \
3098 _argvec[1] = (unsigned long)arg1; \
3099 _argvec[2] = (unsigned long)arg2; \
3100 _argvec[3] = (unsigned long)arg3; \
3101 _argvec[4] = (unsigned long)arg4; \
3102 _argvec[5] = (unsigned long)arg5; \
3103 __asm__ volatile(VALGRIND_CFI_PROLOGUE "aghi 15,-160\n\t" \
3104 "lg 2, 8(1)\n\t" \
3105 "lg 3,16(1)\n\t" \
3106 "lg 4,24(1)\n\t" \
3107 "lg 5,32(1)\n\t" \
3108 "lg 6,40(1)\n\t" \
3109 "lg 1, 0(1)\n\t" VALGRIND_CALL_NOREDIR_R1 "lgr %0, 2\n\t" \
3110 "aghi 15,160\n\t" VALGRIND_CFI_EPILOGUE \
3111 : /*out*/ "=d"(_res) \
3112 : /*in*/ "a"(&_argvec[0])__FRAME_POINTER \
3113 : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "6", "7"); \
3114 lval = (__typeof__(lval))_res; \
3115 } while (0)
3116
3117#define CALL_FN_W_6W(lval, orig, arg1, arg2, arg3, arg4, arg5, arg6) \
3118 do { \
3119 volatile OrigFn _orig = (orig); \
3120 volatile unsigned long _argvec[7]; \
3121 volatile unsigned long _res; \
3122 _argvec[0] = (unsigned long)_orig.nraddr; \
3123 _argvec[1] = (unsigned long)arg1; \
3124 _argvec[2] = (unsigned long)arg2; \
3125 _argvec[3] = (unsigned long)arg3; \
3126 _argvec[4] = (unsigned long)arg4; \
3127 _argvec[5] = (unsigned long)arg5; \
3128 _argvec[6] = (unsigned long)arg6; \
3129 __asm__ volatile(VALGRIND_CFI_PROLOGUE "aghi 15,-168\n\t" \
3130 "lg 2, 8(1)\n\t" \
3131 "lg 3,16(1)\n\t" \
3132 "lg 4,24(1)\n\t" \
3133 "lg 5,32(1)\n\t" \
3134 "lg 6,40(1)\n\t" \
3135 "mvc 160(8,15), 48(1)\n\t" \
3136 "lg 1, 0(1)\n\t" VALGRIND_CALL_NOREDIR_R1 "lgr %0, 2\n\t" \
3137 "aghi 15,168\n\t" VALGRIND_CFI_EPILOGUE \
3138 : /*out*/ "=d"(_res) \
3139 : /*in*/ "a"(&_argvec[0])__FRAME_POINTER \
3140 : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "6", "7"); \
3141 lval = (__typeof__(lval))_res; \
3142 } while (0)
3143
3144#define CALL_FN_W_7W(lval, orig, arg1, arg2, arg3, arg4, arg5, arg6, arg7) \
3145 do { \
3146 volatile OrigFn _orig = (orig); \
3147 volatile unsigned long _argvec[8]; \
3148 volatile unsigned long _res; \
3149 _argvec[0] = (unsigned long)_orig.nraddr; \
3150 _argvec[1] = (unsigned long)arg1; \
3151 _argvec[2] = (unsigned long)arg2; \
3152 _argvec[3] = (unsigned long)arg3; \
3153 _argvec[4] = (unsigned long)arg4; \
3154 _argvec[5] = (unsigned long)arg5; \
3155 _argvec[6] = (unsigned long)arg6; \
3156 _argvec[7] = (unsigned long)arg7; \
3157 __asm__ volatile(VALGRIND_CFI_PROLOGUE "aghi 15,-176\n\t" \
3158 "lg 2, 8(1)\n\t" \
3159 "lg 3,16(1)\n\t" \
3160 "lg 4,24(1)\n\t" \
3161 "lg 5,32(1)\n\t" \
3162 "lg 6,40(1)\n\t" \
3163 "mvc 160(8,15), 48(1)\n\t" \
3164 "mvc 168(8,15), 56(1)\n\t" \
3165 "lg 1, 0(1)\n\t" VALGRIND_CALL_NOREDIR_R1 "lgr %0, 2\n\t" \
3166 "aghi 15,176\n\t" VALGRIND_CFI_EPILOGUE \
3167 : /*out*/ "=d"(_res) \
3168 : /*in*/ "a"(&_argvec[0])__FRAME_POINTER \
3169 : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "6", "7"); \
3170 lval = (__typeof__(lval))_res; \
3171 } while (0)
3172
3173#define CALL_FN_W_8W(lval, orig, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8) \
3174 do { \
3175 volatile OrigFn _orig = (orig); \
3176 volatile unsigned long _argvec[9]; \
3177 volatile unsigned long _res; \
3178 _argvec[0] = (unsigned long)_orig.nraddr; \
3179 _argvec[1] = (unsigned long)arg1; \
3180 _argvec[2] = (unsigned long)arg2; \
3181 _argvec[3] = (unsigned long)arg3; \
3182 _argvec[4] = (unsigned long)arg4; \
3183 _argvec[5] = (unsigned long)arg5; \
3184 _argvec[6] = (unsigned long)arg6; \
3185 _argvec[7] = (unsigned long)arg7; \
3186 _argvec[8] = (unsigned long)arg8; \
3187 __asm__ volatile(VALGRIND_CFI_PROLOGUE "aghi 15,-184\n\t" \
3188 "lg 2, 8(1)\n\t" \
3189 "lg 3,16(1)\n\t" \
3190 "lg 4,24(1)\n\t" \
3191 "lg 5,32(1)\n\t" \
3192 "lg 6,40(1)\n\t" \
3193 "mvc 160(8,15), 48(1)\n\t" \
3194 "mvc 168(8,15), 56(1)\n\t" \
3195 "mvc 176(8,15), 64(1)\n\t" \
3196 "lg 1, 0(1)\n\t" VALGRIND_CALL_NOREDIR_R1 "lgr %0, 2\n\t" \
3197 "aghi 15,184\n\t" VALGRIND_CFI_EPILOGUE \
3198 : /*out*/ "=d"(_res) \
3199 : /*in*/ "a"(&_argvec[0])__FRAME_POINTER \
3200 : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "6", "7"); \
3201 lval = (__typeof__(lval))_res; \
3202 } while (0)
3203
3204#define CALL_FN_W_9W(lval, orig, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8, arg9) \
3205 do { \
3206 volatile OrigFn _orig = (orig); \
3207 volatile unsigned long _argvec[10]; \
3208 volatile unsigned long _res; \
3209 _argvec[0] = (unsigned long)_orig.nraddr; \
3210 _argvec[1] = (unsigned long)arg1; \
3211 _argvec[2] = (unsigned long)arg2; \
3212 _argvec[3] = (unsigned long)arg3; \
3213 _argvec[4] = (unsigned long)arg4; \
3214 _argvec[5] = (unsigned long)arg5; \
3215 _argvec[6] = (unsigned long)arg6; \
3216 _argvec[7] = (unsigned long)arg7; \
3217 _argvec[8] = (unsigned long)arg8; \
3218 _argvec[9] = (unsigned long)arg9; \
3219 __asm__ volatile(VALGRIND_CFI_PROLOGUE "aghi 15,-192\n\t" \
3220 "lg 2, 8(1)\n\t" \
3221 "lg 3,16(1)\n\t" \
3222 "lg 4,24(1)\n\t" \
3223 "lg 5,32(1)\n\t" \
3224 "lg 6,40(1)\n\t" \
3225 "mvc 160(8,15), 48(1)\n\t" \
3226 "mvc 168(8,15), 56(1)\n\t" \
3227 "mvc 176(8,15), 64(1)\n\t" \
3228 "mvc 184(8,15), 72(1)\n\t" \
3229 "lg 1, 0(1)\n\t" VALGRIND_CALL_NOREDIR_R1 "lgr %0, 2\n\t" \
3230 "aghi 15,192\n\t" VALGRIND_CFI_EPILOGUE \
3231 : /*out*/ "=d"(_res) \
3232 : /*in*/ "a"(&_argvec[0])__FRAME_POINTER \
3233 : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "6", "7"); \
3234 lval = (__typeof__(lval))_res; \
3235 } while (0)
3236
3237#define CALL_FN_W_10W(lval, orig, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8, arg9, arg10) \
3238 do { \
3239 volatile OrigFn _orig = (orig); \
3240 volatile unsigned long _argvec[11]; \
3241 volatile unsigned long _res; \
3242 _argvec[0] = (unsigned long)_orig.nraddr; \
3243 _argvec[1] = (unsigned long)arg1; \
3244 _argvec[2] = (unsigned long)arg2; \
3245 _argvec[3] = (unsigned long)arg3; \
3246 _argvec[4] = (unsigned long)arg4; \
3247 _argvec[5] = (unsigned long)arg5; \
3248 _argvec[6] = (unsigned long)arg6; \
3249 _argvec[7] = (unsigned long)arg7; \
3250 _argvec[8] = (unsigned long)arg8; \
3251 _argvec[9] = (unsigned long)arg9; \
3252 _argvec[10] = (unsigned long)arg10; \
3253 __asm__ volatile(VALGRIND_CFI_PROLOGUE "aghi 15,-200\n\t" \
3254 "lg 2, 8(1)\n\t" \
3255 "lg 3,16(1)\n\t" \
3256 "lg 4,24(1)\n\t" \
3257 "lg 5,32(1)\n\t" \
3258 "lg 6,40(1)\n\t" \
3259 "mvc 160(8,15), 48(1)\n\t" \
3260 "mvc 168(8,15), 56(1)\n\t" \
3261 "mvc 176(8,15), 64(1)\n\t" \
3262 "mvc 184(8,15), 72(1)\n\t" \
3263 "mvc 192(8,15), 80(1)\n\t" \
3264 "lg 1, 0(1)\n\t" VALGRIND_CALL_NOREDIR_R1 "lgr %0, 2\n\t" \
3265 "aghi 15,200\n\t" VALGRIND_CFI_EPILOGUE \
3266 : /*out*/ "=d"(_res) \
3267 : /*in*/ "a"(&_argvec[0])__FRAME_POINTER \
3268 : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "6", "7"); \
3269 lval = (__typeof__(lval))_res; \
3270 } while (0)
3271
3272#define CALL_FN_W_11W(lval, orig, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8, arg9, arg10, arg11) \
3273 do { \
3274 volatile OrigFn _orig = (orig); \
3275 volatile unsigned long _argvec[12]; \
3276 volatile unsigned long _res; \
3277 _argvec[0] = (unsigned long)_orig.nraddr; \
3278 _argvec[1] = (unsigned long)arg1; \
3279 _argvec[2] = (unsigned long)arg2; \
3280 _argvec[3] = (unsigned long)arg3; \
3281 _argvec[4] = (unsigned long)arg4; \
3282 _argvec[5] = (unsigned long)arg5; \
3283 _argvec[6] = (unsigned long)arg6; \
3284 _argvec[7] = (unsigned long)arg7; \
3285 _argvec[8] = (unsigned long)arg8; \
3286 _argvec[9] = (unsigned long)arg9; \
3287 _argvec[10] = (unsigned long)arg10; \
3288 _argvec[11] = (unsigned long)arg11; \
3289 __asm__ volatile(VALGRIND_CFI_PROLOGUE "aghi 15,-208\n\t" \
3290 "lg 2, 8(1)\n\t" \
3291 "lg 3,16(1)\n\t" \
3292 "lg 4,24(1)\n\t" \
3293 "lg 5,32(1)\n\t" \
3294 "lg 6,40(1)\n\t" \
3295 "mvc 160(8,15), 48(1)\n\t" \
3296 "mvc 168(8,15), 56(1)\n\t" \
3297 "mvc 176(8,15), 64(1)\n\t" \
3298 "mvc 184(8,15), 72(1)\n\t" \
3299 "mvc 192(8,15), 80(1)\n\t" \
3300 "mvc 200(8,15), 88(1)\n\t" \
3301 "lg 1, 0(1)\n\t" VALGRIND_CALL_NOREDIR_R1 "lgr %0, 2\n\t" \
3302 "aghi 15,208\n\t" VALGRIND_CFI_EPILOGUE \
3303 : /*out*/ "=d"(_res) \
3304 : /*in*/ "a"(&_argvec[0])__FRAME_POINTER \
3305 : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "6", "7"); \
3306 lval = (__typeof__(lval))_res; \
3307 } while (0)
3308
3309#define CALL_FN_W_12W(lval, orig, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8, arg9, arg10, arg11, arg12) \
3310 do { \
3311 volatile OrigFn _orig = (orig); \
3312 volatile unsigned long _argvec[13]; \
3313 volatile unsigned long _res; \
3314 _argvec[0] = (unsigned long)_orig.nraddr; \
3315 _argvec[1] = (unsigned long)arg1; \
3316 _argvec[2] = (unsigned long)arg2; \
3317 _argvec[3] = (unsigned long)arg3; \
3318 _argvec[4] = (unsigned long)arg4; \
3319 _argvec[5] = (unsigned long)arg5; \
3320 _argvec[6] = (unsigned long)arg6; \
3321 _argvec[7] = (unsigned long)arg7; \
3322 _argvec[8] = (unsigned long)arg8; \
3323 _argvec[9] = (unsigned long)arg9; \
3324 _argvec[10] = (unsigned long)arg10; \
3325 _argvec[11] = (unsigned long)arg11; \
3326 _argvec[12] = (unsigned long)arg12; \
3327 __asm__ volatile(VALGRIND_CFI_PROLOGUE "aghi 15,-216\n\t" \
3328 "lg 2, 8(1)\n\t" \
3329 "lg 3,16(1)\n\t" \
3330 "lg 4,24(1)\n\t" \
3331 "lg 5,32(1)\n\t" \
3332 "lg 6,40(1)\n\t" \
3333 "mvc 160(8,15), 48(1)\n\t" \
3334 "mvc 168(8,15), 56(1)\n\t" \
3335 "mvc 176(8,15), 64(1)\n\t" \
3336 "mvc 184(8,15), 72(1)\n\t" \
3337 "mvc 192(8,15), 80(1)\n\t" \
3338 "mvc 200(8,15), 88(1)\n\t" \
3339 "mvc 208(8,15), 96(1)\n\t" \
3340 "lg 1, 0(1)\n\t" VALGRIND_CALL_NOREDIR_R1 "lgr %0, 2\n\t" \
3341 "aghi 15,216\n\t" VALGRIND_CFI_EPILOGUE \
3342 : /*out*/ "=d"(_res) \
3343 : /*in*/ "a"(&_argvec[0])__FRAME_POINTER \
3344 : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "6", "7"); \
3345 lval = (__typeof__(lval))_res; \
3346 } while (0)
3347
3348
3349#endif /* PLAT_s390x_linux */
3350
3351/* ------------------------- mips-linux ------------------------- */
3352
3353#if defined(PLAT_mips32_linux)
3354
3355/* These regs are trashed by the hidden call. */
3356#define __CALLER_SAVED_REGS \
3357 "$2", "$3", "$4", "$5", "$6", "$7", "$8", "$9", "$10", "$11", "$12", "$13", "$14", "$15", "$24", "$25", "$31"
3358
3359/* These CALL_FN_ macros assume that on mips-linux, sizeof(unsigned
3360 long) == 4. */
3361
3362#define CALL_FN_W_v(lval, orig) \
3363 do { \
3364 volatile OrigFn _orig = (orig); \
3365 volatile unsigned long _argvec[1]; \
3366 volatile unsigned long _res; \
3367 _argvec[0] = (unsigned long)_orig.nraddr; \
3368 __asm__ volatile("subu $29, $29, 8 \n\t" \
3369 "sw $gp, 0($sp) \n\t" \
3370 "sw $ra, 4($sp) \n\t" \
3371 "subu $29, $29, 16 \n\t" \
3372 "lw $t9, 0(%1) \n\t" /* target->t9 */ \
3373 VALGRIND_CALL_NOREDIR_T9 "addu $29, $29, 16\n\t" \
3374 "lw $gp, 0($sp) \n\t" \
3375 "lw $ra, 4($sp) \n\t" \
3376 "addu $29, $29, 8 \n\t" \
3377 "move %0, $v0\n" \
3378 : /*out*/ "=r"(_res) \
3379 : /*in*/ "0"(&_argvec[0]) \
3380 : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS); \
3381 lval = (__typeof__(lval))_res; \
3382 } while (0)
3383
3384#define CALL_FN_W_W(lval, orig, arg1) \
3385 do { \
3386 volatile OrigFn _orig = (orig); \
3387 volatile unsigned long _argvec[2]; \
3388 volatile unsigned long _res; \
3389 _argvec[0] = (unsigned long)_orig.nraddr; \
3390 _argvec[1] = (unsigned long)(arg1); \
3391 __asm__ volatile("subu $29, $29, 8 \n\t" \
3392 "sw $gp, 0($sp) \n\t" \
3393 "sw $ra, 4($sp) \n\t" \
3394 "subu $29, $29, 16 \n\t" \
3395 "lw $a0, 4(%1) \n\t" /* arg1*/ \
3396 "lw $t9, 0(%1) \n\t" /* target->t9 */ \
3397 VALGRIND_CALL_NOREDIR_T9 "addu $29, $29, 16 \n\t" \
3398 "lw $gp, 0($sp) \n\t" \
3399 "lw $ra, 4($sp) \n\t" \
3400 "addu $29, $29, 8 \n\t" \
3401 "move %0, $v0\n" \
3402 : /*out*/ "=r"(_res) \
3403 : /*in*/ "0"(&_argvec[0]) \
3404 : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS); \
3405 lval = (__typeof__(lval))_res; \
3406 } while (0)
3407
3408#define CALL_FN_W_WW(lval, orig, arg1, arg2) \
3409 do { \
3410 volatile OrigFn _orig = (orig); \
3411 volatile unsigned long _argvec[3]; \
3412 volatile unsigned long _res; \
3413 _argvec[0] = (unsigned long)_orig.nraddr; \
3414 _argvec[1] = (unsigned long)(arg1); \
3415 _argvec[2] = (unsigned long)(arg2); \
3416 __asm__ volatile("subu $29, $29, 8 \n\t" \
3417 "sw $gp, 0($sp) \n\t" \
3418 "sw $ra, 4($sp) \n\t" \
3419 "subu $29, $29, 16 \n\t" \
3420 "lw $a0, 4(%1) \n\t" \
3421 "lw $a1, 8(%1) \n\t" \
3422 "lw $t9, 0(%1) \n\t" /* target->t9 */ \
3423 VALGRIND_CALL_NOREDIR_T9 "addu $29, $29, 16 \n\t" \
3424 "lw $gp, 0($sp) \n\t" \
3425 "lw $ra, 4($sp) \n\t" \
3426 "addu $29, $29, 8 \n\t" \
3427 "move %0, $v0\n" \
3428 : /*out*/ "=r"(_res) \
3429 : /*in*/ "0"(&_argvec[0]) \
3430 : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS); \
3431 lval = (__typeof__(lval))_res; \
3432 } while (0)
3433
3434#define CALL_FN_W_WWW(lval, orig, arg1, arg2, arg3) \
3435 do { \
3436 volatile OrigFn _orig = (orig); \
3437 volatile unsigned long _argvec[4]; \
3438 volatile unsigned long _res; \
3439 _argvec[0] = (unsigned long)_orig.nraddr; \
3440 _argvec[1] = (unsigned long)(arg1); \
3441 _argvec[2] = (unsigned long)(arg2); \
3442 _argvec[3] = (unsigned long)(arg3); \
3443 __asm__ volatile("subu $29, $29, 8 \n\t" \
3444 "sw $gp, 0($sp) \n\t" \
3445 "sw $ra, 4($sp) \n\t" \
3446 "subu $29, $29, 16 \n\t" \
3447 "lw $a0, 4(%1) \n\t" \
3448 "lw $a1, 8(%1) \n\t" \
3449 "lw $a2, 12(%1) \n\t" \
3450 "lw $t9, 0(%1) \n\t" /* target->t9 */ \
3451 VALGRIND_CALL_NOREDIR_T9 "addu $29, $29, 16 \n\t" \
3452 "lw $gp, 0($sp) \n\t" \
3453 "lw $ra, 4($sp) \n\t" \
3454 "addu $29, $29, 8 \n\t" \
3455 "move %0, $v0\n" \
3456 : /*out*/ "=r"(_res) \
3457 : /*in*/ "0"(&_argvec[0]) \
3458 : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS); \
3459 lval = (__typeof__(lval))_res; \
3460 } while (0)
3461
3462#define CALL_FN_W_WWWW(lval, orig, arg1, arg2, arg3, arg4) \
3463 do { \
3464 volatile OrigFn _orig = (orig); \
3465 volatile unsigned long _argvec[5]; \
3466 volatile unsigned long _res; \
3467 _argvec[0] = (unsigned long)_orig.nraddr; \
3468 _argvec[1] = (unsigned long)(arg1); \
3469 _argvec[2] = (unsigned long)(arg2); \
3470 _argvec[3] = (unsigned long)(arg3); \
3471 _argvec[4] = (unsigned long)(arg4); \
3472 __asm__ volatile("subu $29, $29, 8 \n\t" \
3473 "sw $gp, 0($sp) \n\t" \
3474 "sw $ra, 4($sp) \n\t" \
3475 "subu $29, $29, 16 \n\t" \
3476 "lw $a0, 4(%1) \n\t" \
3477 "lw $a1, 8(%1) \n\t" \
3478 "lw $a2, 12(%1) \n\t" \
3479 "lw $a3, 16(%1) \n\t" \
3480 "lw $t9, 0(%1) \n\t" /* target->t9 */ \
3481 VALGRIND_CALL_NOREDIR_T9 "addu $29, $29, 16 \n\t" \
3482 "lw $gp, 0($sp) \n\t" \
3483 "lw $ra, 4($sp) \n\t" \
3484 "addu $29, $29, 8 \n\t" \
3485 "move %0, $v0\n" \
3486 : /*out*/ "=r"(_res) \
3487 : /*in*/ "0"(&_argvec[0]) \
3488 : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS); \
3489 lval = (__typeof__(lval))_res; \
3490 } while (0)
3491
3492#define CALL_FN_W_5W(lval, orig, arg1, arg2, arg3, arg4, arg5) \
3493 do { \
3494 volatile OrigFn _orig = (orig); \
3495 volatile unsigned long _argvec[6]; \
3496 volatile unsigned long _res; \
3497 _argvec[0] = (unsigned long)_orig.nraddr; \
3498 _argvec[1] = (unsigned long)(arg1); \
3499 _argvec[2] = (unsigned long)(arg2); \
3500 _argvec[3] = (unsigned long)(arg3); \
3501 _argvec[4] = (unsigned long)(arg4); \
3502 _argvec[5] = (unsigned long)(arg5); \
3503 __asm__ volatile("subu $29, $29, 8 \n\t" \
3504 "sw $gp, 0($sp) \n\t" \
3505 "sw $ra, 4($sp) \n\t" \
3506 "lw $a0, 20(%1) \n\t" \
3507 "subu $sp, $sp, 24\n\t" \
3508 "sw $a0, 16($sp) \n\t" \
3509 "lw $a0, 4(%1) \n\t" \
3510 "lw $a1, 8(%1) \n\t" \
3511 "lw $a2, 12(%1) \n\t" \
3512 "lw $a3, 16(%1) \n\t" \
3513 "lw $t9, 0(%1) \n\t" /* target->t9 */ \
3514 VALGRIND_CALL_NOREDIR_T9 "addu $29, $29, 24 \n\t" \
3515 "lw $gp, 0($sp) \n\t" \
3516 "lw $ra, 4($sp) \n\t" \
3517 "addu $sp, $sp, 8 \n\t" \
3518 "move %0, $v0\n" \
3519 : /*out*/ "=r"(_res) \
3520 : /*in*/ "0"(&_argvec[0]) \
3521 : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS); \
3522 lval = (__typeof__(lval))_res; \
3523 } while (0)
3524#define CALL_FN_W_6W(lval, orig, arg1, arg2, arg3, arg4, arg5, arg6) \
3525 do { \
3526 volatile OrigFn _orig = (orig); \
3527 volatile unsigned long _argvec[7]; \
3528 volatile unsigned long _res; \
3529 _argvec[0] = (unsigned long)_orig.nraddr; \
3530 _argvec[1] = (unsigned long)(arg1); \
3531 _argvec[2] = (unsigned long)(arg2); \
3532 _argvec[3] = (unsigned long)(arg3); \
3533 _argvec[4] = (unsigned long)(arg4); \
3534 _argvec[5] = (unsigned long)(arg5); \
3535 _argvec[6] = (unsigned long)(arg6); \
3536 __asm__ volatile("subu $29, $29, 8 \n\t" \
3537 "sw $gp, 0($sp) \n\t" \
3538 "sw $ra, 4($sp) \n\t" \
3539 "lw $a0, 20(%1) \n\t" \
3540 "subu $sp, $sp, 32\n\t" \
3541 "sw $a0, 16($sp) \n\t" \
3542 "lw $a0, 24(%1) \n\t" \
3543 "nop\n\t" \
3544 "sw $a0, 20($sp) \n\t" \
3545 "lw $a0, 4(%1) \n\t" \
3546 "lw $a1, 8(%1) \n\t" \
3547 "lw $a2, 12(%1) \n\t" \
3548 "lw $a3, 16(%1) \n\t" \
3549 "lw $t9, 0(%1) \n\t" /* target->t9 */ \
3550 VALGRIND_CALL_NOREDIR_T9 "addu $sp, $sp, 32 \n\t" \
3551 "lw $gp, 0($sp) \n\t" \
3552 "lw $ra, 4($sp) \n\t" \
3553 "addu $sp, $sp, 8 \n\t" \
3554 "move %0, $v0\n" \
3555 : /*out*/ "=r"(_res) \
3556 : /*in*/ "0"(&_argvec[0]) \
3557 : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS); \
3558 lval = (__typeof__(lval))_res; \
3559 } while (0)
3560
3561#define CALL_FN_W_7W(lval, orig, arg1, arg2, arg3, arg4, arg5, arg6, arg7) \
3562 do { \
3563 volatile OrigFn _orig = (orig); \
3564 volatile unsigned long _argvec[8]; \
3565 volatile unsigned long _res; \
3566 _argvec[0] = (unsigned long)_orig.nraddr; \
3567 _argvec[1] = (unsigned long)(arg1); \
3568 _argvec[2] = (unsigned long)(arg2); \
3569 _argvec[3] = (unsigned long)(arg3); \
3570 _argvec[4] = (unsigned long)(arg4); \
3571 _argvec[5] = (unsigned long)(arg5); \
3572 _argvec[6] = (unsigned long)(arg6); \
3573 _argvec[7] = (unsigned long)(arg7); \
3574 __asm__ volatile("subu $29, $29, 8 \n\t" \
3575 "sw $gp, 0($sp) \n\t" \
3576 "sw $ra, 4($sp) \n\t" \
3577 "lw $a0, 20(%1) \n\t" \
3578 "subu $sp, $sp, 32\n\t" \
3579 "sw $a0, 16($sp) \n\t" \
3580 "lw $a0, 24(%1) \n\t" \
3581 "sw $a0, 20($sp) \n\t" \
3582 "lw $a0, 28(%1) \n\t" \
3583 "sw $a0, 24($sp) \n\t" \
3584 "lw $a0, 4(%1) \n\t" \
3585 "lw $a1, 8(%1) \n\t" \
3586 "lw $a2, 12(%1) \n\t" \
3587 "lw $a3, 16(%1) \n\t" \
3588 "lw $t9, 0(%1) \n\t" /* target->t9 */ \
3589 VALGRIND_CALL_NOREDIR_T9 "addu $sp, $sp, 32 \n\t" \
3590 "lw $gp, 0($sp) \n\t" \
3591 "lw $ra, 4($sp) \n\t" \
3592 "addu $sp, $sp, 8 \n\t" \
3593 "move %0, $v0\n" \
3594 : /*out*/ "=r"(_res) \
3595 : /*in*/ "0"(&_argvec[0]) \
3596 : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS); \
3597 lval = (__typeof__(lval))_res; \
3598 } while (0)
3599
3600#define CALL_FN_W_8W(lval, orig, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8) \
3601 do { \
3602 volatile OrigFn _orig = (orig); \
3603 volatile unsigned long _argvec[9]; \
3604 volatile unsigned long _res; \
3605 _argvec[0] = (unsigned long)_orig.nraddr; \
3606 _argvec[1] = (unsigned long)(arg1); \
3607 _argvec[2] = (unsigned long)(arg2); \
3608 _argvec[3] = (unsigned long)(arg3); \
3609 _argvec[4] = (unsigned long)(arg4); \
3610 _argvec[5] = (unsigned long)(arg5); \
3611 _argvec[6] = (unsigned long)(arg6); \
3612 _argvec[7] = (unsigned long)(arg7); \
3613 _argvec[8] = (unsigned long)(arg8); \
3614 __asm__ volatile("subu $29, $29, 8 \n\t" \
3615 "sw $gp, 0($sp) \n\t" \
3616 "sw $ra, 4($sp) \n\t" \
3617 "lw $a0, 20(%1) \n\t" \
3618 "subu $sp, $sp, 40\n\t" \
3619 "sw $a0, 16($sp) \n\t" \
3620 "lw $a0, 24(%1) \n\t" \
3621 "sw $a0, 20($sp) \n\t" \
3622 "lw $a0, 28(%1) \n\t" \
3623 "sw $a0, 24($sp) \n\t" \
3624 "lw $a0, 32(%1) \n\t" \
3625 "sw $a0, 28($sp) \n\t" \
3626 "lw $a0, 4(%1) \n\t" \
3627 "lw $a1, 8(%1) \n\t" \
3628 "lw $a2, 12(%1) \n\t" \
3629 "lw $a3, 16(%1) \n\t" \
3630 "lw $t9, 0(%1) \n\t" /* target->t9 */ \
3631 VALGRIND_CALL_NOREDIR_T9 "addu $sp, $sp, 40 \n\t" \
3632 "lw $gp, 0($sp) \n\t" \
3633 "lw $ra, 4($sp) \n\t" \
3634 "addu $sp, $sp, 8 \n\t" \
3635 "move %0, $v0\n" \
3636 : /*out*/ "=r"(_res) \
3637 : /*in*/ "0"(&_argvec[0]) \
3638 : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS); \
3639 lval = (__typeof__(lval))_res; \
3640 } while (0)
3641
3642#define CALL_FN_W_9W(lval, orig, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8, arg9) \
3643 do { \
3644 volatile OrigFn _orig = (orig); \
3645 volatile unsigned long _argvec[10]; \
3646 volatile unsigned long _res; \
3647 _argvec[0] = (unsigned long)_orig.nraddr; \
3648 _argvec[1] = (unsigned long)(arg1); \
3649 _argvec[2] = (unsigned long)(arg2); \
3650 _argvec[3] = (unsigned long)(arg3); \
3651 _argvec[4] = (unsigned long)(arg4); \
3652 _argvec[5] = (unsigned long)(arg5); \
3653 _argvec[6] = (unsigned long)(arg6); \
3654 _argvec[7] = (unsigned long)(arg7); \
3655 _argvec[8] = (unsigned long)(arg8); \
3656 _argvec[9] = (unsigned long)(arg9); \
3657 __asm__ volatile("subu $29, $29, 8 \n\t" \
3658 "sw $gp, 0($sp) \n\t" \
3659 "sw $ra, 4($sp) \n\t" \
3660 "lw $a0, 20(%1) \n\t" \
3661 "subu $sp, $sp, 40\n\t" \
3662 "sw $a0, 16($sp) \n\t" \
3663 "lw $a0, 24(%1) \n\t" \
3664 "sw $a0, 20($sp) \n\t" \
3665 "lw $a0, 28(%1) \n\t" \
3666 "sw $a0, 24($sp) \n\t" \
3667 "lw $a0, 32(%1) \n\t" \
3668 "sw $a0, 28($sp) \n\t" \
3669 "lw $a0, 36(%1) \n\t" \
3670 "sw $a0, 32($sp) \n\t" \
3671 "lw $a0, 4(%1) \n\t" \
3672 "lw $a1, 8(%1) \n\t" \
3673 "lw $a2, 12(%1) \n\t" \
3674 "lw $a3, 16(%1) \n\t" \
3675 "lw $t9, 0(%1) \n\t" /* target->t9 */ \
3676 VALGRIND_CALL_NOREDIR_T9 "addu $sp, $sp, 40 \n\t" \
3677 "lw $gp, 0($sp) \n\t" \
3678 "lw $ra, 4($sp) \n\t" \
3679 "addu $sp, $sp, 8 \n\t" \
3680 "move %0, $v0\n" \
3681 : /*out*/ "=r"(_res) \
3682 : /*in*/ "0"(&_argvec[0]) \
3683 : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS); \
3684 lval = (__typeof__(lval))_res; \
3685 } while (0)
3686
3687#define CALL_FN_W_10W(lval, orig, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8, arg9, arg10) \
3688 do { \
3689 volatile OrigFn _orig = (orig); \
3690 volatile unsigned long _argvec[11]; \
3691 volatile unsigned long _res; \
3692 _argvec[0] = (unsigned long)_orig.nraddr; \
3693 _argvec[1] = (unsigned long)(arg1); \
3694 _argvec[2] = (unsigned long)(arg2); \
3695 _argvec[3] = (unsigned long)(arg3); \
3696 _argvec[4] = (unsigned long)(arg4); \
3697 _argvec[5] = (unsigned long)(arg5); \
3698 _argvec[6] = (unsigned long)(arg6); \
3699 _argvec[7] = (unsigned long)(arg7); \
3700 _argvec[8] = (unsigned long)(arg8); \
3701 _argvec[9] = (unsigned long)(arg9); \
3702 _argvec[10] = (unsigned long)(arg10); \
3703 __asm__ volatile("subu $29, $29, 8 \n\t" \
3704 "sw $gp, 0($sp) \n\t" \
3705 "sw $ra, 4($sp) \n\t" \
3706 "lw $a0, 20(%1) \n\t" \
3707 "subu $sp, $sp, 48\n\t" \
3708 "sw $a0, 16($sp) \n\t" \
3709 "lw $a0, 24(%1) \n\t" \
3710 "sw $a0, 20($sp) \n\t" \
3711 "lw $a0, 28(%1) \n\t" \
3712 "sw $a0, 24($sp) \n\t" \
3713 "lw $a0, 32(%1) \n\t" \
3714 "sw $a0, 28($sp) \n\t" \
3715 "lw $a0, 36(%1) \n\t" \
3716 "sw $a0, 32($sp) \n\t" \
3717 "lw $a0, 40(%1) \n\t" \
3718 "sw $a0, 36($sp) \n\t" \
3719 "lw $a0, 4(%1) \n\t" \
3720 "lw $a1, 8(%1) \n\t" \
3721 "lw $a2, 12(%1) \n\t" \
3722 "lw $a3, 16(%1) \n\t" \
3723 "lw $t9, 0(%1) \n\t" /* target->t9 */ \
3724 VALGRIND_CALL_NOREDIR_T9 "addu $sp, $sp, 48 \n\t" \
3725 "lw $gp, 0($sp) \n\t" \
3726 "lw $ra, 4($sp) \n\t" \
3727 "addu $sp, $sp, 8 \n\t" \
3728 "move %0, $v0\n" \
3729 : /*out*/ "=r"(_res) \
3730 : /*in*/ "0"(&_argvec[0]) \
3731 : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS); \
3732 lval = (__typeof__(lval))_res; \
3733 } while (0)
3734
3735#define CALL_FN_W_11W(lval, orig, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8, arg9, arg10, arg11) \
3736 do { \
3737 volatile OrigFn _orig = (orig); \
3738 volatile unsigned long _argvec[12]; \
3739 volatile unsigned long _res; \
3740 _argvec[0] = (unsigned long)_orig.nraddr; \
3741 _argvec[1] = (unsigned long)(arg1); \
3742 _argvec[2] = (unsigned long)(arg2); \
3743 _argvec[3] = (unsigned long)(arg3); \
3744 _argvec[4] = (unsigned long)(arg4); \
3745 _argvec[5] = (unsigned long)(arg5); \
3746 _argvec[6] = (unsigned long)(arg6); \
3747 _argvec[7] = (unsigned long)(arg7); \
3748 _argvec[8] = (unsigned long)(arg8); \
3749 _argvec[9] = (unsigned long)(arg9); \
3750 _argvec[10] = (unsigned long)(arg10); \
3751 _argvec[11] = (unsigned long)(arg11); \
3752 __asm__ volatile("subu $29, $29, 8 \n\t" \
3753 "sw $gp, 0($sp) \n\t" \
3754 "sw $ra, 4($sp) \n\t" \
3755 "lw $a0, 20(%1) \n\t" \
3756 "subu $sp, $sp, 48\n\t" \
3757 "sw $a0, 16($sp) \n\t" \
3758 "lw $a0, 24(%1) \n\t" \
3759 "sw $a0, 20($sp) \n\t" \
3760 "lw $a0, 28(%1) \n\t" \
3761 "sw $a0, 24($sp) \n\t" \
3762 "lw $a0, 32(%1) \n\t" \
3763 "sw $a0, 28($sp) \n\t" \
3764 "lw $a0, 36(%1) \n\t" \
3765 "sw $a0, 32($sp) \n\t" \
3766 "lw $a0, 40(%1) \n\t" \
3767 "sw $a0, 36($sp) \n\t" \
3768 "lw $a0, 44(%1) \n\t" \
3769 "sw $a0, 40($sp) \n\t" \
3770 "lw $a0, 4(%1) \n\t" \
3771 "lw $a1, 8(%1) \n\t" \
3772 "lw $a2, 12(%1) \n\t" \
3773 "lw $a3, 16(%1) \n\t" \
3774 "lw $t9, 0(%1) \n\t" /* target->t9 */ \
3775 VALGRIND_CALL_NOREDIR_T9 "addu $sp, $sp, 48 \n\t" \
3776 "lw $gp, 0($sp) \n\t" \
3777 "lw $ra, 4($sp) \n\t" \
3778 "addu $sp, $sp, 8 \n\t" \
3779 "move %0, $v0\n" \
3780 : /*out*/ "=r"(_res) \
3781 : /*in*/ "0"(&_argvec[0]) \
3782 : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS); \
3783 lval = (__typeof__(lval))_res; \
3784 } while (0)
3785
3786#define CALL_FN_W_12W(lval, orig, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8, arg9, arg10, arg11, arg12) \
3787 do { \
3788 volatile OrigFn _orig = (orig); \
3789 volatile unsigned long _argvec[13]; \
3790 volatile unsigned long _res; \
3791 _argvec[0] = (unsigned long)_orig.nraddr; \
3792 _argvec[1] = (unsigned long)(arg1); \
3793 _argvec[2] = (unsigned long)(arg2); \
3794 _argvec[3] = (unsigned long)(arg3); \
3795 _argvec[4] = (unsigned long)(arg4); \
3796 _argvec[5] = (unsigned long)(arg5); \
3797 _argvec[6] = (unsigned long)(arg6); \
3798 _argvec[7] = (unsigned long)(arg7); \
3799 _argvec[8] = (unsigned long)(arg8); \
3800 _argvec[9] = (unsigned long)(arg9); \
3801 _argvec[10] = (unsigned long)(arg10); \
3802 _argvec[11] = (unsigned long)(arg11); \
3803 _argvec[12] = (unsigned long)(arg12); \
3804 __asm__ volatile("subu $29, $29, 8 \n\t" \
3805 "sw $gp, 0($sp) \n\t" \
3806 "sw $ra, 4($sp) \n\t" \
3807 "lw $a0, 20(%1) \n\t" \
3808 "subu $sp, $sp, 56\n\t" \
3809 "sw $a0, 16($sp) \n\t" \
3810 "lw $a0, 24(%1) \n\t" \
3811 "sw $a0, 20($sp) \n\t" \
3812 "lw $a0, 28(%1) \n\t" \
3813 "sw $a0, 24($sp) \n\t" \
3814 "lw $a0, 32(%1) \n\t" \
3815 "sw $a0, 28($sp) \n\t" \
3816 "lw $a0, 36(%1) \n\t" \
3817 "sw $a0, 32($sp) \n\t" \
3818 "lw $a0, 40(%1) \n\t" \
3819 "sw $a0, 36($sp) \n\t" \
3820 "lw $a0, 44(%1) \n\t" \
3821 "sw $a0, 40($sp) \n\t" \
3822 "lw $a0, 48(%1) \n\t" \
3823 "sw $a0, 44($sp) \n\t" \
3824 "lw $a0, 4(%1) \n\t" \
3825 "lw $a1, 8(%1) \n\t" \
3826 "lw $a2, 12(%1) \n\t" \
3827 "lw $a3, 16(%1) \n\t" \
3828 "lw $t9, 0(%1) \n\t" /* target->t9 */ \
3829 VALGRIND_CALL_NOREDIR_T9 "addu $sp, $sp, 56 \n\t" \
3830 "lw $gp, 0($sp) \n\t" \
3831 "lw $ra, 4($sp) \n\t" \
3832 "addu $sp, $sp, 8 \n\t" \
3833 "move %0, $v0\n" \
3834 : /*out*/ "=r"(_res) \
3835 : /*in*/ "0"(&_argvec[0]) \
3836 : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS); \
3837 lval = (__typeof__(lval))_res; \
3838 } while (0)
3839
3840#endif /* PLAT_mips32_linux */
3841
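/* Example (editor's sketch, not part of the original header): the
   CALL_FN_W_* families above are the platform-specific backends of the
   function-wrapping machinery defined earlier in this file. A wrapper
   for a hypothetical int foo(int) living in an unversioned shared
   object could look like:

      int I_WRAP_SONAME_FN_ZU(NONE, foo)(int x)
      {
          int result;
          OrigFn fn;
          VALGRIND_GET_ORIG_FN(fn);    // fetch the address of the real foo
          CALL_FN_W_W(result, fn, x);  // call it without being redirected again
          return result;
      }
*/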
3842
3843/* ------------------------------------------------------------------ */
3844/* ARCHITECTURE INDEPENDENT MACROS for CLIENT REQUESTS. */
3845/* */
3846/* ------------------------------------------------------------------ */
3847
3848/* Some request codes. There are many more of these, but most are not
3849 exposed to end-user view. These are the public ones, all of the
3850 form 0x1000 + small_number.
3851
3852 Core ones are in the range 0x00000000--0x0000ffff. The non-public
3853 ones start at 0x2000.
3854*/
3855
3856/* These macros are used by tools -- they must be public, but don't
3857 embed them into other programs. */
3858#define VG_USERREQ_TOOL_BASE(a, b) ((unsigned int)(((a)&0xff) << 24 | ((b)&0xff) << 16))
3859#define VG_IS_TOOL_USERREQ(a, b, v) (VG_USERREQ_TOOL_BASE(a, b) == ((v)&0xffff0000))
3860
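/* Example (editor's sketch): Memcheck identifies itself with the
   character pair ('M','C'), so its requests are based at
   VG_USERREQ_TOOL_BASE('M','C') == 0x4D430000, and
   VG_IS_TOOL_USERREQ('M','C', req) tests only the top 16 bits of req. */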
3861/* !! ABIWARNING !! ABIWARNING !! ABIWARNING !! ABIWARNING !!
3862 This enum comprises an ABI exported by Valgrind to programs
3863 which use client requests. DO NOT CHANGE THE ORDER OF THESE
3864 ENTRIES, NOR DELETE ANY -- add new ones at the end. */
3865typedef enum {
3866 VG_USERREQ__RUNNING_ON_VALGRIND = 0x1001,
3867 VG_USERREQ__DISCARD_TRANSLATIONS = 0x1002,
3868
3869 /* These allow any function to be called from the simulated
3870 CPU but run on the real CPU. Nb: the first arg passed to
3871 the function is always the ThreadId of the running
3872 thread! So CLIENT_CALL0 actually requires a 1 arg
3873 function, etc. */
3874 VG_USERREQ__CLIENT_CALL0 = 0x1101,
3875 VG_USERREQ__CLIENT_CALL1 = 0x1102,
3876 VG_USERREQ__CLIENT_CALL2 = 0x1103,
3877 VG_USERREQ__CLIENT_CALL3 = 0x1104,
3878
3879 /* Can be useful in regression testing suites -- eg. can
3880 send Valgrind's output to /dev/null and still count
3881 errors. */
3882 VG_USERREQ__COUNT_ERRORS = 0x1201,
3883
3884 /* Allows a string (gdb monitor command) to be passed to the tool.
3885 Used for interaction with vgdb/gdb */
3886 VG_USERREQ__GDB_MONITOR_COMMAND = 0x1202,
3887
3888 /* These are useful and can be interpreted by any tool that
3889 tracks malloc() et al, by using vg_replace_malloc.c. */
3890 VG_USERREQ__MALLOCLIKE_BLOCK = 0x1301,
3891 VG_USERREQ__RESIZEINPLACE_BLOCK = 0x130b,
3892 VG_USERREQ__FREELIKE_BLOCK = 0x1302,
3893 /* Memory pool support. */
3894 VG_USERREQ__CREATE_MEMPOOL = 0x1303,
3895 VG_USERREQ__DESTROY_MEMPOOL = 0x1304,
3896 VG_USERREQ__MEMPOOL_ALLOC = 0x1305,
3897 VG_USERREQ__MEMPOOL_FREE = 0x1306,
3898 VG_USERREQ__MEMPOOL_TRIM = 0x1307,
3899 VG_USERREQ__MOVE_MEMPOOL = 0x1308,
3900 VG_USERREQ__MEMPOOL_CHANGE = 0x1309,
3901 VG_USERREQ__MEMPOOL_EXISTS = 0x130a,
3902
3903 /* Allow printfs to valgrind log. */
3904 /* The first two pass the va_list argument by value, which
3905 assumes it is the same size as or smaller than a UWord,
3906 which generally isn't the case. Hence are deprecated.
3907 The second two pass the vargs by reference and so are
3908 immune to this problem. */
3909 /* both :: char* fmt, va_list vargs (DEPRECATED) */
3910 VG_USERREQ__PRINTF = 0x1401,
3911 VG_USERREQ__PRINTF_BACKTRACE = 0x1402,
3912 /* both :: char* fmt, va_list* vargs */
3913 VG_USERREQ__PRINTF_VALIST_BY_REF = 0x1403,
3914 VG_USERREQ__PRINTF_BACKTRACE_VALIST_BY_REF = 0x1404,
3915
3916 /* Stack support. */
3917 VG_USERREQ__STACK_REGISTER = 0x1501,
3918 VG_USERREQ__STACK_DEREGISTER = 0x1502,
3919 VG_USERREQ__STACK_CHANGE = 0x1503,
3920
3921 /* Wine support */
3922 VG_USERREQ__LOAD_PDB_DEBUGINFO = 0x1601,
3923
3924 /* Querying of debug info. */
3925 VG_USERREQ__MAP_IP_TO_SRCLOC = 0x1701,
3926
3927 /* Disable/enable error reporting level. Takes a single
3928 Word arg which is the delta to this thread's error
3929 disablement indicator. Hence 1 disables or further
3930 disables errors, and -1 moves back towards enablement.
3931 Other values are not allowed. */
3932 VG_USERREQ__CHANGE_ERR_DISABLEMENT = 0x1801,
3933
3934 /* Initialise IR injection */
3935 VG_USERREQ__VEX_INIT_FOR_IRI = 0x1901
3936} Vg_ClientRequest;
3937
3938#if !defined(__GNUC__)
3939#define __extension__ /* */
3940#endif
3941
3942
3943/* Returns the number of Valgrinds this code is running under. That
3944 is, 0 if running natively, 1 if running under Valgrind, 2 if
3945 running under Valgrind which is running under another Valgrind,
3946 etc. */
3947#define RUNNING_ON_VALGRIND \
3948 (unsigned)VALGRIND_DO_CLIENT_REQUEST_EXPR(0 /* if not */, VG_USERREQ__RUNNING_ON_VALGRIND, 0, 0, 0, 0, 0)
3949
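/* Example (editor's sketch): a typical use is to take a cheaper code
   path when the program notices it is being run under Valgrind:

      if (RUNNING_ON_VALGRIND > 0) {
          // e.g. shrink iteration counts for a slow self-test
          iterations /= 100;
      }

   'iterations' is illustrative, not part of this header. */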
3950
3951/* Discard translation of code in the range [_qzz_addr .. _qzz_addr +
3952 _qzz_len - 1]. Useful if you are debugging a JITter or some such,
3953 since it provides a way to make sure valgrind will retranslate the
3954 invalidated area. Returns no value. */
3955#define VALGRIND_DISCARD_TRANSLATIONS(_qzz_addr, _qzz_len) \
3956 VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__DISCARD_TRANSLATIONS, _qzz_addr, _qzz_len, 0, 0, 0)
3957
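/* Example (editor's sketch): a JIT that rewrites code it has already
   executed must invalidate Valgrind's cached translations of it, or the
   stale translation may continue to run:

      emit_code(buf, len);                      // hypothetical JIT emitter
      VALGRIND_DISCARD_TRANSLATIONS(buf, len);  // force retranslation
*/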
3958
3959/* These requests are for getting Valgrind itself to print something.
3960 Possibly with a backtrace. This is a really ugly hack. The return value
3961 is the number of characters printed, excluding the "**<pid>** " part at the
3962 start and the backtrace (if present). */
3963
3964#if defined(__GNUC__) || defined(__INTEL_COMPILER) && !defined(_MSC_VER)
3965/* Modern GCC will optimize the static routine out if unused,
3966 and the unused attribute suppresses warnings about it. */
3967static int VALGRIND_PRINTF(const char *format, ...) __attribute__((format(__printf__, 1, 2), __unused__));
3968#endif
3969static int
3970#if defined(_MSC_VER)
3971 __inline
3972#endif
3973 VALGRIND_PRINTF(const char *format, ...)
3974{
3975#if defined(NVALGRIND)
3976 return 0;
3977#else /* NVALGRIND */
3978#if defined(_MSC_VER)
3979 uintptr_t _qzz_res;
3980#else
3981 unsigned long _qzz_res;
3982#endif
3983 va_list vargs;
3984 va_start(vargs, format);
3985#if defined(_MSC_VER)
3986 _qzz_res = VALGRIND_DO_CLIENT_REQUEST_EXPR(0, VG_USERREQ__PRINTF_VALIST_BY_REF, (uintptr_t)format,
3987 (uintptr_t)&vargs, 0, 0, 0);
3988#else
3989 _qzz_res = VALGRIND_DO_CLIENT_REQUEST_EXPR(0, VG_USERREQ__PRINTF_VALIST_BY_REF, (unsigned long)format,
3990 (unsigned long)&vargs, 0, 0, 0);
3991#endif
3992 va_end(vargs);
3993 return (int)_qzz_res;
3994#endif /* NVALGRIND */
3995}
3996
3997#if defined(__GNUC__) || defined(__INTEL_COMPILER) && !defined(_MSC_VER)
3998static int VALGRIND_PRINTF_BACKTRACE(const char *format, ...) __attribute__((format(__printf__, 1, 2), __unused__));
3999#endif
4000static int
4001#if defined(_MSC_VER)
4002 __inline
4003#endif
4004 VALGRIND_PRINTF_BACKTRACE(const char *format, ...)
4005{
4006#if defined(NVALGRIND)
4007 return 0;
4008#else /* NVALGRIND */
4009#if defined(_MSC_VER)
4010 uintptr_t _qzz_res;
4011#else
4012 unsigned long _qzz_res;
4013#endif
4014 va_list vargs;
4015 va_start(vargs, format);
4016#if defined(_MSC_VER)
4017 _qzz_res = VALGRIND_DO_CLIENT_REQUEST_EXPR(0, VG_USERREQ__PRINTF_BACKTRACE_VALIST_BY_REF, (uintptr_t)format,
4018 (uintptr_t)&vargs, 0, 0, 0);
4019#else
4020 _qzz_res = VALGRIND_DO_CLIENT_REQUEST_EXPR(0, VG_USERREQ__PRINTF_BACKTRACE_VALIST_BY_REF, (unsigned long)format,
4021 (unsigned long)&vargs, 0, 0, 0);
4022#endif
4023 va_end(vargs);
4024 return (int)_qzz_res;
4025#endif /* NVALGRIND */
4026}
4027
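/* Example (editor's sketch): messages emitted this way appear in the
   Valgrind log, interleaved with the tool's own output:

      VALGRIND_PRINTF("checked %d buffers\n", nbufs);
      VALGRIND_PRINTF_BACKTRACE("unexpected state %d\n", state);

   'nbufs' and 'state' are illustrative. */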
4028
4029/* These requests allow control to move from the simulated CPU to the
4030 real CPU, calling an arbitrary function.
4031
4032 Note that the current ThreadId is inserted as the first argument.
4033 So this call:
4034
4035 VALGRIND_NON_SIMD_CALL2(f, arg1, arg2)
4036
4037 requires f to have this signature:
4038
4039 Word f(Word tid, Word arg1, Word arg2)
4040
4041 where "Word" is a word-sized type.
4042
4043 Note that these client requests are not entirely reliable. For example,
4044 if you call a function with them that subsequently calls printf(),
4045 there's a high chance Valgrind will crash. Generally, your prospects of
4046 these working are better if the called function does not refer to
4047 any global variables, and does not refer to any libc or other functions
4048 (printf et al). Any kind of entanglement with libc or dynamic linking is
4049 likely to have a bad outcome, for tricky reasons which we've grappled
4050 with a lot in the past.
4051*/
4052#define VALGRIND_NON_SIMD_CALL0(_qyy_fn) \
4053 VALGRIND_DO_CLIENT_REQUEST_EXPR(0 /* default return */, VG_USERREQ__CLIENT_CALL0, _qyy_fn, 0, 0, 0, 0)
4054
4055#define VALGRIND_NON_SIMD_CALL1(_qyy_fn, _qyy_arg1) \
4056 VALGRIND_DO_CLIENT_REQUEST_EXPR(0 /* default return */, VG_USERREQ__CLIENT_CALL1, _qyy_fn, _qyy_arg1, 0, 0, 0)
4057
4058#define VALGRIND_NON_SIMD_CALL2(_qyy_fn, _qyy_arg1, _qyy_arg2) \
4059 VALGRIND_DO_CLIENT_REQUEST_EXPR(0 /* default return */, VG_USERREQ__CLIENT_CALL2, _qyy_fn, _qyy_arg1, _qyy_arg2, \
4060 0, 0)
4061
4062#define VALGRIND_NON_SIMD_CALL3(_qyy_fn, _qyy_arg1, _qyy_arg2, _qyy_arg3) \
4063 VALGRIND_DO_CLIENT_REQUEST_EXPR(0 /* default return */, VG_USERREQ__CLIENT_CALL3, _qyy_fn, _qyy_arg1, _qyy_arg2, \
4064 _qyy_arg3, 0)
4065
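/* Example (editor's sketch): per the signature rule above, the callee
   takes the ThreadId as an implicit first argument ("Word" standing for
   any word-sized integer type such as long):

      long add2(long tid, long a, long b) { return a + b; }
      ...
      long r = VALGRIND_NON_SIMD_CALL2(add2, 3, 4);   // r == 7
*/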
4066
4067/* Counts the number of errors that have been recorded by a tool. Nb:
4068 the tool must record the errors with VG_(maybe_record_error)() or
4069 VG_(unique_error)() for them to be counted. */
4070#define VALGRIND_COUNT_ERRORS \
4071 (unsigned)VALGRIND_DO_CLIENT_REQUEST_EXPR(0 /* default return */, VG_USERREQ__COUNT_ERRORS, 0, 0, 0, 0, 0)
4072
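/* Example (editor's sketch): a regression harness can assert that the
   code under test added no new tool errors:

      unsigned before = VALGRIND_COUNT_ERRORS;
      run_test();                       // hypothetical test entry point
      assert(VALGRIND_COUNT_ERRORS == before);
*/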
4073/* Several Valgrind tools (Memcheck, Massif, Helgrind, DRD) rely on knowing
4074 when heap blocks are allocated in order to give accurate results. This
4075 happens automatically for the standard allocator functions such as
4076 malloc(), calloc(), realloc(), memalign(), new, new[], free(), delete,
4077 delete[], etc.
4078
4079 But if your program uses a custom allocator, this doesn't automatically
4080 happen, and Valgrind will not do as well. For example, if you allocate
4081 superblocks with mmap() and then carve chunks out of the superblocks, all
4082 Valgrind's observations will be at the mmap() level and it won't know that
4083 the chunks should be considered separate entities. In Memcheck's case,
4084 that means you probably won't get heap block overrun detection (because
4085 there won't be redzones marked as unaddressable) and you definitely won't
4086 get any leak detection.
4087
4088 The following client requests allow a custom allocator to be annotated so
4089 that it can be handled accurately by Valgrind.
4090
4091 VALGRIND_MALLOCLIKE_BLOCK marks a region of memory as having been allocated
4092 by a malloc()-like function. For Memcheck (an illustrative case), this
4093 does two things:
4094
4095 - It records that the block has been allocated. This means any addresses
4096 within the block mentioned in error messages will be
4097 identified as belonging to the block. It also means that if the block
4098 isn't freed it will be detected by the leak checker.
4099
4100 - It marks the block as being addressable and undefined (if 'is_zeroed' is
4101 not set), or addressable and defined (if 'is_zeroed' is set). This
4102 controls how accesses to the block by the program are handled.
4103
4104 'addr' is the start of the usable block (ie. after any
4105 redzone), 'sizeB' is its size. 'rzB' is the redzone size if the allocator
4106 can apply redzones -- these are blocks of padding at the start and end of
4107 each block. Adding redzones is recommended as it makes it much more likely
4108 Valgrind will spot block overruns. `is_zeroed' indicates if the memory is
4109 zeroed (or filled with another predictable value), as is the case for
4110 calloc().
4111
4112 VALGRIND_MALLOCLIKE_BLOCK should be put immediately after the point where a
4113 heap block -- that will be used by the client program -- is allocated.
4114 It's best to put it at the outermost level of the allocator if possible;
4115 for example, if you have a function my_alloc() which calls
4116 internal_alloc(), and the client request is put inside internal_alloc(),
4117 stack traces relating to the heap block will contain entries for both
4118 my_alloc() and internal_alloc(), which is probably not what you want.
4119
4120 For Memcheck users: if you use VALGRIND_MALLOCLIKE_BLOCK to carve out
4121 custom blocks from within a heap block, B, that has been allocated with
4122 malloc/calloc/new/etc, then block B will be *ignored* during leak-checking
4123 -- the custom blocks will take precedence.
4124
4125 VALGRIND_FREELIKE_BLOCK is the partner to VALGRIND_MALLOCLIKE_BLOCK. For
4126 Memcheck, it does two things:
4127
4128 - It records that the block has been deallocated. This assumes that the
4129 block was annotated as having been allocated via
4130 VALGRIND_MALLOCLIKE_BLOCK. Otherwise, an error will be issued.
4131
4132 - It marks the block as being unaddressable.
4133
4134 VALGRIND_FREELIKE_BLOCK should be put immediately after the point where a
4135 heap block is deallocated.
4136
4137 VALGRIND_RESIZEINPLACE_BLOCK informs a tool about reallocation. For
4138 Memcheck, it does four things:
4139
4140 - It records that the size of a block has been changed. This assumes that
4141 the block was annotated as having been allocated via
4142 VALGRIND_MALLOCLIKE_BLOCK. Otherwise, an error will be issued.
4143
4144 - If the block shrank, it marks the freed memory as being unaddressable.
4145
4146 - If the block grew, it marks the new area as undefined and defines a red
4147 zone past the end of the new block.
4148
4149 - The V-bits of the overlap between the old and the new block are preserved.
4150
4151 VALGRIND_RESIZEINPLACE_BLOCK should be put after allocation of the new block
4152 and before deallocation of the old block.
4153
4154 In many cases, these three client requests will not be enough to get your
4155 allocator working well with Memcheck. More specifically, if your allocator
4156 writes to freed blocks in any way then a VALGRIND_MAKE_MEM_UNDEFINED call
4157 will be necessary to mark the memory as addressable just before the write
4158 occurs, otherwise you'll get a lot of invalid write errors. For example,
4159 you'll need to do this if your allocator recycles freed blocks, but it
4160 zeroes them before handing them back out (via VALGRIND_MALLOCLIKE_BLOCK).
4161 Alternatively, if your allocator reuses freed blocks for allocator-internal
4162 data structures, VALGRIND_MAKE_MEM_UNDEFINED calls will also be necessary.
4163
4164 Really, what's happening is a blurring of the lines between the client
4165 program and the allocator... after VALGRIND_FREELIKE_BLOCK is called, the
4166 memory should be considered unaddressable to the client program, but the
4167 allocator knows more than the rest of the client program and so may be able
4168 to safely access it. Extra client requests are necessary for Valgrind to
4169 understand the distinction between the allocator and the rest of the
4170 program.
4171
4172 Ignored if addr == 0.
4173*/
4174#define VALGRIND_MALLOCLIKE_BLOCK(addr, sizeB, rzB, is_zeroed) \
4175 VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__MALLOCLIKE_BLOCK, addr, sizeB, rzB, is_zeroed, 0)
4176
4177/* See the comment for VALGRIND_MALLOCLIKE_BLOCK for details.
4178 Ignored if addr == 0.
4179*/
4180#define VALGRIND_RESIZEINPLACE_BLOCK(addr, oldSizeB, newSizeB, rzB) \
4181 VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__RESIZEINPLACE_BLOCK, addr, oldSizeB, newSizeB, rzB, 0)
4182
4183/* See the comment for VALGRIND_MALLOCLIKE_BLOCK for details.
4184 Ignored if addr == 0.
4185*/
4186#define VALGRIND_FREELIKE_BLOCK(addr, rzB) \
4187 VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__FREELIKE_BLOCK, addr, rzB, 0, 0, 0)
4188
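/* Example (editor's sketch): annotating a custom allocator as described
   in the long comment above. The superblock helpers are hypothetical:

      void *my_alloc(size_t n) {
          void *p = carve_from_superblock(n);    // hypothetical helper
          VALGRIND_MALLOCLIKE_BLOCK(p, n, 0, 0); // rzB = 0, is_zeroed = 0
          return p;
      }

      void my_free(void *p) {
          VALGRIND_FREELIKE_BLOCK(p, 0);         // rzB = 0
          return_to_superblock(p);               // hypothetical helper
      }
*/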
4189/* Create a memory pool. */
4190#define VALGRIND_CREATE_MEMPOOL(pool, rzB, is_zeroed) \
4191 VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__CREATE_MEMPOOL, pool, rzB, is_zeroed, 0, 0)
4192
4193/* Destroy a memory pool. */
4194#define VALGRIND_DESTROY_MEMPOOL(pool) VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__DESTROY_MEMPOOL, pool, 0, 0, 0, 0)
4195
4196/* Associate a piece of memory with a memory pool. */
4197#define VALGRIND_MEMPOOL_ALLOC(pool, addr, size) \
4198 VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__MEMPOOL_ALLOC, pool, addr, size, 0, 0)
4199
4200/* Disassociate a piece of memory from a memory pool. */
4201#define VALGRIND_MEMPOOL_FREE(pool, addr) VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__MEMPOOL_FREE, pool, addr, 0, 0, 0)
4202
4203/* Disassociate any pieces outside a particular range. */
4204#define VALGRIND_MEMPOOL_TRIM(pool, addr, size) \
4205 VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__MEMPOOL_TRIM, pool, addr, size, 0, 0)
4206
4207/* Move a memory pool from poolA to poolB. */
4208#define VALGRIND_MOVE_MEMPOOL(poolA, poolB) \
4209 VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__MOVE_MEMPOOL, poolA, poolB, 0, 0, 0)
4210
4211/* Resize and/or move a piece associated with a memory pool. */
4212#define VALGRIND_MEMPOOL_CHANGE(pool, addrA, addrB, size) \
4213 VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__MEMPOOL_CHANGE, pool, addrA, addrB, size, 0)
4214
4215/* Return 1 if a mempool exists, else 0. */
4216#define VALGRIND_MEMPOOL_EXISTS(pool) \
4217 (unsigned)VALGRIND_DO_CLIENT_REQUEST_EXPR(0, VG_USERREQ__MEMPOOL_EXISTS, pool, 0, 0, 0, 0)
4218
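/* Example (editor's sketch): a typical lifecycle for an arena-style
   allocator ('arena' and 'chunk' are illustrative pointers):

      VALGRIND_CREATE_MEMPOOL(arena, 0, 0);       // rzB = 0, not zeroed
      VALGRIND_MEMPOOL_ALLOC(arena, chunk, 128);  // hand a chunk to a client
      VALGRIND_MEMPOOL_FREE(arena, chunk);        // take it back
      VALGRIND_DESTROY_MEMPOOL(arena);
*/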
4219/* Mark a piece of memory as being a stack. Returns a stack id. */
4220#define VALGRIND_STACK_REGISTER(start, end) \
4221 (unsigned)VALGRIND_DO_CLIENT_REQUEST_EXPR(0, VG_USERREQ__STACK_REGISTER, start, end, 0, 0, 0)
4222
4223/* Unmark the piece of memory associated with a stack id as being a
4224 stack. */
4225#define VALGRIND_STACK_DEREGISTER(id) VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__STACK_DEREGISTER, id, 0, 0, 0, 0)
4226
4227/* Change the start and end address of the stack id. */
4228#define VALGRIND_STACK_CHANGE(id, start, end) \
4229 VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__STACK_CHANGE, id, start, end, 0, 0)
4230
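/* Example (editor's sketch): registering a manually allocated coroutine
   or fiber stack so Valgrind can follow switches onto it (STK_SIZE is
   illustrative):

      char *stk = malloc(STK_SIZE);
      unsigned id = VALGRIND_STACK_REGISTER(stk, stk + STK_SIZE);
      ...
      VALGRIND_STACK_DEREGISTER(id);
      free(stk);
*/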
4231/* Load PDB debug info for Wine PE image_map. */
4232#define VALGRIND_LOAD_PDB_DEBUGINFO(fd, ptr, total_size, delta) \
4233 VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__LOAD_PDB_DEBUGINFO, fd, ptr, total_size, delta, 0)
4234
4235/* Map a code address to a source file name and line number. buf64
4236 must point to a 64-byte buffer in the caller's address space. The
4237 result will be dumped in there and is guaranteed to be zero
4238 terminated. If no info is found, the first byte is set to zero. */
4239#define VALGRIND_MAP_IP_TO_SRCLOC(addr, buf64) \
4240 (unsigned)VALGRIND_DO_CLIENT_REQUEST_EXPR(0, VG_USERREQ__MAP_IP_TO_SRCLOC, addr, buf64, 0, 0, 0)
4241
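/* Example (editor's sketch): resolving the caller's address to a source
   location, honouring the 64-byte buffer contract described above:

      char buf64[64];
      VALGRIND_MAP_IP_TO_SRCLOC(__builtin_return_address(0), buf64);
      if (buf64[0] != 0)
          VALGRIND_PRINTF("called from %s\n", buf64);
*/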
4242/* Disable error reporting for this thread. Behaves in a stack like
4243 way, so you can safely call this multiple times provided that
4244 VALGRIND_ENABLE_ERROR_REPORTING is called the same number of times
4245 to re-enable reporting. The first call of this macro disables
4246 reporting. Subsequent calls have no effect except to increase the
4247 number of VALGRIND_ENABLE_ERROR_REPORTING calls needed to re-enable
4248 reporting. Child threads do not inherit this setting from their
4249 parents -- they are always created with reporting enabled. */
4250#define VALGRIND_DISABLE_ERROR_REPORTING \
4251 VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__CHANGE_ERR_DISABLEMENT, 1, 0, 0, 0, 0)
4252
4253/* Re-enable error reporting, as per comments on
4254 VALGRIND_DISABLE_ERROR_REPORTING. */
4255#define VALGRIND_ENABLE_ERROR_REPORTING \
4256 VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__CHANGE_ERR_DISABLEMENT, -1, 0, 0, 0, 0)
4257
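/* Example (editor's sketch): suppressing reports from a known-noisy
   third-party call; the requests nest as described above:

      VALGRIND_DISABLE_ERROR_REPORTING;
      noisy_library_call();             // hypothetical
      VALGRIND_ENABLE_ERROR_REPORTING;
*/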
4258#undef PLAT_x86_darwin
4259#undef PLAT_amd64_darwin
4260#undef PLAT_x86_win32
4261#undef PLAT_x86_linux
4262#undef PLAT_amd64_linux
4263#undef PLAT_ppc32_linux
4264#undef PLAT_ppc64_linux
4265#undef PLAT_arm_linux
4266#undef PLAT_s390x_linux
4267#undef PLAT_mips32_linux
4268
4269#endif /* __VALGRIND_H */