/* DWARF 2 Expression Evaluator.

   Copyright (C) 2001-2003, 2005, 2007-2012 Free Software Foundation,
   Inc.

   Contributed by Daniel Berlin (dan@dberlin.org)

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "defs.h"
#include "symtab.h"
#include "gdbtypes.h"
#include "value.h"
#include "gdbcore.h"
#include "dwarf2.h"
#include "dwarf2expr.h"
#include "gdb_assert.h"

/* Local prototypes.  */

static void execute_stack_op (struct dwarf_expr_context *,
                              const gdb_byte *, const gdb_byte *);

/* Cookie for gdbarch data.  */

static struct gdbarch_data *dwarf_arch_cookie;

/* This holds gdbarch-specific types used by the DWARF expression
   evaluator.  See comments in execute_stack_op.  */

struct dwarf_gdbarch_types
{
  struct type *dw_types[3];
};

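/* The three DW_TYPES slots cache the signed integer types used for
   the 2-, 4- and 8-byte address sizes accepted by
   dwarf_expr_address_type below.  */
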
/* Allocate and fill in dwarf_gdbarch_types for an arch.  */

static void *
dwarf_gdbarch_types_init (struct gdbarch *gdbarch)
{
  struct dwarf_gdbarch_types *types
    = GDBARCH_OBSTACK_ZALLOC (gdbarch, struct dwarf_gdbarch_types);

  /* The types themselves are lazily initialized.  */

  return types;
}

/* Return the type used for DWARF operations where the type is
   unspecified in the DWARF spec.  Only certain sizes are
   supported.  */

static struct type *
dwarf_expr_address_type (struct dwarf_expr_context *ctx)
{
  struct dwarf_gdbarch_types *types = gdbarch_data (ctx->gdbarch,
                                                    dwarf_arch_cookie);
  int ndx;

  if (ctx->addr_size == 2)
    ndx = 0;
  else if (ctx->addr_size == 4)
    ndx = 1;
  else if (ctx->addr_size == 8)
    ndx = 2;
  else
    error (_("Unsupported address size in DWARF expressions: %d bits"),
           8 * ctx->addr_size);

  if (types->dw_types[ndx] == NULL)
    types->dw_types[ndx]
      = arch_integer_type (ctx->gdbarch,
                           8 * ctx->addr_size,
                           0, "<signed DWARF address type>");

  return types->dw_types[ndx];
}

/* Create a new context for the expression evaluator.  */

struct dwarf_expr_context *
new_dwarf_expr_context (void)
{
  struct dwarf_expr_context *retval;

  retval = xcalloc (1, sizeof (struct dwarf_expr_context));
  retval->stack_len = 0;
  retval->stack_allocated = 10;
  retval->stack = xmalloc (retval->stack_allocated
                           * sizeof (struct dwarf_stack_value));
  retval->num_pieces = 0;
  retval->pieces = 0;
  retval->max_recursion_depth = 0x100;
  return retval;
}

/* Release the memory allocated to CTX.  */

void
free_dwarf_expr_context (struct dwarf_expr_context *ctx)
{
  xfree (ctx->stack);
  xfree (ctx->pieces);
  xfree (ctx);
}

/* Helper for make_cleanup_free_dwarf_expr_context.  */

static void
free_dwarf_expr_context_cleanup (void *arg)
{
  free_dwarf_expr_context (arg);
}

/* Return a cleanup that calls free_dwarf_expr_context.  */

struct cleanup *
make_cleanup_free_dwarf_expr_context (struct dwarf_expr_context *ctx)
{
  return make_cleanup (free_dwarf_expr_context_cleanup, ctx);
}

/* Expand the memory allocated to CTX's stack to contain at least
   NEED more elements than are currently used.  */

static void
dwarf_expr_grow_stack (struct dwarf_expr_context *ctx, size_t need)
{
  if (ctx->stack_len + need > ctx->stack_allocated)
    {
      size_t newlen = ctx->stack_len + need + 10;

      ctx->stack = xrealloc (ctx->stack,
                             newlen * sizeof (struct dwarf_stack_value));
      ctx->stack_allocated = newlen;
    }
}

/* Push VALUE onto CTX's stack.  */

static void
dwarf_expr_push (struct dwarf_expr_context *ctx, struct value *value,
                 int in_stack_memory)
{
  struct dwarf_stack_value *v;

  dwarf_expr_grow_stack (ctx, 1);
  v = &ctx->stack[ctx->stack_len++];
  v->value = value;
  v->in_stack_memory = in_stack_memory;
}

/* Push VALUE onto CTX's stack.  */

void
dwarf_expr_push_address (struct dwarf_expr_context *ctx, CORE_ADDR value,
                         int in_stack_memory)
{
  dwarf_expr_push (ctx,
                   value_from_ulongest (dwarf_expr_address_type (ctx), value),
                   in_stack_memory);
}

/* Pop the top item off of CTX's stack.  */

static void
dwarf_expr_pop (struct dwarf_expr_context *ctx)
{
  if (ctx->stack_len <= 0)
    error (_("dwarf expression stack underflow"));
  ctx->stack_len--;
}

/* Retrieve the N'th item on CTX's stack.  */

struct value *
dwarf_expr_fetch (struct dwarf_expr_context *ctx, int n)
{
  if (ctx->stack_len <= n)
    error (_("Asked for position %d of stack, "
             "stack only has %d elements on it."),
           n, ctx->stack_len);
  return ctx->stack[ctx->stack_len - (1 + n)].value;
}

/* Require that TYPE be an integral type; throw an exception if not.  */

static void
dwarf_require_integral (struct type *type)
{
  if (TYPE_CODE (type) != TYPE_CODE_INT
      && TYPE_CODE (type) != TYPE_CODE_CHAR
      && TYPE_CODE (type) != TYPE_CODE_BOOL)
    error (_("integral type expected in DWARF expression"));
}

/* Return the unsigned form of TYPE.  TYPE is necessarily an integral
   type.  */

static struct type *
get_unsigned_type (struct gdbarch *gdbarch, struct type *type)
{
  switch (TYPE_LENGTH (type))
    {
    case 1:
      return builtin_type (gdbarch)->builtin_uint8;
    case 2:
      return builtin_type (gdbarch)->builtin_uint16;
    case 4:
      return builtin_type (gdbarch)->builtin_uint32;
    case 8:
      return builtin_type (gdbarch)->builtin_uint64;
    default:
      error (_("no unsigned variant found for type, while evaluating "
               "DWARF expression"));
    }
}

/* Return the signed form of TYPE.  TYPE is necessarily an integral
   type.  */

static struct type *
get_signed_type (struct gdbarch *gdbarch, struct type *type)
{
  switch (TYPE_LENGTH (type))
    {
    case 1:
      return builtin_type (gdbarch)->builtin_int8;
    case 2:
      return builtin_type (gdbarch)->builtin_int16;
    case 4:
      return builtin_type (gdbarch)->builtin_int32;
    case 8:
      return builtin_type (gdbarch)->builtin_int64;
    default:
      error (_("no signed variant found for type, while evaluating "
               "DWARF expression"));
    }
}

/* Retrieve the N'th item on CTX's stack, converted to an address.  */

CORE_ADDR
dwarf_expr_fetch_address (struct dwarf_expr_context *ctx, int n)
{
  struct value *result_val = dwarf_expr_fetch (ctx, n);
  enum bfd_endian byte_order = gdbarch_byte_order (ctx->gdbarch);
  ULONGEST result;

  dwarf_require_integral (value_type (result_val));
  result = extract_unsigned_integer (value_contents (result_val),
                                     TYPE_LENGTH (value_type (result_val)),
                                     byte_order);

  /* For most architectures, calling extract_unsigned_integer() alone
     is sufficient for extracting an address.  However, some
     architectures (e.g. MIPS) use signed addresses and using
     extract_unsigned_integer() will not produce a correct
     result.  Make sure we invoke gdbarch_integer_to_address()
     for those architectures which require it.  */
  if (gdbarch_integer_to_address_p (ctx->gdbarch))
    {
      gdb_byte *buf = alloca (ctx->addr_size);
      struct type *int_type = get_unsigned_type (ctx->gdbarch,
                                                 value_type (result_val));

      store_unsigned_integer (buf, ctx->addr_size, byte_order, result);
      return gdbarch_integer_to_address (ctx->gdbarch, int_type, buf);
    }

  return (CORE_ADDR) result;
}

/* Retrieve the in_stack_memory flag of the N'th item on CTX's stack.  */

int
dwarf_expr_fetch_in_stack_memory (struct dwarf_expr_context *ctx, int n)
{
  if (ctx->stack_len <= n)
    error (_("Asked for position %d of stack, "
             "stack only has %d elements on it."),
           n, ctx->stack_len);
  return ctx->stack[ctx->stack_len - (1 + n)].in_stack_memory;
}

/* Return true if the expression stack is empty.  */

static int
dwarf_expr_stack_empty_p (struct dwarf_expr_context *ctx)
{
  return ctx->stack_len == 0;
}

/* Add a new piece to CTX's piece list.  */
static void
add_piece (struct dwarf_expr_context *ctx, ULONGEST size, ULONGEST offset)
{
  struct dwarf_expr_piece *p;

  ctx->num_pieces++;

  ctx->pieces = xrealloc (ctx->pieces,
                          (ctx->num_pieces
                           * sizeof (struct dwarf_expr_piece)));

  p = &ctx->pieces[ctx->num_pieces - 1];
  p->location = ctx->location;
  p->size = size;
  p->offset = offset;

  if (p->location == DWARF_VALUE_LITERAL)
    {
      p->v.literal.data = ctx->data;
      p->v.literal.length = ctx->len;
    }
  else if (dwarf_expr_stack_empty_p (ctx))
    {
      p->location = DWARF_VALUE_OPTIMIZED_OUT;
      /* Also reset the context's location, for our callers.  This is
         a somewhat strange approach, but this lets us avoid setting
         the location to DWARF_VALUE_MEMORY in all the individual
         cases in the evaluator.  */
      ctx->location = DWARF_VALUE_OPTIMIZED_OUT;
    }
  else if (p->location == DWARF_VALUE_MEMORY)
    {
      p->v.mem.addr = dwarf_expr_fetch_address (ctx, 0);
      p->v.mem.in_stack_memory = dwarf_expr_fetch_in_stack_memory (ctx, 0);
    }
  else if (p->location == DWARF_VALUE_IMPLICIT_POINTER)
    {
      p->v.ptr.die = ctx->len;
      p->v.ptr.offset = value_as_long (dwarf_expr_fetch (ctx, 0));
    }
  else if (p->location == DWARF_VALUE_REGISTER)
    p->v.regno = value_as_long (dwarf_expr_fetch (ctx, 0));
  else
    {
      p->v.value = dwarf_expr_fetch (ctx, 0);
    }
}

/* Evaluate the expression at ADDR (LEN bytes long) using the context
   CTX.  */

void
dwarf_expr_eval (struct dwarf_expr_context *ctx, const gdb_byte *addr,
                 size_t len)
{
  int old_recursion_depth = ctx->recursion_depth;

  execute_stack_op (ctx, addr, addr + len);

  /* CTX RECURSION_DEPTH becomes invalid if an exception was thrown here.  */

  gdb_assert (ctx->recursion_depth == old_recursion_depth);
}

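/* LEB128 encodes an integer seven bits per byte, least significant
   group first; the high bit of every byte except the last is set.
   For example, the unsigned LEB128 sequence 0xe5 0x8e 0x26 decodes to
   (0x26 << 14) | (0x0e << 7) | 0x65 == 624485.  */
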
/* Decode the unsigned LEB128 constant at BUF into the variable pointed to
   by R, and return the new value of BUF.  Verify that it doesn't extend
   past BUF_END.  R can be NULL, the constant is then only skipped.  */

const gdb_byte *
read_uleb128 (const gdb_byte *buf, const gdb_byte *buf_end, ULONGEST * r)
{
  unsigned shift = 0;
  ULONGEST result = 0;
  gdb_byte byte;

  while (1)
    {
      if (buf >= buf_end)
        error (_("read_uleb128: Corrupted DWARF expression."));

      byte = *buf++;
      result |= ((ULONGEST) (byte & 0x7f)) << shift;
      if ((byte & 0x80) == 0)
        break;
      shift += 7;
    }
  if (r)
    *r = result;
  return buf;
}

/* Decode the signed LEB128 constant at BUF into the variable pointed to
   by R, and return the new value of BUF.  Verify that it doesn't extend
   past BUF_END.  R can be NULL, the constant is then only skipped.  */

const gdb_byte *
read_sleb128 (const gdb_byte *buf, const gdb_byte *buf_end, LONGEST * r)
{
  unsigned shift = 0;
  LONGEST result = 0;
  gdb_byte byte;

  while (1)
    {
      if (buf >= buf_end)
        error (_("read_sleb128: Corrupted DWARF expression."));

      byte = *buf++;
      result |= ((ULONGEST) (byte & 0x7f)) << shift;
      shift += 7;
      if ((byte & 0x80) == 0)
        break;
    }
  if (shift < (sizeof (*r) * 8) && (byte & 0x40) != 0)
    result |= -(((LONGEST) 1) << shift);

  if (r)
    *r = result;
  return buf;
}

/* Check that the current operator is either at the end of an
   expression, or that it is followed by a composition operator.  */

void
dwarf_expr_require_composition (const gdb_byte *op_ptr, const gdb_byte *op_end,
                                const char *op_name)
{
  /* It seems like DW_OP_GNU_uninit should be handled here.  However,
     it doesn't seem to make sense for DW_OP_*_value, and it was not
     checked at the other place that this function is called.  */
  if (op_ptr != op_end && *op_ptr != DW_OP_piece && *op_ptr != DW_OP_bit_piece)
    error (_("DWARF-2 expression error: `%s' operations must be "
             "used either alone or in conjunction with DW_OP_piece "
             "or DW_OP_bit_piece."),
           op_name);
}

/* Return true iff the types T1 and T2 are "the same".  This only does
   checks that might reasonably be needed to compare DWARF base
   types.  */

static int
base_types_equal_p (struct type *t1, struct type *t2)
{
  if (TYPE_CODE (t1) != TYPE_CODE (t2))
    return 0;
  if (TYPE_UNSIGNED (t1) != TYPE_UNSIGNED (t2))
    return 0;
  return TYPE_LENGTH (t1) == TYPE_LENGTH (t2);
}

/* A convenience function to call get_base_type on CTX and return the
   result.  DIE is the DIE whose type we need.  SIZE is non-zero if
   this function should verify that the resulting type has the correct
   size.  */

static struct type *
dwarf_get_base_type (struct dwarf_expr_context *ctx, ULONGEST die, int size)
{
  struct type *result;

  if (ctx->funcs->get_base_type)
    {
      result = ctx->funcs->get_base_type (ctx, die);
      if (result == NULL)
        error (_("Could not find type for DW_OP_GNU_const_type"));
      if (size != 0 && TYPE_LENGTH (result) != size)
        error (_("DW_OP_GNU_const_type has different sizes for type and data"));
    }
  else
    /* Anything will do.  */
    result = builtin_type (ctx->gdbarch)->builtin_int;

  return result;
}

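/* The helpers below pattern-match small DWARF expression blocks.
   dwarf_block_to_dwarf_reg and dwarf_block_to_dwarf_reg_deref are
   used by the DW_OP_GNU_entry_value handling in execute_stack_op;
   the others are non-static and available to other callers.  */
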
/* If <BUF..BUF_END] contains DW_FORM_block* with single DW_OP_reg* return the
   DWARF register number.  Otherwise return -1.  */

int
dwarf_block_to_dwarf_reg (const gdb_byte *buf, const gdb_byte *buf_end)
{
  ULONGEST dwarf_reg;

  if (buf_end <= buf)
    return -1;
  if (*buf >= DW_OP_reg0 && *buf <= DW_OP_reg31)
    {
      if (buf_end - buf != 1)
        return -1;
      return *buf - DW_OP_reg0;
    }

  if (*buf == DW_OP_GNU_regval_type)
    {
      buf++;
      buf = read_uleb128 (buf, buf_end, &dwarf_reg);
      buf = read_uleb128 (buf, buf_end, NULL);
    }
  else if (*buf == DW_OP_regx)
    {
      buf++;
      buf = read_uleb128 (buf, buf_end, &dwarf_reg);
    }
  else
    return -1;
  if (buf != buf_end || (int) dwarf_reg != dwarf_reg)
    return -1;
  return dwarf_reg;
}

/* If <BUF..BUF_END] contains DW_FORM_block* with just DW_OP_breg*(0) and
   DW_OP_deref* return the DWARF register number.  Otherwise return -1.
   DEREF_SIZE_RETURN contains -1 for DW_OP_deref; otherwise it contains the
   size from DW_OP_deref_size.  */

int
dwarf_block_to_dwarf_reg_deref (const gdb_byte *buf, const gdb_byte *buf_end,
                                CORE_ADDR *deref_size_return)
{
  ULONGEST dwarf_reg;
  LONGEST offset;

  if (buf_end <= buf)
    return -1;
  if (*buf >= DW_OP_breg0 && *buf <= DW_OP_breg31)
    {
      dwarf_reg = *buf - DW_OP_breg0;
      buf++;
    }
  else if (*buf == DW_OP_bregx)
    {
      buf++;
      buf = read_uleb128 (buf, buf_end, &dwarf_reg);
      if ((int) dwarf_reg != dwarf_reg)
        return -1;
    }
  else
    return -1;

  buf = read_sleb128 (buf, buf_end, &offset);
  if (offset != 0)
    return -1;

  if (buf >= buf_end)
    return -1;

  if (*buf == DW_OP_deref)
    {
      buf++;
      *deref_size_return = -1;
    }
  else if (*buf == DW_OP_deref_size)
    {
      buf++;
      if (buf >= buf_end)
        return -1;
      *deref_size_return = *buf++;
    }
  else
    return -1;

  if (buf != buf_end)
    return -1;

  return dwarf_reg;
}

/* If <BUF..BUF_END] contains DW_FORM_block* with single DW_OP_fbreg(X) fill
   in FB_OFFSET_RETURN with the X offset and return 1.  Otherwise return 0.  */

int
dwarf_block_to_fb_offset (const gdb_byte *buf, const gdb_byte *buf_end,
                          CORE_ADDR *fb_offset_return)
{
  LONGEST fb_offset;

  if (buf_end <= buf)
    return 0;

  if (*buf != DW_OP_fbreg)
    return 0;
  buf++;

  buf = read_sleb128 (buf, buf_end, &fb_offset);
  *fb_offset_return = fb_offset;
  if (buf != buf_end || fb_offset != (LONGEST) *fb_offset_return)
    return 0;

  return 1;
}

/* If <BUF..BUF_END] contains DW_FORM_block* with single DW_OP_bregSP(X) fill
   in SP_OFFSET_RETURN with the X offset and return 1.  Otherwise return 0.
   The matched SP register number depends on GDBARCH.  */

int
dwarf_block_to_sp_offset (struct gdbarch *gdbarch, const gdb_byte *buf,
                          const gdb_byte *buf_end, CORE_ADDR *sp_offset_return)
{
  ULONGEST dwarf_reg;
  LONGEST sp_offset;

  if (buf_end <= buf)
    return 0;
  if (*buf >= DW_OP_breg0 && *buf <= DW_OP_breg31)
    {
      dwarf_reg = *buf - DW_OP_breg0;
      buf++;
    }
  else
    {
      if (*buf != DW_OP_bregx)
        return 0;
      buf++;
      buf = read_uleb128 (buf, buf_end, &dwarf_reg);
    }

  if (gdbarch_dwarf2_reg_to_regnum (gdbarch, dwarf_reg)
      != gdbarch_sp_regnum (gdbarch))
    return 0;

  buf = read_sleb128 (buf, buf_end, &sp_offset);
  *sp_offset_return = sp_offset;
  if (buf != buf_end || sp_offset != (LONGEST) *sp_offset_return)
    return 0;

  return 1;
}

/* The engine for the expression evaluator.  Using the context in CTX,
   evaluate the expression between OP_PTR and OP_END.  */

static void
execute_stack_op (struct dwarf_expr_context *ctx,
                  const gdb_byte *op_ptr, const gdb_byte *op_end)
{
  enum bfd_endian byte_order = gdbarch_byte_order (ctx->gdbarch);
  /* Old-style "untyped" DWARF values need special treatment in a
     couple of places, specifically DW_OP_mod and DW_OP_shr.  We need
     a special type for these values so we can distinguish them from
     values that have an explicit type, because explicitly-typed
     values do not need special treatment.  This special type must be
     different (in the `==' sense) from any base type coming from the
     CU.  */
  struct type *address_type = dwarf_expr_address_type (ctx);

  ctx->location = DWARF_VALUE_MEMORY;
  ctx->initialized = 1;  /* Default is initialized.  */

  if (ctx->recursion_depth > ctx->max_recursion_depth)
    error (_("DWARF-2 expression error: Loop detected (%d)."),
           ctx->recursion_depth);
  ctx->recursion_depth++;

  while (op_ptr < op_end)
    {
      enum dwarf_location_atom op = *op_ptr++;
      ULONGEST result;
      /* Assume the value is not in stack memory.
         Code that knows otherwise sets this to 1.
         Some arithmetic on stack addresses can probably be assumed to still
         be a stack address, but we skip this complication for now.
         This is just an optimization, so it's always ok to punt
         and leave this as 0.  */
      int in_stack_memory = 0;
      ULONGEST uoffset, reg;
      LONGEST offset;
      struct value *result_val = NULL;

      /* The DWARF expression might have a bug causing an infinite
         loop.  In that case, quitting is the only way out.  */
      QUIT;

      switch (op)
        {
        case DW_OP_lit0:
        case DW_OP_lit1:
        case DW_OP_lit2:
        case DW_OP_lit3:
        case DW_OP_lit4:
        case DW_OP_lit5:
        case DW_OP_lit6:
        case DW_OP_lit7:
        case DW_OP_lit8:
        case DW_OP_lit9:
        case DW_OP_lit10:
        case DW_OP_lit11:
        case DW_OP_lit12:
        case DW_OP_lit13:
        case DW_OP_lit14:
        case DW_OP_lit15:
        case DW_OP_lit16:
        case DW_OP_lit17:
        case DW_OP_lit18:
        case DW_OP_lit19:
        case DW_OP_lit20:
        case DW_OP_lit21:
        case DW_OP_lit22:
        case DW_OP_lit23:
        case DW_OP_lit24:
        case DW_OP_lit25:
        case DW_OP_lit26:
        case DW_OP_lit27:
        case DW_OP_lit28:
        case DW_OP_lit29:
        case DW_OP_lit30:
        case DW_OP_lit31:
          result = op - DW_OP_lit0;
          result_val = value_from_ulongest (address_type, result);
          break;

        case DW_OP_addr:
          result = extract_unsigned_integer (op_ptr,
                                             ctx->addr_size, byte_order);
          op_ptr += ctx->addr_size;
          /* Some versions of GCC emit DW_OP_addr before
             DW_OP_GNU_push_tls_address.  In this case the value is an
             index, not an address.  We don't support things like
             branching between the address and the TLS op.  */
          if (op_ptr >= op_end || *op_ptr != DW_OP_GNU_push_tls_address)
            result += ctx->offset;
          result_val = value_from_ulongest (address_type, result);
          break;

        case DW_OP_const1u:
          result = extract_unsigned_integer (op_ptr, 1, byte_order);
          result_val = value_from_ulongest (address_type, result);
          op_ptr += 1;
          break;
        case DW_OP_const1s:
          result = extract_signed_integer (op_ptr, 1, byte_order);
          result_val = value_from_ulongest (address_type, result);
          op_ptr += 1;
          break;
        case DW_OP_const2u:
          result = extract_unsigned_integer (op_ptr, 2, byte_order);
          result_val = value_from_ulongest (address_type, result);
          op_ptr += 2;
          break;
        case DW_OP_const2s:
          result = extract_signed_integer (op_ptr, 2, byte_order);
          result_val = value_from_ulongest (address_type, result);
          op_ptr += 2;
          break;
        case DW_OP_const4u:
          result = extract_unsigned_integer (op_ptr, 4, byte_order);
          result_val = value_from_ulongest (address_type, result);
          op_ptr += 4;
          break;
        case DW_OP_const4s:
          result = extract_signed_integer (op_ptr, 4, byte_order);
          result_val = value_from_ulongest (address_type, result);
          op_ptr += 4;
          break;
        case DW_OP_const8u:
          result = extract_unsigned_integer (op_ptr, 8, byte_order);
          result_val = value_from_ulongest (address_type, result);
          op_ptr += 8;
          break;
        case DW_OP_const8s:
          result = extract_signed_integer (op_ptr, 8, byte_order);
          result_val = value_from_ulongest (address_type, result);
          op_ptr += 8;
          break;
        case DW_OP_constu:
          op_ptr = read_uleb128 (op_ptr, op_end, &uoffset);
          result = uoffset;
          result_val = value_from_ulongest (address_type, result);
          break;
        case DW_OP_consts:
          op_ptr = read_sleb128 (op_ptr, op_end, &offset);
          result = offset;
          result_val = value_from_ulongest (address_type, result);
          break;

        /* The DW_OP_reg operations are required to occur alone in
           location expressions.  */
        case DW_OP_reg0:
        case DW_OP_reg1:
        case DW_OP_reg2:
        case DW_OP_reg3:
        case DW_OP_reg4:
        case DW_OP_reg5:
        case DW_OP_reg6:
        case DW_OP_reg7:
        case DW_OP_reg8:
        case DW_OP_reg9:
        case DW_OP_reg10:
        case DW_OP_reg11:
        case DW_OP_reg12:
        case DW_OP_reg13:
        case DW_OP_reg14:
        case DW_OP_reg15:
        case DW_OP_reg16:
        case DW_OP_reg17:
        case DW_OP_reg18:
        case DW_OP_reg19:
        case DW_OP_reg20:
        case DW_OP_reg21:
        case DW_OP_reg22:
        case DW_OP_reg23:
        case DW_OP_reg24:
        case DW_OP_reg25:
        case DW_OP_reg26:
        case DW_OP_reg27:
        case DW_OP_reg28:
        case DW_OP_reg29:
        case DW_OP_reg30:
        case DW_OP_reg31:
          if (op_ptr != op_end
              && *op_ptr != DW_OP_piece
              && *op_ptr != DW_OP_bit_piece
              && *op_ptr != DW_OP_GNU_uninit)
            error (_("DWARF-2 expression error: DW_OP_reg operations must be "
                     "used either alone or in conjunction with DW_OP_piece "
                     "or DW_OP_bit_piece."));

          result = op - DW_OP_reg0;
          result_val = value_from_ulongest (address_type, result);
          ctx->location = DWARF_VALUE_REGISTER;
          break;

        case DW_OP_regx:
          op_ptr = read_uleb128 (op_ptr, op_end, &reg);
          dwarf_expr_require_composition (op_ptr, op_end, "DW_OP_regx");

          result = reg;
          result_val = value_from_ulongest (address_type, result);
          ctx->location = DWARF_VALUE_REGISTER;
          break;

        case DW_OP_implicit_value:
          {
            ULONGEST len;

            op_ptr = read_uleb128 (op_ptr, op_end, &len);
            if (op_ptr + len > op_end)
              error (_("DW_OP_implicit_value: too few bytes available."));
            ctx->len = len;
            ctx->data = op_ptr;
            ctx->location = DWARF_VALUE_LITERAL;
            op_ptr += len;
            dwarf_expr_require_composition (op_ptr, op_end,
                                            "DW_OP_implicit_value");
          }
          goto no_push;

        case DW_OP_stack_value:
          ctx->location = DWARF_VALUE_STACK;
          dwarf_expr_require_composition (op_ptr, op_end, "DW_OP_stack_value");
          goto no_push;

        case DW_OP_GNU_implicit_pointer:
          {
            ULONGEST die;
            LONGEST len;

            if (ctx->ref_addr_size == -1)
              error (_("DWARF-2 expression error: DW_OP_GNU_implicit_pointer "
                       "is not allowed in frame context"));

            /* The referred-to DIE.  */
            ctx->len = extract_unsigned_integer (op_ptr, ctx->ref_addr_size,
                                                 byte_order);
            op_ptr += ctx->ref_addr_size;

            /* The byte offset into the data.  */
            op_ptr = read_sleb128 (op_ptr, op_end, &len);
            result = (ULONGEST) len;
            result_val = value_from_ulongest (address_type, result);

            ctx->location = DWARF_VALUE_IMPLICIT_POINTER;
            dwarf_expr_require_composition (op_ptr, op_end,
                                            "DW_OP_GNU_implicit_pointer");
          }
          break;

        case DW_OP_breg0:
        case DW_OP_breg1:
        case DW_OP_breg2:
        case DW_OP_breg3:
        case DW_OP_breg4:
        case DW_OP_breg5:
        case DW_OP_breg6:
        case DW_OP_breg7:
        case DW_OP_breg8:
        case DW_OP_breg9:
        case DW_OP_breg10:
        case DW_OP_breg11:
        case DW_OP_breg12:
        case DW_OP_breg13:
        case DW_OP_breg14:
        case DW_OP_breg15:
        case DW_OP_breg16:
        case DW_OP_breg17:
        case DW_OP_breg18:
        case DW_OP_breg19:
        case DW_OP_breg20:
        case DW_OP_breg21:
        case DW_OP_breg22:
        case DW_OP_breg23:
        case DW_OP_breg24:
        case DW_OP_breg25:
        case DW_OP_breg26:
        case DW_OP_breg27:
        case DW_OP_breg28:
        case DW_OP_breg29:
        case DW_OP_breg30:
        case DW_OP_breg31:
          {
            op_ptr = read_sleb128 (op_ptr, op_end, &offset);
            result = (ctx->funcs->read_reg) (ctx->baton, op - DW_OP_breg0);
            result += offset;
            result_val = value_from_ulongest (address_type, result);
          }
          break;
        case DW_OP_bregx:
          {
            op_ptr = read_uleb128 (op_ptr, op_end, &reg);
            op_ptr = read_sleb128 (op_ptr, op_end, &offset);
            result = (ctx->funcs->read_reg) (ctx->baton, reg);
            result += offset;
            result_val = value_from_ulongest (address_type, result);
          }
          break;
        case DW_OP_fbreg:
          {
            const gdb_byte *datastart;
            size_t datalen;
            unsigned int before_stack_len;

            op_ptr = read_sleb128 (op_ptr, op_end, &offset);
            /* Rather than create a whole new context, we simply
               record the stack length before execution, then reset it
               afterwards, effectively erasing whatever the recursive
               call put there.  */
            before_stack_len = ctx->stack_len;
            /* FIXME: cagney/2003-03-26: This code should be using
               get_frame_base_address(), and then implement a dwarf2
               specific this_base method.  */
            (ctx->funcs->get_frame_base) (ctx->baton, &datastart, &datalen);
            dwarf_expr_eval (ctx, datastart, datalen);
            if (ctx->location == DWARF_VALUE_MEMORY)
              result = dwarf_expr_fetch_address (ctx, 0);
            else if (ctx->location == DWARF_VALUE_REGISTER)
              result = (ctx->funcs->read_reg) (ctx->baton,
                         value_as_long (dwarf_expr_fetch (ctx, 0)));
            else
              error (_("Not implemented: computing frame "
                       "base using explicit value operator"));
            result = result + offset;
            result_val = value_from_ulongest (address_type, result);
            in_stack_memory = 1;
            ctx->stack_len = before_stack_len;
            ctx->location = DWARF_VALUE_MEMORY;
          }
          break;

        case DW_OP_dup:
          result_val = dwarf_expr_fetch (ctx, 0);
          in_stack_memory = dwarf_expr_fetch_in_stack_memory (ctx, 0);
          break;

        case DW_OP_drop:
          dwarf_expr_pop (ctx);
          goto no_push;

        case DW_OP_pick:
          offset = *op_ptr++;
          result_val = dwarf_expr_fetch (ctx, offset);
          in_stack_memory = dwarf_expr_fetch_in_stack_memory (ctx, offset);
          break;

        case DW_OP_swap:
          {
            struct dwarf_stack_value t1, t2;

            if (ctx->stack_len < 2)
              error (_("Not enough elements for "
                       "DW_OP_swap.  Need 2, have %d."),
                     ctx->stack_len);
            t1 = ctx->stack[ctx->stack_len - 1];
            t2 = ctx->stack[ctx->stack_len - 2];
            ctx->stack[ctx->stack_len - 1] = t2;
            ctx->stack[ctx->stack_len - 2] = t1;
            goto no_push;
          }

        case DW_OP_over:
          result_val = dwarf_expr_fetch (ctx, 1);
          in_stack_memory = dwarf_expr_fetch_in_stack_memory (ctx, 1);
          break;

        case DW_OP_rot:
          {
            struct dwarf_stack_value t1, t2, t3;

            if (ctx->stack_len < 3)
              error (_("Not enough elements for "
                       "DW_OP_rot.  Need 3, have %d."),
                     ctx->stack_len);
            t1 = ctx->stack[ctx->stack_len - 1];
            t2 = ctx->stack[ctx->stack_len - 2];
            t3 = ctx->stack[ctx->stack_len - 3];
            ctx->stack[ctx->stack_len - 1] = t2;
            ctx->stack[ctx->stack_len - 2] = t3;
            ctx->stack[ctx->stack_len - 3] = t1;
            goto no_push;
          }

        case DW_OP_deref:
        case DW_OP_deref_size:
        case DW_OP_GNU_deref_type:
          {
            int addr_size = (op == DW_OP_deref ? ctx->addr_size : *op_ptr++);
            gdb_byte *buf = alloca (addr_size);
            CORE_ADDR addr = dwarf_expr_fetch_address (ctx, 0);
            struct type *type;

            dwarf_expr_pop (ctx);

            if (op == DW_OP_GNU_deref_type)
              {
                ULONGEST type_die;

                op_ptr = read_uleb128 (op_ptr, op_end, &type_die);
                type = dwarf_get_base_type (ctx, type_die, 0);
              }
            else
              type = address_type;

            (ctx->funcs->read_mem) (ctx->baton, buf, addr, addr_size);

            /* If the size of the object read from memory is different
               from the type length, we need to zero-extend it.  */
            if (TYPE_LENGTH (type) != addr_size)
              {
                ULONGEST result =
                  extract_unsigned_integer (buf, addr_size, byte_order);

                buf = alloca (TYPE_LENGTH (type));
                store_unsigned_integer (buf, TYPE_LENGTH (type),
                                        byte_order, result);
              }

            result_val = value_from_contents_and_address (type, buf, addr);
            break;
          }

        case DW_OP_abs:
        case DW_OP_neg:
        case DW_OP_not:
        case DW_OP_plus_uconst:
          {
            /* Unary operations.  */
            result_val = dwarf_expr_fetch (ctx, 0);
            dwarf_expr_pop (ctx);

            switch (op)
              {
              case DW_OP_abs:
                if (value_less (result_val,
                                value_zero (value_type (result_val), not_lval)))
                  result_val = value_neg (result_val);
                break;
              case DW_OP_neg:
                result_val = value_neg (result_val);
                break;
              case DW_OP_not:
                dwarf_require_integral (value_type (result_val));
                result_val = value_complement (result_val);
                break;
              case DW_OP_plus_uconst:
                dwarf_require_integral (value_type (result_val));
                result = value_as_long (result_val);
                op_ptr = read_uleb128 (op_ptr, op_end, &reg);
                result += reg;
                result_val = value_from_ulongest (address_type, result);
                break;
              }
          }
          break;

        case DW_OP_and:
        case DW_OP_div:
        case DW_OP_minus:
        case DW_OP_mod:
        case DW_OP_mul:
        case DW_OP_or:
        case DW_OP_plus:
        case DW_OP_shl:
        case DW_OP_shr:
        case DW_OP_shra:
        case DW_OP_xor:
        case DW_OP_le:
        case DW_OP_ge:
        case DW_OP_eq:
        case DW_OP_lt:
        case DW_OP_gt:
        case DW_OP_ne:
          {
            /* Binary operations.  */
            struct value *first, *second;

            second = dwarf_expr_fetch (ctx, 0);
            dwarf_expr_pop (ctx);

            first = dwarf_expr_fetch (ctx, 0);
            dwarf_expr_pop (ctx);

            if (! base_types_equal_p (value_type (first), value_type (second)))
              error (_("Incompatible types on DWARF stack"));

            switch (op)
              {
              case DW_OP_and:
                dwarf_require_integral (value_type (first));
                dwarf_require_integral (value_type (second));
                result_val = value_binop (first, second, BINOP_BITWISE_AND);
                break;
              case DW_OP_div:
                result_val = value_binop (first, second, BINOP_DIV);
                break;
              case DW_OP_minus:
                result_val = value_binop (first, second, BINOP_SUB);
                break;
              case DW_OP_mod:
                {
                  int cast_back = 0;
                  struct type *orig_type = value_type (first);

                  /* We have to special-case "old-style" untyped values
                     -- these must have mod computed using unsigned
                     math.  */
                  if (orig_type == address_type)
                    {
                      struct type *utype
                        = get_unsigned_type (ctx->gdbarch, orig_type);

                      cast_back = 1;
                      first = value_cast (utype, first);
                      second = value_cast (utype, second);
                    }
                  /* Note that value_binop doesn't handle float or
                     decimal float here.  This seems unimportant.  */
                  result_val = value_binop (first, second, BINOP_MOD);
                  if (cast_back)
                    result_val = value_cast (orig_type, result_val);
                }
                break;
              case DW_OP_mul:
                result_val = value_binop (first, second, BINOP_MUL);
                break;
              case DW_OP_or:
                dwarf_require_integral (value_type (first));
                dwarf_require_integral (value_type (second));
                result_val = value_binop (first, second, BINOP_BITWISE_IOR);
                break;
              case DW_OP_plus:
                result_val = value_binop (first, second, BINOP_ADD);
                break;
              case DW_OP_shl:
                dwarf_require_integral (value_type (first));
                dwarf_require_integral (value_type (second));
                result_val = value_binop (first, second, BINOP_LSH);
                break;
              case DW_OP_shr:
                dwarf_require_integral (value_type (first));
                dwarf_require_integral (value_type (second));
                if (!TYPE_UNSIGNED (value_type (first)))
                  {
                    struct type *utype
                      = get_unsigned_type (ctx->gdbarch, value_type (first));

                    first = value_cast (utype, first);
                  }

                result_val = value_binop (first, second, BINOP_RSH);
                /* Make sure we wind up with the same type we started
                   with.  */
                if (value_type (result_val) != value_type (second))
                  result_val = value_cast (value_type (second), result_val);
                break;
              case DW_OP_shra:
                dwarf_require_integral (value_type (first));
                dwarf_require_integral (value_type (second));
                if (TYPE_UNSIGNED (value_type (first)))
                  {
                    struct type *stype
                      = get_signed_type (ctx->gdbarch, value_type (first));

                    first = value_cast (stype, first);
                  }

                result_val = value_binop (first, second, BINOP_RSH);
                /* Make sure we wind up with the same type we started
                   with.  */
                if (value_type (result_val) != value_type (second))
                  result_val = value_cast (value_type (second), result_val);
                break;
              case DW_OP_xor:
                dwarf_require_integral (value_type (first));
                dwarf_require_integral (value_type (second));
                result_val = value_binop (first, second, BINOP_BITWISE_XOR);
                break;
              case DW_OP_le:
                /* A <= B is !(B < A).  */
                result = ! value_less (second, first);
                result_val = value_from_ulongest (address_type, result);
                break;
              case DW_OP_ge:
                /* A >= B is !(A < B).  */
                result = ! value_less (first, second);
                result_val = value_from_ulongest (address_type, result);
                break;
              case DW_OP_eq:
                result = value_equal (first, second);
                result_val = value_from_ulongest (address_type, result);
                break;
              case DW_OP_lt:
                result = value_less (first, second);
                result_val = value_from_ulongest (address_type, result);
                break;
              case DW_OP_gt:
                /* A > B is B < A.  */
                result = value_less (second, first);
                result_val = value_from_ulongest (address_type, result);
                break;
              case DW_OP_ne:
                result = ! value_equal (first, second);
                result_val = value_from_ulongest (address_type, result);
                break;
              default:
                internal_error (__FILE__, __LINE__,
                                _("Can't be reached."));
              }
          }
          break;

        case DW_OP_call_frame_cfa:
          result = (ctx->funcs->get_frame_cfa) (ctx->baton);
          result_val = value_from_ulongest (address_type, result);
          in_stack_memory = 1;
          break;

        case DW_OP_GNU_push_tls_address:
          /* Variable is at a constant offset in the thread-local
             storage block into the objfile for the current thread and
             the dynamic linker module containing this expression.  Here
             we return the offset from that base.  The top of the
             stack has the offset from the beginning of the thread
             control block at which the variable is located.  Nothing
             should follow this operator, so the top of stack would be
             returned.  */
          result = value_as_long (dwarf_expr_fetch (ctx, 0));
          dwarf_expr_pop (ctx);
          result = (ctx->funcs->get_tls_address) (ctx->baton, result);
          result_val = value_from_ulongest (address_type, result);
          break;

        case DW_OP_skip:
          offset = extract_signed_integer (op_ptr, 2, byte_order);
          op_ptr += 2;
          op_ptr += offset;
          goto no_push;

        case DW_OP_bra:
          {
            struct value *val;

            offset = extract_signed_integer (op_ptr, 2, byte_order);
            op_ptr += 2;
            val = dwarf_expr_fetch (ctx, 0);
            dwarf_require_integral (value_type (val));
            if (value_as_long (val) != 0)
              op_ptr += offset;
            dwarf_expr_pop (ctx);
          }
          goto no_push;

        case DW_OP_nop:
          goto no_push;

        case DW_OP_piece:
          {
            ULONGEST size;

            /* Record the piece.  */
            op_ptr = read_uleb128 (op_ptr, op_end, &size);
            add_piece (ctx, 8 * size, 0);

            /* Pop off the address/regnum, and reset the location
               type.  */
            if (ctx->location != DWARF_VALUE_LITERAL
                && ctx->location != DWARF_VALUE_OPTIMIZED_OUT)
              dwarf_expr_pop (ctx);
            ctx->location = DWARF_VALUE_MEMORY;
          }
          goto no_push;

        case DW_OP_bit_piece:
          {
            ULONGEST size, offset;

            /* Record the piece.  */
            op_ptr = read_uleb128 (op_ptr, op_end, &size);
            op_ptr = read_uleb128 (op_ptr, op_end, &offset);
            add_piece (ctx, size, offset);

            /* Pop off the address/regnum, and reset the location
               type.  */
            if (ctx->location != DWARF_VALUE_LITERAL
                && ctx->location != DWARF_VALUE_OPTIMIZED_OUT)
              dwarf_expr_pop (ctx);
            ctx->location = DWARF_VALUE_MEMORY;
          }
          goto no_push;

        case DW_OP_GNU_uninit:
          if (op_ptr != op_end)
            error (_("DWARF-2 expression error: DW_OP_GNU_uninit must always "
                     "be the very last op."));

          ctx->initialized = 0;
          goto no_push;

        case DW_OP_call2:
          result = extract_unsigned_integer (op_ptr, 2, byte_order);
          op_ptr += 2;
          ctx->funcs->dwarf_call (ctx, result);
          goto no_push;

        case DW_OP_call4:
          result = extract_unsigned_integer (op_ptr, 4, byte_order);
          op_ptr += 4;
          ctx->funcs->dwarf_call (ctx, result);
          goto no_push;

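        /* The operand of DW_OP_GNU_entry_value is a DWARF block
           describing the value a register or memory location had on
           entry to the current function.  Only the two simple block
           forms matched by dwarf_block_to_dwarf_reg and
           dwarf_block_to_dwarf_reg_deref above are handled here.  */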
        case DW_OP_GNU_entry_value:
          {
            ULONGEST len;
            int dwarf_reg;
            CORE_ADDR deref_size;

            op_ptr = read_uleb128 (op_ptr, op_end, &len);
            if (op_ptr + len > op_end)
              error (_("DW_OP_GNU_entry_value: too few bytes available."));

            dwarf_reg = dwarf_block_to_dwarf_reg (op_ptr, op_ptr + len);
            if (dwarf_reg != -1)
              {
                op_ptr += len;
                ctx->funcs->push_dwarf_reg_entry_value (ctx, dwarf_reg,
                                                        0 /* unused */,
                                                        -1 /* deref_size */);
                goto no_push;
              }

            dwarf_reg = dwarf_block_to_dwarf_reg_deref (op_ptr, op_ptr + len,
                                                        &deref_size);
            if (dwarf_reg != -1)
              {
                if (deref_size == -1)
                  deref_size = ctx->addr_size;
                op_ptr += len;
                ctx->funcs->push_dwarf_reg_entry_value (ctx, dwarf_reg,
                                                        0 /* unused */,
                                                        deref_size);
                goto no_push;
              }

            error (_("DWARF-2 expression error: DW_OP_GNU_entry_value is "
                     "supported only for single DW_OP_reg* "
                     "or for DW_OP_breg*(0)+DW_OP_deref*"));
          }

        case DW_OP_GNU_const_type:
          {
            ULONGEST type_die;
            int n;
            const gdb_byte *data;
            struct type *type;

            op_ptr = read_uleb128 (op_ptr, op_end, &type_die);
            n = *op_ptr++;
            data = op_ptr;
            op_ptr += n;

            type = dwarf_get_base_type (ctx, type_die, n);
            result_val = value_from_contents (type, data);
          }
          break;

        case DW_OP_GNU_regval_type:
          {
            ULONGEST type_die;
            struct type *type;

            op_ptr = read_uleb128 (op_ptr, op_end, &reg);
            op_ptr = read_uleb128 (op_ptr, op_end, &type_die);

            type = dwarf_get_base_type (ctx, type_die, 0);
            result = (ctx->funcs->read_reg) (ctx->baton, reg);
            result_val = value_from_ulongest (address_type, result);
            result_val = value_from_contents (type,
                                              value_contents_all (result_val));
          }
          break;

        case DW_OP_GNU_convert:
        case DW_OP_GNU_reinterpret:
          {
            ULONGEST type_die;
            struct type *type;

            op_ptr = read_uleb128 (op_ptr, op_end, &type_die);

            if (type_die == 0)
              type = address_type;
            else
              type = dwarf_get_base_type (ctx, type_die, 0);

            result_val = dwarf_expr_fetch (ctx, 0);
            dwarf_expr_pop (ctx);

            if (op == DW_OP_GNU_convert)
              result_val = value_cast (type, result_val);
            else if (type == value_type (result_val))
              {
                /* Nothing.  */
              }
            else if (TYPE_LENGTH (type)
                     != TYPE_LENGTH (value_type (result_val)))
              error (_("DW_OP_GNU_reinterpret has wrong size"));
            else
              result_val
                = value_from_contents (type,
                                       value_contents_all (result_val));
          }
          break;

        default:
          error (_("Unhandled dwarf expression opcode 0x%x"), op);
        }

      /* Most things push a result value.  */
      gdb_assert (result_val != NULL);
      dwarf_expr_push (ctx, result_val, in_stack_memory);
    no_push:
      ;
    }

  /* To simplify our main caller, if the result is an implicit
     pointer, then make a pieced value.  This is ok because we can't
     have implicit pointers in contexts where pieces are invalid.  */
  if (ctx->location == DWARF_VALUE_IMPLICIT_POINTER)
    add_piece (ctx, 8 * ctx->addr_size, 0);

abort_expression:
  ctx->recursion_depth--;
  gdb_assert (ctx->recursion_depth >= 0);
}

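/* The ctx_no_* functions below are stub dwarf_expr_context_funcs
   callbacks for contexts in which the corresponding DWARF operation
   is not meaningful; each one simply reports an error for the
   operation it stands in for.  */
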
/* Stub dwarf_expr_context_funcs.get_frame_base implementation.  */

void
ctx_no_get_frame_base (void *baton, const gdb_byte **start, size_t *length)
{
  error (_("%s is invalid in this context"), "DW_OP_fbreg");
}

/* Stub dwarf_expr_context_funcs.get_frame_cfa implementation.  */

CORE_ADDR
ctx_no_get_frame_cfa (void *baton)
{
  error (_("%s is invalid in this context"), "DW_OP_call_frame_cfa");
}

/* Stub dwarf_expr_context_funcs.get_frame_pc implementation.  */

CORE_ADDR
ctx_no_get_frame_pc (void *baton)
{
  error (_("%s is invalid in this context"), "DW_OP_GNU_implicit_pointer");
}

/* Stub dwarf_expr_context_funcs.get_tls_address implementation.  */

CORE_ADDR
ctx_no_get_tls_address (void *baton, CORE_ADDR offset)
{
  error (_("%s is invalid in this context"), "DW_OP_GNU_push_tls_address");
}

/* Stub dwarf_expr_context_funcs.dwarf_call implementation.  */

void
ctx_no_dwarf_call (struct dwarf_expr_context *ctx, size_t die_offset)
{
  error (_("%s is invalid in this context"), "DW_OP_call*");
}

/* Stub dwarf_expr_context_funcs.get_base_type implementation.  */

struct type *
ctx_no_get_base_type (struct dwarf_expr_context *ctx, size_t die)
{
  error (_("Support for typed DWARF is not supported in this context"));
}

/* Stub dwarf_expr_context_funcs.push_dwarf_block_entry_value
   implementation.  */

void
ctx_no_push_dwarf_reg_entry_value (struct dwarf_expr_context *ctx,
                                   int dwarf_reg, CORE_ADDR fb_offset,
                                   int deref_size)
{
  internal_error (__FILE__, __LINE__,
                  _("Support for DW_OP_GNU_entry_value is unimplemented"));
}

void
_initialize_dwarf2expr (void)
{
  dwarf_arch_cookie
    = gdbarch_data_register_post_init (dwarf_gdbarch_types_init);
}