Upgrade GDB from 7.4.1 to 7.6.1 on the vendor branch
contrib/gdb-7/gdb/dwarf2expr.c
/* DWARF 2 Expression Evaluator.

   Copyright (C) 2001-2013 Free Software Foundation, Inc.

   Contributed by Daniel Berlin (dan@dberlin.org)

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>. */

#include "defs.h"
#include "symtab.h"
#include "gdbtypes.h"
#include "value.h"
#include "gdbcore.h"
#include "dwarf2.h"
#include "dwarf2expr.h"
#include "gdb_assert.h"

/* Local prototypes. */

static void execute_stack_op (struct dwarf_expr_context *,
                              const gdb_byte *, const gdb_byte *);

/* Cookie for gdbarch data. */

static struct gdbarch_data *dwarf_arch_cookie;

/* This holds gdbarch-specific types used by the DWARF expression
   evaluator.  See comments in execute_stack_op. */

struct dwarf_gdbarch_types
{
  struct type *dw_types[3];
};

/* Allocate and fill in dwarf_gdbarch_types for an arch. */

static void *
dwarf_gdbarch_types_init (struct gdbarch *gdbarch)
{
  struct dwarf_gdbarch_types *types
    = GDBARCH_OBSTACK_ZALLOC (gdbarch, struct dwarf_gdbarch_types);

  /* The types themselves are lazily initialized. */

  return types;
}

/* Return the type used for DWARF operations where the type is
   unspecified in the DWARF spec.  Only certain sizes are
   supported. */

static struct type *
dwarf_expr_address_type (struct dwarf_expr_context *ctx)
{
  struct dwarf_gdbarch_types *types = gdbarch_data (ctx->gdbarch,
                                                    dwarf_arch_cookie);
  int ndx;

  if (ctx->addr_size == 2)
    ndx = 0;
  else if (ctx->addr_size == 4)
    ndx = 1;
  else if (ctx->addr_size == 8)
    ndx = 2;
  else
    error (_("Unsupported address size in DWARF expressions: %d bits"),
           8 * ctx->addr_size);

  if (types->dw_types[ndx] == NULL)
    types->dw_types[ndx]
      = arch_integer_type (ctx->gdbarch,
                           8 * ctx->addr_size,
                           0, "<signed DWARF address type>");

  return types->dw_types[ndx];
}

/* Create a new context for the expression evaluator. */

struct dwarf_expr_context *
new_dwarf_expr_context (void)
{
  struct dwarf_expr_context *retval;

  retval = xcalloc (1, sizeof (struct dwarf_expr_context));
  retval->stack_len = 0;
  retval->stack_allocated = 10;
  retval->stack = xmalloc (retval->stack_allocated
                           * sizeof (struct dwarf_stack_value));
  retval->num_pieces = 0;
  retval->pieces = 0;
  retval->max_recursion_depth = 0x100;
  return retval;
}

/* Release the memory allocated to CTX. */

void
free_dwarf_expr_context (struct dwarf_expr_context *ctx)
{
  xfree (ctx->stack);
  xfree (ctx->pieces);
  xfree (ctx);
}

/* Helper for make_cleanup_free_dwarf_expr_context. */

static void
free_dwarf_expr_context_cleanup (void *arg)
{
  free_dwarf_expr_context (arg);
}

/* Return a cleanup that calls free_dwarf_expr_context. */

struct cleanup *
make_cleanup_free_dwarf_expr_context (struct dwarf_expr_context *ctx)
{
  return make_cleanup (free_dwarf_expr_context_cleanup, ctx);
}

/* Expand the memory allocated to CTX's stack to contain at least
   NEED more elements than are currently used. */

static void
dwarf_expr_grow_stack (struct dwarf_expr_context *ctx, size_t need)
{
  if (ctx->stack_len + need > ctx->stack_allocated)
    {
      size_t newlen = ctx->stack_len + need + 10;

      ctx->stack = xrealloc (ctx->stack,
                             newlen * sizeof (struct dwarf_stack_value));
      ctx->stack_allocated = newlen;
    }
}

/* Push VALUE onto CTX's stack. */

static void
dwarf_expr_push (struct dwarf_expr_context *ctx, struct value *value,
                 int in_stack_memory)
{
  struct dwarf_stack_value *v;

  dwarf_expr_grow_stack (ctx, 1);
  v = &ctx->stack[ctx->stack_len++];
  v->value = value;
  v->in_stack_memory = in_stack_memory;
}

/* Push VALUE onto CTX's stack. */

void
dwarf_expr_push_address (struct dwarf_expr_context *ctx, CORE_ADDR value,
                         int in_stack_memory)
{
  dwarf_expr_push (ctx,
                   value_from_ulongest (dwarf_expr_address_type (ctx), value),
                   in_stack_memory);
}

/* Pop the top item off of CTX's stack. */

static void
dwarf_expr_pop (struct dwarf_expr_context *ctx)
{
  if (ctx->stack_len <= 0)
    error (_("dwarf expression stack underflow"));
  ctx->stack_len--;
}

/* Retrieve the N'th item on CTX's stack. */

struct value *
dwarf_expr_fetch (struct dwarf_expr_context *ctx, int n)
{
  if (ctx->stack_len <= n)
    error (_("Asked for position %d of stack, "
             "stack only has %d elements on it."),
           n, ctx->stack_len);
  return ctx->stack[ctx->stack_len - (1 + n)].value;
}

/* Require that TYPE be an integral type; throw an exception if not. */

static void
dwarf_require_integral (struct type *type)
{
  if (TYPE_CODE (type) != TYPE_CODE_INT
      && TYPE_CODE (type) != TYPE_CODE_CHAR
      && TYPE_CODE (type) != TYPE_CODE_BOOL)
    error (_("integral type expected in DWARF expression"));
}

/* Return the unsigned form of TYPE.  TYPE is necessarily an integral
   type. */

static struct type *
get_unsigned_type (struct gdbarch *gdbarch, struct type *type)
{
  switch (TYPE_LENGTH (type))
    {
    case 1:
      return builtin_type (gdbarch)->builtin_uint8;
    case 2:
      return builtin_type (gdbarch)->builtin_uint16;
    case 4:
      return builtin_type (gdbarch)->builtin_uint32;
    case 8:
      return builtin_type (gdbarch)->builtin_uint64;
    default:
      error (_("no unsigned variant found for type, while evaluating "
               "DWARF expression"));
    }
}

/* Return the signed form of TYPE.  TYPE is necessarily an integral
   type. */

static struct type *
get_signed_type (struct gdbarch *gdbarch, struct type *type)
{
  switch (TYPE_LENGTH (type))
    {
    case 1:
      return builtin_type (gdbarch)->builtin_int8;
    case 2:
      return builtin_type (gdbarch)->builtin_int16;
    case 4:
      return builtin_type (gdbarch)->builtin_int32;
    case 8:
      return builtin_type (gdbarch)->builtin_int64;
    default:
      error (_("no signed variant found for type, while evaluating "
               "DWARF expression"));
    }
}

/* Retrieve the N'th item on CTX's stack, converted to an address. */

CORE_ADDR
dwarf_expr_fetch_address (struct dwarf_expr_context *ctx, int n)
{
  struct value *result_val = dwarf_expr_fetch (ctx, n);
  enum bfd_endian byte_order = gdbarch_byte_order (ctx->gdbarch);
  ULONGEST result;

  dwarf_require_integral (value_type (result_val));
  result = extract_unsigned_integer (value_contents (result_val),
                                     TYPE_LENGTH (value_type (result_val)),
                                     byte_order);

  /* For most architectures, calling extract_unsigned_integer() alone
     is sufficient for extracting an address.  However, some
     architectures (e.g. MIPS) use signed addresses and using
     extract_unsigned_integer() will not produce a correct
     result.  Make sure we invoke gdbarch_integer_to_address()
     for those architectures which require it. */
  if (gdbarch_integer_to_address_p (ctx->gdbarch))
    {
      gdb_byte *buf = alloca (ctx->addr_size);
      struct type *int_type = get_unsigned_type (ctx->gdbarch,
                                                 value_type (result_val));

      store_unsigned_integer (buf, ctx->addr_size, byte_order, result);
      return gdbarch_integer_to_address (ctx->gdbarch, int_type, buf);
    }

  return (CORE_ADDR) result;
}

/* Retrieve the in_stack_memory flag of the N'th item on CTX's stack. */

int
dwarf_expr_fetch_in_stack_memory (struct dwarf_expr_context *ctx, int n)
{
  if (ctx->stack_len <= n)
    error (_("Asked for position %d of stack, "
             "stack only has %d elements on it."),
           n, ctx->stack_len);
  return ctx->stack[ctx->stack_len - (1 + n)].in_stack_memory;
}

/* Return true if the expression stack is empty. */

static int
dwarf_expr_stack_empty_p (struct dwarf_expr_context *ctx)
{
  return ctx->stack_len == 0;
}

/* Add a new piece to CTX's piece list. */
static void
add_piece (struct dwarf_expr_context *ctx, ULONGEST size, ULONGEST offset)
{
  struct dwarf_expr_piece *p;

  ctx->num_pieces++;

  ctx->pieces = xrealloc (ctx->pieces,
                          (ctx->num_pieces
                           * sizeof (struct dwarf_expr_piece)));

  p = &ctx->pieces[ctx->num_pieces - 1];
  p->location = ctx->location;
  p->size = size;
  p->offset = offset;

  if (p->location == DWARF_VALUE_LITERAL)
    {
      p->v.literal.data = ctx->data;
      p->v.literal.length = ctx->len;
    }
  else if (dwarf_expr_stack_empty_p (ctx))
    {
      p->location = DWARF_VALUE_OPTIMIZED_OUT;
      /* Also reset the context's location, for our callers.  This is
         a somewhat strange approach, but this lets us avoid setting
         the location to DWARF_VALUE_MEMORY in all the individual
         cases in the evaluator. */
      ctx->location = DWARF_VALUE_OPTIMIZED_OUT;
    }
  else if (p->location == DWARF_VALUE_MEMORY)
    {
      p->v.mem.addr = dwarf_expr_fetch_address (ctx, 0);
      p->v.mem.in_stack_memory = dwarf_expr_fetch_in_stack_memory (ctx, 0);
    }
  else if (p->location == DWARF_VALUE_IMPLICIT_POINTER)
    {
      p->v.ptr.die.sect_off = ctx->len;
      p->v.ptr.offset = value_as_long (dwarf_expr_fetch (ctx, 0));
    }
  else if (p->location == DWARF_VALUE_REGISTER)
    p->v.regno = value_as_long (dwarf_expr_fetch (ctx, 0));
  else
    {
      p->v.value = dwarf_expr_fetch (ctx, 0);
    }
}
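
/* For illustration only (an editor's sketch, not part of the original
   source): a location description such as

       DW_OP_reg3  DW_OP_piece 4  DW_OP_reg4  DW_OP_piece 4

   describes an 8-byte object split across DWARF registers 3 and 4,
   four bytes in each.  Each DW_OP_piece reaches this function as one
   add_piece call (the size argument is in bits, so 8 * 4 = 32 per
   piece), with ctx->location still DWARF_VALUE_REGISTER from the
   preceding DW_OP_reg*. */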

/* Evaluate the expression at ADDR (LEN bytes long) using the context
   CTX. */

void
dwarf_expr_eval (struct dwarf_expr_context *ctx, const gdb_byte *addr,
                 size_t len)
{
  int old_recursion_depth = ctx->recursion_depth;

  execute_stack_op (ctx, addr, addr + len);

  /* CTX RECURSION_DEPTH becomes invalid if an exception was thrown here. */

  gdb_assert (ctx->recursion_depth == old_recursion_depth);
}
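
/* For illustration only (an editor's sketch of typical usage, not part
   of the original source): callers such as dwarf2loc.c generally drive
   the evaluator roughly like this, where FUNCS and BATON stand for a
   caller-provided dwarf_expr_context_funcs vtable and its state:

     struct dwarf_expr_context *ctx = new_dwarf_expr_context ();
     struct cleanup *old_chain = make_cleanup_free_dwarf_expr_context (ctx);

     ctx->gdbarch = gdbarch;
     ctx->addr_size = addr_size;
     ctx->funcs = funcs;
     ctx->baton = baton;

     dwarf_expr_eval (ctx, expr_bytes, expr_len);
     ...inspect ctx->location, ctx->pieces or dwarf_expr_fetch (ctx, 0)...

     do_cleanups (old_chain);  */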

/* Helper to read a uleb128 value or throw an error. */

const gdb_byte *
safe_read_uleb128 (const gdb_byte *buf, const gdb_byte *buf_end,
                   uint64_t *r)
{
  buf = gdb_read_uleb128 (buf, buf_end, r);
  if (buf == NULL)
    error (_("DWARF expression error: ran off end of buffer reading uleb128 value"));
  return buf;
}

/* Helper to read a sleb128 value or throw an error. */

const gdb_byte *
safe_read_sleb128 (const gdb_byte *buf, const gdb_byte *buf_end,
                   int64_t *r)
{
  buf = gdb_read_sleb128 (buf, buf_end, r);
  if (buf == NULL)
    error (_("DWARF expression error: ran off end of buffer reading sleb128 value"));
  return buf;
}

const gdb_byte *
safe_skip_leb128 (const gdb_byte *buf, const gdb_byte *buf_end)
{
  buf = gdb_skip_leb128 (buf, buf_end);
  if (buf == NULL)
    error (_("DWARF expression error: ran off end of buffer reading leb128 value"));
  return buf;
}
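
/* For illustration (not part of the original source): LEB128 is the
   variable-length encoding used throughout DWARF expressions.  Seven
   bits of the value are stored per byte, least significant group
   first, and every byte except the last has its high bit set.  For
   example, the unsigned value 624485 is encoded as the three bytes
   0xe5 0x8e 0x26, which safe_read_uleb128 above consumes and returns
   via *R. */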

/* Check that the current operator is either at the end of an
   expression, or that it is followed by a composition operator. */

void
dwarf_expr_require_composition (const gdb_byte *op_ptr, const gdb_byte *op_end,
                                const char *op_name)
{
  /* It seems like DW_OP_GNU_uninit should be handled here.  However,
     it doesn't seem to make sense for DW_OP_*_value, and it was not
     checked at the other place that this function is called. */
  if (op_ptr != op_end && *op_ptr != DW_OP_piece && *op_ptr != DW_OP_bit_piece)
    error (_("DWARF-2 expression error: `%s' operations must be "
             "used either alone or in conjunction with DW_OP_piece "
             "or DW_OP_bit_piece."),
           op_name);
}

/* Return true iff the types T1 and T2 are "the same".  This only does
   checks that might reasonably be needed to compare DWARF base
   types. */

static int
base_types_equal_p (struct type *t1, struct type *t2)
{
  if (TYPE_CODE (t1) != TYPE_CODE (t2))
    return 0;
  if (TYPE_UNSIGNED (t1) != TYPE_UNSIGNED (t2))
    return 0;
  return TYPE_LENGTH (t1) == TYPE_LENGTH (t2);
}

/* A convenience function to call get_base_type on CTX and return the
   result.  DIE is the DIE whose type we need.  SIZE is non-zero if
   this function should verify that the resulting type has the correct
   size. */

static struct type *
dwarf_get_base_type (struct dwarf_expr_context *ctx, cu_offset die, int size)
{
  struct type *result;

  if (ctx->funcs->get_base_type)
    {
      result = ctx->funcs->get_base_type (ctx, die);
      if (result == NULL)
        error (_("Could not find type for DW_OP_GNU_const_type"));
      if (size != 0 && TYPE_LENGTH (result) != size)
        error (_("DW_OP_GNU_const_type has different sizes for type and data"));
    }
  else
    /* Anything will do. */
    result = builtin_type (ctx->gdbarch)->builtin_int;

  return result;
}

/* If <BUF..BUF_END] contains DW_FORM_block* with single DW_OP_reg* return the
   DWARF register number.  Otherwise return -1. */

int
dwarf_block_to_dwarf_reg (const gdb_byte *buf, const gdb_byte *buf_end)
{
  uint64_t dwarf_reg;

  if (buf_end <= buf)
    return -1;
  if (*buf >= DW_OP_reg0 && *buf <= DW_OP_reg31)
    {
      if (buf_end - buf != 1)
        return -1;
      return *buf - DW_OP_reg0;
    }

  if (*buf == DW_OP_GNU_regval_type)
    {
      buf++;
      buf = gdb_read_uleb128 (buf, buf_end, &dwarf_reg);
      if (buf == NULL)
        return -1;
      buf = gdb_skip_leb128 (buf, buf_end);
      if (buf == NULL)
        return -1;
    }
  else if (*buf == DW_OP_regx)
    {
      buf++;
      buf = gdb_read_uleb128 (buf, buf_end, &dwarf_reg);
      if (buf == NULL)
        return -1;
    }
  else
    return -1;
  if (buf != buf_end || (int) dwarf_reg != dwarf_reg)
    return -1;
  return dwarf_reg;
}
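
/* For illustration (not part of the original source): given the
   one-byte block { DW_OP_reg3 } this returns 3, and given
   { DW_OP_regx, <uleb128 36> } it returns 36.  A block such as
   { DW_OP_breg3, <sleb128 0>, DW_OP_deref } does not match and yields
   -1; see dwarf_block_to_dwarf_reg_deref below for that form. */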

/* If <BUF..BUF_END] contains DW_FORM_block* with just DW_OP_breg*(0) and
   DW_OP_deref* return the DWARF register number.  Otherwise return -1.
   DEREF_SIZE_RETURN contains -1 for DW_OP_deref; otherwise it contains the
   size from DW_OP_deref_size. */

int
dwarf_block_to_dwarf_reg_deref (const gdb_byte *buf, const gdb_byte *buf_end,
                                CORE_ADDR *deref_size_return)
{
  uint64_t dwarf_reg;
  int64_t offset;

  if (buf_end <= buf)
    return -1;

  if (*buf >= DW_OP_breg0 && *buf <= DW_OP_breg31)
    {
      dwarf_reg = *buf - DW_OP_breg0;
      buf++;
      if (buf >= buf_end)
        return -1;
    }
  else if (*buf == DW_OP_bregx)
    {
      buf++;
      buf = gdb_read_uleb128 (buf, buf_end, &dwarf_reg);
      if (buf == NULL)
        return -1;
      if ((int) dwarf_reg != dwarf_reg)
        return -1;
    }
  else
    return -1;

  buf = gdb_read_sleb128 (buf, buf_end, &offset);
  if (buf == NULL)
    return -1;
  if (offset != 0)
    return -1;

  if (*buf == DW_OP_deref)
    {
      buf++;
      *deref_size_return = -1;
    }
  else if (*buf == DW_OP_deref_size)
    {
      buf++;
      if (buf >= buf_end)
        return -1;
      *deref_size_return = *buf++;
    }
  else
    return -1;

  if (buf != buf_end)
    return -1;

  return dwarf_reg;
}
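
/* For illustration (not part of the original source): the block
   { DW_OP_breg6, <sleb128 0>, DW_OP_deref } matches and returns 6
   with *DEREF_SIZE_RETURN set to -1, while
   { DW_OP_bregx, <uleb128 42>, <sleb128 0>, DW_OP_deref_size, 4 }
   returns 42 with *DEREF_SIZE_RETURN set to 4.  A non-zero register
   offset makes the block fail to match. */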

/* If <BUF..BUF_END] contains DW_FORM_block* with single DW_OP_fbreg(X) fill
   in FB_OFFSET_RETURN with the X offset and return 1.  Otherwise return 0. */

int
dwarf_block_to_fb_offset (const gdb_byte *buf, const gdb_byte *buf_end,
                          CORE_ADDR *fb_offset_return)
{
  int64_t fb_offset;

  if (buf_end <= buf)
    return 0;

  if (*buf != DW_OP_fbreg)
    return 0;
  buf++;

  buf = gdb_read_sleb128 (buf, buf_end, &fb_offset);
  if (buf == NULL)
    return 0;
  *fb_offset_return = fb_offset;
  if (buf != buf_end || fb_offset != (LONGEST) *fb_offset_return)
    return 0;

  return 1;
}

/* If <BUF..BUF_END] contains DW_FORM_block* with single DW_OP_bregSP(X) fill
   in SP_OFFSET_RETURN with the X offset and return 1.  Otherwise return 0.
   The matched SP register number depends on GDBARCH. */

int
dwarf_block_to_sp_offset (struct gdbarch *gdbarch, const gdb_byte *buf,
                          const gdb_byte *buf_end, CORE_ADDR *sp_offset_return)
{
  uint64_t dwarf_reg;
  int64_t sp_offset;

  if (buf_end <= buf)
    return 0;
  if (*buf >= DW_OP_breg0 && *buf <= DW_OP_breg31)
    {
      dwarf_reg = *buf - DW_OP_breg0;
      buf++;
    }
  else
    {
      if (*buf != DW_OP_bregx)
        return 0;
      buf++;
      buf = gdb_read_uleb128 (buf, buf_end, &dwarf_reg);
      if (buf == NULL)
        return 0;
    }

  if (gdbarch_dwarf2_reg_to_regnum (gdbarch, dwarf_reg)
      != gdbarch_sp_regnum (gdbarch))
    return 0;

  buf = gdb_read_sleb128 (buf, buf_end, &sp_offset);
  if (buf == NULL)
    return 0;
  *sp_offset_return = sp_offset;
  if (buf != buf_end || sp_offset != (LONGEST) *sp_offset_return)
    return 0;

  return 1;
}
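
/* For illustration (not part of the original source, and assuming the
   x86-64 DWARF numbering in which register 7 is the stack pointer):
   on such a target the block { DW_OP_breg7, <sleb128 16> } matches and
   stores 16 in *SP_OFFSET_RETURN, whereas the same block on a target
   whose SP maps to a different DWARF register returns 0. */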

/* The engine for the expression evaluator.  Using the context in CTX,
   evaluate the expression between OP_PTR and OP_END. */

static void
execute_stack_op (struct dwarf_expr_context *ctx,
                  const gdb_byte *op_ptr, const gdb_byte *op_end)
{
  enum bfd_endian byte_order = gdbarch_byte_order (ctx->gdbarch);
  /* Old-style "untyped" DWARF values need special treatment in a
     couple of places, specifically DW_OP_mod and DW_OP_shr.  We need
     a special type for these values so we can distinguish them from
     values that have an explicit type, because explicitly-typed
     values do not need special treatment.  This special type must be
     different (in the `==' sense) from any base type coming from the
     CU. */
  struct type *address_type = dwarf_expr_address_type (ctx);

  ctx->location = DWARF_VALUE_MEMORY;
  ctx->initialized = 1;  /* Default is initialized. */

  if (ctx->recursion_depth > ctx->max_recursion_depth)
    error (_("DWARF-2 expression error: Loop detected (%d)."),
           ctx->recursion_depth);
  ctx->recursion_depth++;

  while (op_ptr < op_end)
    {
      enum dwarf_location_atom op = *op_ptr++;
      ULONGEST result;
      /* Assume the value is not in stack memory.
         Code that knows otherwise sets this to 1.
         Some arithmetic on stack addresses can probably be assumed to still
         be a stack address, but we skip this complication for now.
         This is just an optimization, so it's always ok to punt
         and leave this as 0. */
      int in_stack_memory = 0;
      uint64_t uoffset, reg;
      int64_t offset;
      struct value *result_val = NULL;

      /* The DWARF expression might have a bug causing an infinite
         loop.  In that case, quitting is the only way out. */
      QUIT;

      switch (op)
        {
        case DW_OP_lit0:
        case DW_OP_lit1:
        case DW_OP_lit2:
        case DW_OP_lit3:
        case DW_OP_lit4:
        case DW_OP_lit5:
        case DW_OP_lit6:
        case DW_OP_lit7:
        case DW_OP_lit8:
        case DW_OP_lit9:
        case DW_OP_lit10:
        case DW_OP_lit11:
        case DW_OP_lit12:
        case DW_OP_lit13:
        case DW_OP_lit14:
        case DW_OP_lit15:
        case DW_OP_lit16:
        case DW_OP_lit17:
        case DW_OP_lit18:
        case DW_OP_lit19:
        case DW_OP_lit20:
        case DW_OP_lit21:
        case DW_OP_lit22:
        case DW_OP_lit23:
        case DW_OP_lit24:
        case DW_OP_lit25:
        case DW_OP_lit26:
        case DW_OP_lit27:
        case DW_OP_lit28:
        case DW_OP_lit29:
        case DW_OP_lit30:
        case DW_OP_lit31:
          result = op - DW_OP_lit0;
          result_val = value_from_ulongest (address_type, result);
          break;

        case DW_OP_addr:
          result = extract_unsigned_integer (op_ptr,
                                             ctx->addr_size, byte_order);
          op_ptr += ctx->addr_size;
          /* Some versions of GCC emit DW_OP_addr before
             DW_OP_GNU_push_tls_address.  In this case the value is an
             index, not an address.  We don't support things like
             branching between the address and the TLS op. */
          if (op_ptr >= op_end || *op_ptr != DW_OP_GNU_push_tls_address)
            result += ctx->offset;
          result_val = value_from_ulongest (address_type, result);
          break;

        case DW_OP_GNU_addr_index:
          op_ptr = safe_read_uleb128 (op_ptr, op_end, &uoffset);
          result = (ctx->funcs->get_addr_index) (ctx->baton, uoffset);
          result += ctx->offset;
          result_val = value_from_ulongest (address_type, result);
          break;
        case DW_OP_GNU_const_index:
          op_ptr = safe_read_uleb128 (op_ptr, op_end, &uoffset);
          result = (ctx->funcs->get_addr_index) (ctx->baton, uoffset);
          result_val = value_from_ulongest (address_type, result);
          break;

        case DW_OP_const1u:
          result = extract_unsigned_integer (op_ptr, 1, byte_order);
          result_val = value_from_ulongest (address_type, result);
          op_ptr += 1;
          break;
        case DW_OP_const1s:
          result = extract_signed_integer (op_ptr, 1, byte_order);
          result_val = value_from_ulongest (address_type, result);
          op_ptr += 1;
          break;
        case DW_OP_const2u:
          result = extract_unsigned_integer (op_ptr, 2, byte_order);
          result_val = value_from_ulongest (address_type, result);
          op_ptr += 2;
          break;
        case DW_OP_const2s:
          result = extract_signed_integer (op_ptr, 2, byte_order);
          result_val = value_from_ulongest (address_type, result);
          op_ptr += 2;
          break;
        case DW_OP_const4u:
          result = extract_unsigned_integer (op_ptr, 4, byte_order);
          result_val = value_from_ulongest (address_type, result);
          op_ptr += 4;
          break;
        case DW_OP_const4s:
          result = extract_signed_integer (op_ptr, 4, byte_order);
          result_val = value_from_ulongest (address_type, result);
          op_ptr += 4;
          break;
        case DW_OP_const8u:
          result = extract_unsigned_integer (op_ptr, 8, byte_order);
          result_val = value_from_ulongest (address_type, result);
          op_ptr += 8;
          break;
        case DW_OP_const8s:
          result = extract_signed_integer (op_ptr, 8, byte_order);
          result_val = value_from_ulongest (address_type, result);
          op_ptr += 8;
          break;
        case DW_OP_constu:
          op_ptr = safe_read_uleb128 (op_ptr, op_end, &uoffset);
          result = uoffset;
          result_val = value_from_ulongest (address_type, result);
          break;
        case DW_OP_consts:
          op_ptr = safe_read_sleb128 (op_ptr, op_end, &offset);
          result = offset;
          result_val = value_from_ulongest (address_type, result);
          break;

          /* The DW_OP_reg operations are required to occur alone in
             location expressions. */
        case DW_OP_reg0:
        case DW_OP_reg1:
        case DW_OP_reg2:
        case DW_OP_reg3:
        case DW_OP_reg4:
        case DW_OP_reg5:
        case DW_OP_reg6:
        case DW_OP_reg7:
        case DW_OP_reg8:
        case DW_OP_reg9:
        case DW_OP_reg10:
        case DW_OP_reg11:
        case DW_OP_reg12:
        case DW_OP_reg13:
        case DW_OP_reg14:
        case DW_OP_reg15:
        case DW_OP_reg16:
        case DW_OP_reg17:
        case DW_OP_reg18:
        case DW_OP_reg19:
        case DW_OP_reg20:
        case DW_OP_reg21:
        case DW_OP_reg22:
        case DW_OP_reg23:
        case DW_OP_reg24:
        case DW_OP_reg25:
        case DW_OP_reg26:
        case DW_OP_reg27:
        case DW_OP_reg28:
        case DW_OP_reg29:
        case DW_OP_reg30:
        case DW_OP_reg31:
          if (op_ptr != op_end
              && *op_ptr != DW_OP_piece
              && *op_ptr != DW_OP_bit_piece
              && *op_ptr != DW_OP_GNU_uninit)
            error (_("DWARF-2 expression error: DW_OP_reg operations must be "
                     "used either alone or in conjunction with DW_OP_piece "
                     "or DW_OP_bit_piece."));

          result = op - DW_OP_reg0;
          result_val = value_from_ulongest (address_type, result);
          ctx->location = DWARF_VALUE_REGISTER;
          break;

        case DW_OP_regx:
          op_ptr = safe_read_uleb128 (op_ptr, op_end, &reg);
          dwarf_expr_require_composition (op_ptr, op_end, "DW_OP_regx");

          result = reg;
          result_val = value_from_ulongest (address_type, result);
          ctx->location = DWARF_VALUE_REGISTER;
          break;

        case DW_OP_implicit_value:
          {
            uint64_t len;

            op_ptr = safe_read_uleb128 (op_ptr, op_end, &len);
            if (op_ptr + len > op_end)
              error (_("DW_OP_implicit_value: too few bytes available."));
            ctx->len = len;
            ctx->data = op_ptr;
            ctx->location = DWARF_VALUE_LITERAL;
            op_ptr += len;
            dwarf_expr_require_composition (op_ptr, op_end,
                                            "DW_OP_implicit_value");
          }
          goto no_push;

        case DW_OP_stack_value:
          ctx->location = DWARF_VALUE_STACK;
          dwarf_expr_require_composition (op_ptr, op_end, "DW_OP_stack_value");
          goto no_push;

        case DW_OP_GNU_implicit_pointer:
          {
            int64_t len;

            if (ctx->ref_addr_size == -1)
              error (_("DWARF-2 expression error: DW_OP_GNU_implicit_pointer "
                       "is not allowed in frame context"));

            /* The referred-to DIE of sect_offset kind. */
            ctx->len = extract_unsigned_integer (op_ptr, ctx->ref_addr_size,
                                                 byte_order);
            op_ptr += ctx->ref_addr_size;

            /* The byte offset into the data. */
            op_ptr = safe_read_sleb128 (op_ptr, op_end, &len);
            result = (ULONGEST) len;
            result_val = value_from_ulongest (address_type, result);

            ctx->location = DWARF_VALUE_IMPLICIT_POINTER;
            dwarf_expr_require_composition (op_ptr, op_end,
                                            "DW_OP_GNU_implicit_pointer");
          }
          break;

        case DW_OP_breg0:
        case DW_OP_breg1:
        case DW_OP_breg2:
        case DW_OP_breg3:
        case DW_OP_breg4:
        case DW_OP_breg5:
        case DW_OP_breg6:
        case DW_OP_breg7:
        case DW_OP_breg8:
        case DW_OP_breg9:
        case DW_OP_breg10:
        case DW_OP_breg11:
        case DW_OP_breg12:
        case DW_OP_breg13:
        case DW_OP_breg14:
        case DW_OP_breg15:
        case DW_OP_breg16:
        case DW_OP_breg17:
        case DW_OP_breg18:
        case DW_OP_breg19:
        case DW_OP_breg20:
        case DW_OP_breg21:
        case DW_OP_breg22:
        case DW_OP_breg23:
        case DW_OP_breg24:
        case DW_OP_breg25:
        case DW_OP_breg26:
        case DW_OP_breg27:
        case DW_OP_breg28:
        case DW_OP_breg29:
        case DW_OP_breg30:
        case DW_OP_breg31:
          {
            op_ptr = safe_read_sleb128 (op_ptr, op_end, &offset);
            result = (ctx->funcs->read_reg) (ctx->baton, op - DW_OP_breg0);
            result += offset;
            result_val = value_from_ulongest (address_type, result);
          }
          break;
        case DW_OP_bregx:
          {
            op_ptr = safe_read_uleb128 (op_ptr, op_end, &reg);
            op_ptr = safe_read_sleb128 (op_ptr, op_end, &offset);
            result = (ctx->funcs->read_reg) (ctx->baton, reg);
            result += offset;
            result_val = value_from_ulongest (address_type, result);
          }
          break;
        case DW_OP_fbreg:
          {
            const gdb_byte *datastart;
            size_t datalen;
            unsigned int before_stack_len;

            op_ptr = safe_read_sleb128 (op_ptr, op_end, &offset);
            /* Rather than create a whole new context, we simply
               record the stack length before execution, then reset it
               afterwards, effectively erasing whatever the recursive
               call put there. */
            before_stack_len = ctx->stack_len;
            /* FIXME: cagney/2003-03-26: This code should be using
               get_frame_base_address(), and then implement a dwarf2
               specific this_base method. */
            (ctx->funcs->get_frame_base) (ctx->baton, &datastart, &datalen);
            dwarf_expr_eval (ctx, datastart, datalen);
            if (ctx->location == DWARF_VALUE_MEMORY)
              result = dwarf_expr_fetch_address (ctx, 0);
            else if (ctx->location == DWARF_VALUE_REGISTER)
              result = (ctx->funcs->read_reg) (ctx->baton,
                          value_as_long (dwarf_expr_fetch (ctx, 0)));
            else
              error (_("Not implemented: computing frame "
                       "base using explicit value operator"));
            result = result + offset;
            result_val = value_from_ulongest (address_type, result);
            in_stack_memory = 1;
            ctx->stack_len = before_stack_len;
            ctx->location = DWARF_VALUE_MEMORY;
          }
          break;

        case DW_OP_dup:
          result_val = dwarf_expr_fetch (ctx, 0);
          in_stack_memory = dwarf_expr_fetch_in_stack_memory (ctx, 0);
          break;

        case DW_OP_drop:
          dwarf_expr_pop (ctx);
          goto no_push;

        case DW_OP_pick:
          offset = *op_ptr++;
          result_val = dwarf_expr_fetch (ctx, offset);
          in_stack_memory = dwarf_expr_fetch_in_stack_memory (ctx, offset);
          break;

        case DW_OP_swap:
          {
            struct dwarf_stack_value t1, t2;

            if (ctx->stack_len < 2)
              error (_("Not enough elements for "
                       "DW_OP_swap.  Need 2, have %d."),
                     ctx->stack_len);
            t1 = ctx->stack[ctx->stack_len - 1];
            t2 = ctx->stack[ctx->stack_len - 2];
            ctx->stack[ctx->stack_len - 1] = t2;
            ctx->stack[ctx->stack_len - 2] = t1;
            goto no_push;
          }

        case DW_OP_over:
          result_val = dwarf_expr_fetch (ctx, 1);
          in_stack_memory = dwarf_expr_fetch_in_stack_memory (ctx, 1);
          break;

        case DW_OP_rot:
          {
            struct dwarf_stack_value t1, t2, t3;

            if (ctx->stack_len < 3)
              error (_("Not enough elements for "
                       "DW_OP_rot.  Need 3, have %d."),
                     ctx->stack_len);
            t1 = ctx->stack[ctx->stack_len - 1];
            t2 = ctx->stack[ctx->stack_len - 2];
            t3 = ctx->stack[ctx->stack_len - 3];
            ctx->stack[ctx->stack_len - 1] = t2;
            ctx->stack[ctx->stack_len - 2] = t3;
            ctx->stack[ctx->stack_len - 3] = t1;
            goto no_push;
          }

        case DW_OP_deref:
        case DW_OP_deref_size:
        case DW_OP_GNU_deref_type:
          {
            int addr_size = (op == DW_OP_deref ? ctx->addr_size : *op_ptr++);
            gdb_byte *buf = alloca (addr_size);
            CORE_ADDR addr = dwarf_expr_fetch_address (ctx, 0);
            struct type *type;

            dwarf_expr_pop (ctx);

            if (op == DW_OP_GNU_deref_type)
              {
                cu_offset type_die;

                op_ptr = safe_read_uleb128 (op_ptr, op_end, &uoffset);
                type_die.cu_off = uoffset;
                type = dwarf_get_base_type (ctx, type_die, 0);
              }
            else
              type = address_type;

            (ctx->funcs->read_mem) (ctx->baton, buf, addr, addr_size);

            /* If the size of the object read from memory is different
               from the type length, we need to zero-extend it. */
            if (TYPE_LENGTH (type) != addr_size)
              {
                ULONGEST result =
                  extract_unsigned_integer (buf, addr_size, byte_order);

                buf = alloca (TYPE_LENGTH (type));
                store_unsigned_integer (buf, TYPE_LENGTH (type),
                                        byte_order, result);
              }

            result_val = value_from_contents_and_address (type, buf, addr);
            break;
          }

        case DW_OP_abs:
        case DW_OP_neg:
        case DW_OP_not:
        case DW_OP_plus_uconst:
          {
            /* Unary operations. */
            result_val = dwarf_expr_fetch (ctx, 0);
            dwarf_expr_pop (ctx);

            switch (op)
              {
              case DW_OP_abs:
                if (value_less (result_val,
                                value_zero (value_type (result_val), not_lval)))
                  result_val = value_neg (result_val);
                break;
              case DW_OP_neg:
                result_val = value_neg (result_val);
                break;
              case DW_OP_not:
                dwarf_require_integral (value_type (result_val));
                result_val = value_complement (result_val);
                break;
              case DW_OP_plus_uconst:
                dwarf_require_integral (value_type (result_val));
                result = value_as_long (result_val);
                op_ptr = safe_read_uleb128 (op_ptr, op_end, &reg);
                result += reg;
                result_val = value_from_ulongest (address_type, result);
                break;
              }
          }
          break;

        case DW_OP_and:
        case DW_OP_div:
        case DW_OP_minus:
        case DW_OP_mod:
        case DW_OP_mul:
        case DW_OP_or:
        case DW_OP_plus:
        case DW_OP_shl:
        case DW_OP_shr:
        case DW_OP_shra:
        case DW_OP_xor:
        case DW_OP_le:
        case DW_OP_ge:
        case DW_OP_eq:
        case DW_OP_lt:
        case DW_OP_gt:
        case DW_OP_ne:
          {
            /* Binary operations. */
            struct value *first, *second;

            second = dwarf_expr_fetch (ctx, 0);
            dwarf_expr_pop (ctx);

            first = dwarf_expr_fetch (ctx, 0);
            dwarf_expr_pop (ctx);

            if (! base_types_equal_p (value_type (first), value_type (second)))
              error (_("Incompatible types on DWARF stack"));

            switch (op)
              {
              case DW_OP_and:
                dwarf_require_integral (value_type (first));
                dwarf_require_integral (value_type (second));
                result_val = value_binop (first, second, BINOP_BITWISE_AND);
                break;
              case DW_OP_div:
                result_val = value_binop (first, second, BINOP_DIV);
                break;
              case DW_OP_minus:
                result_val = value_binop (first, second, BINOP_SUB);
                break;
              case DW_OP_mod:
                {
                  int cast_back = 0;
                  struct type *orig_type = value_type (first);

                  /* We have to special-case "old-style" untyped values
                     -- these must have mod computed using unsigned
                     math. */
                  if (orig_type == address_type)
                    {
                      struct type *utype
                        = get_unsigned_type (ctx->gdbarch, orig_type);

                      cast_back = 1;
                      first = value_cast (utype, first);
                      second = value_cast (utype, second);
                    }
                  /* Note that value_binop doesn't handle float or
                     decimal float here.  This seems unimportant. */
                  result_val = value_binop (first, second, BINOP_MOD);
                  if (cast_back)
                    result_val = value_cast (orig_type, result_val);
                }
                break;
              case DW_OP_mul:
                result_val = value_binop (first, second, BINOP_MUL);
                break;
              case DW_OP_or:
                dwarf_require_integral (value_type (first));
                dwarf_require_integral (value_type (second));
                result_val = value_binop (first, second, BINOP_BITWISE_IOR);
                break;
              case DW_OP_plus:
                result_val = value_binop (first, second, BINOP_ADD);
                break;
              case DW_OP_shl:
                dwarf_require_integral (value_type (first));
                dwarf_require_integral (value_type (second));
                result_val = value_binop (first, second, BINOP_LSH);
                break;
              case DW_OP_shr:
                dwarf_require_integral (value_type (first));
                dwarf_require_integral (value_type (second));
                if (!TYPE_UNSIGNED (value_type (first)))
                  {
                    struct type *utype
                      = get_unsigned_type (ctx->gdbarch, value_type (first));

                    first = value_cast (utype, first);
                  }

                result_val = value_binop (first, second, BINOP_RSH);
                /* Make sure we wind up with the same type we started
                   with. */
                if (value_type (result_val) != value_type (second))
                  result_val = value_cast (value_type (second), result_val);
                break;
              case DW_OP_shra:
                dwarf_require_integral (value_type (first));
                dwarf_require_integral (value_type (second));
                if (TYPE_UNSIGNED (value_type (first)))
                  {
                    struct type *stype
                      = get_signed_type (ctx->gdbarch, value_type (first));

                    first = value_cast (stype, first);
                  }

                result_val = value_binop (first, second, BINOP_RSH);
                /* Make sure we wind up with the same type we started
                   with. */
                if (value_type (result_val) != value_type (second))
                  result_val = value_cast (value_type (second), result_val);
                break;
              case DW_OP_xor:
                dwarf_require_integral (value_type (first));
                dwarf_require_integral (value_type (second));
                result_val = value_binop (first, second, BINOP_BITWISE_XOR);
                break;
              case DW_OP_le:
                /* A <= B is !(B < A). */
                result = ! value_less (second, first);
                result_val = value_from_ulongest (address_type, result);
                break;
              case DW_OP_ge:
                /* A >= B is !(A < B). */
                result = ! value_less (first, second);
                result_val = value_from_ulongest (address_type, result);
                break;
              case DW_OP_eq:
                result = value_equal (first, second);
                result_val = value_from_ulongest (address_type, result);
                break;
              case DW_OP_lt:
                result = value_less (first, second);
                result_val = value_from_ulongest (address_type, result);
                break;
              case DW_OP_gt:
                /* A > B is B < A. */
                result = value_less (second, first);
                result_val = value_from_ulongest (address_type, result);
                break;
              case DW_OP_ne:
                result = ! value_equal (first, second);
                result_val = value_from_ulongest (address_type, result);
                break;
              default:
                internal_error (__FILE__, __LINE__,
                                _("Can't be reached."));
              }
          }
          break;

        case DW_OP_call_frame_cfa:
          result = (ctx->funcs->get_frame_cfa) (ctx->baton);
          result_val = value_from_ulongest (address_type, result);
          in_stack_memory = 1;
          break;

        case DW_OP_GNU_push_tls_address:
          /* Variable is at a constant offset in the thread-local
             storage block into the objfile for the current thread and
             the dynamic linker module containing this expression.  Here
             we return the offset from that base.  The top of the
             stack has the offset from the beginning of the thread
             control block at which the variable is located.  Nothing
             should follow this operator, so the top of stack would be
             returned. */
          result = value_as_long (dwarf_expr_fetch (ctx, 0));
          dwarf_expr_pop (ctx);
          result = (ctx->funcs->get_tls_address) (ctx->baton, result);
          result_val = value_from_ulongest (address_type, result);
          break;

        case DW_OP_skip:
          offset = extract_signed_integer (op_ptr, 2, byte_order);
          op_ptr += 2;
          op_ptr += offset;
          goto no_push;

        case DW_OP_bra:
          {
            struct value *val;

            offset = extract_signed_integer (op_ptr, 2, byte_order);
            op_ptr += 2;
            val = dwarf_expr_fetch (ctx, 0);
            dwarf_require_integral (value_type (val));
            if (value_as_long (val) != 0)
              op_ptr += offset;
            dwarf_expr_pop (ctx);
          }
          goto no_push;

        case DW_OP_nop:
          goto no_push;

        case DW_OP_piece:
          {
            uint64_t size;

            /* Record the piece. */
            op_ptr = safe_read_uleb128 (op_ptr, op_end, &size);
            add_piece (ctx, 8 * size, 0);

            /* Pop off the address/regnum, and reset the location
               type. */
            if (ctx->location != DWARF_VALUE_LITERAL
                && ctx->location != DWARF_VALUE_OPTIMIZED_OUT)
              dwarf_expr_pop (ctx);
            ctx->location = DWARF_VALUE_MEMORY;
          }
          goto no_push;

        case DW_OP_bit_piece:
          {
            uint64_t size, offset;

            /* Record the piece. */
            op_ptr = safe_read_uleb128 (op_ptr, op_end, &size);
            op_ptr = safe_read_uleb128 (op_ptr, op_end, &offset);
            add_piece (ctx, size, offset);

            /* Pop off the address/regnum, and reset the location
               type. */
            if (ctx->location != DWARF_VALUE_LITERAL
                && ctx->location != DWARF_VALUE_OPTIMIZED_OUT)
              dwarf_expr_pop (ctx);
            ctx->location = DWARF_VALUE_MEMORY;
          }
          goto no_push;

        case DW_OP_GNU_uninit:
          if (op_ptr != op_end)
            error (_("DWARF-2 expression error: DW_OP_GNU_uninit must always "
                     "be the very last op."));

          ctx->initialized = 0;
          goto no_push;

        case DW_OP_call2:
          {
            cu_offset offset;

            offset.cu_off = extract_unsigned_integer (op_ptr, 2, byte_order);
            op_ptr += 2;
            ctx->funcs->dwarf_call (ctx, offset);
          }
          goto no_push;

        case DW_OP_call4:
          {
            cu_offset offset;

            offset.cu_off = extract_unsigned_integer (op_ptr, 4, byte_order);
            op_ptr += 4;
            ctx->funcs->dwarf_call (ctx, offset);
          }
          goto no_push;

        case DW_OP_GNU_entry_value:
          {
            uint64_t len;
            CORE_ADDR deref_size;
            union call_site_parameter_u kind_u;

            op_ptr = safe_read_uleb128 (op_ptr, op_end, &len);
            if (op_ptr + len > op_end)
              error (_("DW_OP_GNU_entry_value: too few bytes available."));

            kind_u.dwarf_reg = dwarf_block_to_dwarf_reg (op_ptr, op_ptr + len);
            if (kind_u.dwarf_reg != -1)
              {
                op_ptr += len;
                ctx->funcs->push_dwarf_reg_entry_value (ctx,
                                                  CALL_SITE_PARAMETER_DWARF_REG,
                                                        kind_u,
                                                        -1 /* deref_size */);
                goto no_push;
              }

            kind_u.dwarf_reg = dwarf_block_to_dwarf_reg_deref (op_ptr,
                                                               op_ptr + len,
                                                               &deref_size);
            if (kind_u.dwarf_reg != -1)
              {
                if (deref_size == -1)
                  deref_size = ctx->addr_size;
                op_ptr += len;
                ctx->funcs->push_dwarf_reg_entry_value (ctx,
                                                  CALL_SITE_PARAMETER_DWARF_REG,
                                                        kind_u, deref_size);
                goto no_push;
              }

            error (_("DWARF-2 expression error: DW_OP_GNU_entry_value is "
                     "supported only for single DW_OP_reg* "
                     "or for DW_OP_breg*(0)+DW_OP_deref*"));
          }

        case DW_OP_GNU_parameter_ref:
          {
            union call_site_parameter_u kind_u;

            kind_u.param_offset.cu_off = extract_unsigned_integer (op_ptr, 4,
                                                                   byte_order);
            op_ptr += 4;
            ctx->funcs->push_dwarf_reg_entry_value (ctx,
                                               CALL_SITE_PARAMETER_PARAM_OFFSET,
                                                    kind_u,
                                                    -1 /* deref_size */);
          }
          goto no_push;

        case DW_OP_GNU_const_type:
          {
            cu_offset type_die;
            int n;
            const gdb_byte *data;
            struct type *type;

            op_ptr = safe_read_uleb128 (op_ptr, op_end, &uoffset);
            type_die.cu_off = uoffset;
            n = *op_ptr++;
            data = op_ptr;
            op_ptr += n;

            type = dwarf_get_base_type (ctx, type_die, n);
            result_val = value_from_contents (type, data);
          }
          break;

        case DW_OP_GNU_regval_type:
          {
            cu_offset type_die;
            struct type *type;

            op_ptr = safe_read_uleb128 (op_ptr, op_end, &reg);
            op_ptr = safe_read_uleb128 (op_ptr, op_end, &uoffset);
            type_die.cu_off = uoffset;

            type = dwarf_get_base_type (ctx, type_die, 0);
            result = (ctx->funcs->read_reg) (ctx->baton, reg);
            result_val = value_from_ulongest (address_type, result);
            result_val = value_from_contents (type,
                                              value_contents_all (result_val));
          }
          break;

        case DW_OP_GNU_convert:
        case DW_OP_GNU_reinterpret:
          {
            cu_offset type_die;
            struct type *type;

            op_ptr = safe_read_uleb128 (op_ptr, op_end, &uoffset);
            type_die.cu_off = uoffset;

            if (type_die.cu_off == 0)
              type = address_type;
            else
              type = dwarf_get_base_type (ctx, type_die, 0);

            result_val = dwarf_expr_fetch (ctx, 0);
            dwarf_expr_pop (ctx);

            if (op == DW_OP_GNU_convert)
              result_val = value_cast (type, result_val);
            else if (type == value_type (result_val))
              {
                /* Nothing. */
              }
            else if (TYPE_LENGTH (type)
                     != TYPE_LENGTH (value_type (result_val)))
              error (_("DW_OP_GNU_reinterpret has wrong size"));
            else
              result_val
                = value_from_contents (type,
                                       value_contents_all (result_val));
          }
          break;

        default:
          error (_("Unhandled dwarf expression opcode 0x%x"), op);
        }

      /* Most things push a result value. */
      gdb_assert (result_val != NULL);
      dwarf_expr_push (ctx, result_val, in_stack_memory);
    no_push:
      ;
    }

  /* To simplify our main caller, if the result is an implicit
     pointer, then make a pieced value.  This is ok because we can't
     have implicit pointers in contexts where pieces are invalid. */
  if (ctx->location == DWARF_VALUE_IMPLICIT_POINTER)
    add_piece (ctx, 8 * ctx->addr_size, 0);

abort_expression:
  ctx->recursion_depth--;
  gdb_assert (ctx->recursion_depth >= 0);
}
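
/* For illustration (not part of the original source): the evaluator
   above is a simple stack machine.  Evaluating the three-byte
   expression { DW_OP_lit4, DW_OP_lit3, DW_OP_plus } pushes 4, pushes
   3, then pops both and pushes their sum, leaving a single stack
   entry of 7 with ctx->location still DWARF_VALUE_MEMORY; that is,
   the result is interpreted as an address unless an operation such as
   DW_OP_stack_value or DW_OP_reg* says otherwise. */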

/* Stub dwarf_expr_context_funcs.get_frame_base implementation. */

void
ctx_no_get_frame_base (void *baton, const gdb_byte **start, size_t *length)
{
  error (_("%s is invalid in this context"), "DW_OP_fbreg");
}

/* Stub dwarf_expr_context_funcs.get_frame_cfa implementation. */

CORE_ADDR
ctx_no_get_frame_cfa (void *baton)
{
  error (_("%s is invalid in this context"), "DW_OP_call_frame_cfa");
}

/* Stub dwarf_expr_context_funcs.get_frame_pc implementation. */

CORE_ADDR
ctx_no_get_frame_pc (void *baton)
{
  error (_("%s is invalid in this context"), "DW_OP_GNU_implicit_pointer");
}

/* Stub dwarf_expr_context_funcs.get_tls_address implementation. */

CORE_ADDR
ctx_no_get_tls_address (void *baton, CORE_ADDR offset)
{
  error (_("%s is invalid in this context"), "DW_OP_GNU_push_tls_address");
}

/* Stub dwarf_expr_context_funcs.dwarf_call implementation. */

void
ctx_no_dwarf_call (struct dwarf_expr_context *ctx, cu_offset die_offset)
{
  error (_("%s is invalid in this context"), "DW_OP_call*");
}

/* Stub dwarf_expr_context_funcs.get_base_type implementation. */

struct type *
ctx_no_get_base_type (struct dwarf_expr_context *ctx, cu_offset die)
{
  error (_("Support for typed DWARF is not supported in this context"));
}

/* Stub dwarf_expr_context_funcs.push_dwarf_block_entry_value
   implementation. */

void
ctx_no_push_dwarf_reg_entry_value (struct dwarf_expr_context *ctx,
                                   enum call_site_parameter_kind kind,
                                   union call_site_parameter_u kind_u,
                                   int deref_size)
{
  internal_error (__FILE__, __LINE__,
                  _("Support for DW_OP_GNU_entry_value is unimplemented"));
}

/* Stub dwarf_expr_context_funcs.get_addr_index implementation. */

CORE_ADDR
ctx_no_get_addr_index (void *baton, unsigned int index)
{
  error (_("%s is invalid in this context"), "DW_OP_GNU_addr_index");
}

/* Provide a prototype to silence -Wmissing-prototypes. */
extern initialize_file_ftype _initialize_dwarf2expr;

void
_initialize_dwarf2expr (void)
{
  dwarf_arch_cookie
    = gdbarch_data_register_post_init (dwarf_gdbarch_types_init);
}