/*
 * RESOLVE.C - Resolve the parser tree and prepare for code generation or
 *	       interpretation.
 *
 * (c)Copyright 1993-2016, Matthew Dillon, All Rights Reserved.  See the
 * COPYRIGHT file at the base of the distribution.
 *
 * Pass1 - ResolveClasses() - Handles superclass/subclass merging for the
 *	   entire import set.
 *
 * Pass2 - Resolve*() - Resolves identifiers and identifier paths, plus
 *	   the size and alignment for Types, Decls, and SemGroups.
 *
 * Utilizes a deferred work mechanic to avoid circular loops.  This
 * mechanism allows types to be partially resolved (enough to satisfy the
 * caller), then finishes up via the deferred work queue.
 */
#include "defs.h"
#include <dlfcn.h>		/* dlsym() - DLL-bound imports below */

struct ResVis;

static void ResolveClasses(Stmt *st, int flags);
static void ResolveAlignment(Stmt *st, int flags);
static void ResolveStorage(Stmt *st, int flags);
static void ResolveSemGroup(SemGroup *sg, int retry);
static void errorDottedId(runeid_t *ary, const char *ctl, ...);
static void ResolveStmt(SemGroup *isg, Stmt *st, int flags);
static Type *ResolveType(Type *type, struct ResVis *vis, int retry);
static void ResolveDecl(Declaration *d, int retry);
static Exp *ResolveExp(SemGroup *isg, SemGroup *sg, Exp *exp,
			Type *itype, int flags);
static Type *resolveReturnType(SemGroup *sg, int flags);
static Type *resolveArgsType(SemGroup *sg, int flags);
static Exp *resolveConstExp(SemGroup *isg, SemGroup *sg, Exp *exp,
			int flags);
static Exp *resolveConstExpBool(SemGroup *isg, SemGroup *sg, Exp *exp,
			int flags, TmpData *ts);
static Exp *resolveCompoundExp(SemGroup *isg, SemGroup *sg, Exp *exp,
			Type *itype, int flags);
static Exp *resolveBracketedExp(SemGroup *isg, SemGroup *sg, Exp *exp,
			Type *itype, int flags);
static Exp *resolveExpCast(SemGroup *isg, SemGroup *sg, Exp *exp,
			Type *ltype, int flags);
static Exp *resolveExpOper(SemGroup *isg, SemGroup *sg, Exp *exp,
			Type *itype, int flags);
static void resolveUnresClass(Type *super);
static void resolveDeclAlign(Declaration *d, urunesize_t *expalignp,
			int flags);
static void resolveExpAlign(Exp *exp, urunesize_t *expalignp, int flags);
static void resolveTypeAlign(Type *type, urunesize_t *expalignp, int flags);
static void resolveSemGroupAlign(SemGroup *sg, int flags);
static void resolveDeclStorage(Declaration *d, int flags,
			urunesize_t base, urunesize_t *limitp,
			urunesize_t gbase, urunesize_t *glimitp);
static void resolveExpOnlyStorage(Exp *exp, int flags,
			urunesize_t base, urunesize_t *limitp);
static void resolveExpSubStorage(Exp *exp, int flags,
			urunesize_t base, urunesize_t *limitp);
static void resolveExpStorage(Exp *exp, int flags,
			urunesize_t base, urunesize_t *limitp);
static Declaration *findOper(Type *btype, runeid_t id,
			Type *ltype, Type *rtype, int flags);
static Declaration *findExpOper(Exp *exp, int flags);
static Declaration *findCast(Type *btype, Type *ltype, Type *rtype,
			int flags);
static void resolveTypeStorage(Type *type, int flags,
			urunesize_t base, urunesize_t *limitp);
static void resolveSemGroupStorage(SemGroup *sg, int flags,
			urunesize_t base, urunesize_t *limitp,
			urunesize_t gbase, urunesize_t *glimitp);
static void resolveProcedureInline(SemGroup *isg, SemGroup *sg,
			Exp *exp, int flags);
static void resolveDynamicProcedure(SemGroup *isg, SemGroup *sg,
			Exp *exp, int flags);
static void resolveDynamicProcedureAlign(Exp *exp, urunesize_t *expalignp,
			int flags);
static void resolveDynamicProcedureStorage(Exp *exp, int flags,
			urunesize_t base, urunesize_t *limitp,
			urunesize_t gbase, urunesize_t *glimitp);
static int SpecialSemGroupGet(runeid_t id);
static void ResolveMethodProcedureThisArg(SemGroup *subsg, Declaration *d);
static void checkUnrestrictedType(Declaration *d, Type *type);

/*
 * Adjust type to be lvalue but do not modify its relative context for
 * evaluation.
 */
#define ADD_LVALUE(type)	\
	ResolveType(AddTypeQual((type), SF_LVALUE), NULL, 0)
#define DEL_LVALUE(type)	\
	ResolveType(DelTypeQual((type), SF_LVALUE), NULL, 0)
#define DEL_LVALUE_CONST(type)	\
	ResolveType(DelTypeQual((type), SF_LVALUE | SF_CONST), NULL, 0)

#define RESOLVE_AUTOCAST	0x0001	/* autocast to expected type */
#define RESOLVE_UNUSED0002	0x0002
#define RESOLVE_CLEAN		0x0004	/* cleanup after const interp */
#define RESOLVE_FAILOK		0x0008	/* constant resolve may fail */
#define RESOLVE_ISGLOB		0x0010
#define RESOLVE_FINALIZE	0x0020

#define BASEALIGN(base, alignmask)	\
	(((base) + alignmask) & ~(urunesize_t)(alignmask))
#define SIZELIMIT(base, bytes, limitp)	\
	if ((base) + (bytes) > *(limitp))	\
		*(limitp) = ((base) + (bytes))

#define ResolveTypeSimple(type)	ResolveType((type), NULL, 0)

/*
 * Deferred work queue
 */
typedef Type *type_p;
typedef Exp *exp_p;

typedef struct ResVis {
	struct ResVis *next;
	int *visp;
} resvis_t;

typedef struct ResDefer {
	struct ResDefer *next;
	enum { RES_STMT, RES_DECL, RES_TYPE,
	       RES_EXP, RES_SEMGROUP } which;
	union {
		struct {
			SemGroup *isg;
			Stmt *st;
			int flags;
		} stmt;
		struct {
			Declaration *d;
		} decl;
		struct {
			Type *type;
		} type;
		struct {
			SemGroup *isg;
			SemGroup *sg;
			Exp *exp;
			Type *itype;
			int flags;
		} exp;
		struct {
			SemGroup *sg;
			int flags;
		} sg;
	};
} resdelay_t;

static resdelay_t *ResDeferBase;
static resdelay_t **ResDeferTail = &ResDeferBase;
static int ResPass;

int RuneInlineComplexity = 20;

/*
 * Do a pass on all deferred work.  Returns non-zero if there is more
 * deferred work after the pass is complete.
 */
static int
runDeferredWork(void)
{
	resdelay_t *res;
	resdelay_t **last = ResDeferTail;
	Type *type;
	Exp *exp;

	while ((res = ResDeferBase) != NULL) {
		if ((ResDeferBase = res->next) == NULL)
			ResDeferTail = &ResDeferBase;
		switch (res->which) {
		case RES_STMT:
			ResolveStmt(res->stmt.isg, res->stmt.st,
				    res->stmt.flags);
			break;
		case RES_DECL:
			ResolveDecl(res->decl.d, 1);
			break;
		case RES_TYPE:
			type = ResolveType(res->type.type, NULL, 1);
			dassert(type == res->type.type);
			break;
		case RES_EXP:
			exp = ResolveExp(res->exp.isg, res->exp.sg,
					 res->exp.exp, res->exp.itype,
					 res->exp.flags);
			dassert(exp == res->exp.exp);
			break;
		case RES_SEMGROUP:
			ResolveSemGroup(res->sg.sg, 1);
			break;
		default:
			dassert(0);
			break;
		}
		zfree(res, sizeof(*res));
		if (&res->next == last)	/* storage freed, ok to test ptr */
			break;
	}
	return (ResDeferBase != NULL);
}

__unused
static void
deferStmt(SemGroup *isg, Stmt *st, int flags)
{
	resdelay_t *res;

	res = zalloc(sizeof(*res));
	res->which = RES_STMT;
	res->stmt.isg = isg;
	res->stmt.st = st;
	res->stmt.flags = flags;
	*ResDeferTail = res;
	ResDeferTail = &res->next;
}

__unused
static void
deferDecl(Declaration *d)
{
	resdelay_t *res;

	res = zalloc(sizeof(*res));
	res->which = RES_DECL;
	res->decl.d = d;
	*ResDeferTail = res;
	ResDeferTail = &res->next;
}

__unused
static void
deferExp(SemGroup *isg, SemGroup *sg, Exp *exp, Type *itype, int flags)
{
	resdelay_t *res;

	res = zalloc(sizeof(*res));
	res->which = RES_EXP;
	res->exp.isg = isg;
	res->exp.sg = sg;
	res->exp.exp = exp;
	res->exp.itype = itype;
	res->exp.flags = flags;
	*ResDeferTail = res;
	ResDeferTail = &res->next;
}
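/*
 * Illustrative sketch (compiled out): the pattern resolver functions in
 * this file follow when using the deferred work queue above.  A caller
 * that finds its subject still DF_RESOLVING queues it for a later pass
 * instead of recursing; ResolveProject() re-runs runDeferredWork() until
 * the queue drains.  'someSubResolve' is a hypothetical name; the flags
 * and helpers are the real ones defined in this file.
 */
#if 0
static void
someSubResolve(Declaration *d)
{
	if (d->d_Flags & DF_RESOLVED)		/* already done */
		return;
	if (d->d_Flags & DF_RESOLVING) {	/* circular dependency */
		deferDecl(d);			/* retry in a later pass */
		return;
	}
	ResolveDecl(d, 0);			/* normal path */
}

/*
 * BASEALIGN() arithmetic, for reference: with an alignmask of 7 (8-byte
 * alignment), BASEALIGN(5, 7) == (5 + 7) & ~7 == 8.
 */
#endif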
/*
 * Note that visibility is set immediately by the call chain, NOT in any
 * deferral.
 */
static void
deferType(Type *type)
{
	resdelay_t *res;

	res = zalloc(sizeof(*res));
	res->which = RES_TYPE;
	res->type.type = type;
	*ResDeferTail = res;
	ResDeferTail = &res->next;
}

__unused
static void
deferSG(SemGroup *sg)
{
	resdelay_t *res;

	res = zalloc(sizeof(*res));
	res->which = RES_SEMGROUP;
	res->sg.sg = sg;
	*ResDeferTail = res;
	ResDeferTail = &res->next;
}

void
ResolveProject(Parse *p, Stmt *st)
{
	Declaration *d;
	Stmt *main_st;
	runeid_t id;
	int i;
	int eno;

	dassert_stmt(st, st->st_Op == ST_Import);

	/*
	 * Interpreter or Generator may reference our global internal types
	 * directly, so make sure they are all resolved.
	 */
	ResolveClasses(st, 0);
	for (i = 0; BaseTypeAry[i]; ++i)
		ResolveType(BaseTypeAry[i], NULL, 0);

	id = RUNEID_MAIN;
	main_st = RUNE_FIRST(&st->st_List);
	d = FindDeclId(main_st->st_MyGroup, id, &eno);
	if (d == NULL) {
		fprintf(stderr, "Top-level module missing main()\n");
		exit(1);
	}
	dasserts_stmt(main_st, d->d_Op == DOP_PROC,
		      "main() is not a procedure!\n");

	/*
	 * Resolve all dependencies on main (basically everything used by
	 * the project is resolved).
	 */
	ResPass = 0;
	ResolveDecl(d, 0);
	main_st = d->d_ProcDecl.ed_ProcBody;
	runDeferredWork();
	runDeferredWork();
	runDeferredWork();
	ResPass = 1;
	while (runDeferredWork())
		;

	/*
	 * Resolve all types registered by DLLs
	 */
	{
		TypeRegNode *tr;

		RUNE_FOREACH(tr, &TypeRegList, tr_Node)
			ResolveType(tr->tr_Type, NULL, 0);
	}

	/*
	 * This runs through and resolves the alignment and storage for
	 * everything that has been primarily resolved above.
	 */
	ResolveAlignment(st, 0);
	ResolveAlignment(st, RESOLVE_FINALIZE);
	resolveDeclAlign(d, &d->d_MyGroup->sg_TmpAlignMask, 0);
	resolveDeclAlign(d, &d->d_MyGroup->sg_TmpAlignMask,
			 RESOLVE_FINALIZE);
	ResolveStorage(st, 0);
	ResolveStorage(st, RESOLVE_FINALIZE);

	{
		SemGroup *sg;
		urunesize_t base;
		urunesize_t gbase;
		urunesize_t limit;
		urunesize_t glimit;

		sg = d->d_ImportSemGroup;
		base = sg->sg_TmpBytes;
		gbase = sg->sg_GlobalTmpBytes;
		limit = base;
		glimit = gbase;

		for (i = 0; BaseTypeAry[i]; ++i) {
			Type *type = BaseTypeAry[i];
			urunesize_t expalign = 0;

			resolveTypeAlign(type, &expalign, 0);
			resolveTypeAlign(type, &expalign, RESOLVE_FINALIZE);
			resolveTypeStorage(type, 0, 0, &glimit);
			resolveTypeStorage(type, RESOLVE_FINALIZE,
					   0, &glimit);
#if 0
			if (type->ty_Op == TY_CLASS) {
				resolveSemGroupStorage(
				    type->ty_ClassType.et_SemGroup,
				    0, 0, NULL, 0, NULL);
				resolveSemGroupStorage(
				    type->ty_ClassType.et_SemGroup,
				    RESOLVE_FINALIZE, 0, NULL, 0, NULL);
				resolveSemGroupAlign(
				    type->ty_ClassType.et_SemGroup, 0);
				resolveSemGroupAlign(
				    type->ty_ClassType.et_SemGroup,
				    RESOLVE_FINALIZE);
			}
#endif
		}
		resolveDeclStorage(d, 0, base, &limit, gbase, &glimit);
		resolveDeclStorage(d, RESOLVE_FINALIZE, base, &limit,
				   gbase, &glimit);
	}
	p->p_Format = PFMT_RESOLVED;
	CollapseProject(st);
}
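/*
 * Hedged illustration of the refinement feature implemented below
 * (pseudocode only -- not actual Rune syntax; 'MyInt' is hypothetical):
 *
 *	class MyInt : Integer {
 *		refine ...;		// changes storage layout
 *	}
 *
 * Every procedure defined in Integer must be dup'd for MyInt because
 * refinement (and subclassing in general) can change storage sizes,
 * offsets, and even the types of the procedure's auto variables.
 */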
/*
 * ResolveClasses() - Resolve superclasses and do class merge
 *
 * This code implements the most complex feature of the language:
 * subclassing and refinement.
 *
 * The hardest thing we have to do is 'dup' declarations and code in order
 * to implement subclassing and refinement.  For example, a procedure
 * defined in Integer must be dup'd for each subclass of Integer.  We have
 * to do this because storage requirements will change due to both
 * subclassing and refinement.  Even auto variables may wind up with
 * different types between superclass and subclass.
 *
 * We must scan ST_Import and ST_Class statements.
 */
static void
ResolveClasses(Stmt *st, int flags)
{
	SemGroup *sg = st->st_MyGroup;

	/*
	 * Resolver interlock.  Assert that we aren't looping.  A loop can
	 * occur if class A embeds class B and class B embeds class A
	 * (versus a pointer to A).
	 */
	dassert_stmt(st, (st->st_Flags & STF_RESOLVING) == 0);
	if (st->st_Flags & STF_RESOLVED)
		return;
	st->st_Flags |= STF_RESOLVING;

	/*
	 * If this is a subclass, integrate the superclass into it
	 */
	if (st->st_Op == ST_Class && st->st_ClassStmt.es_Super) {
		Type *super = st->st_ClassStmt.es_Super;
		Stmt *sst;
		Declaration *d;
		Declaration *rd;
		SemGroup *tsg;
		int has_t;

		/*
		 * Locate the superclass.  'super' does not appear in any
		 * other list.. this is a unique Type structure.
		 */
		dassert_stmt(st, super->ty_Op == TY_UNRESOLVED);
		do {
			resolveUnresClass(super);
		} while (super->ty_Op == TY_UNRESOLVED);
		dassert_stmt(st, super->ty_Op == TY_CLASS);

		/*
		 * Cleanup (XXX free qualified segments??)
		 */
		st->st_ClassStmt.es_Super = super;
		st->st_ClassStmt.es_Decl->d_ClassDecl.ed_Super = super;

		/*
		 * Inherit internal unsigned integer and floating point
		 * flags and a few others.
		 */
		sg->sg_Flags |= super->ty_ClassType.et_SemGroup->sg_Flags &
		    (SGF_ISINTEGER | SGF_ISUNSIGNED | SGF_ISFLOATING |
		     SGF_ISBOOL | SGF_HASASS | SGF_GHASASS |
		     SGF_HASLVREF | SGF_GHASLVREF |
		     SGF_HASPTR | SGF_GHASPTR | SGF_ABICALL);

		/*
		 * The subclass's unrestricted scope (or not), must match
		 * the super-class.  Otherwise methods pulled-down from the
		 * superclass might not be compatible with the subclass.
		 */
		if ((sg->sg_ClassType->ty_SQFlags ^
		     super->ty_ClassType.et_SemGroup->
		      sg_ClassType->ty_SQFlags) & SF_STRUCT) {
			dwarn_stmt(sg->sg_Stmt, 0, "subclass %08x",
				   sg->sg_ClassType->ty_SQFlags);
			dfatal_stmt(super->ty_ClassType.et_SemGroup->sg_Stmt,
				    TOK_ERR_CLASS_STRUCT_COMPAT,
				    "super %08x/%08x", super->ty_SQFlags,
				    super->ty_ClassType.et_SemGroup->
				     sg_ClassType->ty_SQFlags);
		}

		/*
		 * Locate the class statement associated with the
		 * superclass and resolve it.
		 */
		sst = super->ty_ClassType.et_SemGroup->sg_Stmt;
		dassert_stmt(st, sst != NULL);
		dassert(sst->st_MyGroup == super->ty_ClassType.et_SemGroup);
		dassert_stmt(st, sst->st_Op == ST_Class);
		ResolveClasses(sst, flags);

		/*
		 * Assign the sg_Level for the subclass.  This is used for
		 * semantic searches when a subclass is passed to a
		 * procedure expecting the superclass.
		 */
		sg->sg_Level = sst->st_MyGroup->sg_Level + 1;

		/*
		 * XXX Subclasses can inherit locking scope here.
		 * Currently we do not.
		 */
#if 0
		if (sst->u.ClassStmt.es_Decl->d_ScopeFlags & SCOPE_HARD) {
			st->u.ClassStmt.es_Decl->d_ScopeFlags |= SCOPE_HARD;
		} else if (st->u.ClassStmt.es_Decl->d_ScopeFlags &
			   SCOPE_HARD) {
			dfatal_stmt(st, TOK_ERR_ILLEGAL_LOCKING_REFINEMENT,
				    NULL);
		}
#endif

		/*
		 * First move all the declarations from sg to tsg so we can
		 * merge the superclass properly (keep all the d_Index's
		 * correct).  Note that tsg is not 100% integrated so we
		 * can only use it for search purposes.  We absolutely
		 * CANNOT DupDeclaration() into tsg!
		 */
		tsg = AllocSemGroup(SG_CLASS, sg->sg_Parse, NULL,
				    sg->sg_Stmt);
		has_t = 0;
		while ((d = RUNE_FIRST(&sg->sg_DeclList)) != NULL) {
			if (d->d_Id == RUNEID__T)
				has_t = 1;
			RenameDecl(d, tsg);
		}

		/*
		 * If our sub-class does not have a _t type, then
		 * automatically add it in.
		 *
		 * Add to sg then rename so the declaration is properly
		 * initialized for sg (e.g. fields like d_Level).
		 */
		if (has_t == 0) {
			Scope scope = INIT_SCOPE(SCOPE_REFINE);

			d = AllocDeclaration(sg, DOP_TYPEDEF, &scope);
			d->d_TypedefDecl.ed_Type = sg->sg_ClassType;
			/* AllocClassType(&sg->sg_ClassList, super,
			   sg->sg_Stmt->st_MyGroup, SCOPE_ALL_VISIBLE); */
			HashDecl(d, RUNEID__T);
			RenameDecl(d, tsg);
		}

		/*
		 * Reset count (index counter)
		 */
		sg->sg_DeclCount = 0;

		/*
		 * Merge the superclass into this class, in sequence.
		 * Iterate through declarations in the superclass and pull
		 * them into the subclass.  Figure out compatibility between
		 * super and subclasses.
		 *
		 * d  - iterates the superclass
		 * nd - subclass declaration refining the superclass decl
		 */
		RUNE_FOREACH(d, &sst->st_MyGroup->sg_DeclList, d_Node) {
			Declaration *nd;
			int eno = 0;

			dassert(d->d_Level != NULL &&
				d->d_Level->sg_Level < sg->sg_Level);

			/*
			 * See if the superclass decl conflicts with a
			 * subclass decl.  If there is no conflict pull it
			 * into the subclass and adjust the visibility.
			 * Note that the superclass may have duplicate ids,
			 * but they will be at different levels if so.
			 *
			 * The super linkage is required when findDecl()
			 * checks visibility of a declaration hidden
			 * relative to our subclass, but not necessarily
			 * hidden relative to the superclass.
			 *
			 * Set d_Search to the (ultimate) superclass when
			 * inheriting elements from the superclass.
			 * d_Search is not set for refinements or
			 * extensions.
			 *
			 * XXX overloading
			 */
			rd = FindDeclRefineId(tsg, d->d_Id, &eno);
			if (rd == NULL) {
				/* XXX proliferates decls/types? */
				nd = DupDeclaration(sg, d);
				dassert(d->d_Index == nd->d_Index);
				nd->d_ScopeFlags &= ~SCOPE_ALL_VISIBLE |
						    super->ty_Visibility;
				nd->d_ScopeFlags &= ~SCOPE_REFINE;
				if (nd->d_Search == NULL)
					nd->d_Search = sst->st_MyGroup;

				/*
				 * Superclass decl is brought in unrefined
				 * (though it might be an implied refinement
				 * depending on side-effects).
				 */
				nd->d_SubNext = d->d_SubBase;
				d->d_SubBase = nd;
				continue;
			}

			/*
			 * If there is a conflict and we are not refining
			 * the superclass entity, then pull in the
			 * superclass entity and make it invisible to
			 * sg_Level searches.  This could bring in multiple
			 * levels of the same id.
			 *
			 * Note that this may result in multiple ids, but
			 * they will be at different levels.  In this case
			 * rd will be at the current level and nd will be
			 * at some prior level.
			 *
			 * Order is important here.
			 */
			if ((rd->d_ScopeFlags & SCOPE_REFINE) == 0) {
				/* XXX proliferates decls/types? */
				nd = DupDeclaration(sg, d);
				dassert(d->d_Index == nd->d_Index);
				nd->d_ScopeFlags &= ~(SCOPE_ALL_VISIBLE |
						      SCOPE_REFINE);
				if (nd->d_Search == NULL)
					nd->d_Search = sst->st_MyGroup;

				/*
				 * Superclass decl is brought in unrefined
				 * (though it might be an implied refinement
				 * depending on side-effects).
				 */
				nd->d_SubNext = d->d_SubBase;
				d->d_SubBase = nd;
				continue;
			}

			/*
			 * Ok, we need to refine.  But the superclass may
			 * contain multiple levels of the same id.  We only
			 * refine the one that is visible to us.  None of
			 * these other declarations will be at the same
			 * level.
			 */
			if ((d->d_ScopeFlags & SCOPE_ALL_VISIBLE) == 0) {
				nd = DupDeclaration(sg, d);
				dassert(d->d_Index == nd->d_Index);
				nd->d_ScopeFlags &= ~(SCOPE_ALL_VISIBLE |
						      SCOPE_REFINE);
				if (nd->d_Search == NULL)
					nd->d_Search = sst->st_MyGroup;

				/*
				 * Superclass decl is brought in unrefined
				 * (though it might be an implied refinement
				 * depending on side-effects).
				 */
				nd->d_SubNext = d->d_SubBase;
				d->d_SubBase = nd;
				continue;
			}
			/*
			 * Whew!  Finally, we found the superclass decl
			 * that we wish to refine.  We had better not have
			 * already refined it or there's something wrong
			 * with the algorithm.
			 *
			 * Since we inherit the superclass method's level
			 * our method will run in the superclass instead of
			 * the original, but d_Super still must be set for
			 * findDecl() to track down visibility relative to
			 * the superclass methods.
			 *
			 * Do not set d_Search for refinement overrides,
			 * the context for method lookups should be the
			 * subclass, not the superclass in this case.
			 */
			RenameDecl(rd, sg);
			dassert_decl(rd, rd->d_Super == NULL);
			dassert(d->d_Index == rd->d_Index);
			rd->d_Level = d->d_Level;	/* XXX */
			rd->d_Super = d;

			/*
			 * super->subclass(es) list
			 */
			rd->d_SubNext = d->d_SubBase;
			d->d_SubBase = rd;

			/*
			 * This is for the super.field special case method
			 * access below.
			 *
			 * XXX This brings in lots of extra procedures that
			 *     we really should pare down, but we don't
			 *     know which ones will be accessed in pass1.
			 */
			if (d->d_Op == DOP_PROC) {
				d->d_Flags |= DF_SUPERCOPY;
			}

			/*
			 * Inherit scope from the superclass if it is not
			 * specified in the REFINE declaration (see
			 * AllocDeclaration).
			 */
			if ((rd->d_ScopeFlags & SCOPE_ALL_VISIBLE) == 0) {
				rd->d_ScopeFlags |= d->d_ScopeFlags &
						    SCOPE_ALL_VISIBLE;
			}
		}

		/*
		 * Deal with any remaining elements in tsg.  These are
		 * 'extensions' to the superclass.  There may also be
		 * invisible DOP_PROC's to handle the special superclass
		 * method call case described above.
		 */
		while ((rd = RUNE_FIRST(&tsg->sg_DeclList)) != NULL) {
			if (rd->d_ScopeFlags & SCOPE_REFINE) {
				if (rd->d_Super == NULL) {
					char buf[RUNE_IDTOSTR_LEN];

					fprintf(stderr,
						"Unable to refine %s, it "
						"does not exist in "
						"superclass\n",
						runeid_text(rd->d_Id, buf));
					dassert_decl(rd, 0);
				}
			}
			RenameDecl(rd, sg);
		}
		FreeSemGroup(tsg);

		/*
		 * Pull in any methods from the superclass that the
		 * subclass explicitly accesses via super.func().  As
		 * before, we have to set d_Search for the visibility
		 * context when resolving these procedures (XXX).
		 *
		 * We have to special case super.method() for a refined
		 * method.  Normally this makes the original method
		 * inaccessible (for storage), but we want it to work for a
		 * procedure so we make a copy in tsg.  (we can't dup it
		 * directly into sg because it will screw up the d_Index).
		 *
		 * We must not only clear the scope visibility and the
		 * temporary refine flag, we also have to clear
		 * constructor/destructor scope in the copy so only the
		 * refined constructor/destructor is called, not both the
		 * refined and the superclass constructor/destructor.
		 *
		 * Also fixup arguments for method procedures.  We will set
		 * d_Search for the 'this' argument.
		 */
		RUNE_FOREACH(d, &sst->st_MyGroup->sg_DeclList, d_Node) {
			Declaration *nd;

			if (d->d_Flags & DF_SUPERCOPY) {
				d->d_Flags &= ~DF_SUPERCOPY;
				nd = DupDeclaration(sg, d);
				nd->d_ScopeFlags &= ~(SCOPE_ALL_VISIBLE |
						      SCOPE_REFINE |
						      SCOPE_CONSTRUCTOR |
						      SCOPE_DESTRUCTOR);
				if (nd->d_Search == NULL)
					nd->d_Search = sst->st_MyGroup;
			}
		}
	} else if (st->st_Op == ST_Class) {
		Declaration *d;
		int has_t;

		sg->sg_Level = 0;
		has_t = 0;
		RUNE_FOREACH(d, &sg->sg_DeclList, d_Node) {
			if (d->d_Id == RUNEID__T) {
				has_t = 1;
				break;
			}
		}

		/*
		 * If our class does not have a _t type, then automatically
		 * add it in.  This is not a sub-class so do not scope it
		 * as a refinement.
		 */
		if (has_t == 0) {
			Scope scope = INIT_SCOPE(0);

			d = AllocDeclaration(sg, DOP_TYPEDEF, &scope);
			d->d_TypedefDecl.ed_Type = sg->sg_ClassType;
			/* AllocClassType(&sg->sg_ClassList, NULL,
			   sg->sg_Stmt->st_MyGroup, SCOPE_ALL_VISIBLE); */
			HashDecl(d, RUNEID__T);
		}
	}

	/*
	 * Fixup the method procedures
	 */
	if (st->st_Op == ST_Class) {
		Declaration *d;

		RUNE_FOREACH(d, &sg->sg_DeclList, d_Node) {
			if (d->d_Op == DOP_PROC &&
			    (d->d_ProcDecl.ed_Type->ty_SQFlags &
			     (SF_METHOD | SF_GMETHOD))) {
				ResolveMethodProcedureThisArg(sg, d);
			}
		}
	}
	st->st_Flags &= ~STF_RESOLVING;
	st->st_Flags |= STF_RESOLVED;

	/*
	 * If this is an ST_Import we must recurse through it.  The only
	 * statements under an Import should be Modules.  Well, really just
	 * one module.  And under that module we only care about ST_Import
	 * and ST_Class statements.
	 *
	 * If this is a shared import the statement list will be empty
	 * (later it may be used for import refinement, I dunno).  This is
	 * what we want since we should only resolve a shared import once.
	 */
	if (st->st_Op == ST_Import) {
		Stmt *scan;

		RUNE_FOREACH(scan, &st->st_List, st_Node) {
			Stmt *scan2;

			dassert_stmt(scan, scan->st_Op == ST_Module);
			RUNE_FOREACH(scan2, &scan->st_List, st_Node) {
				if (scan2->st_Op == ST_Import ||
				    scan2->st_Op == ST_Class) {
					ResolveClasses(scan2, flags);
				}
			}
		}
		if (st->st_ImportStmt.es_DLL) {
			void (*func)(void);

			func = dlsym(st->st_ImportStmt.es_DLL,
				     "resolveClasses");
			if (func)
				func();
		}
	}
}
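/*
 * Sketch of the optional DLL-side hooks dlsym()'d by ResolveClasses()
 * above and by the (currently disabled) import handling in ResolveStmt()
 * below.  A DLL bound to an import may export these symbols to
 * participate in resolution; the empty bodies here are hypothetical.
 */
#if 0
void
resolveClasses(void)
{
	/* invoked after the import's class statements are resolved */
}

void
resolveTypes(void)
{
	/* invoked when the import's module statements are resolved */
}
#endif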
/*
 * ResolveStmt() - Resolve all types, declarations, and semantic refs
 *
 * Resolves all types, declarations, and identifiers.  Additionally this
 * function resolves intermediate types for expressions.  Storage sizes
 * are resolved but offsets are not assigned to declarations.
 *
 * Returns a complexity count.
 */
static void
ResolveStmt(SemGroup *isg, Stmt *st, int flags)
{
	/*
	 * Nothing to do if we have already resolved this statement
	 */
	dassert_stmt(st, (st->st_Flags & STF_RESOLVING) == 0);
	if (st->st_Flags & STF_RESOLVED)
		return;
	st->st_Flags |= STF_RESOLVING;

	/*
	 * Process whether we detached as a thread already or not.
	 */
	if (st->st_Parent)
		st->st_Flags |= st->st_Parent->st_Flags & STF_DIDRESULT;

	/*
	 * Deal with unresolved types here
	 *
	 * If this is an executable layer, flag the SG as resolved.  Note
	 * that ResolveSemGroup is never called on executable layers, they
	 * are handled by ResolveStmt() and ResolveDecl().
	 */
	if (st->st_Flags & STF_SEMANTIC) {
		SemGroup *sg = st->st_MyGroup;
		Type *type;

		sg->sg_Flags |= SGF_RESOLVED;
		RUNE_FOREACH(type, &sg->sg_ClassList, ty_Node) {
			if (type->ty_Op == TY_UNRESOLVED) {
				resolveUnresClass(type);
			}
		}
	}

	/*
	 * Resolve statements.  Don't worry about declarations, those are
	 * handled after this switch.
	 */
	switch (st->st_Op) {
	case ST_Import:
		/*
		 * This will just flag the import declaration as resolved
		 * so the code generator dives it for generation.
		 */
		if (st->st_ImportStmt.es_Decl)
			ResolveDecl(st->st_ImportStmt.es_Decl, 0);
		/* fall through */
	case ST_Module:
		/*
		 * Recursively resolve contents
		 *
		 * COMMENTED OUT - Unnecessary, causes excessive
		 * realization of library elements, etc.
		 */
#if 0
		/* if (isg == NULL || (isg->sg_Flags & SGF_ENTRY)) */
		{
			Stmt *scan;

			RUNE_FOREACH(scan, &st->st_List, st_Node) {
				/*
				 * XXX pass isg for import, st_MyGroup for
				 * module??
				 */
				ResolveStmt(st->st_MyGroup, scan, flags);
			}
			if (st->st_Op == ST_Import &&
			    st->st_ImportStmt.es_DLL) {
				void (*func)(void) =
				    dlsym(st->st_ImportStmt.es_DLL,
					  "resolveTypes");
				if (func)
					func();
			}
		}
#endif
		break;
	case ST_Class:
		/*
		 * COMMENTED OUT - Unnecessary, causes excessive
		 * realization of library elements, etc.
		 */
#if 0
		ResolveDecl(st->st_ClassStmt.es_Decl, 0);
#endif
		break;
	case ST_Typedef:
		ResolveDecl(st->st_TypedefStmt.es_Decl, 0);
		break;
	case ST_Decl:
		/*
		 * Resolve declarations, skipping any whose context was
		 * moved to a class (e.g. a declaration at the top level of
		 * a file like Fd.setfd(...) also exists in the Fd class).
		 */
		{
			Declaration *d = st->st_DeclStmt.es_Decl;
			int i;

			for (i = 0; i < st->st_DeclStmt.es_DeclCount; ++i) {
				if (st->st_MyGroup == d->d_MyGroup)
					ResolveDecl(d, 0);
				d = RUNE_NEXT(d, d_Node);
			}
		}
		break;
	case ST_Block:
		{
			Stmt *scan;

			RUNE_FOREACH(scan, &st->st_List, st_Node) {
				ResolveStmt(isg, scan, flags);
			}
		}
		break;
	case ST_Nop:
		break;
	case ST_Loop:
		if (st->st_LoopStmt.es_Init)
			ResolveStmt(isg, st->st_LoopStmt.es_Init, flags);
		if (st->st_LoopStmt.es_BCond) {
			/*
			 * NOTE: BoolType global implies an rvalue.
			 */
			st->st_LoopStmt.es_BCond =
			    ResolveExp(isg, st->st_MyGroup,
				       st->st_LoopStmt.es_BCond,
				       &BoolType, RESOLVE_AUTOCAST);
		}
		if (st->st_LoopStmt.es_ACond) {
			/*
			 * NOTE: BoolType global implies an rvalue.
			 */
			st->st_LoopStmt.es_ACond =
			    ResolveExp(isg, st->st_MyGroup,
				       st->st_LoopStmt.es_ACond,
				       &BoolType, RESOLVE_AUTOCAST);
		}
		if (st->st_LoopStmt.es_AExp) {
			/*
			 * NOTE: VoidType global implies an rvalue.
			 */
			st->st_LoopStmt.es_AExp =
			    ResolveExp(isg, st->st_MyGroup,
				       st->st_LoopStmt.es_AExp,
				       &VoidType, RESOLVE_AUTOCAST);
		}

		/*
		 * Procedure bodies are not resolved here.  We avoid
		 * resolving the body until the last possible moment.
		 *
		 * The procedure body will be resolved in ResolveDecl()'s
		 * DOP_PROC case.
		 */
		if (st->st_LoopStmt.es_Body) {
			ResolveStmt(isg, st->st_LoopStmt.es_Body, flags);
		}
		break;
	case ST_BreakCont:
		break;
	case ST_Bad:
		break;
	case ST_IfElse:
		/*
		 * NOTE: BoolType global implies an rvalue.
		 */
		st->st_IfStmt.es_Exp = ResolveExp(isg, st->st_MyGroup,
						  st->st_IfStmt.es_Exp,
						  &BoolType,
						  RESOLVE_AUTOCAST);
		ResolveStmt(isg, st->st_IfStmt.es_TrueStmt, flags);
		if (st->st_IfStmt.es_FalseStmt)
			ResolveStmt(isg, st->st_IfStmt.es_FalseStmt, flags);
		break;
	case ST_Return:
		/*
		 * NOTE: lvalue/rvalue depends on return type.
		 */
		st->st_RetStmt.es_ProcRetType =
		    resolveReturnType(st->st_MyGroup, flags);
		if (st->st_RetStmt.es_Exp) {
			if (st->st_Flags & STF_DIDRESULT)
				dfatal_stmt(st, TOK_ERR_RESULT_SEQUENCING,
					    NULL);
			st->st_RetStmt.es_Exp =
			    ResolveExp(isg, st->st_MyGroup,
				       st->st_RetStmt.es_Exp,
				       st->st_RetStmt.es_ProcRetType,
				       RESOLVE_AUTOCAST);
		}
		break;
	case ST_Result:
		/*
		 * NOTE: lvalue/rvalue depends on return type.
		 */
		if (st->st_Flags & STF_DIDRESULT)
			dfatal_stmt(st, TOK_ERR_RESULT_SEQUENCING, NULL);
		if ((st->st_Parent->st_Flags & STF_SEMTOP) == 0)
			dfatal_stmt(st, TOK_ERR_RESULT_SEQUENCING, NULL);
		st->st_ResStmt.es_ProcRetType =
		    resolveReturnType(st->st_MyGroup, flags);
		if (st->st_ResStmt.es_Exp) {
			st->st_ResStmt.es_Exp =
			    ResolveExp(isg, st->st_MyGroup,
				       st->st_ResStmt.es_Exp,
				       st->st_ResStmt.es_ProcRetType,
				       RESOLVE_AUTOCAST);
		}
		{
			/*
			 * Flag that we executed result;
			 */
			Stmt *scan;

			for (scan = st; scan; scan = scan->st_Parent) {
				scan->st_Flags |= STF_DIDRESULT;
				scan->st_MyGroup->sg_Flags |= SGF_DIDRESULT;
				if (scan->st_Flags & STF_SEMTOP)
					break;
			}
		}
		break;
	case ST_Switch:
		/*
		 * NOTE: Switch type must be an rvalue.
		 *
		 * NOTE: It is possible to switch on a type.  See ST_Case
		 * below for more detail.
		 */
		st->st_SwStmt.es_Exp->ex_Flags |= EXF_REQ_TYPE;
		st->st_SwStmt.es_Exp = ResolveExp(isg, st->st_MyGroup,
						  st->st_SwStmt.es_Exp,
						  NULL, 0);
#if 0
		/*
		 * Switch-on-expression() expects an rvalue.
		 */
		if ((st->st_SwStmt.es_Exp->ex_Flags & EXF_RET_TYPE) == 0) {
			st->st_SwStmt.es_Exp->ex_Type =
			    DEL_LVALUE(st->st_SwStmt.es_Exp->ex_Type);
		}
#endif
		{
			Stmt *scan;

			RUNE_FOREACH(scan, &st->st_List, st_Node) {
				ResolveStmt(isg, scan, flags);
			}
		}
		break;
	case ST_Case:
		/*
		 * Handle a case/default.  Note that when switching on a
		 * type, each case expression must return a type.
		 *
		 * NOTE: Case type must be an rvalue.  We use the switch
		 * type to cast, so it will be.
		 */
		{
			Stmt *scan;
			Exp *exp;
			Type *type;

			/*
			 * Set type to cast cases to if we are switching on
			 * an expression, otherwise we are switching on a
			 * type and should not try to coerce the cases (it
			 * doesn't make sense to).
			 */
			dassert_stmt(st, st->st_Parent->st_Op == ST_Switch);
			if (st->st_Parent->st_SwStmt.es_Exp->ex_Flags &
			    EXF_RET_TYPE)
				type = NULL;
			else
				type = st->st_Parent->st_SwStmt.es_Exp->
				       ex_Type;

			/*
			 * case: (if es_Exp is NULL, this is a default: )
			 */
			if ((exp = st->st_CaseStmt.es_Exp) != NULL) {
				if (type == NULL)
					exp->ex_Flags |= EXF_REQ_TYPE;
				exp = ResolveExp(isg, st->st_MyGroup,
						 exp, type,
						 RESOLVE_AUTOCAST);
				if (type == NULL)
					dassert(exp->ex_Flags &
						EXF_RET_TYPE);
				st->st_CaseStmt.es_Exp = exp;
			}

			/*
			 * Elements of the case/default
			 */
			RUNE_FOREACH(scan, &st->st_List, st_Node) {
				ResolveStmt(isg, scan, flags);
			}
		}
		break;
	case ST_Exp:
		/*
		 * NOTE: VoidType global implies an rvalue.
		 *
		 * NOTE: If ResolveExp() doesn't cast to void for us, we
		 * will do it here.
		 */
		{
			Exp *exp;

			exp = ResolveExp(isg, st->st_MyGroup,
					 st->st_ExpStmt.es_Exp,
					 &VoidType, RESOLVE_AUTOCAST);
			if (exp->ex_Type != &VoidType) {
				exp = resolveExpCast(isg, st->st_MyGroup,
						     exp, &VoidType, flags);
			}
			st->st_ExpStmt.es_Exp = exp;
		}
		break;
	case ST_Proc:
		{
			Stmt *scan;

			RUNE_FOREACH(scan, &st->st_List, st_Node) {
				ResolveStmt(isg, scan, flags);
			}
		}
		break;
	case ST_ThreadSched:
		break;
	default:
		dassert_stmt(st, 0);
	}

	/*
	 * Calculate and propagate complexity upward.
	 */
	{
		SemGroup *sg;

		if ((sg = st->st_MyGroup) != NULL) {
			++sg->sg_Complexity;
			if ((st->st_Flags & STF_SEMTOP) == 0 &&
			    sg->sg_Parent &&
			    RUNE_NEXT(st, st_Node) == NULL) {
				sg->sg_Parent->sg_Complexity +=
				    sg->sg_Complexity;
			}
			/*
			 * Head of procedure needs to know if any ABI calls
			 * will be made so it can reserve stack space.
			 */
			if ((st->st_Flags & STF_SEMTOP) == 0 &&
			    sg->sg_Parent) {
				sg->sg_Parent->sg_Flags |= sg->sg_Flags &
							   SGF_ABICALL;
			}
		}
	}
	st->st_Flags |= STF_RESOLVED;
	st->st_Flags &= ~STF_RESOLVING;
}

/*
 * Locate the ST_Proc statement and resolve & return its return type
 */
static Type *
resolveReturnType(SemGroup *sg, int flags __unused)
{
	Declaration *d;
	Type *type;
	Stmt *st;

	/*
	 * Locate the ST_Proc statement
	 */
	while (sg && (sg->sg_Stmt == NULL || sg->sg_Stmt->st_Op != ST_Proc))
		sg = sg->sg_Parent;
	dassert(sg != NULL);
	st = sg->sg_Stmt;
	d = st->st_ProcStmt.es_Decl;	/* decl is already resolved */
	dassert_decl(d, d->d_Op == DOP_PROC);
	dassert_decl(d, d->d_Flags & (DF_RESOLVING | DF_RESOLVED));
	type = d->d_ProcDecl.ed_Type;
	dassert_decl(d, type->ty_Op == TY_PROC);

	return (type->ty_ProcType.et_RetType);
}

Type *
resolveArgsType(SemGroup *sg, int flags __unused)
{
	Declaration *d;
	Type *type;
	Stmt *st;

	/*
	 * Locate the ST_Proc statement
	 */
	while (sg && (sg->sg_Stmt == NULL || sg->sg_Stmt->st_Op != ST_Proc))
		sg = sg->sg_Parent;
	dassert(sg != NULL);
	st = sg->sg_Stmt;
	d = st->st_ProcStmt.es_Decl;	/* decl is already resolved */
	dassert_decl(d, d->d_Op == DOP_PROC);
	dassert_decl(d, d->d_Flags & (DF_RESOLVING | DF_RESOLVED));
	type = d->d_ProcDecl.ed_Type;
	dassert_decl(d, type->ty_Op == TY_PROC);

	return (type->ty_ProcType.et_ArgsType);
}
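/*
 * Hedged illustration of the LVALUE-scope argument handling performed by
 * ResolveDecl() below: for a hypothetical procedure argument whose
 * underlying class type is, say, 16 bytes, the argument slot is sized as
 * the LValueStor descriptor rather than as the object itself:
 */
#if 0
	if (d->d_ScopeFlags & SCOPE_LVALUE) {
		/* passed by reference: slot holds an LValueStor */
		d->d_Bytes = sizeof(LValueStor);
		d->d_AlignMask = LVALUESTOR_ALIGN;
	} else {
		/* passed by value: slot holds the object itself */
		d->d_Bytes = type->ty_Bytes;	/* e.g. 16 */
		d->d_AlignMask = type->ty_AlignMask;
	}
#endif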
/*
 * ResolveDecl() - resolve a declaration
 *
 * If the declaration represents a procedure argument, special processing
 * of LVALUE scope is required to pass the declaration by reference
 * instead of by value.  Note that the size of the underlying type DOES
 * NOT CHANGE... it may be much larger.
 *
 * NOTE: We do not resolve d_Offset here.
 */
static void
ResolveDecl(Declaration *d, int retry)
{
	Type *type;
	Stmt *st;
	SemGroup *sg = NULL;
	int ok = 0;

	/*
	 * Recursion detection
	 */
	if (d->d_Flags & DF_RESOLVED)
		return;
	if (d->d_Flags & DF_RESOLVING) {
		if (retry == 0)
			return;
	}
	d->d_Flags |= DF_RESOLVING;

	/*
	 * Resolve according to the kind of declaration
	 */
	switch (d->d_Op) {
	case DOP_CLASS:
		if (d->d_ClassDecl.ed_Super)
			ResolveType(d->d_ClassDecl.ed_Super, NULL, 0);
		sg = d->d_ClassDecl.ed_SemGroup;
		ResolveSemGroup(sg, 0);
		if (sg->sg_Flags & SGF_RESOLVED) {
			d->d_Bytes = d->d_ClassDecl.ed_SemGroup->sg_Bytes;
			d->d_AlignMask =
			    d->d_ClassDecl.ed_SemGroup->sg_AlignMask;
			ok = 1;
		}
		break;
	case DOP_ALIAS:
		/*
		 * Alias access is a barrier and always returns an rvalue.
		 *
		 * DupExp is absolutely required due to the alias's target
		 * context being different for each consumer.
		 */
		type = ResolveType(d->d_AliasDecl.ed_Type, NULL, 0);
		if (type->ty_Flags & TF_RESOLVED)
			ok = 1;
		if (d->d_AliasDecl.ed_OrigAssExp) {
			d->d_AliasDecl.ed_AssExp =
			    DupExp(d->d_MyGroup,
				   d->d_AliasDecl.ed_OrigAssExp);
			d->d_AliasDecl.ed_AssExp =
			    ResolveExp(d->d_ImportSemGroup, d->d_MyGroup,
				       d->d_AliasDecl.ed_AssExp,
				       DEL_LVALUE(type), RESOLVE_AUTOCAST);
		}
		break;
	case DOP_TYPEDEF:
		d->d_Flags |= DF_RESOLVED;	/* XXX */
		type = ResolveType(d->d_TypedefDecl.ed_Type, NULL, 0);
		d->d_Flags &= ~DF_RESOLVED;
		if (type->ty_Flags & TF_RESOLVED)
			ok = 1;
		break;
	case DOP_IMPORT:
		/*
		 * This only occurs when resolving an import's semantic
		 * group.  Since we are scanning statements in that context
		 * we do not have to recurse here, ResolveStmt() will do it
		 * for us.
		 */
		ok = 1;
		break;
	case DOP_PROC:
		/*
		 * XXX global procedure, later on, make the argument a type
		 * instead of storage?
		 *
		 * Avoid a circular loop failure when the procedure
		 * declaration references the class it is defined in by
		 * marking the resolve complete even if the type isn't.
		 * We can do this because the procedure takes no field
		 * storage.
		 */
		ResolveType(d->d_ProcDecl.ed_Type, NULL, 0);
		ok = 1;

		/*
		 * Deal with constructor/destructor chaining.  The chaining
		 * winds up being reversed and will be corrected by the
		 * caller.
		 *
		 * NOTE: Constructors and destructors might be referenced
		 * without the entire SG being resolved, so be sure to set
		 * the ABI flags here.
		 */
		if (d->d_ScopeFlags & SCOPE_GLOBAL) {
			if ((d->d_Flags & DF_ONGLIST) == 0 &&
			    (d->d_ScopeFlags & (SCOPE_CONSTRUCTOR |
						SCOPE_DESTRUCTOR))) {
				d->d_GNext = d->d_MyGroup->sg_GBase;
				d->d_Flags |= DF_ONGLIST;
				d->d_MyGroup->sg_GBase = d;
				d->d_MyGroup->sg_Flags |= SGF_GABICALL;
			}
		} else {
			if ((d->d_Flags & DF_ONCLIST) == 0 &&
			    (d->d_ScopeFlags & SCOPE_CONSTRUCTOR)) {
				d->d_CNext = d->d_MyGroup->sg_CBase;
				d->d_Flags |= DF_ONCLIST;
				d->d_MyGroup->sg_CBase = d;
				d->d_MyGroup->sg_Flags |= SGF_ABICALL;
			}
			if ((d->d_Flags & DF_ONDLIST) == 0 &&
			    (d->d_ScopeFlags & SCOPE_DESTRUCTOR)) {
				d->d_DNext = d->d_MyGroup->sg_DBase;
				d->d_Flags |= DF_ONDLIST;
				d->d_MyGroup->sg_DBase = d;
				d->d_MyGroup->sg_Flags |= SGF_ABICALL;
			}
		}

		/*
		 * If this procedure is bound to a DLL we have to resolve
		 * it here.
		 */
		if (d->d_ScopeFlags & SCOPE_CLANG) {
			char buf[RUNE_IDTOSTR_LEN];

			d->d_ProcDecl.ed_DLLFunc =
			    FindDLLSymbol(NULL, d->d_ImportSemGroup,
					  runeid_text(d->d_Id, buf));
		}
		break;
	case DOP_ARGS_STORAGE:
	case DOP_STACK_STORAGE:
	case DOP_GLOBAL_STORAGE:
	case DOP_GROUP_STORAGE:
		type = ResolveType(d->d_StorDecl.ed_Type, NULL, 0);

		/*
		 * Complete if the underlying type is resolved.
		 */
		if (type->ty_Flags & TF_RESOLVED)
			ok = 1;

		/*
		 * Promote the lvalue storage qualifier (e.g. from a
		 * typedef) into the declaration's scope.  This is what
		 * ultimately controls lvalue vs rvalue arguments to
		 * procedures and such.
		 */
		if ((type->ty_SQFlags & SF_LVALUE) &&
		    (d->d_ScopeFlags & SCOPE_LVALUE) == 0) {
			d->d_ScopeFlags |= SCOPE_LVALUE;
		}

		/*
		 * Default assignment handling expects an rvalue.
		 */
		if (d->d_StorDecl.ed_OrigAssExp) {
			d->d_StorDecl.ed_AssExp =
			    DupExp(d->d_MyGroup,
				   d->d_StorDecl.ed_OrigAssExp);
			d->d_StorDecl.ed_AssExp =
			    ResolveExp(d->d_ImportSemGroup, d->d_MyGroup,
				       d->d_StorDecl.ed_AssExp,
				       DEL_LVALUE(type), RESOLVE_AUTOCAST);
		}

		if (d->d_ScopeFlags & SCOPE_LVALUE) {
			/*
			 * Object is passed as a LValueStor structure.
			 * Note that d_Bytes is going to be different than
			 * the underlying type (which represents the actual
			 * object).
			 */
			d->d_Bytes = sizeof(LValueStor);
			d->d_AlignMask = LVALUESTOR_ALIGN;
		} else {
			/*
			 * Object is passed by value.
			 */
			d->d_AlignMask = type->ty_AlignMask;
			d->d_Bytes = type->ty_Bytes;
		}

		/*
		 * If the declaration represents or contains an
		 * argument-lvalue or a pointer we have to add it to the
		 * SemGroup's SRBase list to properly reference or
		 * dereference the elements.  XXX only do this for
		 * non-global storage.
		 *
		 * If the declaration has LVALUE scope we must do the same
		 * because the ref is tracked.
		 */
		if ((d->d_Flags & DF_ONSRLIST) == 0) {
			if (d->d_Op != DOP_GLOBAL_STORAGE &&
			    (type->ty_Flags & TF_HASLVREF)) {
				d->d_SRNext = d->d_MyGroup->sg_SRBase;
				d->d_MyGroup->sg_SRBase = d;
				d->d_Flags |= DF_ONSRLIST;
			} else if (d->d_ScopeFlags & SCOPE_LVALUE) {
				d->d_SRNext = d->d_MyGroup->sg_SRBase;
				d->d_MyGroup->sg_SRBase = d;
				d->d_Flags |= DF_ONSRLIST;
			}
		}

		/*
		 * Deal with constructor/destructor chaining.  The chaining
		 * winds up being reversed and will be corrected by the
		 * caller.
		 *
		 * NOTE: Constructors and destructors might be referenced
		 * without the entire SG being resolved, so be sure to set
		 * the ABI flags here.
		 */
		if ((d->d_Flags & DF_ONCLIST) == 0 &&
		    (type->ty_Flags & TF_HASCONSTRUCT)) {
			d->d_CNext = d->d_MyGroup->sg_CBase;
			d->d_MyGroup->sg_CBase = d;
			d->d_MyGroup->sg_Flags |= SGF_ABICALL;
			d->d_Flags |= DF_ONCLIST;
		}
		if ((d->d_Flags & DF_ONDLIST) == 0 &&
		    (type->ty_Flags & TF_HASDESTRUCT)) {
			d->d_DNext = d->d_MyGroup->sg_DBase;
			d->d_MyGroup->sg_DBase = d;
			d->d_MyGroup->sg_Flags |= SGF_ABICALL;
			d->d_Flags |= DF_ONDLIST;
		}
		if ((d->d_Flags & DF_ONGLIST) == 0 &&
		    (type->ty_Flags &
		     (TF_HASGCONSTRUCT | TF_HASGDESTRUCT))) {
			d->d_GNext = d->d_MyGroup->sg_GBase;
			d->d_MyGroup->sg_GBase = d;
			d->d_MyGroup->sg_Flags |= SGF_GABICALL;
			d->d_Flags |= DF_ONGLIST;
		}
		break;
	default:
		dassert_decl(d, 0);
	}

	if (ok) {
		d->d_Flags &= ~DF_RESOLVING;
		d->d_Flags |= DF_RESOLVED;
	} else {
		deferDecl(d);
	}

	/*
	 * Post resolution flag resolving (to handle recursion)
	 */
	switch (d->d_Op) {
	case DOP_PROC:
		/*
		 * Create copies of procedures as they are needed (thus
		 * avoiding an XxY matrix effect).
		 */
		if ((st = d->d_ProcDecl.ed_OrigBody) == NULL) {
			Declaration *super = d->d_Super;

			while (super &&
			       super->d_ProcDecl.ed_OrigBody == NULL) {
				super = super->d_Super;
			}
			if (super) {
				st = super->d_ProcDecl.ed_OrigBody;
				d->d_ProcDecl.ed_OrigBody = st;
			}
		}
		if (st && (d->d_Flags & DF_DIDPULLDOWN) == 0) {
			/*
			 * Procedure is being used in the primary class it
			 * was defined in or pulled into from a super-class.
			 *
			 * Link the procedure body to the declaration and
			 * resolve the procedure body in the context of the
			 * correct class.
			 *
			 * NOTE: Alignment and storage is not resolved here.
			 */
			d->d_Flags |= DF_DIDPULLDOWN;
			st = DupStmt(d->d_MyGroup, st->st_Parent, st);
			dassert_stmt(st, d->d_ProcDecl.ed_ProcBody == NULL);
			d->d_ProcDecl.ed_ProcBody = st;
			st->st_ProcStmt.es_Decl = d;
			st->st_ProcStmt.es_Scope = d->d_Scope;
			ResolveStmt(d->d_ImportSemGroup, st, 0);
		}
		break;
	default:
		break;
	}

	/*
	 * __align(%d) scope qualifier, override the type's alignment
	 */
	if ((d->d_Scope.s_Flags & SCOPE_ALIGN) &&
	    d->d_Scope.s_AlignOverride)
		d->d_AlignMask = d->d_Scope.s_AlignOverride - 1;

	/*
	 * Make sure that the semantic group associated with the
	 * declaration is resolved.
	 */
	sg = d->d_MyGroup;
	if (sg && (sg->sg_Op == SG_MODULE || sg->sg_Op == SG_CLASS)) {
		/* SG_COMPOUND too?  maybe not */
		ResolveSemGroup(d->d_MyGroup, 0);
	}
}
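/*
 * Two worked examples of the contracts implemented by ResolveExp() below
 * (source fragments hypothetical):
 *
 * itype hint: for the statement 'a = 4;' the ST_Exp case hands TOK_ASS an
 * itype of &VoidType with RESOLVE_AUTOCAST, so the assignment sets
 * EXF_RET_VOID rather than materializing a result value.
 *
 * short-circuit constants: in TOK_ANDAND, a constant-false lhs is
 * interpreted via resolveConstExpBool() and terminates the operation, so
 * the whole expression is marked EXF_PROBCONST even when the rhs is not
 * constant; TOK_OROR does the same for a constant-true lhs.
 */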
/*
 * ResolveExp() - resolve expression
 *
 * Resolve an expression.  We are expected to resolve all ex_Type's for
 * the expression tree as well as expected to track down operators and
 * base identifiers.
 *
 * itype is a type hint.  If non-NULL, the caller would like our
 * expression to return the specified type.  There are a few special
 * cases:
 *
 * EXF_REQ_ARRAY - when OBRACKET requests an array optimization it passes
 * a post-array-indexed typehint (as if you had done the optimization).
 * You must ignore itype if you are unable to do the optimization.
 *
 * NOTE: Even rvalues may have refstor side-effects at run-time.
 */
#define exFlags		exp->ex_Flags
#define exFlags2	exp->ex_Flags2
#define exType		exp->ex_Type
#define exToken		exp->ex_Token
#define exDecl		exp->ex_Decl
#define exLhs		exp->ex_Lhs
#define exVisibility	exp->ex_Visibility
#define exRhs		exp->ex_Rhs
#define exId		exp->ex_Id
#define exStr		exp->ex_Str

static Exp *
ResolveExp(SemGroup *isg, SemGroup *sg, Exp *exp, Type *itype, int flags)
{
	int couldconst;

	/*
	 * Expressions can only be resolved once.  If we hit this assertion
	 * it likely means someone forgot to DupExp() an expression
	 * somewhere.
	 */
	dassert_exp(exp, (exFlags & EXF_RESOLVED) == 0);
	if (exp->ex_Flags & EXF_DUPEXP)
		exp = DupExp(sg, exp);
	couldconst = 0;

	/*
	 * Ensure that the cast target type hint is resolved.
	 */
	if (itype)
		ResolveType(itype, NULL, 0);

	/*
	 * note: certain cases below call other resolver functions and
	 * assume that ex* variables are unchanged.
	 */
	dassert((exFlags & EXF_DUPEXP) || (exFlags & EXF_RESOLVED) == 0);

	switch (exToken) {
	case TOK_ASS:
		/*
		 * An assignment.  Note that we optimize void returns (such
		 * as when an assignment is a statement like 'a = 4;' ...
		 * the result of the assignment is cast to void.
		 *
		 * NOTE: Left-hand-side must be an LVALUE, return type
		 * inherits this feature unless the parent turns off the
		 * bit so the TOK_ASS run-time must deal with that.
		 */
		exLhs = ResolveExp(isg, sg, exLhs, NULL,
				   flags & ~RESOLVE_AUTOCAST);
		//dassert_exp(exLhs, exLhs->ex_Type->ty_SQFlags & SF_LVALUE);
		dassert_exp(exLhs, exLhs->ex_Flags2 & EX2F_LVALUE);
		exRhs = ResolveExp(isg, sg, exRhs,
				   DEL_LVALUE(exLhs->ex_Type),
				   flags | RESOLVE_AUTOCAST);
		if (exLhs->ex_Type->ty_SQFlags & SF_CONST) {
			dfatal_exp(exp, TOK_ERR_READONLY, NULL);
		}

		/* AssExp handles this optimization */
		if (itype == &VoidType) {
			exType = itype;
			exFlags |= EXF_RET_VOID;
		} else {
			exType = exLhs->ex_Type;
		}
		break;
	case TOK_ANDAND:
		/*
		 * NOTE: BoolType global implies an rvalue.
		 */
		couldconst = 1;
		exLhs = ResolveExp(isg, sg, exLhs, &BoolType,
				   flags | RESOLVE_AUTOCAST);
#if 1
		/*
		 * If left-side can terminate the operation, mark the
		 * expression as PROBCONST for the interpreter and code
		 * generator (allowing the rhs to not be a constant).
		 */
		if (exLhs->ex_Flags & (EXF_CONST | EXF_PROBCONST)) {
			TmpData ts;

			exLhs = resolveConstExpBool(isg, sg, exLhs,
						    flags, &ts);
			if (ts.ts_Bool == 0)
				exFlags |= EXF_PROBCONST;
		}
#endif
		/*
		 * Resolve rhs, and we can also flag PROBCONST if both
		 * sides are constants.
		 */
		exRhs = ResolveExp(isg, sg, exRhs, &BoolType,
				   flags | RESOLVE_AUTOCAST);
		if ((exLhs->ex_Flags & (EXF_CONST | EXF_PROBCONST)) &&
		    (exRhs->ex_Flags & (EXF_CONST | EXF_PROBCONST))) {
			exFlags |= EXF_PROBCONST;
		}
		exType = &BoolType;
		break;
	case TOK_OROR:
		/*
		 * NOTE: BoolType global implies an rvalue.
		 */
		couldconst = 1;
		exLhs = ResolveExp(isg, sg, exLhs, &BoolType,
				   flags | RESOLVE_AUTOCAST);
#if 1
		/*
		 * If left-side can terminate the operation, mark the
		 * expression as PROBCONST for the interpreter and code
		 * generator (allowing the rhs to not be a constant).
		 */
		if (exLhs->ex_Flags & (EXF_CONST | EXF_PROBCONST)) {
			TmpData ts;

			exLhs = resolveConstExpBool(isg, sg, exLhs,
						    flags, &ts);
			if (ts.ts_Bool)
				exFlags |= EXF_PROBCONST;
		}
#endif
		/*
		 * Resolve rhs, and we can also flag PROBCONST if both
		 * sides are constants.
		 */
		exRhs = ResolveExp(isg, sg, exRhs, &BoolType,
				   flags | RESOLVE_AUTOCAST);
		if ((exLhs->ex_Flags & (EXF_CONST | EXF_PROBCONST)) &&
		    (exRhs->ex_Flags & (EXF_CONST | EXF_PROBCONST))) {
			exFlags |= EXF_PROBCONST;
		}
		exType = &BoolType;
		break;
	case TOK_DECL:
		/*
		 * This synthesized token occurs when we are able to
		 * collapse a structural indirection or dotted element into
		 * a declaration.  For example, 'module.routine'.
		 */
		/* XXX couldconst? */
		break;
	case TOK_DOT:
	case TOK_STRIND:
		/*
		 * Structural field access.  The left hand side may be an
		 * object (class or compound), a class type, or a compound
		 * type.
		 *
		 * A dotted access requires an lvalue on the left hand side
		 * if the left hand side represents storage.
		 *
		 * The result will be an lvalue if the right hand side
		 * represents storage.  We only loop if the right hand side
		 * is an alias replacement.
		 */
		{
			runeid_t id;
			Declaration *d;
			SemGroup *sg2;
			Type *type;
			int globalOnly = 0;
			int s;
			int visibility;
			int isRefTo = 0;
			int procedureOnly = 0;
			int eno = TOK_ERR_ID_NOT_FOUND;

			/*
			 * NOTE: Hint must 'always happen' since we may be
			 * modifying an expression that will later be Dup'd.
			 *
			 * NOTE: Lhs is always an lvalue for TOK_DOT, but
			 * does not have to be for TOK_STRIND.
			 */
			exLhs->ex_Flags |= EXF_REQ_TYPE;
			if (exToken == TOK_DOT)
				exLhs->ex_Flags |= exFlags & EXF_ADDRUSED;
			exLhs = ResolveExp(isg, sg, exLhs, NULL,
					   flags & ~RESOLVE_AUTOCAST);

			/*
			 * It shouldn't be possible for the RHS to have
			 * been turned into a TOK_SEMGRP_ID prior to
			 * resolution (XXX verify).
			 */
			dassert(exRhs->ex_Token != TOK_SEMGRP_ID);
			dassert_exp(exRhs, exRhs->ex_Token == TOK_STRUCT_ID);
			exRhs = ResolveExp(isg, sg, exRhs, NULL,
					   flags & ~RESOLVE_AUTOCAST);
			id = exRhs->ex_Id;
			type = exLhs->ex_Type;

			/*
			 * Calculate scope and SemGroup to search.  Note
			 * that it is legal to do a structural '.'
			 * selection on a pointer, but it works differently
			 * than indirecting through a pointer via '->'.  In
			 * the case of '.' on a pointer, we first search
			 * the system Pointer class.
			 */
			if (exLhs->ex_Flags & EXF_RET_TYPE) {
				globalOnly = 1;
			}

			/*
			 * Figure out the base type used to look-up the
			 * identifier.  An identifier that resolves into a
			 * procedure winds up only being a hint for a
			 * reference type.
			 */
			if (exToken == TOK_STRIND) {
				switch (type->ty_Op) {
				case TY_PTRTO:
					type = type->ty_RawPtrType.et_Type;
					break;
				case TY_REFTO:
					type = type->ty_RefType.et_Type;
					isRefTo = 1;
					break;
				default:
					dassert_exp(exp, 0);
					/* not reached */
				}
			}
again:
			switch (type->ty_Op) {
			case TY_CLASS:
				sg2 = type->ty_ClassType.et_SemGroup;
				break;
			case TY_COMPOUND:
				sg2 = type->ty_CompType.et_SemGroup;
				break;
			case TY_ARGS:
				sg2 = type->ty_ArgsType.et_SemGroup;
				break;
			case TY_VAR:
				sg2 = type->ty_VarType.et_SemGroup;
				break;
			case TY_IMPORT:
				sg2 = type->ty_ImportType.et_SemGroup;
				break;
			case TY_PTRTO:	/* YYY */
				dassert_exp(exp,
					    PointerType.ty_Op == TY_CLASS);
				sg2 = PointerType.ty_ClassType.et_SemGroup;
				break;
			case TY_REFTO:	/* YYY */
				dassert_exp(exp,
					    ReferenceType.ty_Op == TY_CLASS);
				sg2 = ReferenceType.ty_ClassType.
				      et_SemGroup;
				break;
			default:
				/*
				 * Possibly a pointer, aka ptr.NULL
				 */
				sg2 = NULL;
			}
			visibility = exLhs->ex_Visibility;

			/*
			 * Locate the identifier normally, via its type.
			 * ty_Visibility is the initial visibility (scope)
			 * that the semantic search should use in locating
			 * the identifier.
			 */
			if (sg2) {
				runeid_t ary[2] = { id, 0 };
				int level;

				if (exLhs->ex_Token == TOK_ID ||
				    exLhs->ex_Token == TOK_DECL) {
					if (exLhs->ex_Decl->d_Search) {
						level = exLhs->ex_Decl->
							d_Search->sg_Level;
					} else {
						level = sg2->sg_Level;
					}

					/*
					 * SUPER (super.blah and
					 * super->blah) handling
					 */
					if (exLhs->ex_Flags & EXF_SUPER) {
						if (level == 0) {
							fprintf(stderr,
								"No "
								"superclass "
								"available"
								"\n");
							dassert_exp(exp, 0);
						}
						--level;
					}
				} else {
					/* may be -1 */
					level = sg2->sg_Level;
				}
				visibility &= type->ty_Visibility;
				d = FindDeclPath(&exp->ex_LexRef, NULL,
						 sg2, NULL, ary, FDC_NOBACK,
						 &visibility, level, &eno);
				/*
				 * SUPER (super.blah and super->blah)
				 * handling
				 *
				 * If the super is visible and a procedure
				 * we just found our own refinement, not
				 * the superclass method.  This is because
				 * there is no 'superclass method' per se,
				 * refinements *REPLACE* superclass
				 * declarations and inherit the
				 * superclass's level.  However, we still
				 * want to be able to chain method calls so
				 * what we do instead is go through and
				 * find the procedure that we smacked when
				 * we did the refinement.  This procedure
				 * has already been conveniently brought
				 * into the subclass context as an
				 * 'invisible' entity at the same d_Level.
				 *
				 * The run-time detects the EXF_SUPER case
				 * and knows it can use a static call
				 * instead of a dynamic call, so we need to
				 * get this right.
				 */
				if ((exLhs->ex_Flags & EXF_SUPER) && d &&
				    d->d_Op == DOP_PROC &&
				    (d->d_ScopeFlags & SCOPE_ALL_VISIBLE)) {
					runeid_t id2 = d->d_Id;
					SemGroup *olevel = d->d_Level;

					while ((d = RUNE_NEXT(d, d_Node)) !=
					       NULL) {
						if (d->d_Id == id2 &&
						    d->d_Level == olevel &&
						    d->d_Op == DOP_PROC) {
							break;
						}
					}
				}
			} else {
				d = NULL;
			}

			if (d && procedureOnly && d->d_Op != DOP_PROC) {
				fprintf(stderr,
					"PTR.ELEMENT may be used for "
					"special pointer method calls, but "
					"not to access storage elements.  "
					"Use PTR->ELEMENT instead\n");
				dassert_exp(exp, 0);
			}

			/*
			 * If referencing actual storage the storage must
			 * be declared global.
			 */
			if (d && globalOnly && (d->d_Op & DOPF_STORAGE) &&
			    (d->d_ScopeFlags & SCOPE_GLOBAL) == 0) {
				char buf[RUNE_IDTOSTR_LEN];

				fprintf(stderr,
					"%s is not global.  Only globals "
					"can be accessed through a type\n",
					runeid_text(d->d_Id, buf));
				dassert_exp(exp, 0);
			}

			if (d) {
				/*
				 * Identifier found.  Note that if we are
				 * going through a reference type the
				 * declaration is not the actual one we use
				 * at run time.  It's just a template.
				 */
				ResolveDecl(d, 0);
				exDecl = d;
				exVisibility = visibility;

				if (exFlags & EXF_REQ_ADDROF)
					d->d_Flags |= DF_ADDROF;
				if (exFlags & EXF_ADDRUSED)
					d->d_Flags |= DF_ADDRUSED;

				/*
				 * Misc.
				 */
				switch (d->d_Op) {
				case DOP_PROC:
					exType = d->d_ProcDecl.ed_Type;
					if (d->d_ProcDecl.ed_Type->
					    ty_SQFlags & SF_METHOD) {
						/*
						 * Method call, do not
						 * collapse the expression
						 * into a direct
						 * declaration because the
						 * object is needed later.
						 */
						dassert_exp(exLhs,
						    (exLhs->ex_Flags &
						     EXF_RET_TYPE) == 0);
					} else if (isRefTo) {
						/*
						 * Call via reference.  The
						 * lhs is required to
						 * evaluate the actual
						 * method call at run-time.
						 */
					} else {
						/*
						 * Global method call or
						 * normal call.  For the
						 * global method case the
						 * lhs is not needed
						 * because the parser
						 * entered the first
						 * argument as a type
						 * already.
						 *
						 * Degenerate into a
						 * TOK_DECL.  We depend on
						 * this later.  (mark
						 * ex_Type as parse-time
						 * for DupExp).
						 */
						exFlags &= ~EXF_BINARY;
						exFlags |= EXF_PARSE_TYPE;
						exLhs = NULL;
						exRhs = NULL;
						exToken = TOK_DECL;
					}
					break;
				case DOP_ALIAS:
					exType = ResolveTypeSimple(
					    d->d_AliasDecl.ed_Type);
					dassert_decl(d,
					    d->d_AliasDecl.ed_OrigAssExp !=
					    NULL);

					/*
					 * NOTE: exLhs must be NULL if exp
					 * is unresolved.  exp tree
					 * duplications do not duplicate
					 * the alias's exLHS even though
					 * UNARY is set.
					 *
					 * DupExp is absolutely required
					 * due to the alias's target
					 * context being different for each
					 * consumer.
					 */
					dassert_exp(exp,
						    exRhs->ex_Lhs == NULL);
					exRhs->ex_Flags |= EXF_ALIAS |
							   EXF_UNARY;
					exRhs->ex_Lhs = DupExp(sg2,
					    d->d_AliasDecl.ed_OrigAssExp);
					exRhs->ex_Lhs = ResolveExp(isg, sg2,
					    exRhs->ex_Lhs, exType,
					    flags | RESOLVE_AUTOCAST);
					exFlags2 |= exRhs->ex_Flags2 &
						    EX2F_LVALUE;
					break;
				case DOP_ARGS_STORAGE:
				case DOP_STACK_STORAGE:
				case DOP_GLOBAL_STORAGE:
				case DOP_GROUP_STORAGE:
					/*
					 * Set type.  The Rhs is a
					 * STRUCT_ID and does not require a
					 * type to be assigned to it.
					 *
					 * Return type is always an LVALUE,
					 * parent may adjust.
					 */
					//exType = ADD_LVALUE(
					//	d->d_StorDecl.ed_Type);
					exType = ResolveTypeSimple(
					    d->d_StorDecl.ed_Type);
					exFlags2 |= EX2F_LVALUE;

					/*
					 * Pull up global constants
					 */
					if (exToken == TOK_DOT &&
					    d->d_Op == DOP_GLOBAL_STORAGE &&
					    (d->d_ScopeFlags &
					     SCOPE_READONLY) &&
					    (exLhs->ex_Flags &
					     EXF_RET_TYPE)) {
						exFlags |= EXF_PROBCONST;
					}
					break;
				case DOP_TYPEDEF:
					/*
					 * XXX make sure this is only used
					 * in the lhs of a structural
					 * reference. XXX
					 *
					 * XXX what if we went through a
					 * TY_REFTO type?  This type will
					 * be wrong.
					 *
					 * collapse the exp node.
					 */
					exType = d->d_TypedefDecl.ed_Type;
					exToken = TOK_DECL;
					exFlags &= ~EXF_BINARY;
					break;
				case DOP_IMPORT:
					/*
					 * Do not collapse an import, we
					 * require more resolution.  e.g.
					 * import.<id> will be collapsed,
					 * but 'import' cannot be.
					 */
					if (exFlags & EXF_REQ_TYPE) {
						exType = AllocImportType(
						    &d->d_ImportDecl.
						     ed_SemGroup->
						     sg_ClassList,
						    d->d_ImportDecl.
						     ed_SemGroup,
						    visibility);
						exFlags |= EXF_RET_TYPE;
						break;
					}
					break;
				case DOP_CLASS:
					/*
					 * Do not collapse a class, we
					 * require more resolution.  e.g.
					 * class.<id> will be collapsed,
					 * but 'class' cannot be.
					 */
					if (exFlags & EXF_REQ_TYPE) {
						exType = d->d_ClassDecl.
						    ed_SemGroup->
						    sg_ClassType;
						exType =
						    TypeToVisibilityType(
							exType, visibility);
						exFlags |= EXF_RET_TYPE;
						break;
					}
					exType = ResolveTypeSimple(exType);
					exFlags2 |= EX2F_LVALUE;
					break;
				default:
					dassert_exp(exp, 0);
					break;
				}
				if (d->d_Op == DOP_PROC) {
					if (d->d_ScopeFlags & SCOPE_PURE)
						couldconst = 1;
				} else if (exType->ty_SQFlags & SF_CONST) {
					couldconst = 1;
				}
			} else if ((s = SpecialSemGroupGet(id)) != 0) {
				/*
				 * Identifier not found, check for a
				 * special identifier.
				 */
				exRhs->ex_Token = TOK_SEMGRP_ID;
				exRhs->ex_Int32 = s;
				exDecl = NULL;

				switch (s) {
				case SPECIAL_NULL:
					dassert(type->ty_Op == TY_PTRTO ||
						type->ty_Op == TY_REFTO);
					/* NULL is not an lvalue */
					/* exType = DEL_LVALUE(type); */
					exType = type;
					exFlags |= EXF_NULL;
					exFlags2 &= ~EX2F_LVALUE;
					break;
				case SPECIAL_COUNT:
					dassert(type->ty_Op != TY_PTRTO &&
						type->ty_Op != TY_REFTO);
					exType = &Int32Type;
					break;
				case SPECIAL_TYPE:
				case SPECIAL_DATA:
					/*
					 * typeof(self.__data[]) vs
					 * (cast)self.__data[]
					 */
					dassert(type->ty_Op != TY_PTRTO &&
						type->ty_Op != TY_REFTO);
					dassert(exFlags & EXF_REQ_ARRAY);
					exFlags |= EXF_RET_ARRAY;
					if (s == SPECIAL_TYPE) {
						exFlags |= EXF_RET_TYPE;
						exType =
						    &DynamicLValueType;
					} else if (exFlags &
						   EXF_REQ_TYPE) {
						exFlags |= EXF_RET_TYPE;
						exType =
						    &DynamicLValueType;
					} else if (itype) {
						exType = itype;
					} else {
						/*
						 * dynamic data must be
						 * cast
						 */
						dassert_exp(exp, 0);
						exType =
						    &DynamicLValueType;
					}
					break;
				case SPECIAL_VAR_COUNT:
					dassert(type->ty_Op != TY_PTRTO &&
						type->ty_Op != TY_REFTO);
					exType = &Int32Type;
					sg->sg_Flags |= SGF_ABICALL;
					break;
				case SPECIAL_VAR_TYPE:
				case SPECIAL_VAR_DATA:
					/*
					 * typeof(self.__vardata[]) vs
					 * (cast)self.__vardata[]
					 */
					dassert(type->ty_Op != TY_PTRTO &&
						type->ty_Op != TY_REFTO);
					dassert(exFlags & EXF_REQ_ARRAY);
					exFlags |= EXF_RET_ARRAY;
					if (s == SPECIAL_VAR_TYPE) {
						exFlags |= EXF_RET_TYPE;
						exType =
						    &DynamicLValueType;
					} else if (exFlags &
						   EXF_REQ_TYPE) {
						exFlags |= EXF_RET_TYPE;
						exType =
						    &DynamicLValueType;
					} else if (itype) {
						exType = itype;
					} else {
						/*
						 * dynamic data must be
						 * cast
						 */
						dassert_exp(exp, 0);
						exType =
						    &DynamicLValueType;
					}
					sg->sg_Flags |= SGF_ABICALL;
					break;
				case SPECIAL_TYPEID:
					exType = &Int32Type;
					break;
				case SPECIAL_TYPESTR:
					exType = &StrType;
					break;
				default:
					dassert_exp(exRhs, 0);
					break;
				}
			} else {
				/*
				 * This is nasty, I admit.  If we have a
				 * pointer or reference type try again.
				 */
				exDecl = NULL;
				if (type->ty_Op == TY_REFTO) {
					type = type->ty_RefType.et_Type;
					procedureOnly = 1;
					goto again;
				}
				if (type->ty_Op == TY_PTRTO) {
					type = type->ty_RawPtrType.et_Type;
					procedureOnly = 1;
					goto again;
				}
				dfatal_exp(exRhs, eno, NULL);
				/* NOT REACHED */
			}
			ResolveTypeSimple(exType);
		}
		dassert_exp(exp, exType != NULL);
		break;
	case TOK_STRUCT_ID:
		/*
		 * NOTE: unresolved identifiers should not have alias
		 * expression sub-tree duplications attached to them.
		 * assert it.
		 */
		dassert_exp(exp, exLhs == NULL);
		break;
	case TOK_OPER:
		/*
		 * NOTE: LVALUE/RVALUE for elements and return type depends
		 * on the operator.  Operator functions normally
		 * self-optimize the cases at run-time.
		 */
		couldconst = 1;
		exp = resolveExpOper(isg, sg, exp, itype,
				     flags & ~RESOLVE_AUTOCAST);
		break;
	case TOK_PTRIND:
		/*
		 * Indirect through an expression.
		 *
		 * Return type is typically an LVALUE (if representing
		 * storage).  Exp parent might turn it off so run-time must
		 * test.  Lhs may or may not be.
		 */
		{
			Type *type;

			exLhs = ResolveExp(isg, sg, exLhs, NULL,
					   flags & ~RESOLVE_AUTOCAST);
			type = exLhs->ex_Type;

			switch (type->ty_Op) {
			case TY_REFTO:
				fprintf(stderr,
					"You cannot use '*' on a reference "
					"type\n");
				dassert_exp(exLhs, 0);
#if 0
				//exType = ADD_LVALUE(
				//	type->ty_RefType.et_Type);
				exType = type->ty_RefType.et_Type;
				exFlags2 |= EX2F_LVALUE;
#endif
				break;
			case TY_PTRTO:
				//exType = ADD_LVALUE(
				//	type->ty_RawPtrType.et_Type);
				exType = type->ty_RawPtrType.et_Type;
				exFlags2 |= EX2F_LVALUE;
				break;
			default:
				dassert_exp(exLhs, 0);
				break;
			}
		}
		break;
	case TOK_ADDR:
		/*
		 * Take the address of an (LVALUE) expression.  Returns an
		 * RVALUE.  Allow for a short-cut optimization which
		 * replaces the TOK_ADDR sequence with its argument in the
		 * &ary[n] case.
		 */
		{
			Type *type;

			/*
			 * Hint must 'always happen' since we may be
			 * modifying an expression that will later be
			 * Dup'd.
			 *
			 * It is sufficient to test EXF_ADDRUSED to
			 * determine if SRSGET/SRSPUT is needed for the
			 * procedure.
			 */
			exLhs->ex_Flags |= EXF_REQ_ADDROF | EXF_ADDRUSED;
			exLhs = ResolveExp(isg, sg, exLhs, NULL,
					   flags & ~RESOLVE_AUTOCAST);
			if (exLhs->ex_Flags & EXF_RET_ADDROF) {
				exp = exLhs;
			} else {
				type = exLhs->ex_Type;
				//dassert_exp(exLhs,
				//	type->ty_SQFlags & SF_LVALUE);
				dassert_exp(exLhs,
					    exLhs->ex_Flags2 & EX2F_LVALUE);
				exType = ResolveTypeSimple(
				    TypeToRawPtrType(type));
				/* DEL_LVALUE() not needed here */
			}
		}
		break;
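	/*
	 * Worked example for the TOK_OBRACKET case below (hypothetical
	 * source fragment): for '&buf[i]' the TOK_ADDR case above sets
	 * EXF_REQ_ADDROF on the TOK_OBRACKET node, which then returns a
	 * raw pointer directly (EXF_RET_ADDROF) instead of forming an
	 * lvalue and taking its address; a plain 'buf[i]' takes the
	 * unoptimized path and yields an lvalue.
	 */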
*/ if (exLhs->ex_Type && exLhs->ex_Type->ty_Op != TY_ARYOF) exLhs->ex_Flags &= ~(EXF_ADDRUSED | EXF_REQ_ADDROF); if (MatchType(&IntegralType, exRhs->ex_Type) >= SG_COMPAT_FAIL) { dfatal_exp(exRhs, TOK_ERR_EXPECTED_INTEGRAL_TYPE, NULL); } if (exLhs->ex_Flags & EXF_RET_ARRAY) { /* * __data and __vardata specials */ /* don't modify ex_Token, EXF_DUPEXP might be set */ /* exp->ex_Token = TOK_ERR_EXP_REMOVED; */ return (exLhs); } else if (exFlags & EXF_REQ_ADDROF) { /* * &ary[i] optimization - allows us to create a bounded pointer * (returns an RVALUE). * * XXX now we just return a raw pointer */ Type *type; exFlags |= EXF_RET_ADDROF; dassert((exLhs->ex_Flags & EXF_RET_TYPE) == 0); exLhs->ex_AuxExp = NULL; type = exLhs->ex_Type; switch (type->ty_Op) { case TY_ARYOF: type = type->ty_AryType.et_Type; break; case TY_PTRTO: type = type->ty_RawPtrType.et_Type; break; case TY_REFTO: /* Cannot take address of a reference type */ dassert_exp(exp, 0); break; } exType = ResolveType(TypeToRawPtrType(type), NULL, 0); /* returns an RVALUE */ } else { /* * Unoptimized array lookup, returns an lvalue */ Type *type; dassert((exLhs->ex_Flags & EXF_RET_TYPE) == 0); exLhs->ex_AuxExp = NULL; type = exLhs->ex_Type; switch (type->ty_Op) { case TY_ARYOF: type = type->ty_AryType.et_Type; break; case TY_PTRTO: type = type->ty_RawPtrType.et_Type; break; case TY_REFTO: fprintf(stderr, "Cannot index a reference type\n"); dassert_exp(exp, 0); break; } //exType = ADD_LVALUE(type); exType = ResolveTypeSimple(type); exFlags2 |= EX2F_LVALUE; /* returns an LVALUE */ } break; case TOK_OPAREN: dassert_exp(exp, 0); /* XXX */ break; case TOK_DSTRING: case TOK_BSTRING: /* * XXX we should return a bounded pointer here. */ exType = &StrType; exFlags |= EXF_CONST; couldconst = 1; if ((exFlags2 & EX2F_ESCDONE) == 0) { string_t str; exFlags2 |= EX2F_ESCDONE; str = StrTableEscapeQuotedString(exStr, strlen(exStr), 1); ReplaceStrTable(&exp->ex_Str, str); } break; case TOK_SSTRING: /* * Set EXF_PARSE_TYPE to make sure that ex_Type survives DupExp(). * * exp->u.uint32 is always set to the single-quoted result * (from the lexer) */ couldconst = 1; exFlags |= EXF_CONST | EXF_PARSE_TYPE; dassert(exType != NULL); break; case TOK_INTEGER: /* * Integer and related type is already loaded into the exp */ couldconst = 1; exFlags |= EXF_CONST; dassert(exType != NULL); break; case TOK_FLOAT: /* * Float and related type is already loaded into the exp */ couldconst = 1; exFlags |= EXF_CONST; dassert(exType != NULL); break; case TOK_VOIDEXP: exType = &VoidType; break; case TOK_SELF: /* * The self identifier represents the current procedure's arguments. * A varargs procedure will actually be called with an extended * version of this type, but for resolution purposes we can use this * type. * * This is an LVALUE to support things like self.new() XXX. */ //exType = ADD_LVALUE(resolveArgsType(sg, flags)); exType = ResolveTypeSimple(resolveArgsType(sg, flags)); exFlags2 |= EX2F_LVALUE; break; case TOK_DOLLAR: /* * The '$' identifier represents the current procedure's return * storage. */ if (sg->sg_Flags & SGF_DIDRESULT) dfatal_exp(exp, TOK_ERR_RESULT_SEQUENCING, NULL); //exType = ADD_LVALUE(resolveReturnType(sg, flags)); exType = ResolveTypeSimple(resolveReturnType(sg, flags)); exFlags2 |= EX2F_LVALUE; break; case TOK_ID: case TOK_CLASSID: /* * Look up the identifier. The returned declaration could represent a * class, typedef, module, or storage, but for this case we only * allow storage or a constant.
Since we are starting from our own * semantic group, visibility is initially ALL (private, library, and * public). * * The identifier might represent something at a higher scoping * layer. For example, a nested procedure accessing a variable in * the parent procedure or a method procedure in a class accessing an * element of the object. * * It is also possible for the current execution scoping layer (sg) * to have a secondary contextual layer from which global constants * can be accessed. This is typically set when resolving procedure * arguments for procedures called through objects or types. Only * type globals can be accessed via this shortcut. * * This returns an LVALUE if the id represents storage. */ { runeid_t ary[2]; int eno = TOK_ERR_ID_NOT_FOUND; exDecl = NULL; /* * Special case 'super'. XXX TY_REFTO * * Make an in-place change to the expression structure. 'super' * is actually 'this' with the EXF_SUPER flag set. */ if (exId == RUNEID_SUPER) { exId = RUNEID_THIS; exFlags |= EXF_SUPER; } ary[0] = exp->ex_Id; ary[1] = 0; exDecl = FindDeclPath(&exp->ex_LexRef, isg, sg, NULL, ary, FDC_NULL, &exVisibility, -1, &eno); if (exDecl == NULL) { exDecl = FindDeclPathAltContext(&exp->ex_LexRef, isg, sg, NULL, ary, FDC_NULL, &exVisibility, -1, &eno); } if (exDecl == NULL) { dfatal_exp(exp, eno, NULL); } /* * The EXF_ADDRUSED flag is set by TOK_ADDR, possibly propagated down via * TOK_DOT. Use this to flag that the stack context might be * used outside of its normal life. LValue scoped declarations * do not count because they have their own RefStor. * * (This code is primarily responsible for causing SRSGET and * SRSPUT instructions to be emitted). */ if ((exFlags & EXF_ADDRUSED) && (exDecl->d_Scope.s_Flags & SCOPE_LVALUE) == 0) { exDecl->d_MyGroup->sg_Flags |= SGF_ADDRUSED; } /* * We have to resolve the declaration here; we no longer have the * redundancy to resolve it elsewhere. */ if ((exDecl->d_Flags & DF_RESOLVING) == 0) ResolveDecl(exDecl, 0); } switch (exDecl->d_Op) { case DOP_ARGS_STORAGE: if (sg->sg_Flags & SGF_DIDRESULT) dfatal_exp(exp, TOK_ERR_RESULT_SEQUENCING, NULL); /* fall through */ case DOP_STACK_STORAGE: case DOP_GLOBAL_STORAGE: case DOP_GROUP_STORAGE: /* * Storage identifiers are lvalues. * * Try to delay this step, giving the language more flexibility * in avoiding resolver loops from interdependencies that can * cause it to fail. * * We can't delay this step when resolving an expression that the * resolver needs an actual constant result for. */ //exType = ADD_LVALUE(exDecl->d_StorDecl.ed_Type); exType = ResolveTypeSimple(exDecl->d_StorDecl.ed_Type); exFlags2 |= EX2F_LVALUE; if (exFlags & EXF_ADDRUSED) exDecl->d_Flags |= DF_ADDRUSED; if (exFlags & EXF_REQ_ADDROF) exDecl->d_Flags |= DF_ADDROF; if (exType->ty_SQFlags & SF_CONST) couldconst = 1; break; case DOP_ALIAS: /* * Aliases are rvalues (even if they could be lvalues). * XXX actually allow them to be lvalues too. */ exType = ResolveTypeSimple(exDecl->d_AliasDecl.ed_Type); exFlags |= EXF_ALIAS | EXF_UNARY; /* * NOTE: exLhs must be NULL if exp is unresolved. exp tree * duplications do not duplicate the alias's exLHS even though * UNARY is set. However, because we probably have not actually * duplicated exp yet, we have to clear the field in our pre-dup * copy. * * NOTE: DupExp is absolutely required due to the alias's target * context being different for each consumer.
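* Illustrative example (hypothetical alias): given 'alias area = width * height', each consumer duplicates and re-resolves the 'width * height' subtree here because 'width' and 'height' must bind in that consumer's own context.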
*/ if (exFlags & EXF_DUPEXP) exLhs = NULL; dassert_exp(exp, exLhs == NULL); exLhs = DupExp(sg, exDecl->d_AliasDecl.ed_OrigAssExp); exLhs = ResolveExp(isg, sg, exLhs, exType, flags | RESOLVE_AUTOCAST); /* * Inherit EXF_NULL (NULL pointer special) through the alias, * otherwise it will not be assignable to arbitrary pointers. */ exFlags |= exLhs->ex_Flags & EXF_NULL; exFlags2 |= exLhs->ex_Flags2 & EX2F_LVALUE; break; case DOP_PROC: /* * A procedural identifier. * * Note: procedural pointers cannot be changed so they are not * lvalues. */ dassert_exp(exp, (exFlags & EXF_REQ_PROC)); exType = exDecl->d_ProcDecl.ed_Type; if (exDecl->d_ScopeFlags & SCOPE_PURE) couldconst = 1; break; case DOP_TYPEDEF: if (exFlags & EXF_REQ_TYPE) { exType = exDecl->d_TypedefDecl.ed_Type; exFlags |= EXF_RET_TYPE; break; } dassert_exp(exp, 0); break; case DOP_CLASS: if (exFlags & EXF_REQ_TYPE) { exType = exDecl->d_ClassDecl.ed_SemGroup->sg_ClassType; exType = TypeToVisibilityType(exType, exVisibility); exFlags |= EXF_RET_TYPE; break; } dassert_exp(exp, 0); break; case DOP_IMPORT: if (exFlags & EXF_REQ_TYPE) { exType = AllocImportType( &exDecl->d_ImportDecl.ed_SemGroup->sg_ClassList, exDecl->d_ImportDecl.ed_SemGroup, exVisibility); exFlags |= EXF_RET_TYPE; break; } dassert_exp(exp, 0); break; default: dassert_exp(exp, 0); } break; case TOK_NOT: /* * NOTE: BoolType global implies an rvalue. */ couldconst = 1; exLhs = ResolveExp(isg, sg, exLhs, &BoolType, flags | RESOLVE_AUTOCAST); break; case TOK_TYPE: if (exFlags & EXF_REQ_TYPE) { ResolveType(exType, NULL, 0); exFlags |= EXF_RET_TYPE; } else { dassert_exp(exp, 0); } break; case TOK_CAST: /* * User cast (or maybe the parser inserted it). Try to resolve the * expression with the requested type hint but tell ResolveExp() not * to force the cast. * * Then check the result. If ResolveExp() was not able to optimize * the requested cast then resolve the cast. * * If the types are compatible we still keep the TOK_CAST node in * place for the moment. XXX we really need to formalize how * ex_Type is set, Similar vs Exact. * * NOTE: Cast results are always an RVALUE. XXX validate here. */ couldconst = 1; if ((exFlags & EXF_PARSE_TYPE) == 0) { exRhs->ex_Flags |= EXF_REQ_TYPE; exRhs = ResolveExp(isg, sg, exRhs, NULL, flags & ~RESOLVE_AUTOCAST); exType = exRhs->ex_Type; } exLhs = ResolveExp(isg, sg, exLhs, exType, flags & ~RESOLVE_AUTOCAST); if (SimilarType(exType, exLhs->ex_Type) == 0) { exp = resolveExpCast(isg, sg, exLhs, exType, flags); } #if 0 /* propagate NULL flag to allow cast to any pointer type */ if (exLhs->ex_Flags & EXF_NULL) printf("LHS NULL\n"); exp->ex_Flags |= exLhs->ex_Flags & EXF_NULL; #endif break; case TOK_CALL: /* * Calls require the RHS to be a compound expression representing the * procedure arguments. METHOD calls insert the lhs as the first rhs * argument by creating a placeholder which is then properly cast * as part of the compound-argument resolver, and dealt with at * run-time. * * XXX deal with pointer-to-function versus function XXX the lhs must * at the moment resolve to the procedure itself. * * In regard to procedure pointers, the declaration will require a * pointer to the procedure's statement body. XXX this pointer can * be the physical storage associated with the lhs data but thus * requires the type to be a pointer. We do not support the 'C' * (*ptr_to_func)(...) form. You have to use ptr_to_func(...).
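* Illustrative example (hypothetical): a method call 'obj.write(buf)' is rewritten below so the compound argument list becomes roughly '(<this=obj>, buf)'; EXF_CALL_CONV guarantees the placeholder is inserted only once even when the expression is resolved again.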
*/ { Type *ltype; Type *atype; /* type for alt context */ SemGroup *save_asg; /* save old alt context */ dassert_exp(exRhs, exRhs->ex_Token == TOK_COMPOUND); /* * Note: ex_Flags hints must 'always happen' since we may be * modifying an expression that will later be Dup'd. */ exLhs->ex_Flags |= EXF_REQ_PROC; exLhs->ex_Flags |= EXF_ADDRUSED; exLhs = ResolveExp(isg, sg, exLhs, NULL, flags & ~RESOLVE_AUTOCAST); ltype = exLhs->ex_Type; dassert_exp(exLhs, ltype != NULL && ltype->ty_Op == TY_PROC); dassert_exp(exLhs, exLhs->ex_Decl != NULL); dassert_exp(exRhs, exRhs->ex_Token == TOK_COMPOUND); /* * If the lhs type indicates a method procedure, then its lhs * is the object we wish to pass as the first argument to the * method. We dup the lhs exp. For a STRIND TY_PTRTO * method call we indirect the element and convert it to a * TOK_DOT lvalue argument of the underlying object. * * A method call via a reference object is a very weird case. * * Since the method called through an object winds up being a * method tailored for that object, and we are calling through a * reference to an object, the actual method will be looked up at * run time and will match the object. Thus we can safely * indirect through the reference object for this one case. Since * (*ref_obj) is not normally allowed this will be special-cased * at compile-time or run-time. * * Note that this occurs before we evaluate the compound * expression on the right hand side. Also note that since the * resolver can be called multiple times on a shared expression, * we have to be careful to shift the arguments around only once. */ if ((ltype->ty_SQFlags & SF_METHOD) && (exRhs->ex_Flags & EXF_CALL_CONV) == 0) { Exp *lhs; Exp *nexp; lhs = exLhs->ex_Lhs; exRhs->ex_Flags |= EXF_CALL_CONV; nexp = AllocExp(NULL); nexp->ex_Token = TOK_THISARG; nexp->ex_Type = lhs->ex_Type; nexp->ex_Flags |= EXF_PARSE_TYPE; nexp->ex_Flags2 |= EX2F_LVALUE; LexDupRef(&lhs->ex_LexRef, &nexp->ex_LexRef); switch (exLhs->ex_Token) { case TOK_STRIND: /* indirect */ /* * Calling through a ref or pointer * * call * / \ * STRIND ARGS * / \ * blah e.g.func_id (resolved) * * * * NOTE: Do not set EXF_RESOLVED, we need to call the * resolver to properly propagate ADDRUSED. */ break; case TOK_DOT: /* * Calling via '.', e.g. stdin->efd.importdesc(). * Take the address of stdin->efd, which will give * us a pointer rather than a reference. It is not * possible to obtain a reference from an embedded type. * This will trigger resolution of the pointer *this * of the method rather than the @this version. * * If this is a pointer or reference, it will match the * built-in methods for PointerType and ReferenceType. * * Pass directly as an lvalue. If this is a pointer or * reference only the builtin methods for the Pointer * or Reference class are possible. These methods * require a content-locked reference. */ if (lhs->ex_Type->ty_Op == TY_CLASS) { Exp *ntmp; ntmp = AllocExp(NULL); ntmp->ex_Lhs = nexp; ntmp->ex_Token = TOK_ADDR; ntmp->ex_Type = TypeToRawPtrType(lhs->ex_Type); ntmp->ex_Flags |= EXF_UNARY | EXF_PARSE_TYPE; nexp = ntmp; } break; default: dassert_exp(exp, 0); lhs = NULL; break; } /* * Make sure atype survives DupExp().
*/ //lhs->ex_Flags |= EXF_PARSE_TYPE; atype = lhs->ex_Type; /* * Insert */ //lhs = DupExp(sg, lhs); //lhs->ex_Next = exRhs->ex_Lhs; //exRhs->ex_Lhs = lhs; nexp->ex_Next = exRhs->ex_Lhs; nexp->ex_Flags |= EXF_PARSE_TYPE; exRhs->ex_Lhs = nexp; } else if (ltype->ty_SQFlags & SF_METHOD) { atype = exRhs->ex_Lhs->ex_Type; } else { atype = NULL; } /* * Try to set an alternative search context during resolution of * the procedure arguments. This context is only searched if an * identifier cannot be found through normal means so local * variables and such will override it as the programmer should * expect. Since the local semantic stack is under the * programmer's control, unexpected collisions should either not * occur or be easily fixed. */ if (atype) { switch (atype->ty_Op) { case TY_REFTO: atype = atype->ty_RefType.et_Type; break; case TY_PTRTO: atype = atype->ty_RawPtrType.et_Type; break; } if (atype->ty_Op != TY_CLASS) atype = NULL; } if (atype) { save_asg = sg->sg_AltContext; sg->sg_AltContext = atype->ty_ClassType.et_SemGroup; } else { save_asg = NULL; } /* * Resolve the right hand side, which are the procedure arguments * as a compound type. This can get tricky. XXX * * NOTE: We inherit the SF_LVALUE flag from the return type. * Parent might turn it off. */ /* d = exLhs->ex_Decl; */ exRhs = ResolveExp(isg, sg, exRhs, ltype->ty_ProcType.et_ArgsType, flags | RESOLVE_AUTOCAST); exType = ltype->ty_ProcType.et_RetType; if (atype) { /* * Restore AltContext after resolving rhs. */ sg->sg_AltContext = save_asg; } else if ((exRhs->ex_Flags & (EXF_CONST | EXF_PROBCONST)) && (exLhs->ex_Decl->d_ScopeFlags & SCOPE_PURE)) { /* * atype NULL (not method call, which requires an object), * arguments can become constants, pure function, so result * can become a constant. */ exFlags |= EXF_PROBCONST; } /* * Additional work to inline the procedure */ resolveDynamicProcedure(isg, sg, exp, flags); resolveProcedureInline(isg, sg, exp, flags); } break; case TOK_INLINE_CALL: /* * An inlined call has already resolved via TOK_CALL. It will not be * a constant, and any argument modifications have already been * performed. */ { Type *ltype; Declaration *d; Type *atype; /* type for alt context */ SemGroup *save_asg; /* save old alt context */ exLhs->ex_Flags |= EXF_REQ_PROC; exLhs->ex_Flags |= EXF_ADDRUSED; exLhs = ResolveExp(isg, sg, exLhs, NULL, flags & ~RESOLVE_AUTOCAST); d = exLhs->ex_Decl; ltype = exLhs->ex_Type; dassert(ltype); /* * Try to set an alternative search context during resolution of * the procedure arguments. This context is only searched if an * identifier cannot be found through normal means so local * variables and such will override it as the programmer should * expect. Since the local semantic stack is under the * programmer's control, unexpected collisions should either not * occur or be easily fixed. 
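* Illustrative example (hypothetical): in 'file.open(O_RDWR)', when 'O_RDWR' is not found in the local scopes it can still resolve as a type-global constant of the File class via sg_AltContext.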
*/ if (ltype->ty_SQFlags & SF_METHOD) { Exp *rhs; rhs = exRhs->ex_Lhs; atype = rhs->ex_Type; } else { atype = NULL; } if (atype) { switch (atype->ty_Op) { case TY_REFTO: atype = atype->ty_RefType.et_Type; break; case TY_PTRTO: atype = atype->ty_RawPtrType.et_Type; break; } if (atype->ty_Op != TY_CLASS) atype = NULL; } if (atype) { save_asg = sg->sg_AltContext; sg->sg_AltContext = atype->ty_ClassType.et_SemGroup; } else { save_asg = NULL; } exRhs = ResolveExp(isg, sg, exRhs, ltype->ty_ProcType.et_ArgsType, flags | RESOLVE_AUTOCAST); if (atype) { sg->sg_AltContext = save_asg; } exType = ltype->ty_ProcType.et_RetType; ResolveStmt(d->d_ImportSemGroup, exp->ex_AuxStmt, flags); } break; case TOK_COMPOUND: /* * (NOTE EARLY RETURN) * * A compound expression should always be an RVALUE, but might * contain LVALUEs (XXX). */ couldconst = 1; exp = resolveCompoundExp(isg, sg, exp, itype, flags); return (exp); /* not reached */ case TOK_BRACKETED: /* * (NOTE EARLY RETURN) */ couldconst = 1; exp = resolveBracketedExp(isg, sg, exp, itype, flags); return (exp); /* not reached */ case TOK_TYPEOF: /* * The caller must be able to handle a type return when typeof() is * used. */ dassert_exp(exp, exFlags & EXF_REQ_TYPE); /* fall through */ case TOK_SIZEOF: case TOK_ARYSIZE: /* * If an expression was supplied, convert it to a type. * * NOTE: ex_Flags hints must 'always happen' since we may be * modifying an expression that will later be Dup'd. */ couldconst = 1; if ((exFlags & EXF_RET_TYPE) == 0) { dassert(exLhs != NULL); exLhs->ex_Flags |= EXF_REQ_TYPE; exLhs = ResolveExp(isg, sg, exLhs, NULL, flags & ~RESOLVE_AUTOCAST); exType = exLhs->ex_Type; #if 1 /* do not clear EXF_UNARY, messes up tmp exp storage */ /* exFlags &= ~EXF_UNARY; */ #endif exFlags |= EXF_RET_TYPE; /* XXX delete the lhs */ } else { ResolveType(exType, NULL, 0); } /* * Create appropriate integer constants for sizeof() and * arysize(). */ switch (exToken) { case TOK_SIZEOF: exp->ex_Token = TOK_INTEGER; exp->ex_Tmp.ts_USize = exType->ty_Bytes; exType = &USizeType; exFlags &= ~EXF_RET_TYPE; exFlags |= EXF_CONST; break; case TOK_ARYSIZE: dassert_exp(exp, (exType->ty_Flags & TF_RESOLVING) == 0); dassert_exp(exp, exType->ty_Op == TY_ARYOF); if (exType->ty_AryType.et_Type->ty_Bytes) { exp->ex_Tmp.ts_USize = exType->ty_Bytes / exType->ty_AryType.et_Type->ty_Bytes; } else { exp->ex_Tmp.ts_USize = 0; } exp->ex_Token = TOK_INTEGER; exType = &USizeType; exFlags &= ~EXF_RET_TYPE; exFlags |= EXF_CONST; /* exLhs = NULL; */ break; case TOK_TYPEOF: /* type is returned */ break; } break; case TOK_THISARG: break; default: dassert_exp(exp, 0); break; } /* * Ensure that the cast target type is resolved. */ if (exType) { ResolveType(exType, NULL, 0); /* XXX exType was ex_Type */ /* * If the type hint did not succeed we may have to cast the * expression to the requested type. Note that if the itype was set * as part of an array optimization request which could not be * handled, we must ignore itype. * * Note that SimilarType() will allow exp->ex_Type to be a var-args * TY_ARGS, and since the original Rhs of a call is set to the * procedure arguments type, VarType.et_Type should match exactly. 
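* Illustrative example (hypothetical types): an argument that resolved as int32 against an itype of float64 is wrapped by resolveExpCast() below when RESOLVE_AUTOCAST is set; when EXF_REQ_ARRAY was requested but not returned, itype is ignored instead.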
*/ if (itype && (exFlags & (EXF_REQ_ARRAY | EXF_RET_ARRAY)) != EXF_REQ_ARRAY) { if ((itype->ty_Flags & TF_RESOLVED) == 0) ResolveType(itype, NULL, 0); // if ((itype->ty_SQFlags & SF_LVALUE) && // (exType->ty_SQFlags & SF_LVALUE) == 0) if ((itype->ty_SQFlags & SF_LVALUE) && (exFlags2 & EX2F_LVALUE) == 0) { /* XXX */ fprintf(stderr, "Exp must be an lvalue here\n"); dassert_exp(exp, 0); } if (!SimilarType(itype, exType) && (flags & RESOLVE_AUTOCAST)) { if (exp->ex_Flags & EXF_DUPEXP) { Exp *nexp = AllocExp(NULL); nexp->ex_Tmp = exp->ex_Tmp; LexDupRef(&exp->ex_LexRef, &nexp->ex_LexRef); exp = nexp; exFlags &= ~EXF_DUPEXP; /* exp = DupExp(sg, exp); */ } exFlags |= EXF_RESOLVED; exp = resolveExpCast(isg, sg, exp, itype, flags); } } } /* * Generic constant evaluation flag. Note that EXF_PROBCONST could also * be set above (TOK_CALL). */ if (couldconst && (exLhs == NULL || (exLhs->ex_Flags & (EXF_CONST | EXF_PROBCONST))) && (exRhs == NULL || (exRhs->ex_Flags & (EXF_CONST | EXF_PROBCONST)))) { exp->ex_Flags |= EXF_PROBCONST; } exp->ex_Flags |= EXF_RESOLVED; return (exp); } /* * Resolve an expression for which the resolver needs the result immediately. */ Exp * resolveConstExp(SemGroup *isg, SemGroup *sg, Exp *exp, int flags) { urunesize_t tmpbytes; urunesize_t tmpalign; srunesize_t ooffset; int rstate; flags &= ~RESOLVE_AUTOCAST; if ((exp->ex_Flags & EXF_RESOLVED) == 0) { exp = ResolveExp(isg, sg, exp, NULL, flags); } if ((exp->ex_Flags & EXF_RESOLVED) == 0) { fprintf(stderr, "early resolve for constant expression failed\n"); LexPrintRef(&exp->ex_LexRef, 0); return exp; } rstate = exp->ex_RState; ooffset = exp->ex_TmpOffset; tmpbytes = 0; tmpalign = 0; resolveExpAlign(exp, &tmpalign, RESOLVE_FINALIZE); resolveExpStorage(exp, RESOLVE_FINALIZE, 0, &tmpbytes); if (tmpbytes < sg->sg_TmpBytes) tmpbytes = sg->sg_TmpBytes; if ((exp->ex_Flags & (EXF_CONST | EXF_PROBCONST)) == 0) { if (flags & RESOLVE_FAILOK) return exp; dfatal_exp(exp, TOK_ERR_EXPECTED_INTEGRER_CONST, NULL); } { /* * Special interpreter execution to resolve the expression. */ RunContext ct; size_t align; rundata_t data; ObjectInfo *info; bzero(&ct, offsetof(RunContext, ct_TmpCtxObjInfo)); ct.ct_Flags |= CTF_RESOLVING; /* * NOTE: minimum alignment for posix_memalign() is sizeof(void *). */ align = sg->sg_TmpAlignMask + 1; if (align < sizeof(void *)) /* posix_memalign requirement */ align = sizeof(void *); if (tmpbytes <= sizeof(ct.ct_TmpCtxObjData) && align <= sizeof(float128_t)) { info = &ct.ct_TmpCtxObjInfo; initObjectInfo(info, &VoidType, RSOP_TMPSPACE); ct.ct_TmpData = (void *)&ct.ct_TmpCtxObjData; } else { if (align < sizeof(float128_t)) align = sizeof(float128_t); info = allocObjectInfo(&VoidType, RSOP_TMPSPACE, tmpbytes, align); ct.ct_TmpData = info->in_Data.od_Base; } ct.ct_CtxObject = info; ct.ct_TmpBytes = tmpbytes; exp->ex_Run(&ct, &data, exp); if ((exp->ex_Flags & EXF_CONST) == 0) { dfatal_exp(exp, TOK_ERR_EXPECTED_INTEGRER_CONST, NULL); } invalObjectInfo(info); } /* * exp is now a constant, restore the original ex_TmpOffset for normal * execution/operation (the storage may be needed for large constants). 
*/ if (rstate & RSF_STORAGE) { exp->ex_TmpOffset = ooffset; /* resolveExpStorage(exp, &tmpbytes); */ } else { exp->ex_TmpOffset = -1; exp->ex_RState &= ~(RSF_STORAGE | RSF_SUB_STORAGE); } resolveExpAlign(exp, &tmpalign, RESOLVE_CLEAN | RESOLVE_FINALIZE); return exp; } __unused Exp * resolveConstExpBool(SemGroup *isg, SemGroup *sg, Exp *exp, int flags, TmpData *ts) { urunesize_t tmpbytes; urunesize_t tmpalign; srunesize_t ooffset; int rstate; flags &= ~RESOLVE_AUTOCAST; if ((exp->ex_Flags & EXF_RESOLVED) == 0) { exp = ResolveExp(isg, sg, exp, NULL, RESOLVE_FINALIZE); } /* * [re]-resolve the storage from 0 so we can execute the expression. */ rstate = exp->ex_RState; ooffset = exp->ex_TmpOffset; tmpbytes = 0; tmpalign = 0; resolveExpAlign(exp, &tmpalign, RESOLVE_FINALIZE); resolveExpStorage(exp, RESOLVE_FINALIZE, 0, &tmpbytes); if (tmpbytes < sg->sg_TmpBytes) tmpbytes = sg->sg_TmpBytes; if ((exp->ex_Flags & (EXF_CONST | EXF_PROBCONST)) == 0) { dfatal_exp(exp, TOK_ERR_EXPECTED_INTEGRER_CONST, NULL); } { /* * Special interpreter execution to resolve the expression. */ RunContext ct; TmpData *rts; rundata_t data; ObjectInfo *info; bzero(&ct, offsetof(RunContext, ct_TmpCtxObjInfo)); ct.ct_Flags |= CTF_RESOLVING; /* * NOTE: minimum alignment for posix_memalign() is sizeof(void *). */ if (tmpbytes <= sizeof(ct.ct_TmpCtxObjData) && tmpalign <= sizeof(float128_t)) { info = &ct.ct_TmpCtxObjInfo; ct.ct_TmpData = (void *)&ct.ct_TmpCtxObjData; initObjectInfo(info, &VoidType, RSOP_TMPSPACE); } else { if (tmpalign < sizeof(float128_t)) tmpalign = sizeof(float128_t); info = allocObjectInfo(&VoidType, RSOP_TMPSPACE, tmpbytes, tmpalign); ct.ct_TmpData = (void *)info->in_Data.od_Base; } ct.ct_CtxObject = info; ct.ct_TmpBytes = tmpbytes; exp->ex_Run(&ct, &data, exp); rts = data.data; if ((exp->ex_Flags & EXF_CONST) == 0) { dfatal_exp(exp, TOK_ERR_EXPECTED_INTEGRER_CONST, NULL); } ts->ts_Bool = rts->ts_Bool; invalObjectInfo(info); } /* * exp is now a constant, restore the original ex_TmpOffset for normal * execution/operation (the storage may be needed for large constants). */ if (rstate & RSF_STORAGE) { exp->ex_TmpOffset = ooffset; tmpbytes = 0; resolveExpStorage(exp, RESOLVE_FINALIZE, exp->ex_TmpOffset, &tmpbytes); } else { exp->ex_TmpOffset = -1; exp->ex_RState &= ~(RSF_STORAGE | RSF_SUB_STORAGE); } resolveExpAlign(exp, &tmpalign, RESOLVE_CLEAN | RESOLVE_FINALIZE); return exp; } /* * Extract constant from already-constant-resolved expression. * resolveConstExp() must have previously been called on exp. * * Expression must have already been constant-optimized, meaning that we * should be able to execute it without a context to access the cached * results in exp->u. 
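* Illustrative usage (assumed flow): after resolveConstExp() folds an expression such as '3 << 4', resolveGetConstExpInt64() re-runs ex_Run with a NULL context and pulls 48 straight from the cached constant.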
* * (This can also be called by the generator) */ int64_t resolveGetConstExpInt64(Exp *exp) { rundata_t data; int64_t value; dassert_exp(exp, (exp->ex_Flags & EXF_CONST)); exp->ex_Run(NULL, &data, exp); if (exp->ex_Type->ty_Flags & TF_ISUNSIGNED) { switch (exp->ex_Type->ty_Bytes) { case 1: value = *(uint8_t *) data.data; break; case 2: value = *(uint16_t *) data.data; break; case 4: value = *(uint32_t *) data.data; break; case 8: value = *(uint64_t *) data.data; break; default: value = 0; dassert_exp(exp, 0); break; } } else { switch (exp->ex_Type->ty_Bytes) { case 1: value = *(int8_t *) data.data; break; case 2: value = *(int16_t *) data.data; break; case 4: value = *(int32_t *) data.data; break; case 8: value = *(int64_t *) data.data; break; default: value = 0; dassert_exp(exp, 0); break; } } return value; } float128_t resolveGetConstExpFloat128(Exp *exp) { rundata_t data; float128_t value; dassert_exp(exp, exp->ex_Token == TOK_FLOAT || (exp->ex_Flags & EXF_CONST)); exp->ex_Run(NULL, &data, exp); switch (exp->ex_Type->ty_Bytes) { case 4: value = (float128_t) *(float32_t *) data.data; break; case 8: value = (float128_t) *(float64_t *) data.data; break; case 16: value = *(float128_t *) data.data; break; default: value = 0; dassert_exp(exp, 0); break; } return value; } /* * resolveCompoundExp() - resolve a compound expression (called from * ResolveExp() and resolveExpOper()). * * Resolve a compound expression. Compound expressions require a compound * type to normalize against. This will work for direct assignments, return * values, casts, and procedure arguments only. * * NOTE: We can't use itype if EXF_REQ_ARRAY is specified because it is hinting * for the array optimization case, which we cannot do. * * Compound expressions may be used in conjunction with types representing * classes, compound types, and procedure arguments. The compound expression * may contain subclasses of the superclasses expected by itype. This is * only allowed if the procedure's body has not yet been generated (for * example, a method call in a subclass). * * Partially resolved operators are typically converted into procedure calls * and method calls are also partially resolved, so some elements may already * be resolved. * * XXX named initialization, missing elements (structural initialization), * and so forth need to be dealt with. */ Exp * resolveCompoundExp(SemGroup *isg, SemGroup *sg, Exp *exp, Type *itype, int flags) { Exp **pscan; Exp *scan; Declaration *d; SemGroup *sg2; int varargs = 0; int isconst = 1; Type *type; Type *stype; flags &= ~RESOLVE_AUTOCAST; /* not applicable to this function */ /* * Expression dup()ing */ if (exp->ex_Flags & EXF_DUPEXP) { #if DUPEXP_DEBUG static int count; fprintf(stderr, "DUPEXPC %d\n", ++count); #endif exp = DupExp(sg, exp); } if (itype && (exp->ex_Flags & EXF_REQ_ARRAY) == 0) exp->ex_Type = itype; /* * If we don't have a SemGroup to normalize against, XXX how should we * normalize the compound expression? */ if (exp->ex_Type == NULL) { dassert_exp(exp, 0); } /* * Normalize the compound expression based on the argument types expected * by the procedure. We have to resolve the type before we start the * scan in order to ensure that d_Offset is properly assigned. * * Use the declarations found in the compound type semantic group to * coerce the procedure arguments to generate the correct compound type. * Note that ResolveExp() recursion must still use the SemGroup that was * passed to us. * * XXX deal with defaults and pre-resolved arguments.
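* Illustrative example (hypothetical signature): normalizing '(3, x)' against 'fn(int32 a, float64 b)' walks the compound type's declarations in d_Offset order, coercing 3 into the int32 slot and auto-casting 'x' into the float64 slot.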
XXX */ type = ResolveType(exp->ex_Type, NULL, 0); switch (type->ty_Op) { case TY_ARGS: sg2 = type->ty_ArgsType.et_SemGroup; break; case TY_VAR: sg2 = type->ty_VarType.et_SemGroup; break; case TY_COMPOUND: sg2 = type->ty_CompType.et_SemGroup; break; case TY_CLASS: sg2 = type->ty_ClassType.et_SemGroup; break; default: dassert_exp(exp, 0); sg2 = NULL; /* NOT REACHED */ break; } pscan = &exp->ex_Lhs; /* * Scan the compound expression and match it up against the compound * type. */ d = RUNE_FIRST(&sg2->sg_DeclList); while ((scan = *pscan) != NULL) { if (scan->ex_ArgId) { /* * Named argument, find it * * (Overloading not allowed) */ int eno = TOK_ERR_ID_NOT_FOUND; Declaration *nd; nd = FindDeclId(sg2, scan->ex_ArgId, &eno); if (nd == NULL) { dfatal_exp(scan, eno, NULL); /* NOT REACHED */ } /* * XXX for now, punt on setting EXF_PROBCONST if the named * argument skips a declaration. */ if (nd != d && (d == NULL || nd != RUNE_NEXT(d, d_Node))) { isconst = 0; } d = nd; } else { /* * Unnamed argument, run through sequentially. Skip any * non-storage or global storage. */ while (d && d->d_Op != DOP_ARGS_STORAGE && d->d_Op != DOP_STACK_STORAGE && d->d_Op != DOP_GROUP_STORAGE) { d = RUNE_NEXT(d, d_Node); } /* * Ran out of storage declarations. If this is a var-args * SemGroup then we actually create a new SemGroup (and * eventually a new type) to represent it. * * We then extend the varargs SemGroup. This isn't pretty. */ if (d == NULL) { if (varargs == 0 && (sg2->sg_Flags & SGF_VARARGS)) { sg2 = DupSemGroup(sg2->sg_Parent, NULL, sg2, 1); varargs = 1; } if (varargs == 0) { fprintf(stderr, "Too many arguments in " "expression\n"); dassert_exp(scan, 0); } } } /* * Unlink the expression from the compound list temporarily so we can * safely resolve it. Either cast the expression to the compound * element, or create a compound element (e.g. varargs call) to match * the expression. * * Due to the resolver moving things around, the elements of a * compound expression are sometimes resolved multiple times. */ *pscan = scan->ex_Next; scan->ex_Next = NULL; stype = scan->ex_Type; if (d) { /* * Compound declaration (e.g. argument decl) for the argument * we are stuffing the expression into. */ Type *dtype = d->d_StorDecl.ed_Type; int sflags; /* * Do not cast ptr to lvalue-void-ptr or ref to lvalue-void-ref, * the lvalue needs to have the original ptr or ref type. */ if ((dtype->ty_SQFlags & SF_LVALUE) && stype->ty_Op == TY_PTRTO /*&& SimilarType(dtype, &VoidPtrType)*/) { dtype = NULL; sflags = flags & ~RESOLVE_AUTOCAST; } else if ((dtype->ty_SQFlags & SF_LVALUE) && stype->ty_Op == TY_REFTO /*&& SimilarType(dtype, &VoidRefType)*/) { dtype = NULL; sflags = flags & ~RESOLVE_AUTOCAST; } else { sflags = flags | RESOLVE_AUTOCAST; } /* * LValueStor's need an address, set ADDRUSED. */ if (d->d_ScopeFlags & SCOPE_LVALUE) scan->ex_Flags |= EXF_ADDRUSED; if ((scan->ex_Flags & EXF_RESOLVED) == 0) { scan = ResolveExp(isg, sg, scan, dtype, sflags); } else if (dtype) { /* * Cast the argument (scan) to the expected (dtype). * * Since we have already resolved the expression we need to * do the same sanity checking that it would do to cast. * * NOTE! Do NOT insert a cast when the target type is * lvalue void * or lvalue void @. Otherwise the * lv_Type loaded into the LValueStor will be incorrect * for operations, e.g. 
stdin.new() */ //dassert_exp(scan, (dtype->ty_SQFlags & SF_LVALUE) == 0 || // (scan->ex_Type->ty_SQFlags & SF_LVALUE)); dassert_exp(scan, (dtype->ty_SQFlags & SF_LVALUE) == 0 || (scan->ex_Flags2 & EX2F_LVALUE)); if (!SimilarType(dtype, scan->ex_Type)) { /* * We need a cast */ scan = resolveExpCast(isg, sg, scan, dtype, flags); } } } else { /* * var-arg. Use the same type but do not pass as an lvalue * or as constant storage (we are copying). Constant-storage * would also trip-up later checks. */ Scope tscope = INIT_SCOPE(0); if ((scan->ex_Flags & EXF_RESOLVED) == 0) { scan = ResolveExp(isg, sg, scan, NULL, flags & ~RESOLVE_AUTOCAST); } dassert(varargs != 0); d = AllocDeclaration(sg2, DOP_ARGS_STORAGE, &tscope); d->d_StorDecl.ed_Type = DEL_LVALUE_CONST(scan->ex_Type); ++sg2->sg_VarCount; d->d_Bytes = scan->ex_Type->ty_Bytes; d->d_AlignMask = scan->ex_Type->ty_AlignMask; /* * __align(%d) scope qualifier, override the type's alignment */ if ((d->d_Scope.s_Flags & SCOPE_ALIGN) && d->d_Scope.s_AlignOverride) { d->d_AlignMask = d->d_Scope.s_AlignOverride - 1; } d->d_Offset = sg2->sg_Bytes; } /* * Relink and check if constant */ scan->ex_Next = *pscan; *pscan = scan; if ((scan->ex_Flags & (EXF_CONST | EXF_PROBCONST)) == 0) isconst = 0; stype = scan->ex_Type; /* * If the declaration requires an LVALUE, assert that we have an * lvalue. Otherwise set the direct-store request (also see * InterpCompoundExp). */ if (d->d_ScopeFlags & SCOPE_LVALUE) { //if ((stype->ty_SQFlags & SF_LVALUE) == 0) if ((scan->ex_Flags2 & EX2F_LVALUE) == 0) fprintf(stderr, "argument must be an lvalue\n"); dassert_exp(scan, (scan->ex_Flags2 & EX2F_LVALUE)); } /* * Catch a programmer's mistake, passing an argument as constant * storage. An argument is not constant storage. */ if (type->ty_Op == TY_ARGS && (d->d_Op & DOPF_STORAGE)) { if (d->d_StorDecl.ed_Type->ty_SQFlags & SF_CONST) { dfatal_decl(d, TOK_ERR_READONLY_ARG, NULL); } } /* * accounting */ d = RUNE_NEXT(d, d_Node); pscan = &scan->ex_Next; } /* * Make sure the caller knows it's a var-args function even if we didn't * supply any additional args. Otherwise the backend may not generate * the correct form for calls to the target. */ if (varargs == 0 && (sg2->sg_Flags & SGF_VARARGS)) { sg2 = DupSemGroup(sg2->sg_Parent, NULL, sg2, 1); varargs = 1; } /* * Resolve the varargs sg2 after building it. */ if (varargs) { ResolveSemGroup(sg2, 0); } /* * If we made a var-args call, adjust the expression's type */ if (varargs) { dassert(type->ty_Op == TY_ARGS); exp->ex_Type = ResolveType(TypeToVarType(type, sg2), NULL, 0); } if (isconst) exp->ex_Flags |= EXF_PROBCONST; exp->ex_Flags |= EXF_RESOLVED; return (exp); } /* * resolveBracketedExp() - resolve a bracketed expression. * * Resolve a bracketed expression. Bracketed expressions require an array * type to normalize against. * * The bracketed expressions may contain subclasses of the superclasses * expected by itype. */ Exp * resolveBracketedExp(SemGroup *isg, SemGroup *sg, Exp *exp, Type *itype, int flags) { Exp **pscan; Exp *scan; int isconst = 1; Type *type; //Type *stype; flags &= ~RESOLVE_AUTOCAST; /* not applicable to this function */ /* * Expression dup()ing */ if (exp->ex_Flags & EXF_DUPEXP) { #if DUPEXP_DEBUG static int count; fprintf(stderr, "DUPEXPC %d\n", ++count); #endif exp = DupExp(sg, exp); } /* * Expression type is the hinted type. */ if (itype && (exp->ex_Flags & EXF_REQ_ARRAY) == 0) exp->ex_Type = itype; /* * We need a type to normalize against.
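* Illustrative example (hypothetical syntax): a bracketed initializer resolved against 'int32 ary[3]' checks each element against the int32 element type, inserting casts where an element's type differs.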
*/ if (exp->ex_Type == NULL) { dassert_exp(exp, 0); /* NOT REACHED */ } /* * Normalize the bracketed expression based on the array type. We have * to resolve the type before we start the scan in order to ensure that * d_Offset is properly assigned. */ type = ResolveType(exp->ex_Type, NULL, 0); if (type->ty_Op != TY_ARYOF) { dassert_exp(exp, 0); /* NOT REACHED */ } type = type->ty_AryType.et_Type; /* element type */ /* * Scan the bracketed expression and match each element against the * element type. */ pscan = &exp->ex_Lhs; while ((scan = *pscan) != NULL) { Type *dtype; int sflags; /* * Unlink the expression from the compound list temporarily so we can * safely resolve it. Either cast the expression to the compound * element, or create a compound element (e.g. varargs call) to match * the expression. * * Due to the resolver moving things around, the elements of a * compound expression are sometimes resolved multiple times. */ *pscan = scan->ex_Next; scan->ex_Next = NULL; dtype = type; /* * HACK! XXX YYY */ if ((SimilarType(dtype, &PointerType) || SimilarType(dtype, &ReferenceType)) && (dtype->ty_SQFlags & SF_LVALUE) == SF_LVALUE) { dtype = NULL; sflags = flags & ~RESOLVE_AUTOCAST; } else { sflags = flags | RESOLVE_AUTOCAST; } /* * LValueStor needs a RS, set ADDRUSED to make sure it is available to * the generator. (dtype may have been cleared by the hack above). */ if (dtype && (dtype->ty_SQFlags & SF_LVALUE)) scan->ex_Flags |= EXF_ADDRUSED; if ((scan->ex_Flags & EXF_RESOLVED) == 0) { scan = ResolveExp(isg, sg, scan, dtype, sflags); } else { /* * Since we have already resolved the expression we need to do * the same sanity checking that it would do to cast. */ #if 0 dassert_exp(scan, (dtype->ty_SQFlags & SF_LVALUE) == 0 || (scan->ex_Type->ty_SQFlags & SF_LVALUE)); #endif dassert_exp(scan, dtype == NULL || (dtype->ty_SQFlags & SF_LVALUE) == 0 || (scan->ex_Flags2 & EX2F_LVALUE)); if (dtype && !SimilarType(dtype, scan->ex_Type)) { scan = resolveExpCast(isg, sg, scan, dtype, flags); } } /* * Relink and check if constant */ scan->ex_Next = *pscan; *pscan = scan; if ((scan->ex_Flags & (EXF_CONST | EXF_PROBCONST)) == 0) isconst = 0; //stype = scan->ex_Type; /* * If the declaration requires an LVALUE, assert that we have an * lvalue. Otherwise set the direct-store request (also see * InterpCompoundExp). */ if (dtype && (dtype->ty_SQFlags & SF_LVALUE)) { //if ((stype->ty_SQFlags & SF_LVALUE) == 0) if ((scan->ex_Flags2 & EX2F_LVALUE) == 0) fprintf(stderr, "argument must be an lvalue\n"); dassert_exp(scan, (scan->ex_Flags2 & EX2F_LVALUE)); } pscan = &scan->ex_Next; } if (isconst) exp->ex_Flags |= EXF_PROBCONST; exp->ex_Flags |= EXF_RESOLVED; return (exp); } /* * resolveExpCast() - Cast the expression to the specified type and return * the cast expression. * * Note that expression nodes depend on their ex_Type being correct, and also * expressions may be shared, so be careful not to modify the ex_Type (or * anything else) in the existing expression. * * This code is somewhat different than resolveExpOper() and friends. The Exp * argument has already been resolved so do not resolve it again. * * As with operators we have to locate the cast declaration matching the cast * we want to do. */ static Exp * resolveExpCast(SemGroup *isg, SemGroup *sg, Exp *exp, Type *ltype, int flags) { Type *rtype; Declaration *d; int didagain = 0; int oflags = flags; flags &= ~RESOLVE_AUTOCAST; again: rtype = exp->ex_Type; dassert(rtype && ltype); /* * XXX attempt to cast from subclass to superclass? */ /* * XXX look in our local semantic hierarchy for a compatible cast ?
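* Search order, as coded below (illustrative class names): casting float64 to int32 first tries findCast() on the source class, then on the destination class, then on the Pointer/Reference built-ins when the source is a pointer or reference, and only then falls back to the inherent casts.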
*/ dassert(ltype->ty_Op != TY_UNRESOLVED); dassert(rtype->ty_Op != TY_UNRESOLVED); /* * Look in the right hand (source) type for the cast */ d = findCast(rtype, ltype, rtype, flags); /* * If that fails then look in the left hand (destination) type for the * cast. */ if (d == NULL) { d = findCast(ltype, ltype, rtype, flags); } /* * Look for pointer or reference type casts */ if (d == NULL && rtype->ty_Op == TY_PTRTO) { d = findCast(&PointerType, ltype, rtype, flags); } if (d == NULL && rtype->ty_Op == TY_REFTO) { d = findCast(&ReferenceType, ltype, rtype, flags); } if (d == NULL) { /* * We could not find a specific cast operator. There are some * inherent casts that we can do. We run through these in attempt to * come up with matching types. */ if (ltype->ty_Op != rtype->ty_Op && (ltype->ty_Op == TY_PTRTO || ltype->ty_Op == TY_ARYOF) && (rtype->ty_Op == TY_PTRTO || rtype->ty_Op == TY_ARYOF)) { /* * Pointers or arrays can be cast to pointers of the same * type. * * Cast the right hand type to an equivalent * pointer/array * of the right hand type and re-resolve the cast. */ exp = ExpToCastExp(exp, ResolveType(ChangeType(rtype, ltype->ty_Op), NULL, 0)); return (resolveExpCast(isg, sg, exp, ltype, flags)); } else if (MatchType(ltype, rtype) <= SG_COMPAT_PART) { /* * If the types are compatible (casting rtype->ltype), we can * cast trivially. */ exp = ExpToCastExp(exp, ltype); } else if (MatchType(&NumericType, ltype) <= SG_COMPAT_SUBCLASS && MatchType(&NumericType, rtype) <= SG_COMPAT_SUBCLASS) { /* * Casting from one numeric type to another must be supported by * the interpreter/compiler. */ exp = ExpToCastExp(exp, ltype); } else if (SimilarType(&VoidType, ltype)) { /* * Casting anything to void is allowed (throwing the object * away). E.g. statement-expressions. */ exp = ExpToCastExp(exp, ltype); } else if (SimilarType(&VoidPtrType, ltype)) { /* * Casting a pointer to a (void *) is trivial, but is only * allowed if the underlying structure does not contain any * pointers. * * NOTE: Generally only used when a pointer is being cast to an * integer. Rune does not allow casting back to other pointer * types. * * XXX validate integral # of objects fit in pointer range. */ if (rtype->ty_RawPtrType.et_Type->ty_Flags & TF_HASLVREF) dfatal_exp(exp, TOK_ERR_LIMITED_VOIDP_CAST, NULL); exp = ExpToCastExp(exp, ltype); } else if (SimilarType(&VoidRefType, ltype)) { /* * Casting a pointer to a (void @) is trivial. * * NOTE: Generally only used when a pointer is being cast to an * integer. Rune does not allow casting back to other pointer * types. * * XXX validate integral # of objects fit in pointer range. */ if (rtype->ty_RawPtrType.et_Type->ty_Flags & TF_HASLVREF) dfatal_exp(exp, TOK_ERR_LIMITED_VOIDP_CAST, NULL); exp = ExpToCastExp(exp, ltype); } else if (SimilarType(rtype, &VoidPtrType)) { /* * Casting from a void pointer may not be trivial but we leave it * up to the interpreter/compiler. * * Only allow if the target does not contain any pointers or if * the right-hand-side is NULL. * * XXX validate integral # of objects fit in pointer range. 
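* Illustrative consequence (hypothetical class): casting a 'void *' to '@MyClass' is refused below when MyClass contains tracked references (TF_HASLVREF), unless the expression carries the EXF_NULL special.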
*/ switch (ltype->ty_Op) { case TY_REFTO: if ((exp->ex_Flags & EXF_NULL) == 0 && (ltype->ty_RefType.et_Type->ty_Flags & TF_HASLVREF)) { dfatal_exp(exp, TOK_ERR_LIMITED_VOIDP_CAST, NULL); } break; default: break; } exp = ExpToCastExp(exp, ltype); } else if (SimilarType(rtype, &CVoidPtrType)) { switch (ltype->ty_Op) { case TY_PTRTO: if ((exp->ex_Flags & EXF_NULL) == 0 && (ltype->ty_RawPtrType.et_Type->ty_Flags & TF_HASLVREF)) { dfatal_exp(exp, TOK_ERR_LIMITED_VOIDP_CAST, NULL); } break; default: break; } } else if (SimilarType(ltype, &BoolType) && (rtype->ty_Op == TY_PTRTO || rtype->ty_Op == TY_REFTO)) { /* * Any pointer can be cast to a boolean, which tests against * NULL. */ exp = ExpToCastExp(exp, ltype); } else if (ltype->ty_Op == rtype->ty_Op && (ltype->ty_Op == TY_PTRTO || ltype->ty_Op == TY_ARYOF)) { /* * We allow casts of pointers to similar numeric types if they * are the same size, though this is really rather a hack. This * is mainly to handle the signed<->unsigned cast case. XXX */ int ok = 0; switch (ltype->ty_Op) { case TY_PTRTO: if ((ltype->ty_RawPtrType.et_Type->ty_SQFlags & SF_CONST) == 0 && (rtype->ty_RawPtrType.et_Type->ty_SQFlags & SF_CONST) != 0) { dfatal_exp(exp, TOK_ERR_READONLY, NULL); } if (MatchType(&NumericType, ltype->ty_RawPtrType.et_Type) <= SG_COMPAT_SUBCLASS && MatchType(&NumericType, rtype->ty_RawPtrType.et_Type) <= SG_COMPAT_SUBCLASS && ltype->ty_Bytes == rtype->ty_Bytes) { exp = ExpToCastExp(exp, ltype); ok = 1; } break; case TY_ARYOF: if ((ltype->ty_AryType.et_Type->ty_SQFlags & SF_CONST) == 0 && (rtype->ty_AryType.et_Type->ty_SQFlags & SF_CONST) != 0) { dfatal_exp(exp, TOK_ERR_READONLY, NULL); } if (MatchType(&NumericType, ltype->ty_AryType.et_Type) <= SG_COMPAT_SUBCLASS && MatchType(&NumericType, rtype->ty_AryType.et_Type) <= SG_COMPAT_SUBCLASS && ltype->ty_Bytes == rtype->ty_Bytes) { exp = ExpToCastExp(exp, ltype); ok = 1; } break; } if (ok == 0) { fprintf(stderr, "Unable to resolve cast from pointers " "to dissimilar numeric types " "%s to %s\n", TypeToStr(rtype, NULL), TypeToStr(ltype, NULL)); dassert_exp(exp, 0); } } else if (didagain == 0 && (oflags & RESOLVE_AUTOCAST) && (exp->ex_Flags2 & EX2F_WASCOMP) && ltype->ty_Op == TY_COMPOUND && rtype->ty_Op != TY_COMPOUND) { /* * The expression parser might have optimized-out the * TOK_COMPOUND wrapper around single-element parenthesized * expressions. Add it back in if the cast target expects a * compound expression. * * XXX Currently hack a SetDupExp() to avoid re-resolving the * already-resolved component. */ exp = ExpToCompoundExp(exp, TOK_COMPOUND); exp = resolveCompoundExp(isg, sg, exp, ltype, flags); didagain = 1; goto again; } else if (didagain == 0 && (oflags & RESOLVE_AUTOCAST) && (exp->ex_Flags2 & EX2F_WASCOMP) && ltype->ty_Op == TY_CLASS && rtype->ty_Op == TY_CLASS && ltype != &VoidType && (ltype->ty_Flags & (TF_ISBOOL | TF_ISINTEGER | TF_ISFLOATING)) == 0 && (rtype->ty_Flags & (TF_ISBOOL | TF_ISINTEGER | TF_ISFLOATING))) { /* * The expression parser might have optimized-out the * TOK_COMPOUND wrapper around single-element parenthesized * expressions used in a class iterator (in an assignment). Add * it back in if the ltype is a non-core class and rtype is a * core class. * * XXX Currently hack a SetDupExp() to avoid re-resolving the * already-resolved component. 
*/ exp = ExpToCompoundExp(exp, TOK_COMPOUND); exp = resolveCompoundExp(isg, sg, exp, ltype, flags); didagain = 1; goto again; } else { fprintf(stderr, "Unable to resolve cast from %s to %s\n", TypeToStr(rtype, NULL), TypeToStr(ltype, NULL)); dassert_exp(exp, 0); } } else if (d->d_ScopeFlags & SCOPE_INTERNAL) { /* * We found a cast operator and it is an internal operator */ exp = ExpToCastExp(exp, ltype); exp->ex_Decl = d; } else { /* * We found a cast operator and it is a Rune cast procedure. We must * convert the cast to a procedure call. If we want * resolveCompoundExp() to be able to generate a compatible procedure * (in a subclass) we have to tell it about the procedure. */ Exp *sexp; sexp = ExpToCompoundExp(exp, TOK_COMPOUND); if (d->d_ProcDecl.ed_ProcBody == NULL) /* XXX */ sexp->ex_Decl = d; sexp = resolveCompoundExp(isg, sg, sexp, d->d_ProcDecl.ed_Type->ty_ProcType.et_ArgsType, flags); exp = AllocExp(NULL); exp->ex_Lhs = AllocExp(NULL); exp->ex_Lhs->ex_Token = TOK_DECL; exp->ex_Lhs->ex_Id = d->d_Id; exp->ex_Lhs->ex_Decl = d; exp->ex_Lhs->ex_Type = d->d_ProcDecl.ed_Type; exp->ex_Lhs->ex_Flags |= EXF_RESOLVED; exp->ex_Rhs = sexp; exp->ex_Flags |= EXF_BINARY; exp->ex_Token = TOK_CALL; /* XXX use ltype or procedure's rettype? */ exp->ex_Type = ltype; LexDupRef(&sexp->ex_LexRef, &exp->ex_LexRef); LexDupRef(&sexp->ex_LexRef, &exp->ex_Lhs->ex_LexRef); ResolveDecl(d, 0); /* * Additional work to inline the procedure */ resolveDynamicProcedure(isg, sg, exp, flags); resolveProcedureInline(isg, sg, exp, flags); } exp->ex_Flags |= EXF_RESOLVED; return (exp); } static Declaration * findCast(Type *btype, Type *ltype, Type *rtype, int flags) { SemGroup *sg; Declaration *d; flags &= ~RESOLVE_AUTOCAST; /* not applicable to this function */ dassert(rtype->ty_Op != TY_UNRESOLVED); dassert(ltype->ty_Op != TY_UNRESOLVED); /* * Locate the base type. If the base type does not have a SemGroup there * are no casts. (XXX put system operators here) */ sg = BaseType(&btype); dassert(btype->ty_Op != TY_UNRESOLVED); if (sg == NULL) return (NULL); /* * Look for the cast in the SemGroup */ RUNE_FOREACH(d, &sg->sg_DeclList, d_Node) { if (d->d_Op == DOP_PROC && (d->d_ScopeFlags & SCOPE_CAST)) { ResolveType(d->d_ProcDecl.ed_Type, NULL, 0); if (MatchCastTypes(d, ltype, rtype)) return (d); } } /* * Failed. If the base type is a compound type, look for the cast in the * SemGroup for each element making up the compound type. e.g. so * (mycustomtype, double) would find the cast in mycustomtype. */ if (btype->ty_Op == TY_COMPOUND) { RUNE_FOREACH(d, &sg->sg_DeclList, d_Node) { Declaration *d2; if (d->d_Op & DOPF_STORAGE) { ResolveType(d->d_StorDecl.ed_Type, NULL, 0); d2 = findCast(d->d_StorDecl.ed_Type, ltype, rtype, flags); } else if (d->d_Op == DOP_TYPEDEF) { ResolveType(d->d_StorDecl.ed_Type, NULL, 0); d2 = findCast(d->d_TypedefDecl.ed_Type, ltype, rtype, flags); } else { d2 = NULL; } if (d2) return (d2); } } return (NULL); } /* * resolveExpOper() - resolve an operator * * This is complex enough that it is broken out into its own procedure. * Normally we just look the operator up but we have to special case pointer * arithmetic because we will not know until now that we have to do it. * * itype is a return-type hint only. resolveExpOper() can ignore it if it * wishes. We currently use it to detect cast-to-void, such as when an * expression like "++i" is used in a for() loop or as a standalone * statement. This allows us to optimize the case.
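* Illustrative example: for a statement-position '++i' in a for() loop the caller passes itype == &VoidType, and an internal operator is then flagged EXF_RET_VOID below so no result needs to be materialized.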
*/ static Exp * resolveExpOper(SemGroup *isg, SemGroup *sg, Exp *exp, Type *itype, int flags) { Declaration *d; int isPointerOp = 0; int isReferenceOp = 0; flags &= ~RESOLVE_AUTOCAST; /* not applicable to this function */ dassert_exp(exp, exp->ex_Id != 0); if (exFlags & EXF_BINARY) { exLhs = ResolveExp(isg, sg, exLhs, NULL, flags); exRhs = ResolveExp(isg, sg, exRhs, NULL, flags); } else if (exFlags & EXF_UNARY) { exLhs = ResolveExp(isg, sg, exLhs, NULL, flags); } else { dassert_exp(exp, 0); } /* * If the lhs is a pointer look the operator up in the Pointer class * first. Operators in the Pointer class are special-cased. A second * pointer argument or a pointer return value must match the lhs pointer. * * If this fails, or if the ltype is not a pointer, then look the * operator up normally. */ if (exLhs->ex_Type->ty_Op == TY_PTRTO) { Type *ltype; Type *rtype; if (exFlags & EXF_BINARY) { rtype = exRhs->ex_Type; ltype = exLhs->ex_Type; } else { dassert(exFlags & EXF_UNARY); rtype = NULL; ltype = exLhs->ex_Type; } d = findOper(&PointerType, exp->ex_Id, ltype, rtype, flags); if (d) isPointerOp = 1; else d = findExpOper(exp, flags); } else if (exLhs->ex_Type->ty_Op == TY_REFTO) { Type *ltype; Type *rtype; if (exFlags & EXF_BINARY) { rtype = exRhs->ex_Type; ltype = exLhs->ex_Type; } else { dassert(exFlags & EXF_UNARY); rtype = NULL; ltype = exLhs->ex_Type; } d = findOper(&ReferenceType, exp->ex_Id, ltype, rtype, flags); if (d) isReferenceOp = 1; else d = findExpOper(exp, flags); } else { d = findExpOper(exp, flags); } /* * Fall through to finish up resolving the operator. We just set ex_Decl * for internal operators, and construct a call for non-internal * procedural operators. */ if (d) { Declaration *d2; Type *type; SemGroup *sg2; int count = 0; dassert_exp(exp, d != NULL); dassert_exp(exp, d->d_Op == DOP_PROC); dassert_exp(exp, d->d_ProcDecl.ed_Type->ty_Op == TY_PROC); type = d->d_ProcDecl.ed_Type; exType = type->ty_ProcType.et_RetType; /* * Special case for internal Pointer ops. The return type is the * left-hand type (we may still optimize it to void later). */ if (isReferenceOp && (d->d_ScopeFlags & SCOPE_INTERNAL) && SimilarType(&VoidRefType, exType)) { if (exType->ty_SQFlags & SF_LVALUE) exType = ADD_LVALUE(exLhs->ex_Type); else exType = DEL_LVALUE(exLhs->ex_Type); } if (isPointerOp && (d->d_ScopeFlags & SCOPE_INTERNAL) && SimilarType(&VoidPtrType, exType)) { if (exType->ty_SQFlags & SF_LVALUE) exType = ADD_LVALUE(exLhs->ex_Type); else exType = DEL_LVALUE(exLhs->ex_Type); } type = d->d_ProcDecl.ed_Type->ty_ProcType.et_ArgsType; dassert(type->ty_Op == TY_ARGS); sg2 = type->ty_ArgsType.et_SemGroup; /* * Assert that LVALUE requirements are met. XXX MatchType() code * should disallow the non-lvalue-cast-to-lvalue case so we don't * have to do a check here. */ RUNE_FOREACH(d2, &sg2->sg_DeclList, d_Node) { if ((d2->d_Op & DOPF_STORAGE) && d2->d_Op != DOP_GLOBAL_STORAGE) { if (count == 0) { if ((d2->d_ScopeFlags & SCOPE_LVALUE) && //(exLhs->ex_Type->ty_SQFlags & SF_LVALUE) == 0) (exLhs->ex_Flags2 & EX2F_LVALUE) == 0) { fprintf(stderr, "lhs of exp must be lvalue\n"); dassert_exp(exp, 0); } } else if (count == 1) { if ((d2->d_ScopeFlags & SCOPE_LVALUE) && //(exRhs->ex_Type->ty_SQFlags & SF_LVALUE) == 0) (exRhs->ex_Flags2 & EX2F_LVALUE) == 0) { fprintf(stderr, "rhs of exp must be lvalue\n"); dassert_exp(exp, 0); } } ++count; } } if (d->d_ScopeFlags & SCOPE_INTERNAL) { /* * Internal operator. Optimize any cast to void by having the * internal function deal with it. 
(since we aren't setting * exType the optimization currently doesn't do anything, see * ST_Exp) */ exDecl = d; if (itype == &VoidType) { /* exType = itype; */ exFlags |= EXF_RET_VOID; } } else { /* * Normal procedural operator. Convert the left and right hand * sides to a compound expression and convert exp to a TOK_CALL. * NOTE! ex_Rhs may be NULL (unary op). * * The compound expression may need to rewrite a subclass * procedure, which it can do if the procedure's body has not yet * been created (or duplicated from the superclass). ex_Decl * must be set in this case. * * Note that the expression structure may be shared. The * conversion is permanent so that is ok. * * XXX keep the type intact? */ exLhs->ex_Next = exRhs; exRhs = exLhs; exRhs = ExpToCompoundExp(exRhs, TOK_COMPOUND); if (d->d_ProcDecl.ed_ProcBody == NULL) exRhs->ex_Decl = d; exRhs = resolveCompoundExp(isg, sg, exRhs, type, flags); exLhs = AllocExp(NULL); LexDupRef(&exp->ex_LexRef, &exLhs->ex_LexRef); exLhs->ex_Token = TOK_ID; exLhs->ex_Id = d->d_Id; exLhs->ex_Decl = d; exLhs->ex_Type = d->d_ProcDecl.ed_Type; exLhs->ex_Flags |= EXF_RESOLVED; exp->ex_Token = TOK_CALL; exFlags = EXF_BINARY; ResolveDecl(d, 0); /* * Additional work to inline the procedure */ resolveDynamicProcedure(isg, sg, exp, flags); resolveProcedureInline(isg, sg, exp, flags); } } if (d == NULL) { char buf[RUNE_IDTOSTR_LEN]; fprintf(stderr, "Unable to resolve operator: %s\n", runeid_text(exp->ex_Id, buf)); dassert_exp(exp, 0); } /* * Flag a pure operator whose arguments are constants as probably being * constant. */ if (d->d_ScopeFlags & SCOPE_PURE) { if ((exLhs->ex_Flags & (EXF_CONST | EXF_PROBCONST)) && (exRhs == NULL || (exRhs->ex_Flags & (EXF_CONST | EXF_PROBCONST)))) { exFlags |= EXF_PROBCONST; } } exp->ex_Flags |= EXF_RESOLVED; return exp; } /* * Helper, visibility must be properly set immediately, prior to any * circularity, to guarantee that search functions work without deferral. */ static void resvis_set(resvis_t *vis, int visibility) { while (vis) { *vis->visp = visibility; vis = vis->next; } } /* * ResolveType() - Resolve a type (always returns its argument) * * Resolve a type. Always returns consistent visibility information to the * caller, even if the resolution remains in-progress. Thus all * modifications to the resvis chain occur on the front-end of any * recursion. * * Flags, Size and Alignment information might take several passes for * classes (due to chains of DF_DYNAMICREF'd processes), or arrays (due to * the * array size not being immediately resolvable). */ Type * ResolveType(Type *type, resvis_t *vis, int retry) { SemGroup *sg = NULL; int ok = 0; int dummy_vis; resvis_t myvis; myvis.next = vis; myvis.visp = &dummy_vis; /* * Detect circular loop. */ if (type->ty_Flags & TF_RESOLVED) { resvis_set(vis, type->ty_Visibility); return (type); } if (type->ty_Flags & TF_RESOLVING) { if (retry == 0) { resvis_set(vis, type->ty_Visibility); return (type); } } type->ty_Flags |= TF_RESOLVING; /* * Remember that visibility data must be set at the head of any recursion * chain. */ loop_unresolved: switch (type->ty_Op) { case TY_CLASS: /* * NOTE: Special case, PointerType and ReferenceType fields not in * classes XXX (force alignment and bytes)? */ dassert(type->ty_SQList == &type->ty_ClassType.et_SemGroup->sg_ClassList); /* visibility already determined by resolveUnresClass? */ dassert(type->ty_Visibility != 0); resvis_set(vis, type->ty_Visibility); /* * The superclass (if any) cannot depend on our subclass, so resolve * it first.
Note that resolveUnresClass() does not do everything * because it has to be called in the ResolveClasses() stage, so * finish it up here with a real resolve. */ if (type->ty_ClassType.et_Super) { Type **superp = &type->ty_ClassType.et_Super; if ((*superp)->ty_Op == TY_UNRESOLVED) resolveUnresClass(*superp); ResolveType(*superp, NULL, 0); } /* * DEPENDENCY - SG must resolve for us to resolve. (if we can't * resolve this it is likely an embedded object loop). */ sg = type->ty_ClassType.et_SemGroup; ResolveSemGroup(sg, 0); if (sg->sg_Flags & SGF_RESOLVED) { if (type != &PointerType && type != &ReferenceType) { type->ty_Bytes = sg->sg_Bytes; type->ty_AlignMask = sg->sg_AlignMask; } ok = 1; } #if 0 /* * Fixup type ty_SQFlags here XXX removed Any hard class type must be * given the SF_HARD storage qualifier. */ if (sg->sg_Stmt->u.ClassStmt.es_Decl->d_ScopeFlags & SCOPE_HARD) type->ty_SQFlags |= SF_HARD; #endif break; case TY_PTRTO: /* * NOTE: Do not set TF_HASLVREF, C pointers are not tracked. * We do set TF_HASPTR to indicate that the type is or * contains a pointer. * * Always complete, even if the target type is incomplete. (allow * circular references). */ type->ty_Bytes = sizeof(void *); type->ty_AlignMask = RAWPTR_ALIGN; type->ty_Flags |= TF_HASPTR; myvis.visp = &type->ty_Visibility; ResolveType(type->ty_RawPtrType.et_Type, &myvis, 0); ok = 1; break; case TY_REFTO: /* * Set TF_HASLVREF, references are tracked. * * Always complete, even if the target type is incomplete. (allow * circular references). */ type->ty_Bytes = sizeof(ReferenceStor); type->ty_AlignMask = REFERENCESTOR_ALIGNMASK; type->ty_Flags |= TF_HASLVREF; myvis.visp = &type->ty_Visibility; ResolveType(type->ty_RefType.et_Type, &myvis, 0); ok = 1; break; case TY_ARYOF: /* * Inherit TF_HASLVREF and TF_HASPTR (if array type is or contains * something which needs to be tracked or checked). * * The array size must resolve sufficiently for us to resolve. */ { Exp *exp; Type *atype; if (type->ty_AryType.et_OrigArySizeExp) { type->ty_AryType.et_ArySizeExp = DupExp(NULL, type->ty_AryType.et_OrigArySizeExp); } exp = type->ty_AryType.et_ArySizeExp; atype = type->ty_AryType.et_Type; myvis.visp = &type->ty_Visibility; ResolveType(atype, &myvis, 0); exp = resolveConstExp(NULL, type->ty_AryType.et_SemGroup, exp, 0); if ((exp->ex_Flags & EXF_RESOLVED) && (atype->ty_Flags & TF_RESOLVED)) { type->ty_AryType.et_ArySizeExp = exp; type->ty_AryType.et_Count = resolveGetConstExpInt64(exp); type->ty_AlignMask = type->ty_AryType.et_Type->ty_AlignMask; type->ty_Bytes = type->ty_AryType.et_Type->ty_Bytes * type->ty_AryType.et_Count; type->ty_Flags |= type->ty_AryType.et_Type->ty_Flags & (TF_HASLVREF | TF_HASPTR | TF_HASCONSTRUCT | TF_HASDESTRUCT | TF_HASGCONSTRUCT | TF_HASGDESTRUCT | TF_HASASS); ok = 1; } } break; case TY_COMPOUND: /* * All elements of a compound type must resolve for the compound type * to resolve. * * NOTE: TF_HASLVREF and TF_HASPTR inherited as appropriate * after switch. */ sg = type->ty_CompType.et_SemGroup; ResolveSemGroup(sg, 0); if (sg->sg_Flags & SGF_RESOLVED) { type->ty_Bytes = sg->sg_Bytes; type->ty_AlignMask = sg->sg_AlignMask; type->ty_Visibility = SCOPE_ALL_VISIBLE; ok = 1; } break; case TY_VAR: /* * All elements of a compound type must resolve for the compound type * to resolve. * * NOTE: TF_HASLVREF and TF_HASPTR inherited as appropriate * after switch. 
    case TY_VAR:
        /*
         * All elements of a compound type must resolve for the compound
         * type to resolve.
         *
         * NOTE: TF_HASLVREF and TF_HASPTR inherited as appropriate
         * after switch.
         */
        sg = type->ty_VarType.et_SemGroup;
        ResolveSemGroup(sg, 0);
        if (sg->sg_Flags & SGF_RESOLVED) {
            type->ty_Bytes = sg->sg_Bytes;
            type->ty_AlignMask = sg->sg_AlignMask;
            type->ty_Visibility = SCOPE_ALL_VISIBLE;
            ok = 1;
        }
        break;
    case TY_ARGS:
        /*
         * All elements of a compound type must resolve for the compound
         * type to resolve.
         *
         * NOTE: TF_HASLVREF and TF_HASPTR inherited as appropriate
         * after switch.
         */
        sg = type->ty_ArgsType.et_SemGroup;
        ResolveSemGroup(sg, 0);
        if (sg->sg_Flags & SGF_RESOLVED) {
            type->ty_Bytes = sg->sg_Bytes;
            type->ty_AlignMask = sg->sg_AlignMask;
            type->ty_Visibility = SCOPE_ALL_VISIBLE;
            ok = 1;
        }
        break;
    case TY_PROC:
        /*
         * We mark the type as resolved regardless of the state of the
         * underlying argument and return types.
         *
         * NOTE: Storage not tracked.
         */
        type->ty_Bytes = 0;
        type->ty_AlignMask = 0;
        type->ty_Visibility = SCOPE_ALL_VISIBLE;
        resvis_set(vis, type->ty_Visibility);
        ResolveType(type->ty_ProcType.et_ArgsType, NULL, 0);
        ResolveType(type->ty_ProcType.et_RetType, NULL, 0);
        ok = 1;
        break;
    case TY_STORAGE:
        /*
         * Raw storage must always resolve.
         *
         * NOTE: Base storage is not tracked.
         */
        type->ty_Bytes = type->ty_StorType.et_Bytes;
        /* XXX check pwr of 2 */
        if (type->ty_Bytes)
            type->ty_AlignMask = type->ty_Bytes - 1;
        type->ty_Visibility = SCOPE_ALL_VISIBLE;
        resvis_set(vis, type->ty_Visibility);
        ok = 1;
        break;
    case TY_UNRESOLVED:
        /*
         * We loop until the type is no longer TY_UNRESOLVED.
         *
         * NOTE: resolveUnresClass() is not really a recursive function
         * so we don't have to pre-set visibility.
         */
        resolveUnresClass(type);
        /* visibility set by resolveUnresClass() */
        goto loop_unresolved;
        break;
    case TY_DYNAMIC:
        /*
         * A Dynamic type is basically unknown at compile-time. Always
         * resolve.
         *
         * NOTE: Tracking unknown (must be handled at run-time).
         */
        type->ty_Visibility = SCOPE_ALL_VISIBLE;
        resvis_set(vis, type->ty_Visibility);
        ok = 1;
        break;
    case TY_IMPORT:
        /*
         * TY_IMPORT types cannot be directly referenced by the program.
         * They are implicitly used as a placeholder for a module's
         * global storage at run-time.
         *
         * NOTE: Storage is persistent, so wrapper is not tracked.
         */
        sg = type->ty_ImportType.et_SemGroup;
        ResolveSemGroup(sg, 0);
        type->ty_Visibility = SCOPE_ALL_VISIBLE;    /* XXX */
        resvis_set(vis, type->ty_Visibility);
        ok = 1;
        break;
    default:
        dpanic("Unknown type %d (type=%p)", type->ty_Op, type);
        break;
    }

    if (ok) {
        type->ty_Flags &= ~TF_RESOLVING;
        type->ty_Flags |= TF_RESOLVED;
        if (sg) {
            if (sg->sg_Flags & SGF_ISINTEGER)
                type->ty_Flags |= TF_ISINTEGER;
            if (sg->sg_Flags & SGF_ISUNSIGNED)
                type->ty_Flags |= TF_ISUNSIGNED;
            if (sg->sg_Flags & SGF_ISFLOATING)
                type->ty_Flags |= TF_ISFLOATING;
            if (sg->sg_Flags & SGF_ISBOOL)
                type->ty_Flags |= TF_ISBOOL;
            if (sg->sg_Flags & SGF_HASASS)
                type->ty_Flags |= TF_HASASS;
            if (sg->sg_Flags & SGF_HASPTR)
                type->ty_Flags |= TF_HASPTR;
            if (sg->sg_SRBase)
                type->ty_Flags |= TF_HASLVREF;
            /* XXX TF_VARARGS */
            if (sg->sg_Flags & SGF_VARARGS)
                type->ty_Flags |= TF_HASLVREF;
            if (sg->sg_CBase)
                type->ty_Flags |= TF_HASCONSTRUCT;
            if (sg->sg_DBase)
                type->ty_Flags |= TF_HASDESTRUCT;

            /*
             * Combine constructor/destructor hint flags for globals
             * because we have just one linked list for global
             * constructors and destructors (no need to optimize
             * heavily).
             */
            if (sg->sg_GBase)
                type->ty_Flags |= TF_HASGCONSTRUCT | TF_HASGDESTRUCT;
            dassert(type->ty_Visibility != 0);
        }
    } else {
        /*
         * NOTE: visibility is always set prior to any deferral or
         * circularity.
         */
        deferType(type);
    }

    /*
     * Resolve the default expression for the type, if any. We do not
     * require the expression to complete.
     *
     * XXX qualified types just copy the exp. bad bad YYY
     *
     * YYY ResolveExp() no ISG (import sem group)
     */
    if (type->ty_OrigAssExp) {
        type->ty_Flags |= TF_HASASS;
        type->ty_AssExp = DupExp(sg, type->ty_OrigAssExp);
        type->ty_AssExp = ResolveExp(NULL, sg, type->ty_AssExp,
                                     DEL_LVALUE(type), RESOLVE_AUTOCAST);
    }

    /*
     * ty_DynamicVector is nominally used when a Rune binary is run, but
     * we also need to set up enough of it such that mixed
     * interpretation and execution, or even just straight
     * interpretation, works. This is because the interpreter calls into
     * libruntime.
     */
    type->ty_DynamicVector = DefaultDynamicVector;

    /*
     * NOTE: Cannot resolve type alignment here, it must be done in a
     * separate pass due to dependencies.
     */
    return (type);
}
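/*
 * Worked example (hypothetical numbers): TY_ARYOF sizing in
 * ResolveType() above, once the element type and the constant size
 * expression have both resolved. For a 10-element array of a 4-byte,
 * 4-aligned element type:
 *
 *    et_Count     = 10              (from the size expression)
 *    ty_AlignMask = 3               (inherited from the element type)
 *    ty_Bytes     = 4 * 10 = 40
 *
 * Tracking flags (TF_HASLVREF, TF_HASPTR, constructor/destructor hints,
 * TF_HASASS) are likewise inherited from the element type.
 */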
/*
 * resolveUnresClass() - resolve an unresolved dotted id sequence into a
 *                       class
 *
 * Unresolved type identifier sequences must be resolved. We are also
 * responsible for setting the visibility of the type's elements.
 */
void
resolveUnresClass(Type *super)
{
    runeid_t *dottedId;
    SemGroup *sg;
    Declaration *d;
    int visibility = SCOPE_ALL_VISIBLE;
    int eno = 0;

    dassert_type(super, super->ty_Op == TY_UNRESOLVED);
    dottedId = super->ty_UnresType.et_DottedId;
    sg = super->ty_UnresType.et_SemGroup;
    d = FindDeclPath(NULL, super->ty_UnresType.et_ImportSemGroup,
                     sg, super, dottedId, FDC_NULL, &visibility,
                     -1, &eno);
    if (d == NULL) {
        errorDottedId(dottedId, "Unable to resolve class");
        dassert_type(super, 0);
    }

    /*
     * Resolve the unresolved type. Note that this occurs during class
     * resolution and we can't call ResolveType() here without getting
     * into a loop, so we do not yet know storage requirements (ty_Bytes
     * and ty_Align).
     */
    switch (d->d_Op) {
    case DOP_CLASS:
        sg = d->d_ClassDecl.ed_SemGroup;
        dassert(sg);
        TypeToQualType(sg->sg_ClassType, super, super->ty_OrigAssExp,
                       super->ty_SQFlags | sg->sg_ClassType->ty_SQFlags,
                       visibility);
#if 0
        sg = d->d_ClassDecl.ed_SemGroup;
        super->ty_Op = TY_CLASS;
        super->ty_ClassType.et_SemGroup = sg;
        super->ty_ClassType.et_Super = d->d_ClassDecl.ed_Super;
        super->ty_Visibility = visibility;
        super->ty_SQFlags = sg->sg_ClassType->ty_SQFlags;
        if (super->ty_SQList)
            RUNE_REMOVE(super->ty_SQList, super, ty_Node);
        super->ty_SQList = &sg->sg_ClassList;
        RUNE_INSERT_TAIL(super->ty_SQList, super, ty_Node);
#endif
        dassert(visibility);
        /* can't resolve super here */

        /*
         * XXX should we move the class from the unresolved list to the
         * new SemGroup's actual list?
         */
        break;
    case DOP_TYPEDEF:
        /*
         * Adjust super instead of allocating a new super, so all other
         * references to super using this class path get resolved too.
         *
         * XXX which AssExp do we use?
         */
        dassert_type(super, d->d_TypedefDecl.ed_Type != super);
        TypeToQualType(d->d_TypedefDecl.ed_Type, super,
                       super->ty_AssExp,
                       super->ty_SQFlags |
                       d->d_TypedefDecl.ed_Type->ty_SQFlags,
                       visibility);
        /* can't resolve super here */
        break;
    default:
        errorDottedId(dottedId, "identifier is not a class or typedef");
        dassert_type(super, 0);
    }
}
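/*
 * Illustrative sketch (hypothetical identifiers and importSg): how an
 * unresolved dotted id such as "a.b.SomeClass" is looked up. The
 * runeid_t array is 0-terminated (see errorDottedId() below) and
 * FindDeclPath() walks it one scope at a time, returning the matching
 * declaration along with the computed visibility.
 */
#if 0
    runeid_t dottedId[4];   /* hypothetical ids for "a.b.SomeClass" + 0 */
    int visibility = SCOPE_ALL_VISIBLE;
    int eno = 0;
    Declaration *d;

    d = FindDeclPath(NULL, importSg, sg, NULL, dottedId,
                     FDC_NULL, &visibility, -1, &eno);
    if (d == NULL)
        errorDottedId(dottedId, "Unable to resolve class");
#endif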
/*
 * Resolve the declarations in a non-stack semantic group. The sg is
 * being referenced by someone, who resolves it with this call. This may
 * take multiple passes. We:
 *
 * - Resolve all real storage elements, referenced or not, so the
 *   structure has a consistent size. Size and alignment become valid
 *   when primary resolution via SGF_RESOLVED / SGF_GRESOLVED completes.
 *
 * - Most procedures are only resolved on-demand and are not resolved
 *   here. However, access to the SG implies that all constructors and
 *   destructors must be active, so we resolve those.
 *
 * - We must also resolve any DF_DYNAMICREF'd procedures, which are
 *   dynamic method calls in sub-classes. The flag is set on the method
 *   in the subclass when a method call is made in any super-class.
 *
 *   (Any newly added DF_DYNAMICREF'd procedures will be resolved by the
 *   code setting the flag if it finds that the SG is undergoing
 *   resolution or is already resolved).
 *
 * - We supply a dynamic index for all procedures, whether they are
 *   referenced or not, and leave the index NULL if they are not. This
 *   allows us to resolve the indices & extent of the dynamic index
 *   array even if late procedures are added.
 *
 * NOTE! This code does not resolve declarations related to executable
 * semantic groups, such as sub-blocks within a procedure, but it does
 * have to resolve procedure definitions found in Classes and such.
 *
 * NOTE! This code handles the last stage of subclass refinement, by
 * checking the validity of the refinement and setting sg_Compat
 * properly.
 */
static void
ResolveSemGroup(SemGroup *sg, int retry)
{
    Declaration *d;
    Type *type;
    int dyncount;
    int ok;

    if ((sg->sg_Flags & (SGF_RESOLVED | SGF_GRESOLVED)) ==
        (SGF_RESOLVED | SGF_GRESOLVED)) {
        return;
    }
    if (sg->sg_Flags & (SGF_RESOLVING | SGF_GRESOLVING)) {
        if (retry == 0)
            return;
    }
    if (sg->sg_Flags & SGF_RESOLVED)
        goto section2;

    sg->sg_Flags |= SGF_RESOLVING;
    sg->sg_Bytes = 0;
    ok = 1;

    /*
     * index 0 - reserved for dynamic initialization
     * index 1 - reserved for dynamic destructor
     */
    dyncount = 2;

    /*
     * SECTION1 - INSTANTIATED OBJECT RESOLUTION & PROCEDURE RESOLUTION
     *
     * Handle SCOPE_REFINE and DF_DYNAMICREF flagging. We resolve
     * non-global elements with real storage.
     */
    RUNE_FOREACH(d, &sg->sg_DeclList, d_Node) {
        /*
         * DF_DYNAMICREF requires that the declaration be resolved
         * because it might be used in a dynamic method call, even if it
         * was not directly referenced. So if the SemGroup (i.e. class)
         * is referenced at all, so too must the method be.
         */
        if (d->d_Flags & DF_DYNAMICREF) {
            if ((d->d_Flags & (DF_RESOLVED | DF_RESOLVING)) == 0) {
                ResolveDecl(d, 0);
            }
        }

        /*
         * Process all procedures and any non-global instantiated
         * storage.
         */
        switch (d->d_Op) {
        case DOP_CLASS:
        case DOP_TYPEDEF:
        case DOP_ALIAS:
        case DOP_IMPORT:
            break;
        case DOP_PROC:
            /*
             * Assign the dynamic index. There may be multiple entries
             * for the same d_Id; they are ordered such that refinements
             * use the same DynIndex as in the superclass, which is what
             * allows dynamic method calls to work properly. All
             * non-refined subclass elements are ordered after all
             * refined/non-refined superclass elements (replacing the
             * superclass element and using the same DynIndex when
             * refined).
             *
             * We must assign d_DynIndex regardless of whether the
             * procedure is used or not to guarantee a consistent index
             * between super-class and sub-class.
             */
            if ((d->d_ScopeFlags & SCOPE_INTERNAL) == 0 &&
                (d->d_ProcDecl.ed_Type->ty_SQFlags &
                 (SF_METHOD | SF_GMETHOD))) {
                d->d_DynIndex = dyncount;
                ++dyncount;
            }

            /*
             * Only process referenced procedures, plus any that were
             * flagged (see above), plus any constructors or
             * destructors.
             */
            if ((d->d_Flags & (DF_RESOLVED | DF_RESOLVING)) == 0) {
                if (d->d_ScopeFlags &
                    (SCOPE_CONSTRUCTOR | SCOPE_DESTRUCTOR)) {
                    ResolveDecl(d, 0);
                }
            }
            if ((d->d_Flags & (DF_RESOLVED | DF_RESOLVING)) == 0)
                break;

            if (d->d_ScopeFlags & SCOPE_GLOBAL) {
                if ((d->d_Flags & DF_ONGLIST) == 0 &&
                    (d->d_ScopeFlags &
                     (SCOPE_CONSTRUCTOR | SCOPE_DESTRUCTOR))) {
                    d->d_GNext = d->d_MyGroup->sg_GBase;
                    d->d_MyGroup->sg_GBase = d;
                    d->d_Flags |= DF_ONGLIST;
                    sg->sg_Flags |= SGF_GABICALL;
                }
            } else {
                if ((d->d_Flags & DF_ONCLIST) == 0 &&
                    (d->d_ScopeFlags & SCOPE_CONSTRUCTOR)) {
                    d->d_CNext = d->d_MyGroup->sg_CBase;
                    d->d_MyGroup->sg_CBase = d;
                    d->d_Flags |= DF_ONCLIST;
                    sg->sg_Flags |= SGF_ABICALL;
                }
                if ((d->d_Flags & DF_ONDLIST) == 0 &&
                    (d->d_ScopeFlags & SCOPE_DESTRUCTOR)) {
                    d->d_DNext = d->d_MyGroup->sg_DBase;
                    d->d_MyGroup->sg_DBase = d;
                    d->d_Flags |= DF_ONDLIST;
                    sg->sg_Flags |= SGF_ABICALL;
                }
            }
            break;
        case DOP_STACK_STORAGE:
            /*
             * Can't happen. Stack storage is only used in executable
             * contexts. The SGs for executable contexts are not handled
             * by ResolveSemGroup().
             */
            dassert_decl(d, 0);
            break;
        case DOP_ARGS_STORAGE:
        case DOP_GROUP_STORAGE:
            /*
             * Stop if the resolver looped, caller may try later.
             */
            ResolveDecl(d, 0);
            if ((d->d_Flags & DF_RESOLVED) == 0) {
                ok = 0;
                break;
            }

            /*
             * Update SG size and alignment, set d_Offset and d_Storage
             * within the SG.
             */
            if (sg->sg_AlignMask < d->d_AlignMask)
                sg->sg_AlignMask = d->d_AlignMask;
            sg->sg_Bytes = BASEALIGN(sg->sg_Bytes, d->d_AlignMask);
            d->d_Offset = sg->sg_Bytes;

            /*
             * Set d_Storage based on scope and intended default for
             * d_Op.
             */
            sg->sg_Bytes += d->d_Bytes;
            type = d->d_StorDecl.ed_Type;
            if (d->d_StorDecl.ed_OrigAssExp)
                sg->sg_Flags |= SGF_HASASS;
            if (type->ty_Flags & TF_HASASS)
                sg->sg_Flags |= SGF_HASASS;
            if (type->ty_Flags & TF_HASLVREF)
                sg->sg_Flags |= SGF_HASLVREF;
            if (type->ty_Flags & TF_HASPTR)
                sg->sg_Flags |= SGF_HASPTR;
            if (type->ty_Flags & TF_HASCONSTRUCT)
                sg->sg_Flags |= SGF_ABICALL;
            if (type->ty_Flags & TF_HASDESTRUCT)
                sg->sg_Flags |= SGF_ABICALL;
            if (type->ty_Flags & TF_HASGCONSTRUCT)
                sg->sg_Flags |= SGF_GABICALL;
            if (type->ty_Flags & TF_HASGDESTRUCT)
                sg->sg_Flags |= SGF_GABICALL;
            checkUnrestrictedType(d, type);
            break;
        case DOP_GLOBAL_STORAGE:
            /* handled in pass2 */
            break;
        default:
            dpanic_sg(sg, 0, "Unknown d->d_Op %d", d->d_Op);
            break;
        }

        /*
         * Finish up any refinements. (Affects 'ok'? No, for now.)
         */
        if (d->d_ScopeFlags & SCOPE_REFINE) {
            if (d->d_Flags & (DF_RESOLVING | DF_RESOLVED)) {
                ResolveDecl(d->d_Super, 0);
                ResolveDecl(d, 0);
                RefineDeclaration(sg, d->d_Super, d);
            }
        }
    }

    if (ok) {
        sg->sg_Bytes = BASEALIGN(sg->sg_Bytes, sg->sg_AlignMask);
        sg->sg_Flags &= ~SGF_RESOLVING;
        sg->sg_Flags |= SGF_RESOLVED;

        /*
         * If no dynamic methods and no dynamic initialization or
         * destruction are required, set dyncount to 0.
         */
        if (dyncount == 2 && (sg->sg_Flags & SGF_HASASS) == 0 &&
            sg->sg_SRBase == NULL && sg->sg_CBase == NULL &&
            sg->sg_DBase == NULL) {
            dyncount = 0;
        }
        sg->sg_DynCount = dyncount;
        sg->sg_Flags &= ~SGF_RESOLVING;
    }

    /*
     * SECTION2 - GLOBAL RESOLUTION
     */
section2:
    if (sg->sg_Flags & SGF_GRESOLVED)
        goto section3;
    sg->sg_Flags |= SGF_GRESOLVING;
    sg->sg_GlobalBytes = 0;
    ok = 1;

    RUNE_FOREACH(d, &sg->sg_DeclList, d_Node) {
        switch (d->d_Op) {
        case DOP_CLASS:
        case DOP_TYPEDEF:
        case DOP_ALIAS:
        case DOP_IMPORT:
        case DOP_PROC:
            break;
        case DOP_STACK_STORAGE:
            /*
             * Can't happen. Stack storage is only used in executable
             * contexts.
             */
            dassert_decl(d, 0);
        case DOP_ARGS_STORAGE:
        case DOP_GROUP_STORAGE:
            /*
             * Non-globals were handled in section1.
             */
            break;
        case DOP_GLOBAL_STORAGE:
            /*
             * Global storage is handled in section2.
             *
             * NOTE: We only process referenced global storage. This
             * will include global elements referenced by constructors,
             * which are always run even if not specifically referenced.
             */
            ResolveDecl(d, 0);
            if ((d->d_Flags & (DF_RESOLVING | DF_RESOLVED)) == 0)
                break;
            if ((d->d_Flags & DF_RESOLVED) == 0) {
                ok = 0;
                break;
            }
            if (sg->sg_GlobalAlignMask < d->d_AlignMask)
                sg->sg_GlobalAlignMask = d->d_AlignMask;
            sg->sg_GlobalBytes = (sg->sg_GlobalBytes + d->d_AlignMask) &
                                 ~d->d_AlignMask;
            d->d_Offset = sg->sg_GlobalBytes;
            sg->sg_GlobalBytes += d->d_Bytes;
            if (d->d_StorDecl.ed_OrigAssExp)
                sg->sg_Flags |= SGF_GHASASS;
            type = d->d_StorDecl.ed_Type;
            if (type->ty_Flags & TF_HASASS)
                sg->sg_Flags |= SGF_GHASASS;
            if (type->ty_Flags & TF_HASLVREF)
                sg->sg_Flags |= SGF_GHASLVREF;
            if (type->ty_Flags & TF_HASPTR)
                sg->sg_Flags |= SGF_GHASPTR;
            if (type->ty_Flags & TF_HASCONSTRUCT)
                sg->sg_Flags |= SGF_ABICALL;
            if (type->ty_Flags & TF_HASDESTRUCT)
                sg->sg_Flags |= SGF_ABICALL;
            if (type->ty_Flags & TF_HASGCONSTRUCT)
                sg->sg_Flags |= SGF_ABICALL;
            if (type->ty_Flags & TF_HASGDESTRUCT)
                sg->sg_Flags |= SGF_ABICALL;
            checkUnrestrictedType(d, type);
            break;
        default:
            dpanic_sg(sg, 0, "Unknown d->d_Op %d", d->d_Op);
            break;
        }

        /*
         * Finish up any refinements. (Affects 'ok'? No, for now.)
         */
        if (d->d_ScopeFlags & SCOPE_REFINE) {
            if (d->d_Flags & (DF_RESOLVING | DF_RESOLVED)) {
                ResolveDecl(d->d_Super, 0);
                ResolveDecl(d, 0);
                RefineDeclaration(sg, d->d_Super, d);
            }
        }
    }

    /*
     * Structures may not directly or indirectly contain pointers or
     * references unless they are internal. XXX
     *
     * For now allow pointers but not references.
     */
    if (sg->sg_Op == SG_CLASS &&
        (sg->sg_ClassType->ty_SQFlags & (SF_STRUCT | SF_INTERNAL)) ==
        SF_STRUCT) {
        if (sg->sg_Flags & (SGF_HASLVREF /* | SGF_HASPTR */)) {
            dfatal_sg(sg, TOK_ERR_STRUCT_CONTENT, NULL);
        }
    }

    /*
     * Final alignment
     */
    if (ok) {
        sg->sg_GlobalBytes = (sg->sg_GlobalBytes +
                              sg->sg_GlobalAlignMask) &
                             ~sg->sg_GlobalAlignMask;
        sg->sg_Flags &= ~SGF_GRESOLVING;
        sg->sg_Flags |= SGF_GRESOLVED;
    }

    /*
     * SECTION3 - Final rollup (future)
     */
section3:
    if ((sg->sg_Flags & (SGF_RESOLVED | SGF_GRESOLVED)) !=
        (SGF_RESOLVED | SGF_GRESOLVED)) {
        deferSG(sg);
    }

    /*
     * This gets hit if Int32Type is resolved before its class. This is
     * a big no-no.
     */
    if (sg == Int32Type.ty_ClassType.et_SemGroup && sg->sg_Bytes != 4) {
        dpanic("Resolver improperly early-resolved Int32Type\n");
    }
}
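/*
 * Worked example (hypothetical layout): the offset packing performed by
 * ResolveSemGroup() above. d_AlignMask is (alignment - 1) and
 * BASEALIGN() rounds up the way the open-coded global case does:
 * (base + mask) & ~mask. Packing an 8-byte, 8-aligned member after a
 * single 1-byte member:
 *
 *    sg_Bytes = 1
 *    sg_Bytes = BASEALIGN(1, 7) = (1 + 7) & ~7 = 8   -> d_Offset = 8
 *    sg_Bytes = 8 + 8 = 16
 *
 * sg_AlignMask meanwhile accumulates the maximum member mask, and the
 * final size is rounded up to it so consecutive instances stay aligned.
 */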
/*
 * findExpOper() - Find operator declaration matching expression
 *
 * Locate the operator declaration (a DOP_PROCDEF) that matches the
 * expression, or NULL if no match could be found. The expression's left
 * and right hand sides must already be resolved.
 *
 * NOTE! A temporary 'copy' Exp may be passed, not all fields are valid.
 */
static Declaration *testIConstantForType(Declaration *d, Type *type,
                                         Exp *exp);
static Declaration *testFConstantForType(Declaration *d, Type *type,
                                         Exp *exp);

static Declaration *
findExpOper(Exp *exp, int flags)
{
    Type *ltype;
    Type *rtype;
    Declaration *d;

    flags &= ~RESOLVE_AUTOCAST;    /* not applicable to this function */

    if (exp->ex_Flags & EXF_BINARY) {
        rtype = exp->ex_Rhs->ex_Type;
        ltype = exp->ex_Lhs->ex_Type;
    } else {
        dassert(exp->ex_Flags & EXF_UNARY);
        rtype = NULL;
        ltype = exp->ex_Lhs->ex_Type;
    }

    /*
     * XXX look in our local semantic hierarchy for a compatible
     * operator?
     */

    /*
     * Attempt to find a matching operator from the left hand side type.
     */
    d = findOper(ltype, exp->ex_Id, ltype, rtype, flags);
    if (d || (exp->ex_Flags & EXF_BINARY) == 0)
        return (d);

    /*
     * Attempt to find a matching binary operator from the right hand
     * side type.
     */
    d = findOper(rtype, exp->ex_Id, ltype, rtype, flags);

    /*
     * If that fails but either the left or right hand side is a
     * constant, see if we can find an operator by casting the constant
     * to the other (non-constant) side's type.
     */
    if (d == NULL) {
        if (exp->ex_Rhs->ex_Token == TOK_INTEGER &&
            exp->ex_Lhs->ex_Token != TOK_INTEGER &&
            exp->ex_Lhs->ex_Token != TOK_FLOAT &&
            (ltype->ty_Flags & TF_ISINTEGER)) {
            d = findOper(ltype, exp->ex_Id, ltype, ltype, flags);
            if (d)
                d = testIConstantForType(d, ltype, exp->ex_Rhs);
        } else if (exp->ex_Lhs->ex_Token == TOK_INTEGER &&
                   exp->ex_Rhs->ex_Token != TOK_INTEGER &&
                   exp->ex_Rhs->ex_Token != TOK_FLOAT &&
                   (rtype->ty_Flags & TF_ISINTEGER)) {
            d = findOper(rtype, exp->ex_Id, rtype, rtype, flags);
            if (d)
                d = testIConstantForType(d, rtype, exp->ex_Lhs);
        } else if (exp->ex_Rhs->ex_Token == TOK_FLOAT &&
                   exp->ex_Lhs->ex_Token != TOK_INTEGER &&
                   exp->ex_Lhs->ex_Token != TOK_FLOAT &&
                   (ltype->ty_Flags & TF_ISFLOATING)) {
            d = findOper(ltype, exp->ex_Id, ltype, ltype, flags);
            if (d)
                d = testFConstantForType(d, ltype, exp->ex_Rhs);
        } else if (exp->ex_Lhs->ex_Token == TOK_FLOAT &&
                   exp->ex_Rhs->ex_Token != TOK_INTEGER &&
                   exp->ex_Rhs->ex_Token != TOK_FLOAT &&
                   (rtype->ty_Flags & TF_ISFLOATING)) {
            d = findOper(rtype, exp->ex_Id, rtype, rtype, flags);
            if (d)
                d = testFConstantForType(d, rtype, exp->ex_Lhs);
        }
    }
    return (d);
}

/*
 * Calculate whether the constant can be safely cast. If it can, cast
 * the constant and return d. Otherwise complain and return NULL.
 */
static Declaration *
testIConstantForType(Declaration *d, Type *type, Exp *exp)
{
    int64_t v = resolveGetConstExpInt64(exp);

    if (type->ty_Flags & TF_ISUNSIGNED) {
        switch (type->ty_Bytes) {
        case 1:
            if (v != (int64_t) (uint8_t) v)
                d = NULL;
            break;
        case 2:
            if (v != (int64_t) (uint16_t) v)
                d = NULL;
            break;
        case 4:
            if (v != (int64_t) (uint32_t) v)
                d = NULL;
            break;
        case 8:
            break;
        default:
            break;
        }
    } else {
        switch (type->ty_Bytes) {
        case 1:
            if (v != (int64_t) (int8_t) v)
                d = NULL;
            break;
        case 2:
            if (v != (int64_t) (int16_t) v)
                d = NULL;
            break;
        case 4:
            if (v != (int64_t) (int32_t) v)
                d = NULL;
            break;
        case 8:
            break;
        default:
            break;
        }
    }

    /*
     * If successful, change the constant's type and reset the
     * interpreter to re-evaluate it.
     */
    if (d) {
        exp->ex_Type = type;
        exp->ex_Run = RunUnresolvedExp;
        exp->ex_Run64 = Run64DefaultExp;
    } else {
        dwarn_exp(exp, TOK_ERR_AUTOCAST_VALUE, NULL);
    }
    return d;
}

static Declaration *
testFConstantForType(Declaration *d, Type *type, Exp *exp)
{
    float128_t v = resolveGetConstExpFloat128(exp);

    switch (type->ty_Bytes) {
    case 4:
        if (v != (float32_t) v)
            d = NULL;
        break;
    case 8:
        if (v != (float64_t) v)
            d = NULL;
        break;
    case 16:
        break;
    }

    /*
     * If successful, change the constant's type and reset the
     * interpreter to re-evaluate it.
     */
    if (d) {
        exp->ex_Type = type;
        exp->ex_Run = RunUnresolvedExp;
        exp->ex_Run64 = Run64DefaultExp;
    } else {
        dwarn_exp(exp, TOK_ERR_AUTOCAST_VALUE, NULL);
    }
    return d;
}
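/*
 * Illustrative example (hypothetical value): the round-trip test used
 * by testIConstantForType() above. Casting 300 down to a 1-byte
 * unsigned type and back changes the value, so the constant cannot be
 * safely autocast and the candidate operator is rejected with the
 * TOK_ERR_AUTOCAST_VALUE warning:
 */
#if 0
    int64_t v = 300;

    if (v != (int64_t) (uint8_t) v)     /* 300 -> 44, cast is unsafe */
        d = NULL;
#endif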
static Declaration *
findOper(Type *btype, runeid_t id, Type *ltype, Type *rtype, int flags)
{
    SemGroup *sg;
    Declaration *d;
    int args = (rtype != NULL) ? 2 : 1;

    flags &= ~RESOLVE_AUTOCAST;    /* not applicable to this function */

    /*
     * Locate the base type. If the base type does not have a SemGroup
     * there are no operators. (XXX put system operators here)
     */
    sg = BaseType(&btype);
    if (sg == NULL)
        return (NULL);

    /*
     * Look for the operator in the SemGroup.
     *
     * TODO - For reasons currently unknown, complex internal operators
     *        in the Pointer and Reference class (and probably others)
     *        fail to completely match unless we pre-resolve all
     *        procedural declarations before looking for matches.
     */
#if 1
    RUNE_FOREACH(d, &sg->sg_DeclList, d_Node) {
        if (d->d_MyGroup == sg && d->d_Op == DOP_PROC) {
            ResolveDecl(d, 0);
        }
    }
#endif
    for (d = FindOperId(sg, id, args); d; d = d->d_ONext) {
        ResolveDecl(d, 0);
        if (d->d_MyGroup == sg && d->d_Op == DOP_PROC &&
            d->d_ProcDecl.ed_OperId == id &&
            MatchOperatorTypes(d, ltype, rtype)) {
            return (d);
        }
    }

    /*
     * Failed. If the base type is a compound type, look for the
     * operator in the SemGroup for each element making up the compound
     * type. e.g. so (mycustomtype, double) would find the operator in
     * mycustomtype.
     */
    if (btype->ty_Op == TY_COMPOUND) {
        RUNE_FOREACH(d, &sg->sg_DeclList, d_Node) {
            Declaration *d2;

            if (d->d_Op & DOPF_STORAGE) {
                d2 = findOper(d->d_StorDecl.ed_Type, id,
                              ltype, rtype, flags);
            } else if (d->d_Op == DOP_TYPEDEF) {
                d2 = findOper(d->d_TypedefDecl.ed_Type, id,
                              ltype, rtype, flags);
            } else {
                d2 = NULL;
            }
            if (d2)
                return (d2);
        }
    }
    return (NULL);
}

static void
errorDottedId(runeid_t *ary, const char *ctl, ...)
{
    char buf[RUNE_IDTOSTR_LEN];
    va_list va;
    int i;

    va_start(va, ctl);
    vfprintf(stderr, ctl, va);
    va_end(va);
    fprintf(stderr, ": %s", runeid_text(ary[0], buf));
    for (i = 1; ary[i]; ++i)
        fprintf(stderr, ".%s", runeid_text(ary[i], buf));
    fprintf(stderr, "\n");
}
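/*
 * Summary of the pass flags used by the alignment/storage functions
 * below (each function documents its own state machine):
 *
 *    (no flag)        - resolve direct requirements only; marks
 *                       RSF_ALIGN / RSF_STORAGE.
 *    RESOLVE_FINALIZE - also resolve indirect dependencies; marks the
 *                       RSF_SUB_* bits.
 *    RESOLVE_CLEAN    - clear all RSF_* state so a later pass can
 *                       recalculate; always paired with
 *                       RESOLVE_FINALIZE.
 */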
/*
 * Resolve the alignment requirements for SemGroups related to
 * statements, including the alignment requirements needed for temporary
 * expression space.
 */
static void
ResolveAlignment(Stmt *st, int flags)
{
    SemGroup *sg = st->st_MyGroup;
    Stmt *scan;

    /*
     * State machine.
     *
     * RESOLVE_CLEAN    - If set, RESOLVE_FINALIZE is also always set.
     *
     * RESOLVE_FINALIZE - Must also resolve indirect dependencies that
     *                    do not directly affect alignment of (st).
     */
    if (flags & RESOLVE_CLEAN) {
        if ((st->st_RState & (RSF_ALIGN | RSF_SUB_ALIGN)) == 0)
            return;
        st->st_RState &= ~(RSF_ALIGN | RSF_SUB_ALIGN |
                           RSF_STORAGE | RSF_SUB_STORAGE);
    } else if (flags & RESOLVE_FINALIZE) {
        if (st->st_RState & RSF_SUB_ALIGN)
            return;
        st->st_RState |= RSF_ALIGN | RSF_SUB_ALIGN;
    } else {
        if (st->st_RState & RSF_ALIGN)
            return;
        st->st_RState |= RSF_ALIGN;
    }

    /*
     * If this is an executable semantic layer or an import layer then
     * assign alignment to declarations up-front. Of the various
     * DOP_*_STORAGE ops, we should only see DOP_STACK_STORAGE and
     * DOP_GLOBAL_STORAGE.
     *
     * Note: if this is the root ST_Import, STF_SEMANTIC is *NOT* set
     * and sg will be NULL.
     */
    if ((st->st_Flags & STF_SEMANTIC) && st->st_Op != ST_Class) {
        Declaration *d;

        /*
         * Pre-scan for alignment. Don't try to propagate the alignment
         * to the parent for now as that would require recalculating the
         * parent(s).
         */
        RUNE_FOREACH(d, &sg->sg_DeclList, d_Node) {
            switch (d->d_Op) {
            case DOP_STACK_STORAGE:
            case DOP_ARGS_STORAGE:
            case DOP_GROUP_STORAGE:
                if (sg->sg_AlignMask < d->d_AlignMask)
                    sg->sg_AlignMask = d->d_AlignMask;
                break;
            case DOP_GLOBAL_STORAGE:
                if (sg->sg_GlobalAlignMask < d->d_AlignMask)
                    sg->sg_GlobalAlignMask = d->d_AlignMask;
                break;
            default:
                break;
            }
        }
    }

    switch (st->st_Op) {
    case ST_Import:
        break;
    case ST_Module:
    case ST_Class:
        break;
    case ST_Typedef:
        /* XXX needed? */
        if (st->st_TypedefStmt.es_Decl->d_Flags & DF_RESOLVED) {
            resolveDeclAlign(st->st_TypedefStmt.es_Decl,
                             &sg->sg_TmpAlignMask, flags);
        }
        break;
    case ST_Decl:
        /*
         * NOTE: Don't calculate for declarations that belong in a
         * different context.
         */
        {
            Declaration *d;
            int i;

            d = st->st_DeclStmt.es_Decl;
            for (i = 0; i < st->st_DeclStmt.es_DeclCount; ++i) {
                if (st->st_MyGroup == d->d_MyGroup &&
                    (d->d_Flags & DF_RESOLVED)) {
                    resolveDeclAlign(d, &sg->sg_TmpAlignMask, flags);
                }
                d = RUNE_NEXT(d, d_Node);
            }
        }
        break;
    case ST_Block:
        break;
    case ST_Proc:
        break;
    case ST_Nop:
        break;
    case ST_Loop:
        {
            if (st->st_LoopStmt.es_BCond) {
                resolveExpAlign(st->st_LoopStmt.es_BCond,
                                &sg->sg_TmpAlignMask, flags);
            }
            if (st->st_LoopStmt.es_ACond) {
                resolveExpAlign(st->st_LoopStmt.es_ACond,
                                &sg->sg_TmpAlignMask, flags);
            }
            if (st->st_LoopStmt.es_AExp) {
                resolveExpAlign(st->st_LoopStmt.es_AExp,
                                &sg->sg_TmpAlignMask, flags);
            }
        }
        break;
    case ST_BreakCont:
        break;
    case ST_Bad:
        break;
    case ST_IfElse:
        resolveExpAlign(st->st_IfStmt.es_Exp, &sg->sg_TmpAlignMask,
                        flags);
        break;
    case ST_Return:
        if (st->st_RetStmt.es_Exp)
            resolveExpAlign(st->st_RetStmt.es_Exp,
                            &sg->sg_TmpAlignMask, flags);
        break;
    case ST_Result:
        if (st->st_ResStmt.es_Exp)
            resolveExpAlign(st->st_ResStmt.es_Exp,
                            &sg->sg_TmpAlignMask, flags);
        break;
    case ST_Switch:
        /*
         * The switch expression's temporary data must be saved while we
         * are executing the sub-statements (the cases).
         */
        resolveExpAlign(st->st_SwStmt.es_Exp, &sg->sg_TmpAlignMask,
                        flags);
        break;
    case ST_Case:
        if (st->st_CaseStmt.es_Exp)
            resolveExpAlign(st->st_CaseStmt.es_Exp,
                            &sg->sg_TmpAlignMask, flags);
        break;
    case ST_Exp:
        resolveExpAlign(st->st_ExpStmt.es_Exp, &sg->sg_TmpAlignMask,
                        flags);
        break;
    case ST_ThreadSched:
        break;
    default:
        dassert_stmt(st, 0);
    }

    /*
     * Calculate alignment requirements for substatements. We union the
     * requirements for the substatements together. Note that often
     * scan->sg_MyGroup == sg.
     */
    RUNE_FOREACH(scan, &st->st_List, st_Node) {
        if (scan->st_Op == ST_Class) {
            if (scan->u.ClassStmt.es_Decl->d_Flags & DF_RESOLVED)
                ResolveAlignment(scan, flags);
        } else if (scan->st_Op == ST_Decl &&
                   scan->st_DeclStmt.es_Decl->d_MyGroup !=
                   st->st_MyGroup) {
            /*
             * Do nothing
             */
            ;
        } else if (scan->st_Op == ST_Decl &&
                   (scan->st_DeclStmt.es_Decl->d_Flags & DF_RESOLVED)) {
            /*
             * See prior comments, skip declarations that were moved to
             * another context.
             *
             * (already resolved so can use junk offsets)
             */
            resolveDeclAlign(scan->st_DeclStmt.es_Decl,
                             &sg->sg_TmpAlignMask, flags);
        } else if (scan->st_Op == ST_Proc &&
                   scan->st_ProcStmt.es_Decl->d_ProcDecl.ed_OrigBody ==
                   scan) {
            /* Do not resolve template procedures! */
        } else if (scan->st_Flags & STF_SEMTOP) {
            if (scan->st_Flags & STF_RESOLVED)
                ResolveAlignment(scan, flags);
        } else {
            if (scan->st_Flags & STF_RESOLVED)
                ResolveAlignment(scan, flags);
        }
    }

    /*
     * If this is a new semantic level then fully resolve the alignment
     * for the SG as a final clean-up (for alignment anyway).
     *
     * This will redundantly calculate temporary space requirements.
     *
     * Note that for non-Class executable SemGroups, TmpBytes is
     * incorporated in a downward fashion while sg_Bytes is incorporated
     * in an upward fashion. It can become quite confusing. Don't ask me
     * why I did it that way.
     */
    if (st->st_Flags & STF_SEMANTIC) {
        resolveSemGroupAlign(sg, flags);
        //if ((sg->sg_RState & RSF_SUB_ALIGN) == 0) {
        //	resolveSemGroupAlign(sg, flags);
        //}
    }

    /*
     * Propagate alignment requirements upward.
     */
    if ((st->st_Flags & (STF_SEMANTIC | STF_SEMTOP)) == STF_SEMANTIC) {
        if (sg->sg_Parent->sg_AlignMask < sg->sg_AlignMask)
            sg->sg_Parent->sg_AlignMask = sg->sg_AlignMask;
        if (sg->sg_Parent->sg_TmpAlignMask < sg->sg_TmpAlignMask)
            sg->sg_Parent->sg_TmpAlignMask = sg->sg_TmpAlignMask;
    }
}
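/*
 * Illustrative example (hypothetical masks): why the propagation above
 * is a simple max. If a sub-block's sg_AlignMask is 15 (16-byte
 * alignment) while the parent's is 7, the parent is bumped to 15 so the
 * frame it eventually allocates can satisfy the stricter sub-block
 * alignment.
 */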
/*
 * ResolveStorage() - Final storage resolution pass
 *
 * This pass carefully scans the SemGroup hierarchy and assigns offsets
 * to declarations.
 *
 * PROCEDURES - all the various 'executable' semantic layers in a
 * procedure are collapsed together for efficiency, so we only have to
 * manage one context. This means that the d_Offset assigned to
 * declarations in sub-blocks may exceed the sg_ size of the sub-block's
 * SemGroup. We do not attempt to resolve procedure body templates
 * (d_ProcDecl.ed_OrigBody).
 *
 * CLASSES - are given offsets in their SemGroups relative to 0, if not
 * already resolved.
 *
 * IMPORTS - are given offsets in their SemGroups relative to 0.
 *
 * COMPOUND TYPES - (such as procedure arguments) are given offsets in
 * their SemGroups relative to 0.
 *
 * TEMPORARY STORAGE - expressions may require temporary storage for
 * intermediate results. That space is reserved here.
 *
 * We specifically do not resolve unrelated storage.
 */
static void
ResolveStorage(Stmt *st, int flags)
{
    urunesize_t base;
    urunesize_t gbase;
    urunesize_t limit;
    urunesize_t glimit;
    SemGroup *sg = st->st_MyGroup;
    Stmt *scan;
    Type *type;

    /*
     * State machine for Storage (not used for cleaning)
     *
     * RESOLVE_FINALIZE - Must also resolve indirect dependencies that
     *                    do not directly affect storage for (st).
     */
    if (flags & RESOLVE_FINALIZE) {
        dassert_stmt(st, st->st_RState & RSF_SUB_ALIGN);
        if (st->st_RState & RSF_SUB_STORAGE)
            return;
        st->st_RState |= RSF_STORAGE | RSF_SUB_STORAGE;
    } else {
        dassert_stmt(st, st->st_RState & RSF_ALIGN);
        if (st->st_RState & RSF_STORAGE)
            return;
        st->st_RState |= RSF_STORAGE;
    }

    /*
     * TODO - pure expressions can be run multiple times
     * dassert((st->st_Flags & STF_TMPRES1/2) == 0);
     */

    /*
     * If this is an executable semantic layer or an import layer then
     * assign storage to declarations up-front. Of the various
     * DOP_*_STORAGE ops, we should only see DOP_STACK_STORAGE and
     * DOP_GLOBAL_STORAGE.
     *
     * Note: If this is the root ST_Import, STF_SEMANTIC is *NOT* set
     * and sg will be NULL.
     */
    if ((st->st_Flags & STF_SEMANTIC) && st->st_Op != ST_Class) {
        Declaration *d;

        /*
         * Make sure we aren't stuck in a recursive loop. If the SG has
         * already been resolved, assert calculated offsets.
         */
        //dassert((sg->sg_Flags & (SGF_FRESOLVED | SGF_FRESOLVING)) == 0);
        dassert((sg->sg_Flags & SGF_FRESOLVING) == 0);
        sg->sg_Flags |= SGF_FRESOLVING;

        /*
         * The base offset for sub-semantic-blocks must match the
         * alignment they require in order to allow us to do an aligned
         * BZEROing of the space. We do not include the temporary space
         * here (it does not need to be BZERO'd).
         *
         * NOTE: sg_TmpAlignMask is taken into account when the
         * top-level frame is allocated.
         */
        if (st->st_Flags & STF_SEMTOP) {
            base = 0;
            gbase = 0;
        } else {
            SemGroup *psg = sg->sg_Parent;

            base = BASEALIGN(psg->sg_BlkOffset + psg->sg_BlkBytes,
                             sg->sg_AlignMask);
            gbase = BASEALIGN(psg->sg_GlobalBlkOffset +
                              psg->sg_GlobalBlkBytes,
                              sg->sg_GlobalAlignMask);
        }
        dassert((sg->sg_Flags & SGF_FRESOLVED) == 0 ||
                sg->sg_BlkOffset == base);
        sg->sg_BlkOffset = base;
        sg->sg_GlobalBlkOffset = gbase;

        /*
         * Classify storage (note: class decls are handled elsewhere)
         */
        RUNE_FOREACH(d, &sg->sg_DeclList, d_Node) {
            /*
             * Set d_Storage based on scope and intended default for
             * d_Op.
             */
            switch (d->d_Op) {
            case DOP_STACK_STORAGE:
            case DOP_ARGS_STORAGE:
            case DOP_GROUP_STORAGE:
                type = d->d_StorDecl.ed_Type;
                base = BASEALIGN(base, d->d_AlignMask);
                dassert((sg->sg_Flags & SGF_FRESOLVED) == 0 ||
                        (urunesize_t)d->d_Offset == base);
                d->d_Offset = base;
                base += d->d_Bytes;
                if (d->d_StorDecl.ed_OrigAssExp)
                    sg->sg_Flags |= SGF_HASASS;
                if (type->ty_Flags & TF_HASASS)
                    sg->sg_Flags |= SGF_HASASS;
                if (type->ty_Flags & TF_HASLVREF)
                    sg->sg_Flags |= SGF_HASLVREF;
                if (type->ty_Flags & TF_HASPTR)
                    sg->sg_Flags |= SGF_HASPTR;
                if (type->ty_Flags & TF_HASCONSTRUCT)
                    sg->sg_Flags |= SGF_ABICALL;
                if (type->ty_Flags & TF_HASDESTRUCT)
                    sg->sg_Flags |= SGF_ABICALL;
                if (type->ty_Flags & TF_HASGCONSTRUCT)
                    sg->sg_Flags |= SGF_ABICALL;
                if (type->ty_Flags & TF_HASGDESTRUCT)
                    sg->sg_Flags |= SGF_ABICALL;
                checkUnrestrictedType(d, type);
                break;
            case DOP_GLOBAL_STORAGE:
                type = d->d_StorDecl.ed_Type;
                gbase = BASEALIGN(gbase, d->d_AlignMask);
                dassert((sg->sg_Flags & SGF_FRESOLVED) == 0 ||
                        (urunesize_t)d->d_Offset == gbase);
                d->d_Offset = gbase;
                gbase += d->d_Bytes;
                if (d->d_StorDecl.ed_OrigAssExp)
                    sg->sg_Flags |= SGF_GHASASS;
                if (type->ty_Flags & TF_HASASS)
                    sg->sg_Flags |= SGF_GHASASS;
                if (type->ty_Flags & TF_HASLVREF)
                    sg->sg_Flags |= SGF_GHASLVREF;
                if (type->ty_Flags & TF_HASPTR)
                    sg->sg_Flags |= SGF_GHASPTR;
                if (type->ty_Flags & TF_HASCONSTRUCT)
                    sg->sg_Flags |= SGF_ABICALL;
                if (type->ty_Flags & TF_HASDESTRUCT)
                    sg->sg_Flags |= SGF_ABICALL;
                if (type->ty_Flags & TF_HASGCONSTRUCT)
                    sg->sg_Flags |= SGF_ABICALL;
                if (type->ty_Flags & TF_HASGDESTRUCT)
                    sg->sg_Flags |= SGF_ABICALL;
                checkUnrestrictedType(d, type);
                break;
            default:
                break;
            }
        }

        /*
         * The byte size of the block does not have to be aligned, but
         * aligning it (within reason) might provide a benefit.
         */
        dassert((sg->sg_Flags & SGF_FRESOLVED) == 0 ||
                sg->sg_BlkBytes == base - sg->sg_BlkOffset);
        sg->sg_Bytes = base;
        dassert((sg->sg_Flags & SGF_FRESOLVED) == 0 ||
                sg->sg_GlobalBlkBytes == gbase - sg->sg_GlobalBlkOffset);
        sg->sg_GlobalBytes = gbase;
        limit = base;
        dassert((sg->sg_Flags & SGF_FRESOLVED) == 0 ||
                sg->sg_BlkBytes == sg->sg_Bytes - sg->sg_BlkOffset);
        sg->sg_BlkBytes = base - sg->sg_BlkOffset;
        sg->sg_GlobalBlkBytes = gbase - sg->sg_GlobalBlkOffset;
        sg->sg_Flags |= SGF_FRESOLVED;
        sg->sg_Flags &= ~SGF_FRESOLVING;
    }

    /*
     * Figure out how much temporary space we need to be able to execute
     * statements and expressions. Temporary space, like the main
     * procedural space, must be inherited from and consolidated into
     * the top-level SemGroup.
     */
    if (sg) {
        base = sg->sg_TmpBytes;
        gbase = sg->sg_GlobalTmpBytes;
    } else {
        /*
         * Root ST_Import. Avoid compiler warnings.
         */
        base = 0;
        gbase = 0;
    }
    limit = base;
    glimit = gbase;

    switch (st->st_Op) {
    case ST_Import:
        if (st->st_ImportStmt.es_DLL) {
            void (*func)(void);

            func = dlsym(st->st_ImportStmt.es_DLL, "ResolveStorage");
            if (func)
                func();
        }
        break;
    case ST_Module:
    case ST_Class:
        break;
    case ST_Typedef:
        if (st->st_TypedefStmt.es_Decl->d_Flags & DF_RESOLVED) {
            resolveDeclStorage(st->st_TypedefStmt.es_Decl, flags,
                               base, &limit, gbase, &glimit);
        }
        break;
    case ST_Decl:
        /*
         * Temporary space for declarations is handled here.
         *
         * Resolve declarations, skipping any whose context was moved to
         * a class (e.g. a declaration at the top level of a file like
         * Fd.setfd(...) also exists in the Fd class).
         */
        {
            Declaration *d;
            int i;

            d = st->st_DeclStmt.es_Decl;
            if (d->d_Op == DOP_GLOBAL_STORAGE)
                st->st_DeclStmt.es_TmpOffset = gbase;
            else
                st->st_DeclStmt.es_TmpOffset = base;
            for (i = 0; i < st->st_DeclStmt.es_DeclCount; ++i) {
                if (st->st_MyGroup != d->d_MyGroup) {
                    /* printf("SKIPB %s\n", d->d_Id); */
                    /*
                     * resolveDeclStorage(d, flags,
                     *		      base, &limit, gbase, &glimit);
                     */
                } else if (d->d_Flags & DF_RESOLVED) {
                    resolveDeclStorage(d, flags, base, &limit,
                                       gbase, &glimit);
                } else {
                    resolveDeclStorage(d, flags, base, &limit,
                                       gbase, &glimit);
                }
                d = RUNE_NEXT(d, d_Node);
            }
        }
        break;
    case ST_Block:
        break;
    case ST_Proc:
        break;
    case ST_Nop:
        break;
    case ST_Loop:
        {
            if (st->st_LoopStmt.es_BCond &&
                (flags & RESOLVE_FINALIZE)) {
                resolveExpStorage(st->st_LoopStmt.es_BCond, flags,
                                  base, &limit);
            }
            if (st->st_LoopStmt.es_ACond &&
                (flags & RESOLVE_FINALIZE)) {
                resolveExpStorage(st->st_LoopStmt.es_ACond, flags,
                                  base, &limit);
            }
            if (st->st_LoopStmt.es_AExp &&
                (flags & RESOLVE_FINALIZE)) {
                resolveExpStorage(st->st_LoopStmt.es_AExp, flags,
                                  base, &limit);
            }
        }
        break;
    case ST_BreakCont:
        break;
    case ST_Bad:
        break;
    case ST_IfElse:
        if (flags & RESOLVE_FINALIZE)
            resolveExpStorage(st->st_IfStmt.es_Exp, flags, base,
                              &limit);
        break;
    case ST_Return:
        if (st->st_RetStmt.es_Exp && (flags & RESOLVE_FINALIZE))
            resolveExpStorage(st->st_RetStmt.es_Exp, flags, base,
                              &limit);
        break;
    case ST_Result:
        if (st->st_ResStmt.es_Exp && (flags & RESOLVE_FINALIZE))
            resolveExpStorage(st->st_ResStmt.es_Exp, flags, base,
                              &limit);
        break;
    case ST_Switch:
        /*
         * The switch expression's temporary data must be saved while we
         * are executing the sub-statements (the cases).
         */
        if (flags & RESOLVE_FINALIZE) {
            urunesize_t xlimit = base;

            resolveExpStorage(st->st_SwStmt.es_Exp, flags, base,
                              &xlimit);
            base = xlimit;
            if (limit < xlimit)
                limit = xlimit;
        }
        break;
    case ST_Case:
        if (st->st_CaseStmt.es_Exp && (flags & RESOLVE_FINALIZE))
            resolveExpStorage(st->st_CaseStmt.es_Exp, flags, base,
                              &limit);
        break;
    case ST_Exp:
        if (flags & RESOLVE_FINALIZE)
            resolveExpStorage(st->st_ExpStmt.es_Exp, flags, base,
                              &limit);
        break;
    case ST_ThreadSched:
        break;
    default:
        dassert_stmt(st, 0);
    }

    /*
     * Calculate storage requirements for substatements. (base) may have
     * been adjusted if this statement level's temporary storage needs
     * to be retained (aka the switch() expression).
     *
     * Note that often scan->sg_MyGroup == sg.
     */
    RUNE_FOREACH(scan, &st->st_List, st_Node) {
        dassert(scan->st_Op != ST_Proc);
        if (scan->st_Op == ST_Class) {
            if (scan->u.ClassStmt.es_Decl->d_Flags & DF_RESOLVED)
                ResolveStorage(scan, flags);
        } else if (scan->st_Op == ST_Decl) {
            /*
             * Ignore declarations here, they will be handled in the
             * semgroup scan in the next loop.
             */
        } else if (scan->st_Op == ST_Proc) {
            /* Do not resolve template procedures! */
            if (scan->st_ProcStmt.es_Decl->d_ProcDecl.ed_OrigBody ==
                scan) {
                /* XXX */
            } else {
                /* XXX */
            }
        } else if (scan->st_Flags & STF_SEMTOP) {
            assert(scan->st_MyGroup != sg);
            if (scan->st_Flags & STF_RESOLVED)
                ResolveStorage(scan, flags);
        } else {
            /*
             * This is a bit of a mess. The baseline sg_TmpBytes needs
             * to be set so calculated temporary offsets are relative to
             * it, and then restored. Otherwise we might blow away the
             * SGF_TMPRESOLVED SemGroup.
             *
             * XXX
             */
            urunesize_t save_offset;
            urunesize_t save_goffset;

            if (scan->st_Flags & STF_RESOLVED) {
                save_offset = scan->st_MyGroup->sg_TmpBytes;
                save_goffset = scan->st_MyGroup->sg_GlobalTmpBytes;
                scan->st_MyGroup->sg_TmpBytes = base;
                scan->st_MyGroup->sg_GlobalTmpBytes = gbase;
                ResolveStorage(scan, flags);
                if (scan->st_MyGroup->sg_TmpBytes < save_offset)
                    scan->st_MyGroup->sg_TmpBytes = save_offset;
                if (scan->st_MyGroup->sg_GlobalTmpBytes < save_goffset) {
                    scan->st_MyGroup->sg_GlobalTmpBytes = save_goffset;
                }
                if (limit < scan->st_MyGroup->sg_TmpBytes)
                    limit = scan->st_MyGroup->sg_TmpBytes;
                if (glimit < scan->st_MyGroup->sg_GlobalTmpBytes)
                    glimit = scan->st_MyGroup->sg_GlobalTmpBytes;
            }
        }
    }

    /*
     * If this is a new semantic level, call resolveSemGroupStorage() to
     * do the final cleanup of SemGroup issues. This will redundantly
     * calculate temporary space requirements. Also, due to type/class
     * references the temporary space for a class may have already been
     * resolved. Since a class can only contain declarations, it had
     * better match what we calculate here.
     *
     * Note that for non-Class executable SemGroups, TmpBytes is
     * incorporated in a downward fashion while sg_Bytes is incorporated
     * in an upward fashion. It can become quite confusing. Don't ask me
     * why I did it that way.
     */
    if (st->st_Flags & STF_SEMANTIC) {
        resolveSemGroupStorage(sg, flags, limit, &limit, glimit,
                               &glimit);
    } else if (sg) {
        sg->sg_TmpBytes = limit;
        sg->sg_GlobalTmpBytes = glimit;
    } /* else this is the Root st_Import */

    if ((st->st_Flags & (STF_SEMANTIC | STF_SEMTOP)) == STF_SEMANTIC) {
        SemGroup *psg = sg->sg_Parent;

        if (psg->sg_Bytes < sg->sg_Bytes)
            psg->sg_Bytes = sg->sg_Bytes;
        if (psg->sg_GlobalBytes < sg->sg_GlobalBytes)
            psg->sg_GlobalBytes = sg->sg_GlobalBytes;
    }
}
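/*
 * Illustrative sketch (hypothetical expressions ex1/ex2): the
 * (base, *limitp) protocol used by the resolve*Storage() functions
 * below. base is the first free temporary offset handed down by the
 * caller; each consumer aligns base, claims its bytes, and raises
 * *limitp to the high-water mark. Siblings whose temporaries may
 * overlap are all started from the same base, so the result is a union
 * rather than a sum:
 */
#if 0
    urunesize_t base = 0;
    urunesize_t limit = 0;

    resolveExpStorage(ex1, RESOLVE_FINALIZE, base, &limit);
    resolveExpStorage(ex2, RESOLVE_FINALIZE, base, &limit);
    /* limit == max(ex1 needs, ex2 needs), not their sum */
#endif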
/*
 * resolveDeclAlign() - Resolve the alignment required to process a
 *                      declaration.
 *
 * This is an expression tree traversal storage resolution procedure. We
 * have to traverse through declarations to get to default assignments
 * and such.
 *
 * If a declaration has no assigned default, the underlying type may
 * itself have an assigned default which must be dealt with.
 */
static void
resolveDeclAlign(Declaration *d, urunesize_t *expalignp, int flags)
{
    SemGroup *sg;

    /*
     * State machine.
     *
     * RESOLVE_CLEAN    - If set, RESOLVE_FINALIZE is also always set.
     *
     * RESOLVE_FINALIZE - Must also resolve indirect dependencies that
     *                    do not directly affect alignment of (d).
     */
    if (flags & RESOLVE_CLEAN) {
        if ((d->d_RState & (RSF_ALIGN | RSF_SUB_ALIGN)) == 0)
            return;
        d->d_RState &= ~(RSF_ALIGN | RSF_SUB_ALIGN |
                         RSF_STORAGE | RSF_SUB_STORAGE);
    } else if (flags & RESOLVE_FINALIZE) {
        if (d->d_RState & RSF_SUB_ALIGN) {
            if (*expalignp < d->d_AlignMask)
                *expalignp = d->d_AlignMask;
            return;
        }
        d->d_RState |= RSF_ALIGN | RSF_SUB_ALIGN;
    } else {
        if (d->d_RState & RSF_ALIGN) {
            if (*expalignp < d->d_AlignMask)
                *expalignp = d->d_AlignMask;
            return;
        }
        d->d_RState |= RSF_ALIGN;
    }

    switch (d->d_Op) {
    case DOP_CLASS:
        /* recursion already dealt with */
        break;
    case DOP_ARGS_STORAGE:
    case DOP_STACK_STORAGE:
    case DOP_GROUP_STORAGE:
        {
            Type *type = d->d_StorDecl.ed_Type;

            resolveTypeAlign(type, expalignp, flags);
            if (d->d_StorDecl.ed_AssExp) {
                resolveExpAlign(d->d_StorDecl.ed_AssExp, expalignp,
                                flags);
            }
        }
        break;
    case DOP_GLOBAL_STORAGE:
        {
            Type *type = d->d_StorDecl.ed_Type;

            resolveTypeAlign(type, expalignp, flags);
            if (d->d_StorDecl.ed_AssExp) {
                resolveExpAlign(d->d_StorDecl.ed_AssExp, expalignp,
                                flags);
            }
        }
        break;
    case DOP_ALIAS:
        /*
         * Never try to resolve storage considerations for an alias's
         * assignment in the declaration itself. The run-time context
         * depends on who and how many other parts of the program
         * reference the alias, and the expression tree will be
         * duplicated for each.
         */
        break;
    case DOP_TYPEDEF:
        /* XXX what about ty_AssExp? should be in global space */
        break;
    case DOP_IMPORT:
        /* recursion already dealt with */
        break;
    case DOP_PROC:
        /*
         * Resolution of procedure declarations might have been deferred
         * (see TOK_ID in ResolveExp()).
         */
        /* ResolveDecl(d, 0); */
        {
            Stmt *st;
            Type *ptype;

            ptype = d->d_ProcDecl.ed_Type;
            resolveTypeAlign(ptype, expalignp, flags);
            if ((st = d->d_ProcDecl.ed_ProcBody) != NULL) {
                ResolveAlignment(st, flags);
            }
        }
        break;
    default:
        dassert_decl(d, 0);
    }

    /*
     * Make sure that the semantic group associated with the declaration
     * is resolved.
     */
    sg = d->d_MyGroup;
    if (sg && (sg->sg_Op == SG_MODULE || sg->sg_Op == SG_CLASS)) {
        resolveSemGroupAlign(sg, flags);
    }
}

static void
resolveDynamicDeclAlign(Declaration *d, urunesize_t *expalignp,
                        int flags)
{
    Declaration *scan;

    resolveDeclAlign(d, expalignp, flags);
    for (scan = d->d_SubBase; scan; scan = scan->d_SubNext) {
        if (scan->d_MyGroup &&
            (scan->d_MyGroup->sg_Flags &
             (SGF_RESOLVING | SGF_RESOLVED))) {
            resolveDeclAlign(scan, expalignp, flags);
        }
    }
    for (scan = d->d_SubBase; scan; scan = scan->d_SubNext) {
        if (scan->d_SubBase)
            resolveDynamicDeclAlign(scan, expalignp, flags);
    }
}

static void
resolveDeclStorage(Declaration *d, int flags, urunesize_t base,
                   urunesize_t *limitp, urunesize_t gbase,
                   urunesize_t *glimitp)
{
    SemGroup *sg;

    /*
     * sync-up any adjustments to base made by the caller
     */
    if (*limitp < base)
        *limitp = base;
    if (*glimitp < gbase)
        *glimitp = gbase;

    /*
     * State machine for Storage (not used for cleaning)
     *
     * RESOLVE_FINALIZE - Must also resolve indirect dependencies that
     *                    do not directly affect storage for (d).
     */
    if (flags & RESOLVE_FINALIZE) {
        dassert_decl(d, d->d_RState & RSF_SUB_ALIGN);
        if (d->d_RState & RSF_SUB_STORAGE)
            return;
        d->d_RState |= RSF_STORAGE | RSF_SUB_STORAGE;
    } else {
        dassert_decl(d, d->d_RState & RSF_ALIGN);
        if (d->d_RState & RSF_STORAGE)
            return;
        d->d_RState |= RSF_STORAGE;
    }

    switch (d->d_Op) {
    case DOP_CLASS:
        /* recursion already dealt with */
        break;
    case DOP_ARGS_STORAGE:
    case DOP_STACK_STORAGE:
    case DOP_GROUP_STORAGE:
        {
            Type *type = d->d_StorDecl.ed_Type;

            resolveTypeStorage(type, 0, base, limitp);
            if (d->d_StorDecl.ed_AssExp && (flags & RESOLVE_FINALIZE))
                resolveExpStorage(d->d_StorDecl.ed_AssExp, flags,
                                  base, limitp);
        }
        break;
    case DOP_GLOBAL_STORAGE:
        {
            Type *type = d->d_StorDecl.ed_Type;

            resolveTypeStorage(type, RESOLVE_ISGLOB, gbase, glimitp);
            if (d->d_StorDecl.ed_AssExp && (flags & RESOLVE_FINALIZE)) {
                resolveExpStorage(d->d_StorDecl.ed_AssExp, flags,
                                  gbase, glimitp);
            }
        }
        break;
    case DOP_ALIAS:
        /*
         * Never try to resolve storage considerations for an alias's
         * assignment in the declaration itself. The run-time context
         * depends on who and how many other parts of the program
         * reference the alias, and the expression tree will be
         * duplicated for each.
         */
        break;
    case DOP_TYPEDEF:
        /* XXX what about ty_AssExp? should be in global space */
        break;
    case DOP_IMPORT:
        /* recursion already dealt with */
        break;
    case DOP_PROC:
        {
            Stmt *st;
            Type *ptype;

            ptype = d->d_ProcDecl.ed_Type;
            dassert(ptype->ty_Op == TY_PROC);
            resolveTypeStorage(ptype, flags, base, limitp);
            if ((st = d->d_ProcDecl.ed_ProcBody) != NULL) {
                if (flags & RESOLVE_FINALIZE)
                    ResolveStorage(st, flags);
            }
        }
        break;
    default:
        dassert_decl(d, 0);
    }

    /*
     * Make sure that the semantic group associated with the declaration
     * is resolved.
     */
    sg = d->d_MyGroup;
    if (sg && (sg->sg_Op == SG_MODULE || sg->sg_Op == SG_CLASS)) {
        resolveSemGroupStorage(sg, flags, 0, NULL, 0, NULL);
    }
}

static void
resolveDynamicDeclStorage(Declaration *d, int flags, urunesize_t base,
                          urunesize_t *limitp, urunesize_t gbase,
                          urunesize_t *glimitp)
{
    Declaration *scan;

    /*
     * sync-up any adjustments to base made by the caller
     */
    if (*limitp < base)
        *limitp = base;
    if (*glimitp < gbase)
        *glimitp = gbase;

    resolveDeclStorage(d, flags, base, limitp, gbase, glimitp);
    for (scan = d->d_SubBase; scan; scan = scan->d_SubNext) {
        if (scan->d_MyGroup &&
            (scan->d_MyGroup->sg_Flags &
             (SGF_RESOLVING | SGF_RESOLVED))) {
            resolveDeclStorage(scan, flags, base, limitp, gbase,
                               glimitp);
        }
    }
    for (scan = d->d_SubBase; scan; scan = scan->d_SubNext) {
        if (scan->d_SubBase) {
            resolveDynamicDeclStorage(scan, flags, base, limitp,
                                      gbase, glimitp);
        }
    }
}

/*
 * resolveExpOnlyStorage()
 *
 * Resolve temporary storage for this exp structure, do not recurse
 * sub-expressions. Any type-temporary storage is tacked onto the end of
 * this expression's temporary area.
 *
 * We do not need to assign storage for expressions which return
 * lvalues, because they will simply return a pointer into non-temporary
 * storage.
 */
static void
resolveExpOnlyStorage(Exp *exp, int flags, urunesize_t base,
                      urunesize_t *limitp)
{
    Type *type;

    dassert(flags & RESOLVE_FINALIZE);
    dassert((exp->ex_RState & RSF_SUB_STORAGE) == 0);
    exp->ex_RState |= RSF_STORAGE | RSF_SUB_STORAGE;

    /*
     * sync-up any adjustments to base made by the caller
     */
    if (*limitp < base)
        *limitp = base;

#if 0
    /*
     * State machine for Storage (not used for cleaning)
     *
     * RESOLVE_FINALIZE - Must also resolve indirect dependencies that
     *                    do not directly affect storage for (exp).
     */
    if (flags & RESOLVE_FINALIZE) {
        dassert_exp(exp, exp->ex_RState & RSF_SUB_ALIGN);
        if (exp->ex_RState & RSF_SUB_STORAGE)
            return;
        exp->ex_RState |= RSF_STORAGE | RSF_SUB_STORAGE;
    } else {
        dassert_exp(exp, exp->ex_RState & RSF_ALIGN);
        if (exp->ex_RState & RSF_STORAGE)
            return;
        exp->ex_RState |= RSF_STORAGE;
    }
#endif

    /*
     * Adjust if expression resolves to a type rather than a value, e.g.
     * when you do something like switch (typeof(int)) { ... }. Types
     * are handled as thin pointers.
     */
    if (exp->ex_Flags & EXF_RET_TYPE) {
        exp->ex_TmpOffset = BASEALIGN(base, RAWPTR_ALIGN);
        SIZELIMIT(base, sizeof(void *), limitp);
    }

    /*
     * If the exp has a decl
     */
    if (exp->ex_Decl) {
        Declaration *d;

        d = exp->ex_Decl;
        if (d->d_Flags & DF_RESOLVED) {
            resolveDeclStorage(d, flags, base, limitp, base, limitp);
        }
    }

    /*
     * Assign temporary offset. This offset does not overlap temporary
     * space reserved for sub-expressions.
     *
     * We must have an assigned type. Expression sequences like
     * 'module.blah' are collapsed into 'blah' long before we get here,
     * or they should be. We should not encounter any TOK_TCMV_ID
     * expression tokens. Structural ids (the right hand side of X.Y)
     * are resolved by their parent expression node and no typing or
     * temporary space is required.
     *
     * Expressions that return lvalues do not need temporary space.
     */
    type = exp->ex_Type;
    if (type == NULL) {
        switch (exp->ex_Token) {
        case TOK_STRUCT_ID:
        case TOK_SEMGRP_ID:
            break;
        default:
            dasserts_exp(exp, 0, "Unhandled expression token");
            break;
        }
        exp->ex_TmpOffset = -3;
    } else if (exp->ex_Flags2 & EX2F_LVALUE) {
        // (type->ty_SQFlags & SF_LVALUE)
        /*
         * Expressions which return lvalues do not get temporary space.
         * Note that this also prevents lvalues such as large arrays
         * (int ary[999999999]) from reserving unnecessary stack space.
         *
         * NOTE: SF_LVALUE is now equivalent to SCOPE_LVALUE. It tells
         * us precisely whether the type is being stored as an
         * LValueStor or not.
         */
        exp->ex_TmpOffset = -2;
        dassert_exp(exp, exp->ex_Token != TOK_COMPOUND);
    } else {
        /*
         * Reserve temporary space for potential intermediate results.
         *
         * Compound expressions may need extra space to default-init the
         * compound value; it is expected to be available to the
         * generator right after the nominal type in the TmpOffset.
         * XXX also make available to the interpreter?
         *
         * Procedure calls also may need extra space to default-init the
         * return value. XXX also make available to the interpreter?
         */
        base = BASEALIGN(base, type->ty_AlignMask);

        /*
         * It may be convenient to use a larger alignment for arrays,
         * which would allow (e.g.) %xmm registers to be used on 64-bit
         * arrays for moves. Limit to 16-byte alignment for now.
         *
         * (See also resolveExpAlign())
         */
        if (type->ty_Op == TY_ARYOF || type->ty_Op == TY_COMPOUND ||
            type->ty_Op == TY_ARGS) {
            if (type->ty_Bytes >= 16) {
                base = BASEALIGN(base, 15);
            } else if (type->ty_Bytes >= 8) {
                base = BASEALIGN(base, 7);
            } else if (type->ty_Bytes >= 4) {
                base = BASEALIGN(base, 3);
            }
        }

        /*
         * Temporary storage for this exp
         */
        exp->ex_TmpOffset = base;
        SIZELIMIT(base, type->ty_Bytes, limitp);

        /*
         * A compound expression's type may need additional temporary
         * storage. NOTE: The type might not yet be changed to
         * TY_COMPOUND, but single-element compounds will use the same
         * temporary space as a non-compound.
         *
         * A procedure call may need additional temporary storage.
         *
         * (base was adjusted above and is exp->ex_TmpOffset)
         */
        if (exp->ex_Token == TOK_COMPOUND) {
            /*
             * NOTE: type might not yet be changed to compound, but a
             * single-element compound will use the same temporary
             * space.
             */
            resolveTypeStorage(type, RESOLVE_FINALIZE,
                               base + type->ty_Bytes, limitp);
        } else if (exp->ex_Token == TOK_CALL) {
            resolveTypeStorage(type, RESOLVE_FINALIZE,
                               base + type->ty_TmpBytes, limitp);
        }
    }
    dassert(exp->ex_TmpOffset != -1);
}

/*
 * Calculate the overlapping temporary space for sub-expression trees.
 *
 * (flags already asserted and RState adjusted by
 * resolveExpOnlyStorage())
 */
static void
resolveExpSubStorage(Exp *exp, int flags, urunesize_t base,
                     urunesize_t *limitp)
{
    urunesize_t blimit;

    /*
     * sync-up any adjustments to base made by the caller
     */
    if (*limitp < base)
        *limitp = base;

    if (exp->ex_Type)
        resolveTypeStorage(exp->ex_Type, flags, base, limitp);

    /*
     * Make sure resolved declarations have resolved temporary storage
     * for assigned expressions. XXX pure test
     */
    if (exp->ex_Token == TOK_ID || exp->ex_Token == TOK_CLASSID) {
        Declaration *d;

        d = exp->ex_Decl;
        if (d && (d->d_Flags & DF_RESOLVED)) {
            resolveDeclStorage(d, flags, base, limitp, base, limitp);
        }
        /* note: UNARY can be set for aliases */
    }

    /*
     * Used only by TOK_INLINE_CALL, calculates the temporary storage
     * base for the inline procedure's body.
     */
    blimit = 0;

    /*
     * Calculate the overlapping temporary space for sub-trees.
     */
    if (exp->ex_Flags & EXF_BINARY) {
        /*
         * Ensure the lhs's NON-RECURSIVE temporary storage on-return
         * does not interfere with the rhs's, or vice versa.
         *
         * In addition, neither the lhs nor the rhs side interferes with
         * their parent node's temporary storage, which is particularly
         * important for setting up the return storage for TOK_CALLs
         * (etc).
         *
         * To do this, offset the rhs storage by the non-recursive lhs
         * storage.
         */
        blimit = base;
        resolveExpStorage(exp->ex_Lhs, flags, base, &blimit);
        if (exp->ex_Lhs->ex_TmpOffset >= 0) {
            resolveExpStorage(exp->ex_Rhs, flags,
                              exp->ex_Lhs->ex_TmpOffset +
                              exp->ex_Lhs->ex_Type->ty_Bytes,
                              &blimit);
        } else {
            resolveExpStorage(exp->ex_Rhs, flags, base, &blimit);
        }
        SIZELIMIT(blimit, 0, limitp);
    } else if (exp->ex_Flags & EXF_UNARY) {
        resolveExpStorage(exp->ex_Lhs, flags, base, limitp);
        dassert_exp(exp, exp->ex_Lhs->ex_Next == NULL);
    } else if (exp->ex_Flags & EXF_COMPOUND) {
        /*
         * Each element will be copied into the compound storage in
         * turn, so we can union the temporary storage required for each
         * element.
         */
        Exp *scan;

        for (scan = exp->ex_Lhs; scan; scan = scan->ex_Next) {
            dassert_exp(scan, scan->ex_Type != NULL);
            resolveExpStorage(scan, flags, base, limitp);
        }
    }

    if (exp->ex_Token == TOK_CALL) {
        /*
         * Normal call
         */
        resolveDynamicProcedureStorage(exp, flags, base, limitp,
                                       base, limitp);
    } else if (exp->ex_Token == TOK_INLINE_CALL) {
        /*
         * For an inlined procedure, the procedure body has been dup'd
         * and thus should be entirely unique. We have to incorporate
         * its temporary storage into our own.
         *
         * The inlined procedure body's temporary space must begin after
         * the temporary space we reserve for the arguments and return
         * value (aka blimit).
         */
        Stmt *st = exp->ex_AuxStmt;
        SemGroup *sg = st->st_MyGroup;

        /* dassert((exp->ex_Flags & EXF_DUPEXP) == 0); */
        dassert(sg->sg_Parent);
        dassert(exp->ex_Flags & EXF_BINARY);
        dassert((st->st_Flags & (STF_SEMTOP | STF_SEMANTIC)) ==
                STF_SEMANTIC);
        dassert((flags & RESOLVE_FINALIZE) &&
                (st->st_RState & RSF_SUB_STORAGE) == 0);
        sg->sg_TmpBytes = BASEALIGN(blimit, sg->sg_TmpAlignMask);
        ResolveStorage(st, flags);
        SIZELIMIT(sg->sg_TmpBytes, 0, limitp);
        resolveDynamicProcedureStorage(exp, flags, base, limitp,
                                       base, limitp);
    }
}

/*
 * Resolve all storage parameters for an expression tree. When dealing
 * with an expression tree, we do not overlap the parent's temporary
 * space with that of either the lhs or rhs, and we do not overlap the
 * lhs's result storage with the rhs. However, the remainder of the
 * lhs's temporary space can be overlapped with the rhs.
 *
 * WARNING! Storage parameters for executable code cannot be
 *	    re-resolved, assert once-only. Resolve-time constant
 *	    evaluation should be interpreted just once.
 */
static void
resolveExpStorage(Exp *exp, int flags, urunesize_t base,
                  urunesize_t *limitp)
{
    dassert(flags & RESOLVE_FINALIZE);
    dassert((exp->ex_RState & RSF_SUB_STORAGE) == 0);

    /*
     * sync-up any adjustments to base made by the caller
     */
    if (*limitp < base)
        *limitp = base;

    resolveExpOnlyStorage(exp, flags, base, limitp);
    if (exp->ex_TmpOffset >= 0) {
        resolveExpSubStorage(exp, flags,
                             exp->ex_TmpOffset + exp->ex_Type->ty_Bytes,
                             limitp);
    } else {
        resolveExpSubStorage(exp, flags, base, limitp);
    }
}
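/*
 * Worked example (hypothetical sizes): temporary layout produced by
 * resolveExpStorage()/resolveExpSubStorage() for a binary node with
 * base = 0 and 8-byte parent/lhs/rhs results:
 *
 *    parent ex_TmpOffset = 0     (bytes 0..7)
 *    lhs    ex_TmpOffset = 8     (sub-storage starts past the parent)
 *    rhs    starts at      16    (the lhs result must survive while
 *                                 the rhs evaluates)
 *
 * Only the lhs *result* is protected; the remainder of the lhs's
 * recursive temporary space may be overlapped by the rhs.
 */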
static void
resolveExpAlign(Exp *exp, urunesize_t *expalignp, int flags)
{
    Type *type;

    /*
     * State machine.
     *
     * RESOLVE_CLEAN    - If set, RESOLVE_FINALIZE is also always set.
     *
     * RESOLVE_FINALIZE - Must also resolve indirect dependencies that
     *                    do not directly affect alignment of (exp).
     *
     * NOTE: SGF_RESOLVED might not be set, indicating that we were able
     *       to pick out individual declarations in (global) SGs without
     *       having to resolve the whole group. This allows unused
     *       declarations to be omitted by the code generator.
     */
    if (flags & RESOLVE_CLEAN) {
        if ((exp->ex_RState & (RSF_ALIGN | RSF_SUB_ALIGN)) == 0)
            return;
        exp->ex_RState &= ~(RSF_ALIGN | RSF_SUB_ALIGN |
                            RSF_STORAGE | RSF_SUB_STORAGE);
    } else if (flags & RESOLVE_FINALIZE) {
        if (exp->ex_RState & RSF_SUB_ALIGN)
            return;
        exp->ex_RState |= RSF_ALIGN | RSF_SUB_ALIGN;
    } else {
        if (exp->ex_RState & RSF_ALIGN)
            return;
        exp->ex_RState |= RSF_ALIGN;
    }

    if (exp->ex_Flags & EXF_RET_TYPE) {
        if (*expalignp < RAWPTR_ALIGN)
            *expalignp = RAWPTR_ALIGN;
        // type resolution might wind up being more complex so
        // keep going.
        //if ((flags & RESOLVE_FINALIZE) == 0)
        //	return;
    }

    /*
     * Any associated type
     */
    type = exp->ex_Type;
    if (type) {
        if (type->ty_SQFlags & SF_LVALUE) {
            if (*expalignp < LVALUESTOR_ALIGN)
                *expalignp = LVALUESTOR_ALIGN;
        } else {
            if (*expalignp < type->ty_AlignMask)
                *expalignp = type->ty_AlignMask;
        }
        resolveTypeAlign(type, expalignp, flags | RESOLVE_FINALIZE);

        /*
         * It may be convenient to use a larger alignment for arrays,
         * which would allow (e.g.) %xmm registers to be used on 64-bit
         * arrays for moves. Limit to 16-byte alignment for now.
         *
         * (See also resolveExpOnlyStorage())
         */
        if (type->ty_Op == TY_ARYOF || type->ty_Op == TY_COMPOUND ||
            type->ty_Op == TY_ARGS) {
#if 0
            if (type->ty_Bytes >= 64) {
                if (*expalignp < 63)
                    *expalignp = 63;
            } else if (type->ty_Bytes >= 32) {
                if (*expalignp < 31)
                    *expalignp = 31;
            } else
#endif
            if (type->ty_Bytes >= 16) {
                if (*expalignp < 15)
                    *expalignp = 15;
            } else if (type->ty_Bytes >= 8) {
                if (*expalignp < 7)
                    *expalignp = 7;
            } else if (type->ty_Bytes >= 4) {
                if (*expalignp < 3)
                    *expalignp = 3;
            }
        }
    }

    /*
     * Any associated declaration
     */
    if (exp->ex_Decl) {
        Declaration *d;

        d = exp->ex_Decl;
        if (d->d_Flags & DF_RESOLVED) {
            resolveDeclAlign(d, expalignp, flags);
        }
    }

    /*
     * Misc special cases
     */
    switch (exp->ex_Token) {
    case TOK_CALL:
        /*
         * Alignment for dynamic procedures
         */
        resolveDynamicProcedureAlign(exp, expalignp, flags);
        break;
    case TOK_INLINE_CALL:
        /*
         * Recurse through for an inline call, then roll up the
         * alignment requirement(s) for the target procedure. We handle
         * the 'arguments' and 'return value' alignment in EXF_BINARY
         * below.
         */
        {
            SemGroup *asg;

            ResolveAlignment(exp->ex_AuxStmt, flags);
            asg = exp->ex_AuxStmt->st_MyGroup;
            if (*expalignp < asg->sg_TmpAlignMask)
                *expalignp = asg->sg_TmpAlignMask;
            resolveDynamicProcedureAlign(exp, expalignp, flags);
        }
        break;
    case TOK_CLASSID:
        break;
    }

    /*
     * Nominal lhs, rhs, and compound recursion
     */
    if (exp->ex_Flags & EXF_BINARY) {
        resolveExpAlign(exp->ex_Lhs, expalignp, flags);
        resolveExpAlign(exp->ex_Rhs, expalignp, flags);
    } else if (exp->ex_Flags & EXF_UNARY) {
        resolveExpAlign(exp->ex_Lhs, expalignp, flags);
    } else if (exp->ex_Flags & EXF_COMPOUND) {
        Exp *scan;

        for (scan = exp->ex_Lhs; scan; scan = scan->ex_Next) {
            resolveExpAlign(scan, expalignp, flags);
        }
    }
}

/*
 * resolveTypeAlign()
 *
 * Figure out the temporary space required to initialize a type's
 * defaults. Note that the space will be figured independently for any
 * SemGroups.
 */
static void
resolveTypeAlign(Type *type, urunesize_t *expalignp, int flags)
{
    SemGroup *sg = NULL;
    Type *subtype1 = NULL;
    Type *subtype2 = NULL;
    Type *pass2;

    dassert(type->ty_Flags & TF_RESOLVED);

    /*
     * State machine.
     *
     * RESOLVE_CLEAN    - If set, RESOLVE_FINALIZE is also always set.
     *
     * RESOLVE_FINALIZE - Must also resolve indirect dependencies that
     *                    do not directly affect alignment of (type).
     */
/*
 * resolveTypeAlign()
 *
 * Figure out the temporary space required to initialize a type's defaults.
 * Note that the space will be figured independently for each SemGroup.
 */
static void
resolveTypeAlign(Type *type, urunesize_t *expalignp, int flags)
{
	SemGroup *sg = NULL;
	Type *subtype1 = NULL;
	Type *subtype2 = NULL;
	Type *pass2;

	dassert(type->ty_Flags & TF_RESOLVED);

	/*
	 * State machine.
	 *
	 * RESOLVE_CLEAN    - If set, RESOLVE_FINALIZE is also always set
	 *
	 * RESOLVE_FINALIZE - Must also resolve indirect dependencies that
	 *		      do not directly affect alignment of (type).
	 */
	if (flags & RESOLVE_CLEAN) {
		if ((type->ty_RState & (RSF_ALIGN | RSF_SUB_ALIGN)) == 0)
			return;
		type->ty_RState &= ~(RSF_ALIGN | RSF_SUB_ALIGN |
				     RSF_STORAGE | RSF_SUB_STORAGE);
	} else if (flags & RESOLVE_FINALIZE) {
		if (type->ty_RState & RSF_SUB_ALIGN) {
			if (*expalignp < type->ty_TmpAlignMask)
				*expalignp = type->ty_TmpAlignMask;
			return;
		}
		type->ty_RState |= RSF_ALIGN | RSF_SUB_ALIGN;
	} else {
		if (type->ty_RState & RSF_ALIGN) {
			if (*expalignp < type->ty_TmpAlignMask)
				*expalignp = type->ty_TmpAlignMask;
			return;
		}
		type->ty_RState |= RSF_ALIGN;
	}

	switch (type->ty_Op) {
	case TY_CLASS:
		sg = type->ty_ClassType.et_SemGroup;
		break;
	case TY_ARYOF:
		subtype1 = type->ty_AryType.et_Type;
		break;
	case TY_COMPOUND:
		sg = type->ty_CompType.et_SemGroup;
		break;
	case TY_PROC:
		subtype1 = type->ty_ProcType.et_ArgsType;
		subtype2 = type->ty_ProcType.et_RetType;
		break;
	case TY_IMPORT:
		sg = type->ty_ImportType.et_SemGroup;
		break;
	case TY_ARGS:
		sg = type->ty_ArgsType.et_SemGroup;
		break;
	case TY_VAR:
		sg = type->ty_VarType.et_SemGroup;
		break;
	case TY_PTRTO:
		if (flags & RESOLVE_FINALIZE) {
			pass2 = type->ty_RawPtrType.et_Type;
			resolveTypeAlign(pass2, &pass2->ty_TmpAlignMask,
					 flags);
		}
		/* has nothing to do with initializing the pointer */
		/* subtype1 = type->ty_RawPtrType.et_Type; */
		break;
	case TY_REFTO:
		if (flags & RESOLVE_FINALIZE) {
			pass2 = type->ty_RefType.et_Type;
			resolveTypeAlign(pass2, &pass2->ty_TmpAlignMask,
					 flags);
		}
		/* has nothing to do with initializing the pointer */
		/* subtype1 = type->ty_RefType.et_Type; */
		break;
	case TY_STORAGE:
	case TY_DYNAMIC:
		/*
		 * nothing to be done here.
		 */
		break;
	case TY_UNRESOLVED:	/* should be no unresolved types now */
	default:
		dassert_type(type, 0);
	}

	if (subtype1) {
		resolveTypeAlign(subtype1, &subtype1->ty_TmpAlignMask, flags);
		if (subtype1->ty_AssExp && (flags & RESOLVE_FINALIZE)) {
			resolveExpAlign(subtype1->ty_AssExp,
					&subtype1->ty_TmpAlignMask, flags);
		}
		if (type->ty_TmpAlignMask < subtype1->ty_TmpAlignMask)
			type->ty_TmpAlignMask = subtype1->ty_TmpAlignMask;
	}
	if (subtype2) {
		resolveTypeAlign(subtype2, &subtype2->ty_TmpAlignMask, flags);
		if (subtype2->ty_AssExp && (flags & RESOLVE_FINALIZE)) {
			resolveExpAlign(subtype2->ty_AssExp,
					&subtype2->ty_TmpAlignMask, flags);
		}
		if (type->ty_TmpAlignMask < subtype2->ty_TmpAlignMask)
			type->ty_TmpAlignMask = subtype2->ty_TmpAlignMask;
	}
	if (type->ty_AssExp && (flags & RESOLVE_FINALIZE)) {
		resolveExpAlign(type->ty_AssExp, &type->ty_TmpAlignMask,
				flags);
	}
	if (sg) {
		dassert(sg->sg_Flags & SGF_RESOLVED);
		/* ResolveSemGroup(sg, 0); */
		resolveSemGroupAlign(sg, flags);
		if (type->ty_TmpAlignMask < sg->sg_TmpAlignMask)
			type->ty_TmpAlignMask = sg->sg_TmpAlignMask;
	}
	if (*expalignp < type->ty_TmpAlignMask)
		*expalignp = type->ty_TmpAlignMask;
}
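/*
 * Summary of the RSF_* state machine shared by the alignment and storage
 * passes (the same pattern repeats for Exps, Types, and SemGroups above
 * and below):
 *
 *	RESOLVE_CLEAN	 - clear RSF_ALIGN/RSF_SUB_ALIGN (plus the storage
 *			   bits) so a later pass can run fresh.
 *	RESOLVE_FINALIZE - deep recursion, guarded by RSF_SUB_ALIGN /
 *			   RSF_SUB_STORAGE.
 *	(neither)	 - shallow pass, guarded by RSF_ALIGN / RSF_STORAGE
 *			   only.
 */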
/*
 * Resolve the storage offsets (after the alignment pass).
 *
 * RESOLVE_ISGLOB   - Global storage is separately calculated from
 *		      type-instantiated storage.
 *
 * RESOLVE_FINALIZE - Indicates that we should resolve all underlying
 *		      types, typically because the type was referenced
 *		      via an expression.
 */
static void
resolveTypeStorage(Type *type, int flags, urunesize_t base,
		   urunesize_t *limitp)
{
	SemGroup *sg = NULL;
	Type *subtype1 = NULL;
	Type *subtype2 = NULL;
	Type *pass2;

	/*
	 * NOTE: This function will unconditionally max *limitp against
	 *	 base, so no need to do it right here.
	 */

	/*
	 * State machine for Storage (not used for cleaning)
	 *
	 * RESOLVE_FINALIZE - Must also resolve indirect dependencies that
	 *		      do not directly affect storage for (type).
	 */
	if (flags & RESOLVE_FINALIZE) {
		dassert_type(type, type->ty_RState & RSF_SUB_ALIGN);
		if (type->ty_RState & RSF_SUB_STORAGE) {
			base = BASEALIGN(base, type->ty_TmpAlignMask);
			SIZELIMIT(base, type->ty_TmpBytes, limitp);
			return;
		}
		type->ty_RState |= RSF_STORAGE | RSF_SUB_STORAGE;
	} else {
		dassert_type(type, type->ty_RState & RSF_ALIGN);
		if (type->ty_RState & RSF_STORAGE) {
			base = BASEALIGN(base, type->ty_TmpAlignMask);
			SIZELIMIT(base, type->ty_TmpBytes, limitp);
			return;
		}
		type->ty_RState |= RSF_STORAGE;
	}

	switch (type->ty_Op) {
	case TY_CLASS:
		sg = type->ty_ClassType.et_SemGroup;
		break;
	case TY_ARYOF:
		subtype1 = type->ty_AryType.et_Type;
		break;
	case TY_COMPOUND:
		sg = type->ty_CompType.et_SemGroup;
		break;
	case TY_PROC:
		subtype1 = type->ty_ProcType.et_ArgsType;
		subtype2 = type->ty_ProcType.et_RetType;
		break;
	case TY_IMPORT:
		sg = type->ty_ImportType.et_SemGroup;
		break;
	case TY_ARGS:
		sg = type->ty_ArgsType.et_SemGroup;
		break;
	case TY_VAR:
		sg = type->ty_VarType.et_SemGroup;
		break;
	case TY_PTRTO:
		if (flags & RESOLVE_FINALIZE) {
			pass2 = type->ty_RawPtrType.et_Type;
			resolveTypeStorage(pass2, flags,
					   0, &pass2->ty_TmpBytes);
		}
		break;
	case TY_REFTO:
		if (flags & RESOLVE_FINALIZE) {
			pass2 = type->ty_RefType.et_Type;
			resolveTypeStorage(pass2, flags,
					   0, &pass2->ty_TmpBytes);
		}
		break;
	case TY_STORAGE:
	case TY_DYNAMIC:
		/*
		 * nothing to be done here.
		 */
		break;
	case TY_UNRESOLVED:	/* should be no unresolved types now */
	default:
		dassert_type(type, 0);
	}

	if (subtype1) {
		resolveTypeStorage(subtype1, flags, 0, &subtype1->ty_TmpBytes);
		if (subtype1->ty_AssExp && (flags & RESOLVE_FINALIZE)) {
			/* XXX base is 0? */
			resolveExpStorage(subtype1->ty_AssExp, flags,
					  0, &subtype1->ty_TmpBytes);
		}
		base = BASEALIGN(base, subtype1->ty_TmpAlignMask);
		SIZELIMIT(base, subtype1->ty_TmpBytes, limitp);
		if (type->ty_TmpAlignMask < subtype1->ty_TmpAlignMask)
			type->ty_TmpAlignMask = subtype1->ty_TmpAlignMask;
	}
	if (subtype2) {
		resolveTypeStorage(subtype2, flags, 0, &subtype2->ty_TmpBytes);
		if (subtype2->ty_AssExp && (flags & RESOLVE_FINALIZE)) {
			/* XXX base is 0? */
			resolveExpStorage(subtype2->ty_AssExp, flags,
					  0, &subtype2->ty_TmpBytes);
		}
		base = BASEALIGN(base, subtype2->ty_TmpAlignMask);
		SIZELIMIT(base, subtype2->ty_TmpBytes, limitp);
		if (type->ty_TmpAlignMask < subtype2->ty_TmpAlignMask)
			type->ty_TmpAlignMask = subtype2->ty_TmpAlignMask;
	}
	if (type->ty_AssExp && (flags & RESOLVE_FINALIZE)) {
		/* XXX base is 0? */
		resolveExpStorage(type->ty_AssExp, flags,
				  0, &type->ty_TmpBytes);
	}
	if (sg) {
		dassert(sg->sg_Flags & SGF_RESOLVED);
		resolveSemGroupStorage(sg, flags, 0, NULL, 0, NULL);
		if (flags & RESOLVE_ISGLOB) {
			/* XXX */
			base = BASEALIGN(base, sg->sg_GlobalAlignMask);
			base = BASEALIGN(base, sg->sg_TmpAlignMask);
			SIZELIMIT(base, sg->sg_GlobalTmpBytes, limitp);
		} else {
			base = BASEALIGN(base, sg->sg_TmpAlignMask);
			SIZELIMIT(base, sg->sg_TmpBytes, limitp);
		}

		/*
		 * Re-resolve the type flags.  XXX mostly fixed once I
		 * handled CBase/DBase/GBase in resolveSemGroup1().
		 */
		if (sg->sg_Flags & SGF_HASASS)
			type->ty_Flags |= TF_HASASS;
		if (sg->sg_Flags & SGF_HASPTR)
			type->ty_Flags |= TF_HASPTR;
		if (sg->sg_SRBase)
			type->ty_Flags |= TF_HASLVREF;
		if (sg->sg_Flags & SGF_VARARGS)
			type->ty_Flags |= TF_HASLVREF;	/* XXX TF_VARARGS */
		if (sg->sg_CBase)
			type->ty_Flags |= TF_HASCONSTRUCT;
		if (sg->sg_DBase)
			type->ty_Flags |= TF_HASDESTRUCT;
	}

	/*
	 * Catch-all (catches caller adjustment to base)
	 */
	SIZELIMIT(base, 0, limitp);
}
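/*
 * Usage sketch (illustrative): when a type hangs off a SemGroup, the two
 * SemGroup passes always run in align-then-storage order, which is exactly
 * what the dassert_type()/dassert_sg() guards in the storage functions
 * enforce:
 */
#if 0
	resolveSemGroupAlign(sg, flags);
	resolveSemGroupStorage(sg, flags, 0, NULL, 0, NULL);
#endif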
/*
 * This is used to resolve temporary storage requirements for SemGroups
 * related to classes and compound types.  Temporary storage requirements
 * are calculated on a SemGroup-by-SemGroup basis and are not aggregated
 * into any parent.
 *
 * In the final pass we also reverse the constructor and destructor lists
 * (sg_CBase and sg_DBase), and the pointer/lvalue list (sg_SRBase).
 * These lists were originally constructed by prepending and are thus in
 * the wrong order.
 */
static void
resolveSemGroupAlign(SemGroup *sg, int flags)
{
	Declaration *d;

	/*
	 * State machine.
	 *
	 * RESOLVE_CLEAN    - If set, RESOLVE_FINALIZE is also always set
	 *
	 * RESOLVE_FINALIZE - Must also resolve indirect dependencies that
	 *		      do not directly affect alignment of (type).
	 *
	 * NOTE: SGF_RESOLVED might not be set, indicating that we were able
	 *	 to pick-out individual declarations in (global) SGs without
	 *	 having to resolve the whole group.  This allows unused
	 *	 declarations to be omitted by the code generator.
	 */
	if (flags & RESOLVE_CLEAN) {
		if ((sg->sg_RState & (RSF_ALIGN | RSF_SUB_ALIGN)) == 0)
			return;
		sg->sg_RState &= ~(RSF_ALIGN | RSF_SUB_ALIGN |
				   RSF_STORAGE | RSF_SUB_STORAGE);
	} else if (flags & RESOLVE_FINALIZE) {
		if (sg->sg_RState & RSF_SUB_ALIGN)
			return;
		sg->sg_RState |= RSF_ALIGN | RSF_SUB_ALIGN;
	} else {
		if (sg->sg_RState & RSF_ALIGN)
			return;
		sg->sg_RState |= RSF_ALIGN;
	}

	RUNE_FOREACH(d, &sg->sg_DeclList, d_Node) {
#if 0
		if ((d->d_ScopeFlags & (SCOPE_CONSTRUCTOR |
					SCOPE_DESTRUCTOR))) {
			if ((sg->sg_Flags & SGF_RESOLVED) == 0 &&
			    (sg->sg_Op == SG_MODULE ||
			     sg->sg_Op == SG_CLASS)) {
				ResolveSemGroup(sg, 0);
			}
		}
#endif
		if ((d->d_Flags & DF_RESOLVED) == 0)
			continue;
		resolveDeclAlign(d, &sg->sg_TmpAlignMask, flags);

		if (d->d_ScopeFlags & SCOPE_GLOBAL) {
			if (sg->sg_GlobalAlignMask < d->d_AlignMask)
				sg->sg_GlobalAlignMask = d->d_AlignMask;
		} else {
			if (sg->sg_AlignMask < d->d_AlignMask)
				sg->sg_AlignMask = d->d_AlignMask;
		}
	}
}
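/*
 * Note for the storage pass below: limitp/glimitp may be passed as NULL
 * together (as resolveTypeStorage() does), in which case local dummy
 * accumulators are substituted and the computed totals are published only
 * through sg_TmpBytes and sg_GlobalTmpBytes.
 */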
static void
resolveSemGroupStorage(SemGroup *sg, int flags, urunesize_t base,
		       urunesize_t *limitp, urunesize_t gbase,
		       urunesize_t *glimitp)
{
	Declaration *d;
	Declaration *d2;
	urunesize_t dummy_limit = 0;
	urunesize_t dummy_glimit = 0;

	/*
	 * sync-up any adjustments to base made by the caller
	 */
	if (limitp && *limitp < base)
		*limitp = base;
	if (glimitp && *glimitp < gbase)
		*glimitp = gbase;

	/*
	 * State machine for Storage (not used for cleaning)
	 *
	 * RESOLVE_FINALIZE - Must also resolve indirect dependencies that
	 *		      do not directly affect storage for (sg).
	 */
	if (flags & RESOLVE_FINALIZE) {
		dassert_sg(sg, sg->sg_RState & RSF_SUB_ALIGN);
		if (sg->sg_RState & RSF_SUB_STORAGE)
			return;
		sg->sg_RState |= RSF_STORAGE | RSF_SUB_STORAGE;
	} else {
		dassert_sg(sg, sg->sg_RState & RSF_ALIGN);
		if (sg->sg_RState & RSF_STORAGE)
			return;
		sg->sg_RState |= RSF_STORAGE;
	}

	if (limitp == NULL) {
		limitp = &dummy_limit;
		glimitp = &dummy_glimit;
	}

	/*
	 * Final pass
	 */
	RUNE_FOREACH(d, &sg->sg_DeclList, d_Node) {
		if (d->d_Flags & DF_RESOLVED) {
			resolveDeclStorage(d, flags, base, limitp,
					   gbase, glimitp);
		}
	}

	/*
	 * Reverse order
	 */
	if (flags & RESOLVE_FINALIZE) {
		if ((d2 = sg->sg_CBase) != NULL) {
			sg->sg_CBase = NULL;
			while ((d = d2) != NULL) {
				d2 = d->d_CNext;
				d->d_CNext = sg->sg_CBase;
				sg->sg_CBase = d;
			}
		}
		if ((d2 = sg->sg_DBase) != NULL) {
			sg->sg_DBase = NULL;
			while ((d = d2) != NULL) {
				d2 = d->d_DNext;
				d->d_DNext = sg->sg_DBase;
				sg->sg_DBase = d;
			}
		}
		if ((d2 = sg->sg_GBase) != NULL) {
			sg->sg_GBase = NULL;
			while ((d = d2) != NULL) {
				d2 = d->d_GNext;
				d->d_GNext = sg->sg_GBase;
				sg->sg_GBase = d;
			}
		}
		if ((d2 = sg->sg_SRBase) != NULL) {
			sg->sg_SRBase = NULL;
			while ((d = d2) != NULL) {
				d2 = d->d_SRNext;
				d->d_SRNext = sg->sg_SRBase;
				sg->sg_SRBase = d;
			}
		}
	}
	sg->sg_TmpBytes = *limitp;
	sg->sg_GlobalTmpBytes = *glimitp;
}

/*
 * If we are resolving to a dynamic method call we need to flag all
 * matching current subclass decls for (d) not yet resolved, to ensure
 * they get resolved if their related class is used at all, since the
 * dynamic method call might be trying to call any of them.
 */
static void resolveDynamicDecl(Declaration *d);

static void
resolveDynamicProcedure(SemGroup *isg __unused, SemGroup *sg __unused,
			Exp *exp, int flags __unused)
{
	Declaration *d;
	Type *type;
	Exp *lhs;

	lhs = exp->ex_Lhs;
	type = lhs->ex_Lhs->ex_Type;
	d = lhs->ex_Decl;
	if (lhs->ex_Token != TOK_STRIND || type->ty_Op != TY_REFTO)
		return;
	type = type->ty_RefType.et_Type;
	dassert_exp(exp, type->ty_Op == TY_CLASS);
	resolveDynamicDecl(d);
}

static void
resolveDynamicProcedureAlign(Exp *exp, urunesize_t *expalignp, int flags)
{
	Declaration *d;
	Type *type;
	Exp *lhs;

	lhs = exp->ex_Lhs;
	type = lhs->ex_Lhs->ex_Type;
	d = lhs->ex_Decl;
	if (lhs->ex_Token != TOK_STRIND || type->ty_Op != TY_REFTO)
		return;
	type = type->ty_RefType.et_Type;
	dassert_exp(exp, type->ty_Op == TY_CLASS);
	resolveDynamicDeclAlign(d, expalignp, flags);
}

static void
resolveDynamicProcedureStorage(Exp *exp, int flags, urunesize_t base,
			       urunesize_t *limitp, urunesize_t gbase,
			       urunesize_t *glimitp)
{
	Declaration *d;
	Type *type;
	Exp *lhs;

	/*
	 * sync-up any adjustments to base made by the caller
	 */
	if (limitp && *limitp < base)
		*limitp = base;
	if (glimitp && *glimitp < gbase)
		*glimitp = gbase;

	lhs = exp->ex_Lhs;
	type = lhs->ex_Lhs->ex_Type;
	d = lhs->ex_Decl;
	if (lhs->ex_Token != TOK_STRIND || type->ty_Op != TY_REFTO)
		return;
	type = type->ty_RefType.et_Type;
	dassert_exp(exp, type->ty_Op == TY_CLASS);
	resolveDynamicDeclStorage(d, flags, base, limitp, gbase, glimitp);
}

static void
resolveDynamicDecl(Declaration *d)
{
	Declaration *scan;

	for (scan = d->d_SubBase; scan; scan = scan->d_SubNext) {
		scan->d_Flags |= DF_DYNAMICREF;
		if (scan->d_MyGroup &&
		    (scan->d_MyGroup->sg_Flags &
		     (SGF_RESOLVING | SGF_RESOLVED))) {
			ResolveDecl(scan, 0);
		}
	}
	for (scan = d->d_SubBase; scan; scan = scan->d_SubNext) {
		if (scan->d_SubBase)
			resolveDynamicDecl(scan);
	}
}
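/*
 * (Illustrative note) All three resolveDynamicProcedure*() helpers above
 * pattern-match the same call shape before acting: the call's lhs must be
 * a TOK_STRIND indirection through a reference-to-class (TY_REFTO of a
 * TY_CLASS), i.e. a method call made through a class reference.  Any
 * other shape is a statically-known procedure and returns early.
 */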
/*
 * Handle everything required to inline a procedure.  Small procedures are
 * automatically inlined unless 'noinline' is specified.  'inline' must be
 * specified to inline large procedures.  We can only inline when we know
 * the exact procedure in question, so ref-based method calls tend to
 * prevent inlining.
 */
typedef struct xinline {
	struct xinline *prev;
	struct xinline *next;
	Declaration *d;
} xinline_t;

xinline_t XInlineTop;
xinline_t *XInlineBot = &XInlineTop;

static void
resolveProcedureInline(SemGroup *isg __unused, SemGroup *sg __unused,
		       Exp *exp, int flags)
{
	Declaration *d;
	Exp *lhs;
	Stmt *st __unused;
	xinline_t *xin;

	lhs = exp->ex_Lhs;
	d = lhs->ex_Decl;

	/*
	 * Do not inline if the procedure is internal, a clang call, marked
	 * as noinline, or threaded.  Also do not inline a procedure which
	 * will probably return a constant (and be optimized into one
	 * directly); inlining would slow things down in that situation.
	 */
	if (d->d_ScopeFlags & (SCOPE_INTERNAL | SCOPE_CLANG | SCOPE_NOINLINE))
		return;
	if (d->d_ScopeFlags & (SCOPE_THREAD))
		return;
	if (exp->ex_Flags & EXF_PROBCONST)
		return;

	/*
	 * XXX optimize this if the reference type is known explicitly,
	 * otherwise we can't inline since it requires a dynamic call.
	 */
	if (lhs->ex_Token == TOK_STRIND &&
	    lhs->ex_Lhs->ex_Type->ty_Op == TY_REFTO)
		return;

	/*
	 * For now do not try to combine global data because each inline
	 * will get its own instantiation, which is not what the programmer
	 * expects.
	 */
	st = d->d_ProcDecl.ed_ProcBody;
	if (st == NULL)
		return;
	if (st->st_MyGroup->sg_GlobalBytes ||
	    st->st_MyGroup->sg_GlobalTmpBytes)
		return;

	/*
	 * XXX we should be able to allow var-args inlines, why doesn't
	 * this work?
	 */
	if (d->d_ProcDecl.ed_Type->ty_ProcType.et_ArgsType->
	    ty_CompType.et_SemGroup->sg_Flags & SGF_VARARGS) {
		return;
	}

	/*
	 * Do not inline the same procedure recursively, or if we can
	 * optimize the procedure call into a constant by interpreting it
	 * once.
	 */
	if (d->d_Flags & DF_INLINING)
		return;
	if (exp->ex_Flags & EXF_CONST)
		return;

	/*
	 * Do not inline if we do not know the precise procedure at
	 * resolve-time.
	 */
	if (d->d_Op != DOP_PROC || lhs->ex_Type->ty_Op == TY_REFTO)
		return;

	xin = zalloc(sizeof(*xin));
	xin->prev = XInlineBot;
	xin->d = d;
	XInlineBot->next = xin;
	XInlineBot = xin;

	/*
	 * We inline the procedure by duplicating the procedure body and
	 * changing the procedure call exp.  Disallow recursive inlining.
	 *
	 * Set PARSE_TYPE on exLhs to retain exLhs->ex_Type across any
	 * further duplication for the TOK_INLINE_CALL switch.
	 */
	d->d_Flags |= DF_INLINING;
	dassert((exp->ex_Flags & EXF_DUPEXP) == 0);
	exp->ex_Lhs->ex_Flags |= EXF_PARSE_TYPE;

	st = d->d_ProcDecl.ed_ProcBody;
	if (st->st_MyGroup->sg_Complexity < RuneInlineComplexity) {
		SemGroup *altsg;

		if (DebugOpt) {
			char buf[RUNE_IDTOSTR_LEN];
			xinline_t *xscan;

			printf("InlineTest: %5d",
			       st->st_MyGroup->sg_Complexity);
			for (xscan = XInlineTop.next; xscan;
			     xscan = xscan->next) {
				printf(".%s",
				       runeid_text(xscan->d->d_Id, buf));
			}
			printf("\n");
		}

		altsg = st->st_MyGroup->sg_Parent;
		dassert(st->st_Flags & STF_SEMANTIC);

		/*
		 * Each inlining inserts a fresh copy of the procedure in
		 * question.
		 */
		st = DupStmt(st->st_MyGroup, NULL, d->d_ProcDecl.ed_OrigBody);
		st->st_ProcStmt.es_Decl = d;
		st->st_ProcStmt.es_Scope = d->d_Scope;
		st->st_Flags |= STF_INLINED_PROC;
		exp->ex_Token = TOK_INLINE_CALL;
		exp->ex_AuxStmt = st;
		dassert(st->st_RState == 0);

		/*
		 * XXX sg_AltContext is actually what we want to have
		 * priority for searches, not sg_Parent!
		 */
		ResolveStmt(d->d_ImportSemGroup, st, flags);
		st->st_MyGroup->sg_AltContext = altsg;
		st->st_MyGroup->sg_Flags |= SGF_ALTPRIORITY;

		/*
		 * Link the inlined procedure's semantic context with our
		 * own so stack storage is properly calculated.  We must
		 * clear STF_SEMTOP here or the alignment recursion will
		 * restart at 0.
		 */
		dassert(st->st_Flags & STF_SEMTOP);
		dassert(st->st_Flags & STF_SEMANTIC);
		st->st_Flags &= ~STF_SEMTOP;
		st->st_MyGroup->sg_Parent = sg;
		/* ResolveExp(isg, sg, exp, exp->ex_Type, flags); */
	}
	d->d_Flags &= ~DF_INLINING;

	XInlineBot->next = NULL;
	XInlineBot = xin->prev;
	zfree(xin, sizeof(*xin));
}
static int
SpecialSemGroupGet(runeid_t id)
{
	int s;

	switch(id) {
	case RUNEID_NULL:
		s = SPECIAL_NULL;
		break;
	case RUNEID_VA_COUNT:
		s = SPECIAL_COUNT;
		break;
	case RUNEID_VA_TYPE:
		s = SPECIAL_TYPE;
		break;
	case RUNEID_VA_DATA:
		s = SPECIAL_DATA;
		break;
	case RUNEID_VA_VARCOUNT:
		s = SPECIAL_VAR_COUNT;
		break;
	case RUNEID_VA_VARTYPE:
		s = SPECIAL_VAR_TYPE;
		break;
	case RUNEID_VA_VARDATA:
		s = SPECIAL_VAR_DATA;
		break;
	case RUNEID_VA_TYPEID:
		s = SPECIAL_TYPEID;
		break;
	case RUNEID_VA_TYPESTR:
		s = SPECIAL_TYPESTR;
		break;
	default:
		s = 0;
		break;
	}
	return s;
}
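/*
 * Usage sketch (illustrative; the surrounding context is hypothetical):
 * a zero return from SpecialSemGroupGet() means "not a reserved
 * identifier".
 */
#if 0
	int s;

	if ((s = SpecialSemGroupGet(id)) != 0) {
		/* reserved identifier (RUNEID_NULL, the RUNEID_VA_* set) */
	} else {
		/* ordinary identifier, resolved via the normal search */
	}
#endif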
/*
 * Fixup the 'this' argument to the correct subclass and add d_Search to
 * point to the original superclass.  This allows replicated method
 * procedures to operate on all fields of a subclass via 'this', even if
 * they moved around or changed type.
 *
 * parse2.c added the 'this' argument, so we can assert that it exists.
 *
 * NOTE: This occurs in pass1 (ResolveClasses()) and cannot resolve
 *	 types, declarations, or anything else at this time.
 */
static void
ResolveMethodProcedureThisArg(SemGroup *subsg, Declaration *pd)
{
	Declaration *d;
	SemGroup *sg;
	Type *type;

	type = pd->d_ProcDecl.ed_Type;
	sg = type->ty_ProcType.et_ArgsType->ty_ArgsType.et_SemGroup;
	d = RUNE_FIRST(&sg->sg_DeclList);
	dassert_decl(d, d->d_Id == RUNEID_THIS &&
		     (d->d_Op == DOP_ARGS_STORAGE ||
		      d->d_Op == DOP_TYPEDEF));

	/*
	 * 'this' inherits d_Search from the method procedure declaration,
	 * which is set when superclass declarations are merged into a
	 * subclass in ResolveClasses().
	 */
	d->d_Search = pd->d_Search;

	/*
	 * Type of the 'this' argument.  Must be one of:
	 *
	 *	class @this
	 *	class *this
	 *	lvalue class @this
	 *	lvalue class *this
	 *
	 * Automatic fixup of the 'this' argument: change from 'class' to
	 * 'subclass', retaining the SF_LVALUE qualifier.
	 *
	 * We do this unconditionally, ignoring AUTOTHIS.
	 *
	 * d_Search will have already been set to the original class.
	 */
	if (/*(d->d_Flags & DF_AUTOTHIS) && */ d->d_Op == DOP_ARGS_STORAGE) {
		Type *stype;
		int sqflags;
		int vis;
		Exp *sexp;

		stype = d->d_StorDecl.ed_Type;
		sqflags = stype->ty_SQFlags;
		sexp = stype->ty_OrigAssExp;
		vis = stype->ty_Visibility;

		if (d->d_Flags & DF_AUTOTHIS) {
			/*
			 * The parser put this here so just adjust what we
			 * know is already there.
			 */
			if (subsg->sg_ClassType->ty_SQFlags & SF_STRUCT) {
				stype = TypeToRawPtrType(subsg->sg_ClassType);
			} else {
				stype = TypeToRefType(subsg->sg_ClassType);
			}
		} else {
			/*
			 * Replace the programmer-supplied this class with
			 * the proper subclass.  Retain the LVALUE qualifier
			 * and type default exp, if any.
			 */
			if (stype->ty_Op == TY_PTRTO)
				stype = TypeToRawPtrType(subsg->sg_ClassType);
			else if (stype->ty_Op == TY_REFTO)
				stype = TypeToRefType(subsg->sg_ClassType);
			else
				dassert_decl(d, 0);

			if (stype->ty_SQFlags != (sqflags & SF_LVALUE) ||
			    sexp) {
				stype = TypeToQualType(stype, NULL, sexp,
						       sqflags & SF_LVALUE,
						       vis);
			}
		}
		d->d_StorDecl.ed_Type = stype;
	}
	if (/*(d->d_Flags & DF_AUTOTHIS) && */ d->d_Op == DOP_TYPEDEF) {
		d->d_TypedefDecl.ed_Type = subsg->sg_ClassType;
	}
}

static void
checkUnrestrictedType(Declaration *d, Type *type)
{
	for (;;) {
		switch(type->ty_Op) {
		case TY_ARYOF:
			type = type->ty_AryType.et_Type;
			break;
		case TY_CLASS:
			if ((type->ty_SQFlags & SF_STRUCT) == 0) {
				dfatal_decl(d, TOK_ERR_CLASS_STRUCT_EMBED,
					    NULL);
			}
			return;
		default:
			return;
		}
	}
	/* not reached */
}
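/*
 * (Illustrative note) checkUnrestrictedType() strips any number of array
 * dimensions and then insists that an embedded class be a struct-class;
 * embedding a non-struct class, directly or as an array element, is
 * rejected with TOK_ERR_CLASS_STRUCT_EMBED.  Pointer and reference types
 * fall through the default case and are always permitted.
 */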