/*
 * RESOLVE.C - Resolve the parser tree and prepare for code generation
 *	       or interpretation.
 *
 * (c)Copyright 1993-2016, Matthew Dillon, All Rights Reserved.  See the
 * COPYRIGHT file at the base of the distribution.
 *
 * Pass1 - ResolveClasses() - Handles superclass/subclass merging for the
 *			      entire import set.
 *
 * Pass2 - Resolve*()	    - Resolves identifiers and identifier paths,
 *			      plus the size and alignment for Types, Decls,
 *			      and SemGroups.
 *
 *			      Utilizes a deferred work mechanism to avoid
 *			      circular loops.  This mechanism allows types
 *			      to be partially resolved (enough to satisfy
 *			      the caller), then finishes up via the deferred
 *			      work queue.
 */

#include "defs.h"
#include <dlfcn.h>		/* dlsym() */

struct ResVis;

static void ResolveClasses(Stmt *st, int flags);
static void ResolveAlignment(Stmt *st, int flags);
static void ResolveStorage(Stmt *st, int flags);
static void ResolveSemGroup(SemGroup *sg, int retry);
static void errorDottedId(string_t *ary, const char *ctl, ...);
static void ResolveStmt(SemGroup *isg, Stmt *st, int flags);
static Type *ResolveType(Type *type, struct ResVis *vis, int retry);
static void ResolveDecl(Declaration *d, int retry);
static Exp *ResolveExp(SemGroup *isg, SemGroup *sg, Exp *exp,
			Type *itype, int flags);
static Type *resolveReturnType(SemGroup *sg, int flags);
static Type *resolveArgsType(SemGroup *sg, int flags);
static Exp *resolveConstExp(SemGroup *isg, SemGroup *sg, Exp *exp, int flags);
static Exp *resolveConstExpBool(SemGroup *isg, SemGroup *sg, Exp *exp,
			int flags, RunTmpStor *ts);
static Exp *resolveCompoundExp(SemGroup *isg, SemGroup *sg, Exp *exp,
			Type *itype, int flags);
static Exp *resolveBracketedExp(SemGroup *isg, SemGroup *sg, Exp *exp,
			Type *itype, int flags);
static Exp *resolveExpCast(SemGroup *isg, SemGroup *sg, Exp *exp,
			Type *ltype, int flags);
static Exp *resolveExpOper(SemGroup *isg, SemGroup *sg, Exp *exp,
			Type *itype, int flags);
static void resolveSuperClass(Type *super);
static void resolveDeclAlign(Declaration *d, runesize_t *expalignp,
			int flags);
static void resolveExpAlign(Exp *exp, runesize_t *expalignp, int flags);
static void resolveTypeAlign(Type *type, runesize_t *expalignp, int flags);
static void resolveSemGroupAlign(SemGroup *sg, int flags);
static void resolveDeclStorage(Declaration *d, runesize_t base,
			runesize_t *limitp, runesize_t gbase,
			runesize_t *glimitp);
static void resolveStorageExpOnly(Exp *exp, runesize_t base,
			runesize_t *limitp);
static void resolveStorageExpSub(Exp *exp, runesize_t base,
			runesize_t *limitp);
static void resolveStorageExp(Exp *exp, runesize_t base, runesize_t *limitp);
static Declaration *findOper(Type *btype, string_t id, Type *ltype,
			Type *rtype, int flags);
static Declaration *findExpOper(Exp *exp, int flags);
static Declaration *findCast(Type *btype, Type *ltype, Type *rtype,
			int flags);
static void resolveStorageType(Type *type, int isglob, runesize_t base,
			runesize_t *limitp);
static void resolveStorageSemGroup(SemGroup *sg, runesize_t base,
			runesize_t *limitp, runesize_t gbase,
			runesize_t *glimitp);
static int methodProcThisIsPointer(Type *type);
static void resolveProcedureInline(SemGroup *isg, SemGroup *sg,
			Exp *exp, int flags);
static void resolveDynamicProcedure(SemGroup *isg, SemGroup *sg,
			Exp *exp, int flags);
static void resolveDynamicProcedureAlign(Exp *exp, runesize_t *expalignp,
			int flags);
static void resolveDynamicProcedureStorage(Exp *exp, runesize_t base,
			runesize_t *limitp, runesize_t gbase,
			runesize_t *glimitp);

#define ADD_LVALUE(type) ResolveType(AddTypeQual((type), SF_LVALUE), NULL, 0)
#define DEL_LVALUE(type) ResolveType(DelTypeQual((type), SF_LVALUE), NULL, 0)

#define RESOLVE_AUTOCAST 0x0001	/* autocast to expected type */
#define RESOLVE_CONSTEXP 0x0002	/* resolve for const interpretation */
#define RESOLVE_CLEAN	 0x0004	/* cleanup after const interp */
#define RESOLVE_FAILOK	 0x0008	/* failure to resolve is acceptable */

#define BASEALIGN(base, alignmask)	\
	(((base) + alignmask) & ~(runesize_t)(alignmask))
#define SIZELIMIT(base, bytes, limitp)		\
	if ((base) + (bytes) > *(limitp))	\
		*(limitp) = ((base) + (bytes))

/*
 * Deferred work queue
 */
typedef Type *type_p;
typedef Exp *exp_p;

typedef struct ResVis {
	struct ResVis *next;
	int *visp;
} resvis_t;

typedef struct ResDefer {
	struct ResDefer *next;
	enum { RES_STMT, RES_DECL, RES_TYPE, RES_EXP, RES_SEMGROUP } which;
	union {
		struct {
			SemGroup *isg;
			Stmt *st;
			int flags;
		} stmt;
		struct {
			Declaration *d;
		} decl;
		struct {
			Type *type;
		} type;
		struct {
			SemGroup *isg;
			SemGroup *sg;
			Exp *exp;
			Type *itype;
			int flags;
		} exp;
		struct {
			SemGroup *sg;
			int flags;
		} sg;
	};
} resdelay_t;

static resdelay_t *ResDeferBase;
static resdelay_t **ResDeferTail = &ResDeferBase;
static int ResPass;

int RuneInlineComplexity = 20;

/*
 * Do a pass on all deferred work.  Returns non-zero if there
 * is more deferred work after the pass is complete.
 */
static int
runDeferredWork(void)
{
	resdelay_t *res;
	resdelay_t **last = ResDeferTail;
	Type *type;
	Exp *exp;

	while ((res = ResDeferBase) != NULL) {
		if ((ResDeferBase = res->next) == NULL)
			ResDeferTail = &ResDeferBase;
		switch(res->which) {
		case RES_STMT:
			ResolveStmt(res->stmt.isg, res->stmt.st,
				    res->stmt.flags);
			break;
		case RES_DECL:
			ResolveDecl(res->decl.d, 1);
			break;
		case RES_TYPE:
			type = ResolveType(res->type.type, NULL, 1);
			dassert(type == res->type.type);
			break;
		case RES_EXP:
			exp = ResolveExp(res->exp.isg, res->exp.sg,
					 res->exp.exp, res->exp.itype,
					 res->exp.flags);
			dassert(exp == res->exp.exp);
			break;
		case RES_SEMGROUP:
			ResolveSemGroup(res->sg.sg, 1);
			break;
		default:
			dassert(0);
			break;
		}
		zfree(res, sizeof(*res));
		if (&res->next == last)	/* storage freed, ok to test ptr */
			break;
	}
	return (ResDeferBase != NULL);
}

__unused static void
deferStmt(SemGroup *isg, Stmt *st, int flags)
{
	resdelay_t *res;

	res = zalloc(sizeof(*res));
	res->which = RES_STMT;
	res->stmt.isg = isg;
	res->stmt.st = st;
	res->stmt.flags = flags;
	*ResDeferTail = res;
	ResDeferTail = &res->next;
}

__unused static void
deferDecl(Declaration *d)
{
	resdelay_t *res;

	res = zalloc(sizeof(*res));
	res->which = RES_DECL;
	res->decl.d = d;
	*ResDeferTail = res;
	ResDeferTail = &res->next;
}

__unused static void
deferExp(SemGroup *isg, SemGroup *sg, Exp *exp, Type *itype, int flags)
{
	resdelay_t *res;

	res = zalloc(sizeof(*res));
	res->which = RES_EXP;
	res->exp.isg = isg;
	res->exp.sg = sg;
	res->exp.exp = exp;
	res->exp.itype = itype;
	res->exp.flags = flags;
	*ResDeferTail = res;
	ResDeferTail = &res->next;
}
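/*
 * Illustrative sketch (kept out of the build): how the deferral
 * helpers above are used.  A resolver that cannot complete queues
 * itself with one of the defer*() helpers and returns a partial
 * result; the top level then drains the queue, as ResolveProject()
 * does below.  Also note that BASEALIGN()/SIZELIMIT() are plain mask
 * arithmetic, e.g. BASEALIGN(13, 7) rounds 13 up to 16.  The
 * myResolveType() and dependencyNotReady() names here are
 * hypothetical.
 */
#if 0
static void
myResolveType(Type *type)
{
	if (type->ty_Flags & TF_RESOLVED)	/* nothing left to do */
		return;
	if (dependencyNotReady(type)) {		/* hypothetical test */
		deferType(type);		/* retry on a later pass */
		return;
	}
	/* ... complete the resolution here ... */
}

/* drain loop, as in ResolveProject() */
while (runDeferredWork())
	;
#endif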
/*
 * Note that visibility is set immediately by the call chain, NOT in
 * any deferral.
 */
static void
deferType(Type *type)
{
	resdelay_t *res;

	res = zalloc(sizeof(*res));
	res->which = RES_TYPE;
	res->type.type = type;
	*ResDeferTail = res;
	ResDeferTail = &res->next;
}

__unused static void
deferSG(SemGroup *sg)
{
	resdelay_t *res;

	res = zalloc(sizeof(*res));
	res->which = RES_SEMGROUP;
	res->sg.sg = sg;
	*ResDeferTail = res;
	ResDeferTail = &res->next;
}

void
ResolveProject(Parse *p, Stmt *st)
{
	Declaration *d;
	Stmt *main_st;
	string_t id;
	int i;
	int eno;

	dassert_stmt(st, st->st_Op == ST_Import);

	/*
	 * Interpreter or Generator may reference our global internal
	 * types directly, so make sure they are all resolved.
	 */
	ResolveClasses(st, 0);
	for (i = 0; BaseTypeAry[i]; ++i)
		ResolveType(BaseTypeAry[i], NULL, 0);

	id = StrTableAlloc("main", 4, 0);
	main_st = RUNE_FIRST(&st->st_List);
	d = FindDeclId(main_st->st_MyGroup, id, &eno);
	if (d == NULL) {
		fprintf(stderr, "Top-level module missing main()\n");
		exit(1);
	}

	ResPass = 0;
	ResolveDecl(d, 0);
	runDeferredWork();
	runDeferredWork();
	runDeferredWork();
	ResPass = 1;
	while (runDeferredWork())
		;

	/*
	 * Resolve all types registered by DLLs
	 */
	{
		TypeRegNode *tr;

		RUNE_FOREACH(tr, &TypeRegList, tr_Node)
			ResolveType(tr->tr_Type, NULL, 0);
	}

	ResolveAlignment(st, 0);
	ResolveStorage(st, 0);
	p->p_Format = PFMT_RESOLVED;
	CollapseProject(st);
}

/*
 * ResolveClasses() - Resolve superclasses and do class merge
 *
 * This code implements the most complex feature of the language:
 * subclassing and refinement.
 *
 * The hardest thing we have to do is 'dup' declarations and code in
 * order to implement subclassing and refinement.  For example, a
 * procedure defined in Integer must be dup'd for each subclass of
 * Integer.  We have to do this because storage requirements will
 * change due to both subclassing and refinement.  Even auto variables
 * may wind up with different types between superclass and subclass.
 *
 * We must scan ST_Import and ST_Class statements.
 */
static void
ResolveClasses(Stmt *st, int flags)
{
	SemGroup *sg = st->st_MyGroup;

	/*
	 * Resolver interlock.  Assert that we aren't looping.  A loop
	 * can occur if class A embeds class B and class B embeds class
	 * A (versus a pointer to A).
	 */
	dassert_stmt(st, (st->st_Flags & STF_RESOLVING) == 0);
	if (st->st_Flags & STF_RESOLVED)
		return;
	st->st_Flags |= STF_RESOLVING;

	/*
	 * If this is a subclass, integrate the superclass into it
	 */
	if (st->st_Op == ST_Class && st->st_ClassStmt.es_Super) {
		Type *super = st->st_ClassStmt.es_Super;
		Stmt *sst;
		Declaration *d;
		Declaration *rd;
		SemGroup *tsg;

		/*
		 * Locate the superclass.  'super' does not appear in any
		 * other list... this is a unique Type structure.
		 */
		dassert_stmt(st, super->ty_Op == TY_UNRESOLVED);
		do {
			resolveSuperClass(super);
		} while (super->ty_Op == TY_UNRESOLVED);
		dassert_stmt(st, super->ty_Op == TY_CLASS);

		/*
		 * Cleanup (XXX free qualified segments??)
		 */
		st->st_ClassStmt.es_Super = super;
		st->st_ClassStmt.es_Decl->d_ClassDecl.ed_Super = super;

		/*
		 * Inherit internal unsigned integer and floating point
		 * flags and a few others.
		 */
		sg->sg_Flags |= super->ty_ClassType.et_SemGroup->sg_Flags &
		    (SGF_ISINTEGER | SGF_ISUNSIGNED | SGF_ISFLOATING |
		     SGF_ISBOOL | SGF_HASASS | SGF_GHASASS |
		     SGF_HASLVPTR | SGF_GHASLVPTR | SGF_ABICALL);

		/*
		 * Locate the class statement associated with the
		 * superclass and resolve it.
		 */
		sst = super->ty_ClassType.et_SemGroup->sg_Stmt;
		dassert_stmt(st, sst != NULL);
		dassert_stmt(st, sst->st_Op == ST_Class);
		ResolveClasses(sst, flags);
		/*
		 * Assign the sg_Level for the subclass.  This is used
		 * for semantic searches when a subclass is passed to a
		 * procedure expecting the superclass.
		 */
		sg->sg_Level = sst->st_MyGroup->sg_Level + 1;

		/*
		 * XXX Subclasses can inherit locking scope here.
		 * Currently we do not.
		 */
#if 0
		if (sst->u.ClassStmt.es_Decl->d_ScopeFlags & SCOPE_HARD) {
			st->u.ClassStmt.es_Decl->d_ScopeFlags |= SCOPE_HARD;
		} else if (st->u.ClassStmt.es_Decl->d_ScopeFlags &
			   SCOPE_HARD) {
			StmtFatalError(st, TOK_ERR_ILLEGAL_LOCKING_REFINEMENT);
		}
#endif

		/*
		 * First move all the declarations from sg to tsg so we
		 * can merge the superclass properly (keep all the
		 * d_Index's correct).  Note that tsg is not 100%
		 * integrated so we can only use it for search purposes.
		 * We absolutely CANNOT DupDeclaration() into tsg!
		 */
		tsg = AllocSemGroup(SG_CLASS, sg->sg_Parse, NULL, sg->sg_Stmt);
		while ((d = RUNE_FIRST(&sg->sg_DeclList)) != NULL) {
			RenameDecl(d, tsg);
		}
		sg->sg_DeclCount = 0;	/* reset */

		/*
		 * Merge the superclass into this class, in sequence.
		 * Iterate through declarations in the superclass and pull
		 * them into the subclass.  Figure out compatibility
		 * between super and subclasses.
		 *
		 * d  - iterates the superclass
		 * nd - subclass declaration refining the superclass decl
		 */
		RUNE_FOREACH(d, &sst->st_MyGroup->sg_DeclList, d_Node) {
			Declaration *nd;
			int eno = 0;

			dassert(d->d_Level != NULL &&
				d->d_Level->sg_Level < sg->sg_Level);

			/*
			 * See if the superclass decl conflicts with a
			 * subclass decl.  If there is no conflict pull it
			 * into the subclass and adjust the visibility.
			 * Note that the superclass may have duplicate
			 * ids, but they will be at different levels if
			 * so.
			 *
			 * The super linkage is required when findDecl()
			 * checks visibility of a declaration hidden
			 * relative to our subclass, but not necessarily
			 * hidden relative to the superclass.
			 *
			 * XXX overloading
			 */
			rd = FindDeclId(tsg, d->d_Id, &eno);
			if (rd == NULL) {
				/* XXX proliferates decls/types? */
				nd = DupDeclaration(sg, d);
				dassert(d->d_Index == nd->d_Index);
				nd->d_ScopeFlags &= ~SCOPE_ALL_VISIBLE |
						    super->ty_Visibility;
				nd->d_ScopeFlags &= ~SCOPE_REFINE;

				/*
				 * Superclass decl is brought in unrefined
				 * (though it might be an implied
				 * refinement depending on side-effects).
				 */
				nd->d_SubNext = d->d_SubBase;
				d->d_SubBase = nd;
				continue;
			}

			/*
			 * If there is a conflict and we are not refining
			 * the superclass entity, then pull in the
			 * superclass entity and make it invisible to
			 * sg_Level searches.  This could bring in
			 * multiple levels of the same id.
			 *
			 * Note that this may result in multiple ids, but
			 * they will be at different levels.  In this case
			 * rd will be at the current level and nd will be
			 * at some prior level.
			 *
			 * Order is important here.
			 */
			if ((rd->d_ScopeFlags & SCOPE_REFINE) == 0) {
				/* XXX proliferates decls/types? */
				nd = DupDeclaration(sg, d);
				dassert(d->d_Index == nd->d_Index);
				nd->d_ScopeFlags &= ~(SCOPE_ALL_VISIBLE |
						      SCOPE_REFINE);
#if 0
				printf("    conflict, not refined, "
				       "overload\n");
#endif
				/*
				 * Superclass decl is brought in unrefined
				 * (though it might be an implied
				 * refinement depending on side-effects).
				 */
				nd->d_SubNext = d->d_SubBase;
				d->d_SubBase = nd;
				continue;
			}

			/*
			 * Ok, we need to refine.  But the superclass may
			 * contain multiple levels of the same id.  We
			 * only refine the one that is visible to us.
			 * None of these other declarations will be at the
			 * same level.
			 */
			if ((d->d_ScopeFlags & SCOPE_ALL_VISIBLE) == 0) {
				nd = DupDeclaration(sg, d);
				dassert(d->d_Index == nd->d_Index);
				nd->d_ScopeFlags &= ~(SCOPE_ALL_VISIBLE |
						      SCOPE_REFINE);
#if 0
				printf("    conflict, refined "
				       "(skip this one)\n");
#endif
				/*
				 * Superclass decl is brought in unrefined
				 * (though it might be an implied
				 * refinement depending on side-effects).
				 */
				nd->d_SubNext = d->d_SubBase;
				d->d_SubBase = nd;
				continue;
			}

			/*
			 * Whew!  Finally, we found the superclass decl
			 * that we wish to refine.  We had better not have
			 * already refined it or there's something wrong
			 * with the algorithm.
			 *
			 * Since we inherit the superclass method's level
			 * our method will run in the superclass instead
			 * of the original, but d_Super still must be set
			 * for findDecl() to track down visibility
			 * relative to the superclass methods.
			 */
			RenameDecl(rd, sg);
			dassert_decl(rd, rd->d_Super == NULL);
			dassert(d->d_Index == rd->d_Index);
			rd->d_Level = d->d_Level;
			rd->d_Super = d;

			/*
			 * super->subclass(es) list
			 */
			rd->d_SubNext = d->d_SubBase;
			d->d_SubBase = rd;

			/*
			 * This is for the superclass method access
			 * special case below.
			 */
			if (d->d_Op == DOP_PROC) {
				d->d_Flags |= DF_SUPERCOPY;
			}

			/*
			 * Refinements inherit the locking mode from the
			 * superclass and are not allowed to change it.
			 */
			if ((rd->d_ScopeFlags & SCOPE_LOCKING_MASK) &&
			    (d->d_ScopeFlags ^ rd->d_ScopeFlags) &
			    SCOPE_LOCKING_MASK) {
				StmtFatalError(st,
					TOK_ERR_ILLEGAL_LOCKING_REFINEMENT);
			}
			rd->d_ScopeFlags |= d->d_ScopeFlags &
					    SCOPE_LOCKING_MASK;
		}

		/*
		 * Deal with any remaining elements in tsg.  These are
		 * 'extensions' to the superclass.  There may also be
		 * invisible DOP_PROC's to handle the special superclass
		 * method call case described above.
		 */
		while ((rd = RUNE_FIRST(&tsg->sg_DeclList)) != NULL) {
			if (rd->d_ScopeFlags & SCOPE_REFINE) {
				if (rd->d_Super == NULL) {
					fprintf(stderr,
						"Unable to refine %s, it "
						"does not exist in "
						"superclass\n",
						rd->d_Id);
					dassert_decl(rd, 0);
				}
			}
			RenameDecl(rd, sg);
		}
		FreeSemGroup(tsg);

		/*
		 * We have to special case super.method() for a refined
		 * method.  Normally this makes the original method
		 * inaccessible (for storage), but we want it to work for
		 * a procedure so we make a copy in tsg.  (we can't dup it
		 * directly into sg because it will screw up the d_Index).
		 *
		 * We must not only clear the scope visibility and the
		 * temporary refine flag, we also have to clear
		 * constructor/destructor scope in the copy so only the
		 * refined constructor/destructor is called, not both the
		 * refined and the superclass constructor/destructor.
		 */
		RUNE_FOREACH(d, &sst->st_MyGroup->sg_DeclList, d_Node) {
			Declaration *nd;

			if (d->d_Flags & DF_SUPERCOPY) {
				d->d_Flags &= ~DF_SUPERCOPY;
				nd = DupDeclaration(sg, d);
				nd->d_ScopeFlags &= ~(SCOPE_ALL_VISIBLE |
						      SCOPE_REFINE |
						      SCOPE_CONSTRUCTOR |
						      SCOPE_DESTRUCTOR);
			}
		}
	} else if (st->st_Op == ST_Class) {
		sg->sg_Level = 0;
	}

	st->st_Flags &= ~STF_RESOLVING;
	st->st_Flags |= STF_RESOLVED;

	/*
	 * If this is an ST_Import we must recurse through it.  The only
	 * statements under an Import should be Modules.  Well, really
	 * just one module.  And under that module we only care about
	 * ST_Import and ST_Class statements.
	 *
	 * If this is a shared import the statement list will be empty
	 * (later it may be used for import refinement, I dunno).  This
	 * is what we want since we should only resolve a shared import
	 * once.
	 */
	if (st->st_Op == ST_Import) {
		Stmt *scan;

		RUNE_FOREACH(scan, &st->st_List, st_Node) {
			Stmt *scan2;

			dassert_stmt(scan, scan->st_Op == ST_Module);
			RUNE_FOREACH(scan2, &scan->st_List, st_Node) {
				if (scan2->st_Op == ST_Import ||
				    scan2->st_Op == ST_Class) {
					ResolveClasses(scan2, flags);
				}
			}
		}
		if (st->st_ImportStmt.es_DLL) {
			void (*func)(void) =
				dlsym(st->st_ImportStmt.es_DLL,
				      "resolveClasses");
			if (func)
				func();
		}
	}
}

/*
 * ResolveStmt() - Resolve all types, declarations, and semantic refs
 *
 * Resolves all types, declarations, and identifiers.  Additionally
 * this function resolves intermediate types for expressions.  Storage
 * sizes are resolved but offsets are not assigned to declarations.
 *
 * Complexity counts are accumulated in the statement's SemGroup and
 * propagated upward.
 */
static void
ResolveStmt(SemGroup *isg, Stmt *st, int flags)
{
	/*
	 * Process whether we detached as a thread already or not.
	 */
	if (st->st_Parent)
		st->st_Flags |= st->st_Parent->st_Flags & STF_DIDRESULT;

	/*
	 * Deal with unresolved types here
	 */
	if (st->st_Flags & STF_SEMANTIC) {
		SemGroup *sg = st->st_MyGroup;
		Type *type;

		RUNE_FOREACH(type, &sg->sg_ClassList, ty_Node) {
			if (type->ty_Op == TY_UNRESOLVED) {
				resolveSuperClass(type);
			}
		}
	}

	/*
	 * Resolve statements.  Don't worry about declarations, those are
	 * handled after this switch.
	 */
	switch(st->st_Op) {
	case ST_Import:
		/*
		 * This will just flag the import declaration as resolved
		 * so the code generator dives into it for generation.
		 */
		if (st->st_ImportStmt.es_Decl)
			ResolveDecl(st->st_ImportStmt.es_Decl, 0);
		/* fall through */
	case ST_Module:
		/*
		 * Recursively resolve contents
		 */
#if 0
		/* if (isg == NULL || (isg->sg_Flags & SGF_ENTRY))*/
		{
			Stmt *scan;

			RUNE_FOREACH(scan, &st->st_List, st_Node) {
				/*
				 * XXX pass isg for import, st_MyGroup for
				 * module??
				 */
				ResolveStmt(st->st_MyGroup, scan, flags);
			}
			if (st->st_Op == ST_Import &&
			    st->st_ImportStmt.es_DLL) {
				void (*func)(void) =
					dlsym(st->st_ImportStmt.es_DLL,
					      "resolveTypes");
				if (func)
					func();
			}
		}
#endif
		break;
	case ST_Class:
#if 0
		ResolveDecl(st->st_ClassStmt.es_Decl, 0);
#endif
		break;
	case ST_Typedef:
		ResolveDecl(st->st_TypedefStmt.es_Decl, 0);
		break;
	case ST_Decl:
		/*
		 * Resolve declarations, skipping any whose context was
		 * moved to a class (e.g. a declaration at the top level
		 * of a file like Fd.setfd(...) also exists in the Fd
		 * class).
		 */
		{
			Declaration *d = st->st_DeclStmt.es_Decl;
			int i;

			for (i = 0; i < st->st_DeclStmt.es_DeclCount; ++i) {
				if (st->st_MyGroup == d->d_MyGroup)
					ResolveDecl(d, 0);
				d = RUNE_NEXT(d, d_Node);
			}
		}
		break;
	case ST_Block:
		{
			Stmt *scan;

			RUNE_FOREACH(scan, &st->st_List, st_Node) {
				ResolveStmt(isg, scan, flags);
			}
		}
		break;
	case ST_Nop:
		break;
	case ST_Loop:
		if (st->st_LoopStmt.es_Init)
			ResolveStmt(isg, st->st_LoopStmt.es_Init, flags);
		if (st->st_LoopStmt.es_BCond) {
			/*
			 * NOTE: BoolType global implies an rvalue.
			 */
			st->st_LoopStmt.es_BCond =
				ResolveExp(isg, st->st_MyGroup,
					   st->st_LoopStmt.es_BCond,
					   &BoolType, RESOLVE_AUTOCAST);
		}
		if (st->st_LoopStmt.es_ACond) {
			/*
			 * NOTE: BoolType global implies an rvalue.
			 */
			st->st_LoopStmt.es_ACond =
				ResolveExp(isg, st->st_MyGroup,
					   st->st_LoopStmt.es_ACond,
					   &BoolType, RESOLVE_AUTOCAST);
		}
		if (st->st_LoopStmt.es_AExp) {
			/*
			 * NOTE: VoidType global implies an rvalue.
			 */
			st->st_LoopStmt.es_AExp =
				ResolveExp(isg, st->st_MyGroup,
					   st->st_LoopStmt.es_AExp,
					   &VoidType, RESOLVE_AUTOCAST);
		}
		if (st->st_LoopStmt.es_Body) {
			ResolveStmt(isg, st->st_LoopStmt.es_Body, flags);
#if 0
			/* remove handled in ResolveDecl DOP_PROC */
			if ((st->st_LoopStmt.es_Body->st_Flags &
			     STF_RESOLVED) == 0) {
				ResolveAlignment(st->st_LoopStmt.es_Body,
						 flags);
				ResolveStorage(st->st_LoopStmt.es_Body,
					       flags);
			}
#endif
		}
		break;
	case ST_BreakCont:
		break;
	case ST_Bad:
		break;
	case ST_IfElse:
		/*
		 * NOTE: BoolType global implies an rvalue.
		 */
		st->st_IfStmt.es_Exp =
			ResolveExp(isg, st->st_MyGroup, st->st_IfStmt.es_Exp,
				   &BoolType, RESOLVE_AUTOCAST);
		ResolveStmt(isg, st->st_IfStmt.es_TrueStmt, flags);
		if (st->st_IfStmt.es_FalseStmt)
			ResolveStmt(isg, st->st_IfStmt.es_FalseStmt, flags);
		break;
	case ST_Return:
		/*
		 * NOTE: lvalue/rvalue depends on return type.
		 */
		st->st_RetStmt.es_ProcRetType =
			resolveReturnType(st->st_MyGroup, flags);
		if (st->st_RetStmt.es_Exp) {
			if (st->st_Flags & STF_DIDRESULT)
				StmtFatalError(st, TOK_ERR_RESULT_SEQUENCING);
			st->st_RetStmt.es_Exp =
				ResolveExp(isg, st->st_MyGroup,
					   st->st_RetStmt.es_Exp,
					   st->st_RetStmt.es_ProcRetType,
					   RESOLVE_AUTOCAST);
		}
		break;
	case ST_Result:
		/*
		 * NOTE: lvalue/rvalue depends on return type.
		 */
		if (st->st_Flags & STF_DIDRESULT)
			StmtFatalError(st, TOK_ERR_RESULT_SEQUENCING);
		if ((st->st_Parent->st_Flags & STF_SEMTOP) == 0)
			StmtFatalError(st, TOK_ERR_RESULT_SEQUENCING);
		st->st_ResStmt.es_ProcRetType =
			resolveReturnType(st->st_MyGroup, flags);
		if (st->st_ResStmt.es_Exp) {
			st->st_ResStmt.es_Exp =
				ResolveExp(isg, st->st_MyGroup,
					   st->st_ResStmt.es_Exp,
					   st->st_ResStmt.es_ProcRetType,
					   RESOLVE_AUTOCAST);
		}

		/*
		 * Flag that we executed result;
		 */
		{
			Stmt *scan;

			for (scan = st; scan; scan = scan->st_Parent) {
				scan->st_Flags |= STF_DIDRESULT;
				scan->st_MyGroup->sg_Flags |= SGF_DIDRESULT;
				if (scan->st_Flags & STF_SEMTOP)
					break;
			}
		}
		break;
	case ST_Switch:
		/*
		 * NOTE: Switch type must be an rvalue.
		 *
		 * NOTE: It is possible to switch on a type.  See ST_Case
		 *	 below for more detail.
		 */
		st->st_SwStmt.es_Exp->ex_Flags |= EXF_REQ_TYPE;
		st->st_SwStmt.es_Exp =
			ResolveExp(isg, st->st_MyGroup,
				   st->st_SwStmt.es_Exp, NULL, 0);

		/*
		 * Switch-on-expression() expects an rvalue.
		 */
		if ((st->st_SwStmt.es_Exp->ex_Flags & EXF_RET_TYPE) == 0) {
			st->st_SwStmt.es_Exp->ex_Type =
				DEL_LVALUE(st->st_SwStmt.es_Exp->ex_Type);
		}
		{
			Stmt *scan;

			RUNE_FOREACH(scan, &st->st_List, st_Node) {
				ResolveStmt(isg, scan, flags);
			}
		}
		break;
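	/*
	 * (Type-switch protocol, summarizing the ST_Switch case above
	 * and the ST_Case case below: the switch sets EXF_REQ_TYPE on
	 * its expression; if that expression resolves to a type rather
	 * than a value it answers with EXF_RET_TYPE, and each case
	 * expression is then required to return a type as well instead
	 * of being coerced to the switch type.)
	 */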
	case ST_Case:
		/*
		 * Handle a case/default.  Note that when switching on a
		 * type, each case expression must return a type.
		 *
		 * NOTE: Case type must be an rvalue.  We use the switch
		 *	 type to cast, so it will be.
		 */
		{
			Stmt *scan;
			Exp *exp;
			Type *type;

			/*
			 * Set type to cast cases to if we are switching
			 * on an expression, otherwise we are switching on
			 * a type and should not try to coerce the cases
			 * (it doesn't make sense to).
			 */
			dassert_stmt(st, st->st_Parent->st_Op == ST_Switch);
			if (st->st_Parent->st_SwStmt.es_Exp->ex_Flags &
			    EXF_RET_TYPE)
				type = NULL;
			else
				type = st->st_Parent->st_SwStmt.es_Exp->ex_Type;

			/*
			 * case: (if es_Exp is NULL, this is a default: )
			 */
			if ((exp = st->st_CaseStmt.es_Exp) != NULL) {
				if (type == NULL)
					exp->ex_Flags |= EXF_REQ_TYPE;
				exp = ResolveExp(isg, st->st_MyGroup, exp,
						 type, RESOLVE_AUTOCAST);
				if (type == NULL)
					dassert(exp->ex_Flags & EXF_RET_TYPE);
				st->st_CaseStmt.es_Exp = exp;
			}

			/*
			 * Elements of the case/default
			 */
			RUNE_FOREACH(scan, &st->st_List, st_Node) {
				ResolveStmt(isg, scan, flags);
			}
		}
		break;
	case ST_Exp:
		/*
		 * NOTE: VoidType global implies an rvalue.
		 *
		 * NOTE: If ResolveExp() doesn't cast to void for
		 *	 us, we will do it here.
		 */
		{
			Exp *exp;

			exp = ResolveExp(isg, st->st_MyGroup,
					 st->st_ExpStmt.es_Exp,
					 &VoidType, RESOLVE_AUTOCAST);
			if (exp->ex_Type != &VoidType) {
				exp = resolveExpCast(isg, st->st_MyGroup,
						     exp, &VoidType, flags);
			}
			st->st_ExpStmt.es_Exp = exp;
		}
		break;
	case ST_Proc:
		{
			Stmt *scan;

			RUNE_FOREACH(scan, &st->st_List, st_Node) {
				ResolveStmt(isg, scan, flags);
			}
		}
		break;
	case ST_ThreadSched:
		break;
	default:
		dassert_stmt(st, 0);
	}

	/*
	 * Calculate and propagate complexity upward.
	 */
	{
		SemGroup *sg;

		if ((sg = st->st_MyGroup) != NULL) {
			++sg->sg_Complexity;
			if ((st->st_Flags & STF_SEMTOP) == 0 &&
			    sg->sg_Parent &&
			    RUNE_NEXT(st, st_Node) == NULL) {
				sg->sg_Parent->sg_Complexity +=
					sg->sg_Complexity;
			}

			/*
			 * Head of procedure needs to know if any ABI
			 * calls will be made so it can reserve stack
			 * space.
			 */
			if ((st->st_Flags & STF_SEMTOP) == 0 &&
			    sg->sg_Parent) {
				sg->sg_Parent->sg_Flags |= sg->sg_Flags &
							   SGF_ABICALL;
			}
		}
	}
}

/*
 * Locate the ST_Proc statement and resolve & return its return type
 */
static Type *
resolveReturnType(SemGroup *sg, int flags __unused)
{
	Declaration *d;
	Type *type;
	Stmt *st;

	/*
	 * Locate the ST_Proc statement
	 */
	while (sg && (sg->sg_Stmt == NULL || sg->sg_Stmt->st_Op != ST_Proc))
		sg = sg->sg_Parent;
	dassert(sg != NULL);
	st = sg->sg_Stmt;
	d = st->st_ProcStmt.es_Decl;	/* decl is already resolved */
	dassert_decl(d, d->d_Op == DOP_PROC);
	dassert_decl(d, d->d_Flags & (DF_RESOLVING | DF_RESOLVED));
	type = d->d_ProcDecl.ed_Type;
	dassert_decl(d, type->ty_Op == TY_PROC);

	return(type->ty_ProcType.et_RetType);
}

static Type *
resolveArgsType(SemGroup *sg, int flags __unused)
{
	Declaration *d;
	Type *type;
	Stmt *st;

	/*
	 * Locate the ST_Proc statement
	 */
	while (sg && (sg->sg_Stmt == NULL || sg->sg_Stmt->st_Op != ST_Proc))
		sg = sg->sg_Parent;
	dassert(sg != NULL);
	st = sg->sg_Stmt;
	d = st->st_ProcStmt.es_Decl;	/* decl is already resolved */
	dassert_decl(d, d->d_Op == DOP_PROC);
	dassert_decl(d, d->d_Flags & (DF_RESOLVING | DF_RESOLVED));
	type = d->d_ProcDecl.ed_Type;
	dassert_decl(d, type->ty_Op == TY_PROC);

	return(type->ty_ProcType.et_ArgsType);
}
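/*
 * Illustrative sketch (kept out of the build): ResolveDecl() below
 * threads constructors and destructors onto per-SemGroup singly-linked
 * lists (sg_CBase/d_CNext, sg_DBase/d_DNext, and sg_GBase/d_GNext for
 * the global flavors).  The lists are built in reverse order and later
 * corrected by the caller; a walk over one of them would look like:
 */
#if 0
	Declaration *scan;

	for (scan = sg->sg_CBase; scan; scan = scan->d_CNext) {
		/* visit constructor 'scan' */
	}
#endif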
/*
 * ResolveDecl() - resolve a declaration
 *
 * If the declaration represents a procedure argument, special
 * processing of LVALUE scope is required to pass the declaration
 * by reference instead of by value.  Note that the size of the
 * underlying type DOES NOT CHANGE... it may be much larger.
 *
 * NOTE: we do not resolve d_Offset here.
 */
static void
ResolveDecl(Declaration *d, int retry)
{
	Type *type;
	Stmt *st;
	SemGroup *sg = NULL;
	int ok = 0;

	if (d->d_Flags & DF_RESOLVED)
		return;
	if (d->d_Flags & DF_RESOLVING) {
		if (retry == 0)
			return;
	}
	d->d_Flags |= DF_RESOLVING;

	switch(d->d_Op) {
	case DOP_CLASS:
		if (d->d_ClassDecl.ed_Super)
			ResolveType(d->d_ClassDecl.ed_Super, NULL, 0);
		sg = d->d_ClassDecl.ed_SemGroup;
		ResolveSemGroup(sg, 0);
		if (sg->sg_Flags & SGF_RESOLVED) {
			d->d_Bytes = d->d_ClassDecl.ed_SemGroup->sg_Bytes;
			d->d_AlignMask =
				d->d_ClassDecl.ed_SemGroup->sg_AlignMask;
			ok = 1;
		}
		break;
	case DOP_ALIAS:
		/*
		 * Alias access is a barrier and always returns an rvalue.
		 *
		 * DupExp is absolutely required due to the alias's target
		 * context being different for each consumer.
		 */
		type = ResolveType(d->d_AliasDecl.ed_Type, NULL, 0);
		if (type->ty_Flags & TF_RESOLVED)
			ok = 1;
		if (d->d_AliasDecl.ed_AssExp) {
			d->d_AliasDecl.ed_AssExp =
				ResolveExp(d->d_ImportSemGroup, d->d_MyGroup,
					   d->d_AliasDecl.ed_AssExp,
					   DEL_LVALUE(type),
					   RESOLVE_AUTOCAST);
		}
		/* handled in DOT and STRIND resolver */
		if ((d->d_Flags & DF_DIDEXPDUP) == 0) {
			d->d_Flags |= DF_DIDEXPDUP;
			SetDupExp(NULL, d->d_AliasDecl.ed_AssExp);
		}
		break;
	case DOP_TYPEDEF:
		d->d_Flags |= DF_RESOLVED;	/* XXX */
		type = ResolveType(d->d_TypedefDecl.ed_Type, NULL, 0);
		d->d_Flags &= ~DF_RESOLVED;
		if (type->ty_Flags & TF_RESOLVED)
			ok = 1;
		break;
	case DOP_IMPORT:
		/*
		 * This only occurs when resolving an import's semantic
		 * group.  Since we are scanning statements in that
		 * context we do not have to recurse here, ResolveStmt()
		 * will do it for us.
		 */
		ok = 1;
		break;
	case DOP_PROC:
		/*
		 * XXX global procedure, later on, make the argument a
		 * type instead of storage?
		 *
		 * Avoid a circular loop failure when the procedure
		 * declaration references the class it is defined in
		 * by marking the resolve complete even if the type
		 * isn't.  We can do this because the procedure takes
		 * no field storage.
		 */
		ResolveType(d->d_ProcDecl.ed_Type, NULL, 0);
		ok = 1;

		/*
		 * Deal with constructor/destructor chaining.  The
		 * chaining winds up being reversed and will be corrected
		 * by the caller.
		 *
		 * NOTE: Constructors and destructors might be referenced
		 *	 without the entire SG being resolved, so be sure
		 *	 to set the ABI flags here.
		 */
		if (d->d_ScopeFlags & SCOPE_GLOBAL) {
			if ((d->d_Flags & DF_ONGLIST) == 0 &&
			    (d->d_ScopeFlags & (SCOPE_CONSTRUCTOR |
						SCOPE_DESTRUCTOR))) {
				d->d_GNext = d->d_MyGroup->sg_GBase;
				d->d_Flags |= DF_ONGLIST;
				d->d_MyGroup->sg_GBase = d;
				d->d_MyGroup->sg_Flags |= SGF_GABICALL;
			}
		} else {
			if ((d->d_Flags & DF_ONCLIST) == 0 &&
			    (d->d_ScopeFlags & SCOPE_CONSTRUCTOR)) {
				d->d_CNext = d->d_MyGroup->sg_CBase;
				d->d_Flags |= DF_ONCLIST;
				d->d_MyGroup->sg_CBase = d;
				d->d_MyGroup->sg_Flags |= SGF_ABICALL;
			}
			if ((d->d_Flags & DF_ONDLIST) == 0 &&
			    (d->d_ScopeFlags & SCOPE_DESTRUCTOR)) {
				d->d_DNext = d->d_MyGroup->sg_DBase;
				d->d_Flags |= DF_ONDLIST;
				d->d_MyGroup->sg_DBase = d;
				d->d_MyGroup->sg_Flags |= SGF_ABICALL;
			}
		}

		/*
		 * If this procedure is bound to a DLL we have to resolve
		 * it here.
		 */
		if (d->d_ScopeFlags & SCOPE_CLANG) {
			d->d_ProcDecl.ed_DLLFunc =
				FindDLLSymbol(NULL, d->d_ImportSemGroup,
					      d->d_Id);
		}
		break;
	case DOP_ARGS_STORAGE:
	case DOP_STACK_STORAGE:
	case DOP_GLOBAL_STORAGE:
	case DOP_GROUP_STORAGE:
		type = ResolveType(d->d_StorDecl.ed_Type, NULL, 0);

		/*
		 * Complete if the underlying type is resolved.
		 */
		if (type->ty_Flags & TF_RESOLVED)
			ok = 1;

		/*
		 * Promote the lvalue storage qualifier (e.g. from a
		 * typedef) into the declaration's scope.  This is what
		 * ultimately controls lvalue vs rvalue arguments to
		 * procedures and such.
		 */
		if ((type->ty_SQFlags & SF_LVALUE) &&
		    (d->d_ScopeFlags & SCOPE_LVALUE) == 0) {
			d->d_ScopeFlags |= SCOPE_LVALUE;
		}

		/*
		 * If the resolve adjusted locking modes the declaration
		 * scope needs to be adjusted.  The declaration's
		 * d_Storage mechanics drive the code generator.
		 */
		if (type->ty_SQFlags & SF_UNTRACKED) {
			d->d_ScopeFlags &= ~SCOPE_LOCKING_MASK;
			d->d_ScopeFlags |= SCOPE_UNTRACKED;
		}
		if (type->ty_SQFlags & SF_UNLOCKED) {
			d->d_ScopeFlags &= ~SCOPE_LOCKING_MASK;
			d->d_ScopeFlags |= SCOPE_UNLOCKED;
		}
		if (type->ty_SQFlags & SF_SOFT) {
			d->d_ScopeFlags &= ~SCOPE_LOCKING_MASK;
			d->d_ScopeFlags |= SCOPE_SOFT;
		}
		if (type->ty_SQFlags & SF_HARD) {
			d->d_ScopeFlags &= ~SCOPE_LOCKING_MASK;
			d->d_ScopeFlags |= SCOPE_HARD;
		}

		/*
		 * Default assignment handling expects an rvalue.
		 */
		if (d->d_StorDecl.ed_AssExp) {
			d->d_StorDecl.ed_AssExp =
				ResolveExp(d->d_ImportSemGroup, d->d_MyGroup,
					   d->d_StorDecl.ed_AssExp,
					   DEL_LVALUE(type),
					   RESOLVE_AUTOCAST);
		}

		if (d->d_ScopeFlags & SCOPE_LVALUE) {
			/*
			 * Object is passed as a LValueStor structure.
			 * Note that d_Bytes is going to be different than
			 * the underlying type (which represents the
			 * actual object).
			 */
			d->d_Bytes = sizeof(LValueStor);
			d->d_AlignMask = LVALUESTOR_ALIGN;
		} else {
			/*
			 * Object is passed by value.
			 */
			d->d_AlignMask = type->ty_AlignMask;
			d->d_Bytes = type->ty_Bytes;
		}

		/*
		 * If the declaration represents or contains an
		 * argument-lvalue or a pointer we have to add it to
		 * the SemGroup's SRBase list to properly reference or
		 * dereference the elements.  XXX only do this for
		 * non-global storage.
		 *
		 * If the declaration has LVALUE scope we must do the
		 * same because the ref is tracked.
		 */
		if ((d->d_Flags & DF_ONSRLIST) == 0) {
			if (d->d_Op != DOP_GLOBAL_STORAGE &&
			    (type->ty_Flags & TF_HASLVPTR)) {
				d->d_SRNext = d->d_MyGroup->sg_SRBase;
				d->d_MyGroup->sg_SRBase = d;
				d->d_Flags |= DF_ONSRLIST;
			} else if (d->d_ScopeFlags & SCOPE_LVALUE) {
				d->d_SRNext = d->d_MyGroup->sg_SRBase;
				d->d_MyGroup->sg_SRBase = d;
				d->d_Flags |= DF_ONSRLIST;
			}
		}

		/*
		 * Deal with constructor/destructor chaining.  The
		 * chaining winds up being reversed and will be corrected
		 * by the caller.
		 *
		 * NOTE: Constructors and destructors might be referenced
		 *	 without the entire SG being resolved, so be sure
		 *	 to set the ABI flags here.
		 */
		if ((d->d_Flags & DF_ONCLIST) == 0 &&
		    (type->ty_Flags & TF_HASCONSTRUCT)) {
			d->d_CNext = d->d_MyGroup->sg_CBase;
			d->d_MyGroup->sg_CBase = d;
			d->d_MyGroup->sg_Flags |= SGF_ABICALL;
			d->d_Flags |= DF_ONCLIST;
		}
		if ((d->d_Flags & DF_ONDLIST) == 0 &&
		    (type->ty_Flags & TF_HASDESTRUCT)) {
			d->d_DNext = d->d_MyGroup->sg_DBase;
			d->d_MyGroup->sg_DBase = d;
			d->d_MyGroup->sg_Flags |= SGF_ABICALL;
			d->d_Flags |= DF_ONDLIST;
		}
		if ((d->d_Flags & DF_ONGLIST) == 0 &&
		    (type->ty_Flags & (TF_HASGCONSTRUCT |
				       TF_HASGDESTRUCT))) {
			d->d_GNext = d->d_MyGroup->sg_GBase;
			d->d_MyGroup->sg_GBase = d;
			d->d_MyGroup->sg_Flags |= SGF_GABICALL;
			d->d_Flags |= DF_ONGLIST;
		}
#if 0
		/*
		 * XXX This whole thing has changed.  We don't adjust
		 * default SCOPE or SF locking flags any more, we let the
		 * code generator and interpreter detect that a default
		 * mode is being used.
		 *
		 * We set content-locking defaults generically.  With no
		 * SCOPE_* flags set the default will be normally-locked
		 * (GENSTAT_LOCK).
		 *
		 * SCOPE_UNTRACKED- GENSTAT_NONE (no ref, no lock).
		 * SCOPE_UNLOCKED - GENSTAT_REFD
		 * SCOPE_SOFT	  - GENSTAT_LOCK
		 * SCOPE_HARD	  - GENSTAT_LOCKH
		 *
		 * Content-locking is only applicable to an lvalue,
		 * pointer, or reference object, but we still want to
		 * set the proper defaults more generically.
		 *
		 * The contents of classes and arrays are never content-
		 * locked.  Compound types (that are not procedure
		 * arguments) are also not content-locked for now.
		 */
		if ((d->d_Op & DOPF_STORAGE) &&
		    (d->d_Scope.s_Flags & SCOPE_LVALUE) == 0) {
			if (type->ty_Op == TY_CLASS ||
			    type->ty_Op == TY_ARYOF ||
			    type->ty_Op == TY_COMPOUND) {
				d->d_ScopeFlags |= SCOPE_UNLOCKED;
			}
		}
#endif
		break;
	default:
		dassert_decl(d, 0);
	}

	if (ok) {
		d->d_Flags &= ~DF_RESOLVING;
		d->d_Flags |= DF_RESOLVED;
	} else {
		deferDecl(d);
	}

	/*
	 * Post resolution flag resolving (to handle recursion)
	 */
	switch(d->d_Op) {
	case DOP_PROC:
		/*
		 * Create copies of procedures as they are needed (thus
		 * avoiding an XxY matrix effect).
		 */
		if ((st = d->d_ProcDecl.ed_OrigBody) == NULL) {
			Declaration *super = d->d_Super;

			while (super &&
			       super->d_ProcDecl.ed_OrigBody == NULL) {
				super = super->d_Super;
			}
			if (super) {
				st = super->d_ProcDecl.ed_OrigBody;
				if (super->d_MyGroup->sg_Stmt->st_Op ==
				    ST_Class) {
					/*
					 * Copy-down a procedure from a
					 * superclass.  The procedure must
					 * still be linked into its
					 * superclass for semantic
					 * searches to work as expected,
					 * hence the use of
					 * super->d_MyGroup and
					 * st->st_Parent.
					 *
					 * Note that this code is not
					 * reached in the case of a nested
					 * procedure, since nested
					 * procedures are copied with the
					 * parent.
					 */
					st = DupStmt(super->d_MyGroup,
						     st->st_Parent, st);
				} else {
					/*
					 * Copy-down a nested procedure.
					 * The procedure must be linked
					 * into the copy of the parent
					 * procedure, not the original
					 * parent procedure, or it will
					 * never be resolved.
					 */
					st = DupStmt(d->d_Stmt->st_MyGroup,
						     d->d_Stmt, st);
				}
			} else {
				/*
				 * Internal procedure (we do not need to
				 * do anything), there is no statement
				 * body to duplicate.
				 */
				st = NULL;
			}
			d->d_ProcDecl.ed_OrigBody = st;
		}
		if (st && (d->d_Flags & DF_DIDPULLDOWN) == 0) {
			/*
			 * Procedure is being used in the primary class it
			 * was defined in.
			 *
			 * Link the procedure body to the declaration and
			 * resolve the procedure body.
			 */
			d->d_Flags |= DF_DIDPULLDOWN;
			st = DupStmt(d->d_MyGroup, st->st_Parent, st);
			dassert_stmt(st, d->d_ProcDecl.ed_ProcBody == NULL);
			d->d_ProcDecl.ed_ProcBody = st;
			st->st_ProcStmt.es_Decl = d;
			st->st_ProcStmt.es_Scope = d->d_Scope;
			ResolveStmt(d->d_ImportSemGroup, st, 0);
#if 0
			ResolveAlignment(st);
			ResolveStorage(st);
#endif
		}
		break;
	default:
		break;
	}

	/*
	 * __align(%d) scope qualifier, override the type's alignment
	 */
	if ((d->d_Scope.s_Flags & SCOPE_ALIGN) && d->d_Scope.s_AlignOverride)
		d->d_AlignMask = d->d_Scope.s_AlignOverride - 1;

#if 1
	sg = d->d_MyGroup;
	if (sg && (sg->sg_Type == SG_MODULE || sg->sg_Type == SG_CLASS)) {
		/* SG_COMPOUND too? maybe not */
		ResolveSemGroup(d->d_MyGroup, 0);
	}
#endif
#if 0
	/*
	 * We specifically do not try to fully resolve the decl's SG,
	 * which allows us to avoid procedures and storage which are
	 * never used.  However, the presence of constructors or
	 * destructors requires a scan.
	 */
	if ((d->d_MyGroup->sg_Flags & (SGF_RESOLVING | SGF_RESOLVED)) == 0) {
		Declaration *d2;

		RUNE_FOREACH(d2, &d->d_MyGroup->sg_DeclList, d_Node) {
			if ((d2->d_ScopeFlags & (SCOPE_CONSTRUCTOR |
						 SCOPE_DESTRUCTOR)) &&
			    (d2->d_Flags & DF_RESOLVED) == 0) {
				ResolveDecl(d2, 0);
			}
		}
	}
#endif
}
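/*
 * Worked example for the __align() override above (illustrative, kept
 * out of the build): alignments are stored as masks, so __align(N)
 * records N-1 and N is assumed to be a power of 2.  __align(16) yields
 * d_AlignMask == 15, and offsets are then rounded up with BASEALIGN():
 */
#if 0
	runesize_t off = BASEALIGN(8, 15);	/* rounds 8 up to 16 */
#endif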
/*
 * ResolveExp() - resolve expression
 *
 * Resolve an expression.  We are expected to resolve all ex_Type's
 * for the expression tree as well as expected to track down
 * operators and base identifiers.
 *
 * itype is a type hint.  If non-NULL, the caller would like our
 * expression to return the specified type.  There are a few special
 * cases:
 *
 * EXF_REQ_ARRAY - when OBRACKET requests an array optimization it
 *		   passes a post-array-indexed typehint (as if
 *		   you had done the optimization).  You must ignore
 *		   itype if you are unable to do the optimization.
 *
 * NOTE: Even rvalues may have refstor side-effects at run-time.
 */
#define exFlags		exp->ex_Flags
#define exFlags2	exp->ex_Flags2
#define exType		exp->ex_Type
#define exToken		exp->ex_Token
#define exDecl		exp->ex_Decl
#define exLhs		exp->ex_Lhs
#define exVisibility	exp->ex_Visibility
#define exRhs		exp->ex_Rhs
#define exId		exp->ex_Id

static Exp *
ResolveExp(SemGroup *isg, SemGroup *sg, Exp *exp, Type *itype, int flags)
{
	int couldconst;

	if (exp->ex_Flags & EXF_DUPEXP)
		exp = DupExp(sg, exp);
	couldconst = 0;

	/*
	 * Ensure that the cast target type hint is resolved.
	 */
	if (itype)
		ResolveType(itype, NULL, 0);

	/*
	 * note: certain cases below call other resolver functions and
	 * assume that ex* variables are unchanged.
	 */
	dassert((exFlags & EXF_DUPEXP) || (exFlags & EXF_RESOLVED) == 0);

	switch(exToken) {
	case TOK_ASS:
		/*
		 * An assignment.  Note that we optimize void returns
		 * (such as when an assignment is a statement like
		 * 'a = 4;')... the result of the assignment is cast to
		 * void.
		 *
		 * NOTE: Left-hand-side must be an LVALUE, return type
		 *	 inherits this feature unless the parent turns
		 *	 off the bit so the TOK_ASS run-time must deal
		 *	 with that.
		 */
		exLhs = ResolveExp(isg, sg, exLhs, NULL,
				   flags & ~RESOLVE_AUTOCAST);
		dassert_exp(exLhs, exLhs->ex_Type->ty_SQFlags & SF_LVALUE);
		exRhs = ResolveExp(isg, sg, exRhs,
				   DEL_LVALUE(exLhs->ex_Type),
				   flags | RESOLVE_AUTOCAST);
		if (exLhs->ex_Type->ty_SQFlags & SF_CONST) {
			ExpFatalError(exp, TOK_ERR_READONLY);
		}

		/* AssExp handles this optimization */
		if (itype == &VoidType) {
			exType = itype;
			exFlags |= EXF_RET_VOID;
		} else {
			exType = exLhs->ex_Type;
		}
#if 0
		/*
		 * Check @ref assignment compatibility.
		 */
		if (exLhs->ex_Type->ty_Op == TY_REFTO) {
			switch(MatchType(exLhs->ex_Type, exRhs->ex_Type)) {
			case SG_COMPAT_FULL:
				printf("assign %s compatibility FULL\n",
				       exLhs->ex_Id);
				break;
			case SG_COMPAT_PART:
				printf("assign %s compatibility PART\n",
				       exLhs->ex_Id);
				break;
			case SG_COMPAT_SUBCLASS:
				printf("assign %s compatibility SUBCL\n",
				       exLhs->ex_Id);
				break;
			case SG_COMPAT_FAIL:
				printf("assign %s compatibility FAIL\n",
				       exLhs->ex_Id);
				break;
			}
		}
#endif
		break;
	case TOK_ANDAND:
		/*
		 * NOTE: BoolType global implies an rvalue.
		 */
		couldconst = 1;
		exLhs = ResolveExp(isg, sg, exLhs, &BoolType,
				   flags | RESOLVE_AUTOCAST);
#if 1
		/*
		 * If left-side can terminate the operation, mark the
		 * expression as PROBCONST for the interpreter and code
		 * generator (allowing the rhs to not be a constant).
		 */
		if (exLhs->ex_Flags & (EXF_CONST | EXF_PROBCONST)) {
			RunTmpStor ts;

			exLhs = resolveConstExpBool(isg, sg, exLhs,
						    flags, &ts);
			if (ts.ts_Bool == 0)
				exFlags |= EXF_PROBCONST;
		}
#endif
		/*
		 * Resolve rhs, and we can also flag PROBCONST if both
		 * sides are constants.
		 */
		exRhs = ResolveExp(isg, sg, exRhs, &BoolType,
				   flags | RESOLVE_AUTOCAST);
		if ((exLhs->ex_Flags & (EXF_CONST | EXF_PROBCONST)) &&
		    (exRhs->ex_Flags & (EXF_CONST | EXF_PROBCONST))) {
			exFlags |= EXF_PROBCONST;
		}
		exType = &BoolType;
		break;
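	/*
	 * (Worked example of the short-circuit fold above: in
	 * 'false && anything' the constant left side alone marks the
	 * whole expression EXF_PROBCONST, so the right side need not be
	 * constant.  TOK_OROR below is the mirror image for
	 * 'true || anything'.)
	 */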
	case TOK_OROR:
		/*
		 * NOTE: BoolType global implies an rvalue.
		 */
		couldconst = 1;
		exLhs = ResolveExp(isg, sg, exLhs, &BoolType,
				   flags | RESOLVE_AUTOCAST);
#if 1
		/*
		 * If left-side can terminate the operation, mark the
		 * expression as PROBCONST for the interpreter and code
		 * generator (allowing the rhs to not be a constant).
		 */
		if (exLhs->ex_Flags & (EXF_CONST | EXF_PROBCONST)) {
			RunTmpStor ts;

			exLhs = resolveConstExpBool(isg, sg, exLhs,
						    flags, &ts);
			if (ts.ts_Bool)
				exFlags |= EXF_PROBCONST;
		}
#endif
		/*
		 * Resolve rhs, and we can also flag PROBCONST if both
		 * sides are constants.
		 */
		exRhs = ResolveExp(isg, sg, exRhs, &BoolType,
				   flags | RESOLVE_AUTOCAST);
		if ((exLhs->ex_Flags & (EXF_CONST | EXF_PROBCONST)) &&
		    (exRhs->ex_Flags & (EXF_CONST | EXF_PROBCONST))) {
			exFlags |= EXF_PROBCONST;
		}
		exType = &BoolType;
		break;
	case TOK_DECL:
		/*
		 * This synthesized token occurs when we are able to
		 * collapse a structural indirection or dotted element
		 * into a declaration.  For example, 'module.routine'.
		 */
		/* XXX couldconst? */
		break;
	case TOK_DOT:
	case TOK_STRIND:
		/*
		 * Structural field access.  The left hand side may be an
		 * object (class or compound), a class type, or a
		 * compound type.
		 *
		 * A dotted access requires an lvalue on the left hand
		 * side if the left hand side represents storage.
		 *
		 * The result will be an lvalue if the right hand side
		 * represents storage.  We only loop if the right hand
		 * side is an alias replacement.
		 */
		{
			string_t id;
			Declaration *d;
			SemGroup *sg2;
			Type *type;
			int globalOnly = 0;
			int s;
			int visibility;
			int isRefTo = 0;
			int procedureOnly = 0;
			int eno = TOK_ERR_ID_NOT_FOUND;

			/*
			 * NOTE: Hint must 'always happen' since we may be
			 *	 modifying an expression that will later
			 *	 be Dup'd.
			 *
			 * NOTE: Lhs is always an lvalue for TOK_DOT, but
			 *	 does not have to be for TOK_STRIND.
			 */
			exLhs->ex_Flags |= EXF_REQ_TYPE;
			if (exToken == TOK_DOT)
				exLhs->ex_Flags |= exFlags & EXF_ADDRUSED;
			exLhs = ResolveExp(isg, sg, exLhs, NULL,
					   flags & ~RESOLVE_AUTOCAST);

			/*
			 * The RHS may have been turned into a
			 * TOK_SEMGRP_ID in a previous duplicate.  The
			 * change is considered permanent.
			 */
			if (exRhs->ex_Token != TOK_SEMGRP_ID) {
				dassert_exp(exRhs,
					    exRhs->ex_Token == TOK_STRUCT_ID);
				exRhs = ResolveExp(isg, sg, exRhs, NULL,
						   flags & ~RESOLVE_AUTOCAST);
			}
			id = exRhs->ex_Id;
			type = exLhs->ex_Type;

			/*
			 * Calculate scope and SemGroup to search.  Note
			 * that it is legal to do a structural '.'
			 * selection on a pointer, but it works
			 * differently than indirecting through a pointer
			 * via '->'.  In the case of '.' on a pointer, we
			 * first search the system Pointer class.
			 */
			if (exLhs->ex_Flags & EXF_RET_TYPE) {
				globalOnly = 1;
			}

			/*
			 * Figure out the base type used to look-up the
			 * identifier.  An identifier that resolves into a
			 * procedure winds up only being a hint for a
			 * reference type.
			 */
			if (exToken == TOK_STRIND) {
				switch(type->ty_Op) {
				case TY_CPTRTO:
					type = type->ty_CPtrType.et_Type;
					break;
				case TY_PTRTO:
					type = type->ty_PtrType.et_Type;
					break;
				case TY_REFTO:
					type = type->ty_RefType.et_Type;
					isRefTo = 1;
					break;
				default:
					dassert_exp(exp, 0);
					/* not reached */
				}
			}
again:
			switch(type->ty_Op) {
			case TY_CLASS:
				sg2 = type->ty_ClassType.et_SemGroup;
				break;
			case TY_COMPOUND:
				sg2 = type->ty_CompType.et_SemGroup;
				break;
			case TY_ARGS:
				sg2 = type->ty_ArgsType.et_SemGroup;
				break;
			case TY_VAR:
				sg2 = type->ty_VarType.et_SemGroup;
				break;
			case TY_IMPORT:
				sg2 = type->ty_ImportType.et_SemGroup;
				break;
			case TY_CPTRTO:
				/* YYY */
				dassert_exp(exp,
					    PointerType.ty_Op == TY_CLASS);
				sg2 = PointerType.ty_ClassType.et_SemGroup;
				break;
			case TY_PTRTO:
				dassert_exp(exp,
					    PointerType.ty_Op == TY_CLASS);
				sg2 = PointerType.ty_ClassType.et_SemGroup;
				break;
			case TY_REFTO:
				/* YYY */
				dassert_exp(exp,
					    PointerType.ty_Op == TY_CLASS);
				sg2 = PointerType.ty_ClassType.et_SemGroup;
				break;
			default:
				/*
				 * Possibly a pointer, aka ptr.NULL
				 */
				sg2 = NULL;
			}
			visibility = exLhs->ex_Visibility;

			/*
			 * Locate the identifier normally, via its type.
			 * ty_Visibility is the initial visibility (scope)
			 * that the semantic search should use in locating
			 * the identifier.
			 */
			if (sg2) {
				string_t ary[2] = { id, NULL };
				int level;

				if (exLhs->ex_Token == TOK_ID ||
				    exLhs->ex_Token == TOK_DECL) {
					if (exLhs->ex_Decl->d_Search) {
						level = exLhs->ex_Decl->
							d_Search->sg_Level;
					} else {
						level = sg2->sg_Level;
					}

					/*
					 * XXX BIG HACK
					 */
					if (exLhs->ex_Flags & EXF_SUPER) {
						if (isRefTo) {
							fprintf(stderr, "Can't super with reference type\n");
							dassert_exp(exp, 0);
						}
						if (level == 0) {
							fprintf(stderr, "No superclass available\n");
							dassert_exp(exp, 0);
						}
						--level;
					}
				} else {
					level = sg2->sg_Level; /* may be -1 */
				}
				visibility &= type->ty_Visibility;
				d = FindDeclPath(&exp->ex_LexRef, NULL,
						 sg2, NULL, ary, FDC_NOBACK,
						 &visibility, level, &eno);

				/*
				 * XXX more hack.  If the super is visible
				 * and a procedure we just found our own
				 * refinement, not the superclass method.
				 * This is because there is no 'superclass
				 * method' per se; refinements *REPLACE*
				 * superclass declarations and inherit the
				 * superclass's level.  However, we still
				 * want to be able to chain method calls
				 * so what we do instead is go through and
				 * find the procedure that we smacked when
				 * we did the refinement.  This procedure
				 * has already been conveniently brought
				 * into the subclass context as an
				 * 'invisible' entity at the same d_Level.
				 */
				if ((exLhs->ex_Flags & EXF_SUPER) &&
				    d && d->d_Op == DOP_PROC &&
				    (d->d_ScopeFlags & SCOPE_ALL_VISIBLE)) {
					string_t id2 = d->d_Id;
					SemGroup *olevel = d->d_Level;

					dassert_exp(exp, isRefTo == 0);
					while ((d = RUNE_NEXT(d, d_Node)) !=
					       NULL) {
						if (d->d_Id == id2 &&
						    d->d_Level == olevel &&
						    d->d_Op == DOP_PROC) {
							break;
						}
					}
				}
			} else {
				d = NULL;
			}

			if (d && procedureOnly && d->d_Op != DOP_PROC) {
				fprintf(stderr,
					"PTR.ELEMENT may be used for special "
					"pointer method calls, but not to "
					"access storage elements. "
					"Use PTR->ELEMENT instead\n");
				dassert_exp(exp, 0);
			}

			/*
			 * If referencing actual storage the storage must
			 * be declared global.
			 */
			if (d && globalOnly && (d->d_Op & DOPF_STORAGE) &&
			    (d->d_ScopeFlags & SCOPE_GLOBAL) == 0) {
				fprintf(stderr,
					"%s is not global. Only globals "
					"can be accessed through a type\n",
					d->d_Id);
				dassert_exp(exp, 0);
			}

			if (d) {
				/*
				 * Identifier found.  Note that if we are
				 * going through a reference type the
				 * declaration is not the actual one we
				 * use at run time.  It's just a template.
				 */
				ResolveDecl(d, 0);
				exDecl = d;
				exVisibility = visibility;

				if (exFlags & EXF_REQ_ADDROF)
					d->d_Flags |= DF_ADDROF;
				if (exFlags & EXF_ADDRUSED)
					d->d_Flags |= DF_ADDRUSED;

				/*
				 * XXX this is in wrong place
				 *
				 * ADDROF content-locked storage is not
				 * allowed, except for the SCOPE_LVALUE
				 * case if the underlying type is
				 * acceptable.
				 *
				 * If we are running through a LValueStor,
				 * UNTRACKED and UNLOCKED apply to it and
				 * not its contents.  Check to see if the
				 * contents are acceptable.
				 */
				if ((exFlags & EXF_REQ_ADDROF) &&
				    (d->d_Op & DOPF_STORAGE) &&
				    (d->d_Scope.s_Flags & (SCOPE_SOFT |
							   SCOPE_HARD))) {
					type = d->d_StorDecl.ed_Type;
					if ((type->ty_Flags & TF_HASLVPTR) &&
					    type->ty_Op != TY_CLASS &&
					    type->ty_Op != TY_ARYOF) {
						ExpFatalError(exp,
						    TOK_ERR_ILLEGAL_ADDRLOCKED);
					}
				}

				/*
				 * Misc.
				 */
				switch(d->d_Op) {
				case DOP_PROC:
					exType = d->d_ProcDecl.ed_Type;
					if (d->d_ProcDecl.ed_Type->ty_SQFlags &
					    SF_METHOD) {
						/*
						 * Method call, do not
						 * collapse the expression
						 * into a direct
						 * declaration because the
						 * object is needed later.
						 */
						if (exLhs->ex_Flags &
						    EXF_RET_TYPE)
							ExpPrintError(exLhs,
							    TOK_ERR_METHOD_REQUIRES_OBJ);
						dassert((exLhs->ex_Flags &
							 EXF_RET_TYPE) == 0);
					} else if (isRefTo) {
						/*
						 * Call via reference.
						 * The lhs is required to
						 * evaluate the actual
						 * method call at
						 * run-time.
						 */
					} else {
						/*
						 * Global method call or
						 * normal call.  For the
						 * global method case the
						 * lhs is not needed
						 * because the parser
						 * entered the first
						 * argument as a type
						 * already.
						 *
						 * Degenerate into a
						 * TOK_DECL.  We depend on
						 * this later.  (mark
						 * ex_Type as parse-time
						 * for DupExp).
						 */
						exFlags &= ~EXF_BINARY;
						exFlags |= EXF_PARSE_TYPE;
						exLhs = NULL;
						exRhs = NULL;
						exToken = TOK_DECL;
					}
					break;
				case DOP_ALIAS:
					exType = DEL_LVALUE(
						d->d_AliasDecl.ed_Type);
					dassert_decl(d,
					    d->d_AliasDecl.ed_AssExp != NULL);

					/*
					 * NOTE: exLhs must be NULL if exp
					 *	 is unresolved.  exp tree
					 *	 duplications do not
					 *	 duplicate the alias's
					 *	 exLHS even though UNARY
					 *	 is set.
					 *
					 * DupExp is absolutely required
					 * due to the alias's target
					 * context being different for
					 * each consumer.
					 */
					dassert_exp(exp,
						    exRhs->ex_Lhs == NULL);
					exRhs->ex_Flags |= EXF_ALIAS |
							   EXF_UNARY;
					exRhs->ex_Lhs = DupExp(sg2,
						d->d_AliasDecl.ed_AssExp);
					exRhs->ex_Lhs = ResolveExp(isg, sg2,
						exRhs->ex_Lhs, exType,
						flags | RESOLVE_AUTOCAST);
					break;
				case DOP_ARGS_STORAGE:
				case DOP_STACK_STORAGE:
				case DOP_GLOBAL_STORAGE:
				case DOP_GROUP_STORAGE:
					/*
					 * Set type.  The Rhs is a
					 * STRUCT_ID and does not require
					 * a type to be assigned to it.
					 *
					 * Return type is always an
					 * LVALUE, parent may adjust.
					 */
					exType = ADD_LVALUE(
						d->d_StorDecl.ed_Type);

					/*
					 * Pull up global constants
					 */
					if (exToken == TOK_DOT &&
					    d->d_Op == DOP_GLOBAL_STORAGE &&
					    (d->d_ScopeFlags &
					     SCOPE_CONSTANT) &&
					    (exLhs->ex_Flags &
					     EXF_RET_TYPE)) {
						exFlags |= EXF_PROBCONST;
					}
					break;
				case DOP_TYPEDEF:
					/*
					 * XXX make sure this is only used
					 * in the lhs of a structural
					 * reference. XXX
					 *
					 * XXX what if we went through a
					 * TY_REFTO type?  This type will
					 * be wrong.
					 *
					 * collapse the exp node.
					 */
					exType = d->d_TypedefDecl.ed_Type;
					exToken = TOK_DECL;
					exFlags &= ~EXF_BINARY;
					break;
				case DOP_IMPORT:
					/*
					 * Do not collapse an import, we
					 * require more resolution.  e.g.
					 * an import.<id> path will be
					 * collapsed, but a bare 'import'
					 * cannot be.
					 */
					if (exFlags & EXF_REQ_TYPE) {
						exType = AllocImportType(
						    &d->d_ImportDecl.ed_SemGroup->sg_ClassList,
						    d->d_ImportDecl.ed_SemGroup,
						    visibility);
						exFlags |= EXF_RET_TYPE;
						break;
					}
					break;
				case DOP_CLASS:
					/*
					 * Do not collapse a class, we
					 * require more resolution.  e.g.
					 * a class.<id> path will be
					 * collapsed, but a bare 'class'
					 * cannot be.
					 */
					if (exFlags & EXF_REQ_TYPE) {
						exType = AllocClassType(
						    &d->d_ClassDecl.ed_SemGroup->sg_ClassList,
						    d->d_ClassDecl.ed_Super,
						    d->d_ClassDecl.ed_SemGroup,
						    visibility);
						exFlags |= EXF_RET_TYPE;
						break;
					}
					break;
				default:
					dassert_exp(exp, 0);
					break;
				}
				if (d->d_Op == DOP_PROC) {
					if (d->d_ScopeFlags & SCOPE_PURE)
						couldconst = 1;
				} else if (exType->ty_SQFlags & SF_CONST) {
					couldconst = 1;
				}
			} else if ((s = StrTableSpecial(id)) &
				   SPECIALF_SEMGROUP) {
				/*
				 * Identifier not found, check for a
				 * special identifier.
				 */
				exRhs->ex_Token = TOK_SEMGRP_ID;
				exRhs->ex_Int32 = s;
				exDecl = NULL;

				switch(s) {
				case SPECIAL_NULL:
					dassert(type->ty_Op == TY_PTRTO ||
						type->ty_Op == TY_REFTO ||
						type->ty_Op == TY_CPTRTO);
					/* NULL is not an lvalue */
					exType = DEL_LVALUE(type);
					exFlags |= EXF_NULL;
					break;
				case SPECIAL_COUNT:
					dassert(type->ty_Op != TY_PTRTO &&
						type->ty_Op != TY_REFTO &&
						type->ty_Op != TY_CPTRTO);
					exType = &Int32Type;
					break;
				case SPECIAL_DATA:
					/*
					 * typeof(self.__data[]) vs
					 * (cast)self.__data[]
					 */
					dassert(type->ty_Op != TY_PTRTO &&
						type->ty_Op != TY_REFTO &&
						type->ty_Op != TY_CPTRTO);
					dassert(exFlags & EXF_REQ_ARRAY);
					exFlags |= EXF_RET_ARRAY;
					if (exFlags & EXF_REQ_TYPE) {
						exFlags |= EXF_RET_TYPE;
						exType = &DynamicLValueType;
					} else if (itype) {
						exType = itype;
					} else {
						/*
						 * dynamic data must be
						 * cast
						 */
						dassert_exp(exp, 0);
						exType = &DynamicLValueType;
					}
					break;
				case SPECIAL_VAR_COUNT:
					dassert(type->ty_Op != TY_PTRTO &&
						type->ty_Op != TY_REFTO &&
						type->ty_Op != TY_CPTRTO);
					exType = &Int32Type;
					sg->sg_Flags |= SGF_ABICALL;
					break;
				case SPECIAL_VAR_DATA:
					/*
					 * typeof(self.__vardata[]) vs
					 * (cast)self.__vardata[]
					 */
					dassert(type->ty_Op != TY_PTRTO &&
						type->ty_Op != TY_REFTO &&
						type->ty_Op != TY_CPTRTO);
					dassert(exFlags & EXF_REQ_ARRAY);
					exFlags |= EXF_RET_ARRAY;
					if (exFlags & EXF_REQ_TYPE) {
						exFlags |= EXF_RET_TYPE;
						exType = &DynamicLValueType;
					} else if (itype) {
						exType = itype;
					} else {
						/*
						 * dynamic data must be
						 * cast
						 */
						dassert_exp(exp, 0);
						exType = &DynamicLValueType;
					}
					sg->sg_Flags |= SGF_ABICALL;
					break;
				case SPECIAL_TYPEID:
					exType = &Int32Type;
					break;
				case SPECIAL_TYPESTR:
					exType = &StrType;
					break;
				default:
					dassert_exp(exRhs, 0);
					break;
				}
			} else {
				/*
				 * This is nasty, I admit.  If we have a
				 * pointer or reference type try again.
				 */
				exDecl = NULL;
				if (type->ty_Op == TY_PTRTO) {
					type = type->ty_PtrType.et_Type;
					procedureOnly = 1;
					goto again;
				}
				if (type->ty_Op == TY_REFTO) {
					type = type->ty_RefType.et_Type;
					procedureOnly = 1;
					goto again;
				}
				if (type->ty_Op == TY_CPTRTO) {
					type = type->ty_CPtrType.et_Type;
					procedureOnly = 1;
					goto again;
				}
				ExpFatalError(exRhs, eno);
				/* NOT REACHED */
			}
		}
		dassert_exp(exp, exType != NULL);
		break;
	case TOK_STRUCT_ID:
		/*
		 * NOTE: unresolved identifiers should not have alias
		 *	 expression sub-tree duplications attached to
		 *	 them.  assert it.
		 */
		dassert_exp(exp, exLhs == NULL);
		break;
	case TOK_OPER:
		/*
		 * NOTE: LVALUE/RVALUE for elements and return type
		 *	 depends on the operator.  Operator functions
		 *	 normally self-optimize the cases at run-time.
		 */
		couldconst = 1;
		exp = resolveExpOper(isg, sg, exp, itype,
				     flags & ~RESOLVE_AUTOCAST);
		break;
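	/*
	 * (TOK_OPER above hands the expression to resolveExpOper(),
	 * which, judging from the forward declarations at the top of
	 * this file, locates a matching operator declaration from the
	 * operand types via findExpOper()/findOper().)
	 */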
	case TOK_PTRIND:
		/*
		 * Indirect through an expression.
		 *
		 * Return type is typically an LVALUE (if representing
		 * storage).  Exp parent might turn it off so run-time
		 * must test.  Lhs may or may not be.
		 */
		{
			Type *type;

			exLhs = ResolveExp(isg, sg, exLhs, NULL,
					   flags & ~RESOLVE_AUTOCAST);
			type = exLhs->ex_Type;

			switch(type->ty_Op) {
			case TY_REFTO:
				if ((exFlags & EXF_INDREF) == 0) {
					fprintf(stderr, "You cannot use '*' on a reference type\n");
					dassert_exp(exLhs, 0);
				}
				exType = ADD_LVALUE(
					type->ty_RefType.et_Type);
				break;
			case TY_PTRTO:
				exType = ADD_LVALUE(
					type->ty_PtrType.et_Type);
				break;
			case TY_CPTRTO:
				exType = ADD_LVALUE(
					type->ty_CPtrType.et_Type);
				break;
			default:
				dassert_exp(exLhs, 0);
				break;
			}
		}
		break;
	case TOK_ADDR:
		/*
		 * Take the address of an (LVALUE) expression.  Returns
		 * an RVALUE.  Allow for a short-cut optimization which
		 * replaces the TOK_ADDR sequence with its argument in
		 * the &ary[n] case.
		 */
		{
			Type *type;

			/*
			 * Hint must 'always happen' since we may be
			 * modifying an expression that will later be
			 * Dup'd.
			 *
			 * It is sufficient to test EXF_ADDRUSED to
			 * determine if SRSGET/SRSPUT is needed for the
			 * procedure.
			 */
			exLhs->ex_Flags |= EXF_REQ_ADDROF | EXF_ADDRUSED;
			exLhs = ResolveExp(isg, sg, exLhs, NULL,
					   flags & ~RESOLVE_AUTOCAST);
			if (exLhs->ex_Flags & EXF_RET_ADDROF) {
				exp = exLhs;
			} else {
				type = exLhs->ex_Type;
				dassert_exp(exLhs,
					    type->ty_SQFlags & SF_LVALUE);
				exType = ResolveType(TypeToPtrType(type),
						     NULL, 0);
				/* DEL_LVALUE() not needed here */
			}
		}
		break;
	case TOK_OBRACKET:
		/*
		 * Array index, takes an RVALUE, returns an LVALUE.
		 *
		 * Note: we have to convert the special __data[exp] case.
		 *
		 * Note: ex_Flags hints must 'always happen' since we may
		 *	 be modifying an expression that will later be
		 *	 Dup'd.
		 */
		exRhs = ResolveExp(isg, sg, exRhs, NULL,
				   flags & ~RESOLVE_AUTOCAST);
		if (exRhs->ex_Flags & (EXF_CONST | EXF_PROBCONST)) {
			exRhs = resolveConstExp(isg, sg, exRhs,
						flags | RESOLVE_FAILOK);
		}
		exLhs->ex_Flags |= EXF_REQ_ARRAY | (exFlags & EXF_REQ_TYPE);
		exLhs->ex_Flags |= EXF_ADDRUSED
				   /* | (exFlags & EXF_REQ_ADDROF)*/;
		exLhs->ex_AuxExp = exRhs;
		exLhs = ResolveExp(isg, sg, exLhs, itype,
				   flags & ~RESOLVE_AUTOCAST);

		/*
		 * If we are indexing an actual array we have to retain
		 * EXF_ADDRUSED to prevent it from being cached in a
		 * register.  Otherwise we are indirecting through a
		 * pointer and not taking the address of the pointer
		 * itself.  (tests/cat.d uses gets() which is a good test
		 * of this).
		 */
		if (exLhs->ex_Type && exLhs->ex_Type->ty_Op != TY_ARYOF)
			exLhs->ex_Flags &= ~(EXF_ADDRUSED | EXF_REQ_ADDROF);

		if (MatchType(&IntegralType, exRhs->ex_Type) >=
		    SG_COMPAT_FAIL) {
			ExpPrintError(exRhs,
				      TOK_ERR_EXPECTED_INTEGRAL_TYPE);
			dassert_exp(exp, 0);
		}

		if (exLhs->ex_Flags & EXF_RET_ARRAY) {
			/*
			 * __data and __vardata specials
			 */
			/* don't modify ex_Token, EXF_DUPEXP might be set */
			/* exp->ex_Token = TOK_ERR_EXP_REMOVED; */
			return(exLhs);
		} else if (exFlags & EXF_REQ_ADDROF) {
			/*
			 * &ary[i] optimization - allows us to create
			 * a bounded pointer (returns an RVALUE).
			 */
			Type *type;

			exFlags |= EXF_RET_ADDROF;
			dassert((exLhs->ex_Flags & EXF_RET_TYPE) == 0);
			exLhs->ex_AuxExp = NULL;
			type = exLhs->ex_Type;
			switch(type->ty_Op) {
			case TY_ARYOF:
				type = type->ty_AryType.et_Type;
				break;
			case TY_CPTRTO:
				type = type->ty_CPtrType.et_Type;
				break;
			case TY_PTRTO:
				type = type->ty_PtrType.et_Type;
				break;
			case TY_REFTO:
				/* Cannot take address of a reference type */
				dassert_exp(exp, 0);
				break;
			}
			exType = ResolveType(TypeToPtrType(type), NULL, 0);
			/* returns an RVALUE */
		} else {
			/*
			 * Unoptimized array lookup, returns an lvalue
			 */
			Type *type;

			dassert((exLhs->ex_Flags & EXF_RET_TYPE) == 0);
			exLhs->ex_AuxExp = NULL;
			type = exLhs->ex_Type;
			switch(type->ty_Op) {
			case TY_ARYOF:
				type = type->ty_AryType.et_Type;
				break;
			case TY_CPTRTO:
				type = type->ty_CPtrType.et_Type;
				break;
			case TY_PTRTO:
				type = type->ty_PtrType.et_Type;
				break;
			case TY_REFTO:
				fprintf(stderr, "Cannot index a reference type\n");
				dassert_exp(exp, 0);
				break;
			}
			exType = ADD_LVALUE(type);
			/* returns an LVALUE */
		}
		break;
	case TOK_OPAREN:
		dassert_exp(exp, 0);	/* XXX */
		break;
	case TOK_DSTRING:
	case TOK_BSTRING:
		/*
		 * XXX we should return a bounded pointer here.
		 */
		exType = &StrType;
		exFlags |= EXF_CONST;
		couldconst = 1;
		if ((exFlags2 & EX2F_ESCDONE) == 0) {
			string_t id;

			exFlags2 |= EX2F_ESCDONE;
			id = StrTableEscapeQuotedString(exId,
							strlen(exId), 1);
			ReplaceStrTable(&exp->ex_Id, id);
		}
		break;
	case TOK_SSTRING:
		couldconst = 1;
		exType = &UInt8Type;	/* XXX make wide? */
		exFlags |= EXF_CONST;
		if ((exFlags2 & EX2F_ESCDONE) == 0) {
			string_t id;

			exFlags2 |= EX2F_ESCDONE;
			id = StrTableEscapeQuotedString(exId,
							strlen(exId), 0);
			dassert(StrTableLen(id) == 1);
			ReplaceStrTable(&exp->ex_Id, id);
		}
		break;
	case TOK_INTEGER:
		couldconst = 1;
		{
			char *ptr;
			int size = 4;

			strtol(exp->ex_Id, &ptr, 0);
			while (*ptr) {
				switch(*ptr) {
				case 'u':
				case 'U':
					size |= 0x1000;
					break;
				case 's':
				case 'S':
					size &= ~0xFFF;
					break;
				case 'b':
				case 'B':
					size = (size & ~0xFFF) | 1;
					break;
				case 'w':
				case 'W':
					size = (size & ~0xFFF) | 2;
					break;
				case 'i':
				case 'I':
					size = (size & ~0xFFF) | 4;
					break;
				case 'l':
				case 'L':
					size = (size & ~0xFFF) | 8;
					break;
				case 'x':
				case 'X':
					size = (size & ~0xFFF) | 16;
					break;
				case 'z':
				case 'Z':
					size = (size & ~0xFFF) |
					       sizeof(runesize_t);
					break;
				default:
					ExpFatalError(exp,
					    TOK_ERR_ILLEGAL_SUFFIX);
					/* NOT REACHED */
					break;
				}
				++ptr;
			}
			switch(size) {
			case 1:
				exType = &Int8Type;
				break;
			case 2:
				exType = &Int16Type;
				break;
			case 4:
				exType = &Int32Type;
				break;
			case 8:
				exType = &Int64Type;
				break;
			case 16:
				exType = &Int128Type;
				break;
			case 0x1000 | 1:
				exType = &UInt8Type;
				break;
			case 0x1000 | 2:
				exType = &UInt16Type;
				break;
			case 0x1000 | 4:
				exType = &UInt32Type;
				break;
			case 0x1000 | 8:
				exType = &UInt64Type;
				break;
			case 0x1000 | 16:
				exType = &UInt128Type;
				break;
			default:
				exType = &Int32Type;
				break;
			}
		}
		exFlags |= EXF_CONST;
		break;
	case TOK_FLOAT:
		couldconst = 1;
		{
			char *ptr;

			exType = &Float64Type;
			strtod(exp->ex_Id, &ptr);
			while (*ptr) {
				switch(*ptr) {
				case 'f':
				case 'F':
					exType = &Float32Type;
					break;
				case 'd':
				case 'D':
					exType = &Float64Type;
					break;
				case 'x':
				case 'X':
					exType = &Float128Type;
					break;
				}
				++ptr;
			}
		}
		exFlags |= EXF_CONST;
		break;
	case TOK_VOIDEXP:
		exType = &VoidType;
		break;
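	/*
	 * (Worked examples for the TOK_INTEGER/TOK_FLOAT suffix parsing
	 * above: '100b' is Int8, '100ub' is UInt8, '100u' is UInt32,
	 * '100l' is Int64, and an unsuffixed '100' defaults to Int32;
	 * '1.5f' is Float32 and an unsuffixed '1.5' defaults to
	 * Float64.)
	 */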
*/ exType = ADD_LVALUE(resolveArgsType(sg, flags)); break; case TOK_DOLLAR: /* * The '$' identifier represents the current procedure's * return storage. */ if (sg->sg_Flags & SGF_DIDRESULT) ExpFatalError(exp, TOK_ERR_RESULT_SEQUENCING); exType = ADD_LVALUE(resolveReturnType(sg, flags)); break; case TOK_ID: case TOK_CLASSID: /* * Look up the identifier. The returned declaration could * represent a class, typedef, module, or storage, but for * this case we only allow storage or a constant. Since * we are starting from our own semantic group, visibility * is initially ALL (private, library, and public). * * The identifier might represent something at a higher scoping * layer. For example, a nested procedure accessing a variable * in the parent procedure or a method procedure in a class * accessing an element of the object. * * It is also possible for the current execution scoping layer * (sg) to have a secondary contextual layer from which global * constants can be accessed. This is typically set when * resolving procedure arguments for procedures called through * objects or types. Only type globals can be accessed via * this shortcut. * * This returns an LVALUE if the id represents storage. */ { string_t ary[2]; int eno = TOK_ERR_ID_NOT_FOUND; exDecl = NULL; /* * Special case 'super'. XXX TY_REFTO * * Make an in-place change to the expression * structure. 'super' is actually 'this' with the * EXF_SUPER flag set. */ if (exId == String_Super) { exId = String_This; ReplaceStrTable(&exp->ex_Id, exId); exFlags |= EXF_SUPER; } ary[0] = exp->ex_Id; ary[1] = NULL; exDecl = FindDeclPath(&exp->ex_LexRef, isg, sg, NULL, ary, FDC_NULL, &exVisibility, -1, &eno); if (exDecl == NULL) { exDecl = FindDeclPathAltContext( &exp->ex_LexRef, isg, sg, NULL, ary, FDC_NULL, &exVisibility, -1, &eno); } if (exDecl == NULL) { ExpPrintError(exp, eno); dassert_exp(exp, 0); } /* * The EXF flag is set by TOK_ADDR, possibly * propagated down via TOK_DOT. Use this to flag * that the stack context might be used outside of * its normal life. LValue scoped declarations do * not count because they have their own RefStor. * * (This code is primarily responsible for causing * SRSGET and SRSPUT instructions to be emitted). */ if ((exFlags & EXF_ADDRUSED) && (exDecl->d_Scope.s_Flags & SCOPE_LVALUE) == 0) { exDecl->d_MyGroup->sg_Flags |= SGF_ADDRUSED; } /* * We have to resolve the declaration here; we * no longer have the redundancy to resolve it * elsewhere. */ #if 1 if ((exDecl->d_Flags & DF_RESOLVING) == 0) ResolveDecl(exDecl, 0); #endif #if 0 /* * Try to delay resolving the procedure declaration * (which will resolve the procedure body). We cannot * delay the resolution if resolving a constant * that the resolver needs immediately. */ if (flags & RESOLVE_CONSTEXP) { ResolveDecl(exDecl, 0); } #endif } /* * Taking the address of content-locked storage is illegal. * * If we are running through an LValueStor, UNTRACKED and * UNLOCKED apply to it and not its contents. Check to see * if the contents are acceptable.
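 *
 * Illustrative sketch (hypothetical declaration, not from the
 * original source): if 'x' is content-locked (SCOPE_SOFT or
 * SCOPE_HARD) storage whose type itself contains tracked
 * pointers, an expression such as &x fails below with
 * TOK_ERR_ILLEGAL_ADDRLOCKED; class and array objects are
 * exempted because the lock applies to the LValueStor rather
 * than to its contents.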
*/ if ((exFlags & EXF_REQ_ADDROF) && (exDecl->d_Scope.s_Flags & (SCOPE_SOFT | SCOPE_HARD))) { Type *type = exDecl->d_StorDecl.ed_Type; if ((type->ty_Flags & TF_HASLVPTR) && type->ty_Op != TY_CLASS && type->ty_Op != TY_ARYOF) { ExpPrintError(exp, TOK_ERR_ILLEGAL_ADDRLOCKED); dassert_exp(exp, 0); } } switch(exDecl->d_Op) { case DOP_ARGS_STORAGE: if (sg->sg_Flags & SGF_DIDRESULT) ExpFatalError(exp, TOK_ERR_RESULT_SEQUENCING); /* fall through */ case DOP_STACK_STORAGE: case DOP_GLOBAL_STORAGE: case DOP_GROUP_STORAGE: /* * Storage identifiers are lvalues. * * Try to delay this step, giving the language more * flexibility in avoiding resolver loops from * interdependencies that can cause it to fail. * * We can't delay this step when resolving an * expression that the resolver needs an actual * constant result for. */ exType = ADD_LVALUE(exDecl->d_StorDecl.ed_Type); if (exFlags & EXF_ADDRUSED) exDecl->d_Flags |= DF_ADDRUSED; if (exFlags & EXF_REQ_ADDROF) exDecl->d_Flags |= DF_ADDROF; if (exType->ty_SQFlags & SF_CONST) couldconst = 1; #if 0 if (flags & RESOLVE_CONSTEXP) { Exp **asexpp = &exDecl->d_StorDecl.ed_AssExp; if (*asexpp) { *asexpp = DupExp(sg, *asexpp); *asexpp = ResolveExp(isg, sg, *asexpp, DEL_LVALUE(exType), flags | RESOLVE_AUTOCAST); *asexpp = SetDupExp(sg, *asexpp); } } #endif break; case DOP_ALIAS: /* * Aliases are rvalues (even if they could be lvalues). */ exType = DEL_LVALUE(exDecl->d_AliasDecl.ed_Type); exFlags |= EXF_ALIAS | EXF_UNARY; /* * NOTE: exLhs must be NULL if exp is unresolved. * exp tree duplications do not duplicate * the alias's exLHS even though UNARY is set. * However, because we probably have not * actually duplicated exp yet, we have to * clear the field in our pre-dup copy. * * NOTE: DupExp is absolutely required due * to the alias's target context being * different for each consumer. */ if (exFlags & EXF_DUPEXP) exLhs = NULL; dassert_exp(exp, exLhs == NULL); exLhs = DupExp(sg, exDecl->d_AliasDecl.ed_AssExp); exLhs = ResolveExp(isg, sg, exLhs, exType, flags | RESOLVE_AUTOCAST); /* * Inherit EXF_NULL (NULL pointer special) through * the alias, otherwise it will not be assignable * to arbitrary pointers. */ exFlags |= exLhs->ex_Flags & EXF_NULL; break; case DOP_PROC: /* * A procedural identifier. * * Note: procedural pointers cannot be changed so * they are not lvalues. */ dassert_exp(exp, (exFlags & EXF_REQ_PROC)); exType = exDecl->d_ProcDecl.ed_Type; if (exDecl->d_ScopeFlags & SCOPE_PURE) couldconst = 1; break; case DOP_TYPEDEF: if (exFlags & EXF_REQ_TYPE) { exType = exDecl->d_TypedefDecl.ed_Type; exFlags |= EXF_RET_TYPE; break; } dassert_exp(exp, 0); break; case DOP_CLASS: if (exFlags & EXF_REQ_TYPE) { exType = AllocClassType( &exDecl->d_ClassDecl.ed_SemGroup->sg_ClassList, exDecl->d_ClassDecl.ed_Super, exDecl->d_ClassDecl.ed_SemGroup, exVisibility); exFlags |= EXF_RET_TYPE; break; } dassert_exp(exp, 0); break; case DOP_IMPORT: if (exFlags & EXF_REQ_TYPE) { exType = AllocImportType( &exDecl->d_ImportDecl.ed_SemGroup->sg_ClassList, exDecl->d_ImportDecl.ed_SemGroup, exVisibility); exFlags |= EXF_RET_TYPE; break; } dassert_exp(exp, 0); break; default: dassert_exp(exp, 0); } break; case TOK_NOT: /* * NOTE: BoolType global implies an rvalue. */ couldconst = 1; exLhs = ResolveExp(isg, sg, exLhs, &BoolType, flags | RESOLVE_AUTOCAST); break; case TOK_TYPE: if (exFlags & EXF_REQ_TYPE) { ResolveType(exType, NULL, 0); exFlags |= EXF_RET_TYPE; } else { dassert_exp(exp, 0); } break; case TOK_CAST: /* * User cast (or maybe the parser inserted it). 
Try to * resolve the expression with the requested type hint * but tell ResolveExp() not to force the cast. * * Then check the result. If ResolveExp() was not able to * optimize the requested cast then resolve the cast. * * If the types are compatible we still keep the TOK_CAST * node in place for the moment. XXX we really need to * formalize how ex_Type is set, Similar vs Exact. * * NOTE: Cast results are always an RVALUE. XXX validate here. */ couldconst = 1; if ((exFlags & EXF_PARSE_TYPE) == 0) { exRhs->ex_Flags |= EXF_REQ_TYPE; exRhs = ResolveExp(isg, sg, exRhs, NULL, flags & ~RESOLVE_AUTOCAST); exType = exRhs->ex_Type; } exLhs = ResolveExp(isg, sg, exLhs, exType, flags & ~RESOLVE_AUTOCAST); if (SimilarType(exType, exLhs->ex_Type) == 0) { exp = resolveExpCast(isg, sg, exLhs, exType, flags); } #if 0 /* propagate NULL flag to allow cast to any pointer type */ if (exLhs->ex_Flags & EXF_NULL) printf("LHS NULL\n"); exp->ex_Flags |= exLhs->ex_Flags & EXF_NULL; #endif break; case TOK_CALL: /* * Calls require the RHS to be a compound expression * representing the procedure arguments. * * XXX deal with pointer-to-function versus function * XXX the lhs must at the moment resolve to the procedure * itself. * * In regard to procedure pointers, the declaration * will require a pointer to the procedure's statement * body. XXX this pointer can be the physical storage * associated with the lhs data but thus requires the * type to be a pointer. We do not support the 'C' * (*ptr_to_func)(...) form. You have to use ptr_to_func(...). */ { Type *ltype; Type *atype; /* type for alt context */ SemGroup *save_asg; /* save old alt context */ dassert_exp(exRhs, exRhs->ex_Token == TOK_COMPOUND); /* * Note: ex_Flags hints must 'always happen' since * we may be modifying an expression that will later * be Dup'd. */ exLhs->ex_Flags |= EXF_REQ_PROC; exLhs->ex_Flags |= EXF_ADDRUSED; exLhs = ResolveExp(isg, sg, exLhs, NULL, flags & ~RESOLVE_AUTOCAST); ltype = exLhs->ex_Type; #if 0 if (ltype->ty_Op == TY_PTRTO) ltype = type->ty_PtrType.et_Type; /* XXX */ #endif #if 0 dassert_exp(exLhs, exLhs->ex_Token == TOK_DECL || exLhs->ex_Token == TOK_ID); #endif dassert_exp(exLhs, ltype != NULL && ltype->ty_Op == TY_PROC); dassert_exp(exLhs, exLhs->ex_Decl != NULL); dassert_exp(exRhs, exRhs->ex_Token == TOK_COMPOUND); /* * If the lhs type indicates a method procedure, then * its lhs is the object we wish to pass as the * first argument to the method. We move the lhs's lhs * exp. For a STRIND TY_PTRTO method call we * indirect the element and convert it to a TOK_DOT * lvalue argument of the underlying object. * * A method call via a reference object is a very * weird case. * * Since the method called through an object winds up * being a method tailored for that object, and we * are calling through a reference to an object, * the actual method will be looked up at run time * and will match the object. Thus we can safely * indirect through the reference object for this * one case. Since (*ref_obj) is not normally * allowed this will be special-cased at * compile/run-time. * * Note that this occurs before we evaluate the * compound expression on the right hand side. Also * note that since the resolver can be called multiple * times on a shared expression, we have to be * careful to shift the arguments around only once.
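 *
 * Illustrative sketch (hypothetical names, not from the
 * original source): a method call written as
 *
 *	obj.method(a, b)
 *
 * is rewritten below so that 'obj' is prepended to the compound
 * RHS, effectively method(obj, a, b); when calling through a
 * pointer the object is first wrapped in a TOK_PTRIND node so
 * the method receives the underlying object as an lvalue.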
*/ if ((ltype->ty_SQFlags & SF_METHOD) && (exRhs->ex_Flags & EXF_CALL_CONV) == 0 ) { Exp *obj; exRhs->ex_Flags |= EXF_CALL_CONV; switch(exLhs->ex_Token) { case TOK_STRIND: /* indirect */ /* * NOTE: Do not set EXF_RESOLVED, we * need to call the resolver to * properly propagate ADDRUSED. */ obj = exLhs->ex_Lhs; if (methodProcThisIsPointer(ltype)) { ; } else if (obj->ex_Type->ty_Op == TY_PTRTO) { Exp *nexp = AllocExp(NULL); nexp->ex_Lhs = obj; nexp->ex_Token = TOK_PTRIND; nexp->ex_Type = ADD_LVALUE(obj->ex_Type->ty_PtrType.et_Type); nexp->ex_Flags |= EXF_UNARY; LexDupRef(&obj->ex_LexRef, &nexp->ex_LexRef); exLhs->ex_Token = TOK_DOT; obj = nexp; } else if (obj->ex_Type->ty_Op == TY_CPTRTO) { Exp *nexp = AllocExp(NULL); nexp->ex_Lhs = obj; nexp->ex_Token = TOK_PTRIND; nexp->ex_Type = ADD_LVALUE(obj->ex_Type->ty_CPtrType.et_Type); nexp->ex_Flags |= EXF_UNARY; LexDupRef(&obj->ex_LexRef, &nexp->ex_LexRef); exLhs->ex_Token = TOK_DOT; obj = nexp; } else if (obj->ex_Type->ty_Op == TY_REFTO) { Exp *nexp = AllocExp(NULL); nexp->ex_Lhs = obj; nexp->ex_Token = TOK_PTRIND; nexp->ex_Type = ADD_LVALUE(obj->ex_Type->ty_RefType.et_Type); nexp->ex_Flags |= EXF_UNARY | EXF_INDREF; LexDupRef(&obj->ex_LexRef, &nexp->ex_LexRef); obj = nexp; } else { dassert_exp(obj, 0); } break; case TOK_DOT: /* * Pass directly as an lvalue. If this * is a pointer or reference only the * builtin methods for the Pointer * class are possible. These methods * require a content-locked pointer. */ obj = exLhs->ex_Lhs; if (obj->ex_Type->ty_Op != TY_PTRTO && obj->ex_Type->ty_Op != TY_REFTO) { break; } break; default: dassert_exp(exp, 0); obj = NULL; break; } /* * Make sure atype survives DupExp(). */ obj->ex_Flags |= EXF_PARSE_TYPE; atype = obj->ex_Type; /* * Leave the lhs intact, but set the * duplication flag in case things get * nasty later. */ exLhs->ex_Lhs = SetDupExp(sg, exLhs->ex_Lhs); obj->ex_Next = exRhs->ex_Lhs; exRhs->ex_Lhs = obj; } else if (ltype->ty_SQFlags & SF_METHOD) { Exp *obj; obj = exRhs->ex_Lhs; atype = obj->ex_Type; } else { atype = NULL; } /* * Try to set an alternative search context during * resolution of the procedure arguments. This context * is only searched if an identifier cannot be found * through normal means so local variables and such * will override it as the programmer should expect. * Since the local semantic stack is under the * programmer's control, unexpected collisions should * either not occur or be easily fixed. */ if (atype) { switch(atype->ty_Op) { case TY_REFTO: atype = atype->ty_PtrType.et_Type; break; case TY_PTRTO: atype = atype->ty_PtrType.et_Type; break; } if (atype->ty_Op != TY_CLASS) atype = NULL; } if (atype) { save_asg = sg->sg_AltContext; sg->sg_AltContext = atype->ty_ClassType.et_SemGroup; } else { save_asg = NULL; } /* * Resolve the right hand side, which are the * procedure arguments as a compound type. This * can get tricky. XXX * * NOTE: We inherit the SF_LVALUE flag from the * return type. Parent might turn it off. */ /*d = exLhs->ex_Decl;*/ exRhs = ResolveExp(isg, sg, exRhs, ltype->ty_ProcType.et_ArgsType, flags | RESOLVE_AUTOCAST); exType = ltype->ty_ProcType.et_RetType; if (atype) { /* * Restore AltContext after resolving rhs. */ sg->sg_AltContext = save_asg; } else if ((exRhs->ex_Flags & (EXF_CONST | EXF_PROBCONST)) && (exLhs->ex_Decl->d_ScopeFlags & SCOPE_PURE)) { /* * atype NULL (not method call, which requires * an object), arguments can become constants, * pure function, so result can become a * constant. 
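 *
 * Illustrative sketch (hypothetical procedure, not from the
 * original source): for a call such as sum(2, 3), where sum()
 * is declared pure and both arguments resolve as constants,
 * EXF_PROBCONST set below allows resolveConstExp() to later
 * fold the entire call into a single constant.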
*/ exFlags |= EXF_PROBCONST; } /* * Additional work to inline the procedure */ resolveDynamicProcedure(isg, sg, exp, flags); resolveProcedureInline(isg, sg, exp, flags); } break; case TOK_INLINE_CALL: /* * An inlined call has already resolved via TOK_CALL. It * will not be a constant, and any argument modifications * have already been performed. */ { Type *ltype; Declaration *d; Type *atype; /* type for alt context */ SemGroup *save_asg; /* save old alt context */ exLhs->ex_Flags |= EXF_REQ_PROC; exLhs->ex_Flags |= EXF_ADDRUSED; exLhs = ResolveExp(isg, sg, exLhs, NULL, flags & ~RESOLVE_AUTOCAST); d = exLhs->ex_Decl; ltype = exLhs->ex_Type; dassert(ltype); /* * Try to set an alternative search context during * resolution of the procedure arguments. This context * is only searched if an identifier cannot be found * through normal means so local variables and such * will override it as the programmer should expect. * Since the local semantic stack is under the * programmer's control, unexpected collisions should * either not occur or be easily fixed. */ if (ltype->ty_SQFlags & SF_METHOD) { Exp *obj; obj = exRhs->ex_Lhs; atype = obj->ex_Type; } else { atype = NULL; } if (atype) { switch(atype->ty_Op) { case TY_REFTO: atype = atype->ty_PtrType.et_Type; break; case TY_PTRTO: atype = atype->ty_PtrType.et_Type; break; } if (atype->ty_Op != TY_CLASS) atype = NULL; } if (atype) { save_asg = sg->sg_AltContext; sg->sg_AltContext = atype->ty_ClassType.et_SemGroup; } else { save_asg = NULL; } exRhs = ResolveExp(isg, sg, exRhs, ltype->ty_ProcType.et_ArgsType, flags | RESOLVE_AUTOCAST); if (atype) { sg->sg_AltContext = save_asg; } exType = ltype->ty_ProcType.et_RetType; ResolveStmt(d->d_ImportSemGroup, exp->ex_AuxStmt, flags); } break; case TOK_COMPOUND: /* * (NOTE EARLY RETURN) * * A compound expression should always be an RVALUE, but * might contain LVALUEs (XXX). */ couldconst = 1; exp = resolveCompoundExp(isg, sg, exp, itype, flags); return(exp); /* not reached */ case TOK_BRACKETED: /* * (NOTE EARLY RETURN) */ couldconst = 1; exp = resolveBracketedExp(isg, sg, exp, itype, flags); return(exp); /* not reached */ case TOK_TYPEOF: /* * The caller must be able to handle a type return when * typeof() is used. */ dassert_exp(exp, exFlags & EXF_REQ_TYPE); /* fall through */ case TOK_SIZEOF: case TOK_ARYSIZE: /* * If an expression was supplied, convert it to a type. * * NOTE: ex_Flags hints must 'always happen' since we may be * modifying an expression that will later be Dup'd. 
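 *
 * Illustrative sketch (not from the original source): given
 * 'int32 x', sizeof(x) folds below into a TOK_INTEGER constant
 * of SizeType (the type's ty_Bytes), and for 'int32 ary[8]' the
 * array size folds to ty_Bytes divided by the element size,
 * i.e. 8. typeof(x) instead returns the resolved type itself
 * via EXF_RET_TYPE.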
*/ couldconst = 1; if ((exFlags & EXF_RET_TYPE) == 0) { dassert(exLhs != NULL); exLhs->ex_Flags |= EXF_REQ_TYPE; exLhs = ResolveExp(isg, sg, exLhs, NULL, flags & ~RESOLVE_AUTOCAST); exType = exLhs->ex_Type; #if 1 /* do not clear EXF_UNARY, messes up tmp exp storage */ /* exFlags &= ~EXF_UNARY; */ #endif exFlags |= EXF_RET_TYPE; /* XXX delete the lhs */ } else { ResolveType(exType, NULL, 0); } switch(exToken) { case TOK_SIZEOF: exId = StrTableInt(exType->ty_Bytes); exp->ex_Token = TOK_INTEGER; exType = &SizeType; exFlags &= ~EXF_RET_TYPE; exFlags |= EXF_CONST; break; case TOK_ARYSIZE: dassert_exp(exp, (exType->ty_Flags & TF_RESOLVING) == 0); dassert_exp(exp, exType->ty_Op == TY_ARYOF); if (exType->ty_AryType.et_Type->ty_Bytes) { exId = StrTableInt(exType->ty_Bytes / exType->ty_AryType.et_Type->ty_Bytes); } else { exId = StrTableInt(0); } exp->ex_Token = TOK_INTEGER; exType = &SizeType; exFlags &= ~EXF_RET_TYPE; exFlags |= EXF_CONST; /* exLhs = NULL; */ break; case TOK_TYPEOF: /* type is returned */ break; } break; default: dassert_exp(exp, 0); break; } /* * Ensure that the cast target type is resolved. */ if (exType) { ResolveType(exType, NULL, 0); /* XXX exType was ex_Type */ /* * If the type hint did not succeed we may have to cast the * expression to the requested type. Note that if the itype * was set as part of an array optimization request which could * not be handled, we must ignore itype. * * Note that SimilarType() will allow exp->ex_Type to be a * var-args TY_ARGS, and since the original Rhs of a call * is set to the procedure arguments type, VarType.et_Type * should match exactly. */ if (itype && (exFlags & (EXF_REQ_ARRAY|EXF_RET_ARRAY)) != EXF_REQ_ARRAY ) { if ((itype->ty_Flags & TF_RESOLVED) == 0) ResolveType(itype, NULL, 0); if ((itype->ty_SQFlags & SF_LVALUE) && (exType->ty_SQFlags & SF_LVALUE) == 0 ) { /* XXX */ fprintf(stderr, "Exp must be an lvalue here\n"); dassert_exp(exp, 0); } if (!SimilarType(itype, exType) && (flags & RESOLVE_AUTOCAST)) { if (exp->ex_Flags & EXF_DUPEXP) { Exp *nexp = AllocExp(NULL); nexp->u = exp->u; LexDupRef(&exp->ex_LexRef, &nexp->ex_LexRef); exp = nexp; exFlags &= ~EXF_DUPEXP; /*exp = DupExp(sg, exp);*/ } exFlags |= EXF_RESOLVED; exp = resolveExpCast(isg, sg, exp, itype, flags); } } } /* * Generic constant evaluation flag. Note that EXF_PROBCONST * could also be set above (TOK_CALL). */ if (couldconst && (exLhs == NULL || (exLhs->ex_Flags & (EXF_CONST|EXF_PROBCONST))) && (exRhs == NULL || (exRhs->ex_Flags & (EXF_CONST|EXF_PROBCONST)))) { exp->ex_Flags |= EXF_PROBCONST; } exp->ex_Flags |= EXF_RESOLVED; return(exp); } /* * Resolve an expression for which the resolver needs the result * immediately. 
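 *
 * Illustrative sketch (not from the original source): an array
 * dimension such as 'int8 buf[4096/8]' reaches this function
 * via ResolveType() on the TY_ARYOF type; the dimension must be
 * known right away, so the expression is aligned, given
 * temporary storage, and executed with a private RunContext
 * below rather than waiting for normal deferred resolution.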
*/ Exp * resolveConstExp(SemGroup *isg, SemGroup *sg, Exp *exp, int flags) { runesize_t tmpbytes; runesize_t tmpalign; runesize_t ooffset; int oflags; flags &= ~RESOLVE_AUTOCAST; if ((exp->ex_Flags & EXF_RESOLVED) == 0) { exp = ResolveExp(isg, sg, exp, NULL, flags); } #if 0 /* XXX can't do this atm, it messes up ARYSIZE resolving */ if (ResPass == 0) { exp->ex_Flags &= ~EXF_RESOLVED; return exp; } #endif if ((exp->ex_Flags & EXF_RESOLVED) == 0) { printf("early resolve failed\n"); return exp; } oflags = exp->ex_Flags; ooffset = exp->ex_TmpOffset; tmpbytes = 0; tmpalign = 0; resolveExpAlign(exp, &tmpalign, RESOLVE_CONSTEXP); resolveStorageExp(exp, 0, &tmpbytes); if ((exp->ex_Flags & (EXF_CONST | EXF_PROBCONST)) == 0) { if (flags & RESOLVE_FAILOK) return exp; ExpPrintError(exp, TOK_ERR_EXPECTED_INTEGRER_CONST); dassert_exp(exp, 0); } /* * Special interpreter execution to resolve the expression. */ { RunContext ct; size_t align; rundata_t data; union { int64_t tmpbuf[128]; float128_t tmpflt[64]; } u; bzero(&ct, sizeof(ct)); ct.ct_Flags |= CTF_RESOLVING; /* * NOTE: minimum alignment for posix_memalign() is * sizeof(void *). */ align = sg->sg_TmpAlignMask + 1; if (align < sizeof(void *)) align = sizeof(void *); if (sg->sg_TmpBytes < (runesize_t)sizeof(u.tmpbuf) && sg->sg_TmpAlignMask < (runesize_t)sizeof(float128_t)) ct.ct_TmpData = (char *)u.tmpbuf; else posix_memalign((void *)&ct.ct_TmpData, align, sg->sg_TmpBytes); ct.ct_TmpBytes = sg->sg_TmpBytes; ct.ct_CtxRefStor.rs_Refs = 1; exp->ex_Run(&ct, &data, exp); if ((exp->ex_Flags & EXF_CONST) == 0) { ExpPrintError(exp, TOK_ERR_EXPECTED_INTEGRER_CONST); dassert_exp(exp, 0); } if (ct.ct_TmpData != (char *)u.tmpbuf) free(ct.ct_TmpData); } /* * exp is now a constant, restore the original ex_TmpOffset * for normal execution/operation (the storage may be needed for * large constants). */ if (oflags & EXF_TMPRESOLVED) { exp->ex_TmpOffset = ooffset; /*resolveStorageExp(exp, &tmpbytes);*/ } else { exp->ex_TmpOffset = -1; exp->ex_Flags &= ~EXF_TMPRESOLVED; } resolveExpAlign(exp, &tmpalign, RESOLVE_CLEAN); return exp; } __unused Exp * resolveConstExpBool(SemGroup *isg, SemGroup *sg, Exp *exp, int flags, RunTmpStor *ts) { runesize_t tmpbytes; runesize_t tmpalign; runesize_t ooffset; int oflags; flags &= ~RESOLVE_AUTOCAST; if ((exp->ex_Flags & EXF_RESOLVED) == 0) { exp = ResolveExp(isg, sg, exp, NULL, flags); } /* * [re]-resolve the storage from 0 so we can execute the expression. */ oflags = exp->ex_Flags; ooffset = exp->ex_TmpOffset; tmpbytes = 0; tmpalign = 0; resolveExpAlign(exp, &tmpalign, RESOLVE_CONSTEXP); resolveStorageExp(exp, 0, &tmpbytes); if ((exp->ex_Flags & (EXF_CONST | EXF_PROBCONST)) == 0) { ExpPrintError(exp, TOK_ERR_EXPECTED_INTEGRER_CONST); dassert_exp(exp, 0); } /* * Special interpreter execution to resolve the expression. */ { RunContext ct; RunTmpStor *rts; rundata_t data; union { int64_t tmpbuf[128]; float128_t tmpflt[64]; } u; bzero(&ct, sizeof(ct)); ct.ct_Flags |= CTF_RESOLVING; /* * NOTE: minimum alignment for posix_memalign() is * sizeof(void *). 
* * XXX */ if (tmpbytes < (runesize_t)sizeof(u.tmpbuf)) ct.ct_TmpData = (char *)u.tmpbuf; else posix_memalign((void *)&ct.ct_TmpData, 16, tmpbytes); ct.ct_TmpBytes = tmpbytes; ct.ct_CtxRefStor.rs_Refs = 1; exp->ex_Run(&ct, &data, exp); rts = data.data; if ((exp->ex_Flags & EXF_CONST) == 0) { ExpPrintError(exp, TOK_ERR_EXPECTED_INTEGRER_CONST); dassert_exp(exp, 0); } ts->ts_Bool = rts->ts_Bool; if (ct.ct_TmpData != (char *)u.tmpbuf) free(ct.ct_TmpData); } /* * exp is now a constant, restore the original ex_TmpOffset * for normal execution/operation (the storage may be needed for * large constants). */ if (oflags & EXF_TMPRESOLVED) { exp->ex_TmpOffset = ooffset; tmpbytes = 0; resolveStorageExp(exp, exp->ex_TmpOffset, &tmpbytes); } else { exp->ex_TmpOffset = -1; exp->ex_Flags &= ~EXF_TMPRESOLVED; } resolveExpAlign(exp, &tmpalign, RESOLVE_CLEAN); return exp; } /* * Extract constant from already-constant-resolved expression. * resolveConstExp() must have previously been called on exp. * * Expression must have already been constant-optimized, meaning * that we should be able to execute it without a context to access * the cached results in exp->u. * * (This can also be called by the generator) */ int64_t resolveGetConstExpInt64(Exp *exp) { rundata_t data; int64_t value; dassert_exp(exp, (exp->ex_Flags & EXF_CONST)); exp->ex_Run(NULL, &data, exp); if (exp->ex_Type->ty_Flags & TF_ISUNSIGNED) { switch(exp->ex_Type->ty_Bytes) { case 1: value = *(uint8_t *)data.data; break; case 2: value = *(uint16_t *)data.data; break; case 4: value = *(uint32_t *)data.data; break; case 8: value = *(uint64_t *)data.data; break; default: value = 0; dassert_exp(exp, 0); break; } } else { switch(exp->ex_Type->ty_Bytes) { case 1: value = *(int8_t *)data.data; break; case 2: value = *(int16_t *)data.data; break; case 4: value = *(int32_t *)data.data; break; case 8: value = *(int64_t *)data.data; break; default: value = 0; dassert_exp(exp, 0); break; } } return value; } float128_t resolveGetConstExpFloat128(Exp *exp) { rundata_t data; float128_t value; dassert_exp(exp, exp->ex_Token == TOK_FLOAT || (exp->ex_Flags & EXF_CONST)); exp->ex_Run(NULL, &data, exp); switch(exp->ex_Type->ty_Bytes) { case 4: value = (float128_t)*(float32_t *)data.data; break; case 8: value = (float128_t)*(float64_t *)data.data; break; case 16: value = *(float128_t *)data.data; break; default: value = 0; dassert_exp(exp, 0); break; } return value; } /* * resolveCompoundExp() - resolve a compound expression (called from * ResolveExp() and resolveExpOper()). * * Resolve a compound expression. Compound expressions require * a compound type to normalize against. This will work for * direct assignments, return values, casts, and procedure arguments * only. * * NOTE: We can't use itype if EXF_REQ_ARRAY is specified because * it is hinting for the array optimization case, which we cannot do. * * Compound expressions may be used in conjunction with types * representing classes, compound types, and procedure arguments. The * compound expression may contain subclasses of the superclasses expected * by itype. This is only allowed if the procedure's body has not yet been * generated (for example, a method call in a subclass). * * Partially resolved operators are typically converted into procedure calls * and method calls are also partially resolved, so some elements may already * be resolved. * * XXX named initialization, missing elements (structural * initialization), and so forth need to be dealt with.
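 *
 * Illustrative sketch (hypothetical procedure, not from the
 * original source): given
 *
 *	myproc(int32 a, float64 b)
 *
 * the compound expression (1, 2) is normalized below against
 * the args SemGroup, coercing 1 to int32 and 2 to float64;
 * extra trailing arguments are only accepted when the SemGroup
 * is marked SGF_VARARGS.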
*/ Exp * resolveCompoundExp(SemGroup *isg, SemGroup *sg, Exp *exp, Type *itype, int flags) { Exp **pscan; Exp *scan; Declaration *d; SemGroup *sg2; int varargs = 0; int isconst = 1; Type *type; Type *stype; flags &= ~RESOLVE_AUTOCAST; /* not applicable to this function */ /* * Expression dup()ing */ if (exp->ex_Flags & EXF_DUPEXP) { #if DUPEXP_DEBUG static int count; fprintf(stderr, "DUPEXPC %d\n", ++count); #endif exp = DupExp(sg, exp); } if (itype && (exp->ex_Flags & EXF_REQ_ARRAY) == 0) exp->ex_Type = itype; /* * If we don't have a SemGroup to normalize against, XXX how should * we normalize the compound expression? */ if (exp->ex_Type == NULL) { dassert_exp(exp, 0); } /* * Normalize the compound expression based on the * argument types expected by the procedure. We have * to resolve the type before we start the scan in order * to ensure that d_Offset is properly assigned. * * Use the declarations found in the compound type * semantic group to coerce the procedure arguments to * generate the correct compound type. Note that ResolveExp() * recursion must still use the SemGroup that was passed to us. * * XXX deal with defaults and pre-resolved arguments. XXX */ type = ResolveType(exp->ex_Type, NULL, 0); switch(type->ty_Op) { case TY_ARGS: sg2 = type->ty_ArgsType.et_SemGroup; break; case TY_VAR: sg2 = type->ty_VarType.et_SemGroup; break; case TY_COMPOUND: sg2 = type->ty_CompType.et_SemGroup; break; case TY_CLASS: sg2 = type->ty_ClassType.et_SemGroup; break; default: dassert_exp(exp, 0); sg2 = NULL; /* NOT REACHED */ break; } pscan = &exp->ex_Lhs; /* * Scan the compound expression and match it up against the compound * type. */ d = RUNE_FIRST(&sg2->sg_DeclList); while ((scan = *pscan) != NULL) { if (scan->ex_ArgId != NULL) { /* * Named argument, find it * * (Overloading not allowed) */ int eno = TOK_ERR_ID_NOT_FOUND; Declaration *nd; nd = FindDeclId(sg2, scan->ex_ArgId, &eno); if (nd == NULL) { ExpFatalError(scan, eno); /* NOT REACHED */ } /* * XXX for now, punt on setting EXF_PROBCONST * if the named argument skips a declaration. */ if (nd != d && (d == NULL || nd != RUNE_NEXT(d, d_Node))) { isconst = 0; } d = nd; } else { /* * Unnamed argument, run through sequentially. Skip * any non-storage or global storage. */ while (d && d->d_Op != DOP_ARGS_STORAGE && d->d_Op != DOP_STACK_STORAGE && d->d_Op != DOP_GROUP_STORAGE ) { d = RUNE_NEXT(d, d_Node); } /* * Ran out of storage declarations. If this is a * var-args SemGroup then we actually create a new * SemGroup (and eventually a new type) to represent * it. * * We then extend the varargs SemGroup. This isn't * pretty. */ if (d == NULL) { if (varargs == 0 && (sg2->sg_Flags & SGF_VARARGS)) { sg2 = DupSemGroup(sg2->sg_Parent, NULL, sg2, 1); #if 0 ResolveSemGroup(sg3, 0); sg2 = sg3; #endif varargs = 1; } if (varargs == 0) { fprintf(stderr, "Too many arguments in " "expression\n"); dassert_exp(scan, 0); } } } /* * Unlink the expression from the compound list temporarily * so we can safely resolve it. Either cast the expression * to the compound element, or create a compound element * (e.g. varargs call) to match the expression. * * Due to the resolver moving things around, the elements of * a compound expression are sometimes resolved multiple times. */ *pscan = scan->ex_Next; scan->ex_Next = NULL; if (d) { Type *dtype = d->d_StorDecl.ed_Type; int sflags; /* * HACK! 
XXX YYY */ if (SimilarType(dtype, &PointerType) && (dtype->ty_SQFlags & SF_LVALUE) == SF_LVALUE ) { dtype = NULL; sflags = flags & ~RESOLVE_AUTOCAST; } else { sflags = flags | RESOLVE_AUTOCAST; } /* * LValueStor needs a RS, set ADDRUSED to make sure * it's available to the generator. */ if (d->d_ScopeFlags & SCOPE_LVALUE) scan->ex_Flags |= EXF_ADDRUSED; if ((scan->ex_Flags & EXF_RESOLVED) == 0) { scan = ResolveExp(isg, sg, scan, dtype, sflags); } else if (dtype) { /* * Since we have already resolved the * expression we need to do the same sanity * checking that it would do to cast. */ dassert_exp(scan, (dtype->ty_SQFlags & SF_LVALUE) == 0 || (scan->ex_Type->ty_SQFlags & SF_LVALUE)); if (!SimilarType(dtype, scan->ex_Type)) { scan = resolveExpCast(isg, sg, scan, dtype, flags); } } } else { Scope tscope = INIT_SCOPE(0); if ((scan->ex_Flags & EXF_RESOLVED) == 0) scan = ResolveExp(isg, sg, scan, NULL, flags & ~RESOLVE_AUTOCAST); dassert(varargs != 0); d = AllocDeclaration(sg2, DOP_ARGS_STORAGE, &tscope); d->d_StorDecl.ed_Type = DEL_LVALUE(scan->ex_Type); ++sg2->sg_VarCount; d->d_Bytes = scan->ex_Type->ty_Bytes; d->d_AlignMask = scan->ex_Type->ty_AlignMask; /* * __align(%d) scope qualifier, override the type's * alignment */ if ((d->d_Scope.s_Flags & SCOPE_ALIGN) && d->d_Scope.s_AlignOverride) { d->d_AlignMask = d->d_Scope.s_AlignOverride - 1; } #if 0 sg2->sg_Bytes = BASEALIGN(sg2->sg_Bytes, d->d_AlignMask); #endif d->d_Offset = sg2->sg_Bytes; d->d_Storage = GENSTAT_MEMDEF; #if 0 sg2->sg_Bytes += d->d_Bytes; if (sg2->sg_AlignMask < d->d_AlignMask) sg2->sg_AlignMask = d->d_AlignMask; #endif } /* * Relink and check if constant */ scan->ex_Next = *pscan; *pscan = scan; if ((scan->ex_Flags & (EXF_CONST | EXF_PROBCONST)) == 0) isconst = 0; stype = scan->ex_Type; /* * If the declaration requires an LVALUE, assert that * we have an lvalue. Otherwise set the direct-store * request (also see InterpCompoundExp). */ if (d->d_ScopeFlags & SCOPE_LVALUE) { if ((stype->ty_SQFlags & SF_LVALUE) == 0) fprintf(stderr, "argument must be an lvalue\n"); dassert_exp(scan, stype->ty_SQFlags & SF_LVALUE); } #if 1 /* * Check content locking state against scan. Only matters * when passing a pointer as an lvalue since only pointers * can be content-locked. * * We don't have to worry if we are passing a pointer as an * rvalue since the code generator will fix up the locking in * that case. */ if ((d->d_ScopeFlags & SCOPE_LVALUE) && (stype->ty_Op == TY_PTRTO || stype->ty_Op == TY_REFTO)) { int scope1; int scope2; scope1 = d->d_ScopeFlags & SCOPE_LOCKING_MASK; if (d->d_Id == String_This) { /* XXX temporarily ignore e.g. ptr.new() */ scope2 = scope1; } else if (scan->ex_Decl) { scope2 = scan->ex_Decl->d_ScopeFlags & SCOPE_LOCKING_MASK; } else { /* * Var-args or unspecified, allow the * default or explicitly unlocked? XXX */ scope2 = scope1 & SCOPE_UNLOCKED; } if (scope1 != scope2) { fprintf(stderr, "scopes: %08x, %08x\n", scope1, scope2); if (d->d_Id == String_This) { ExpFatalError(scan, TOK_ERR_SCOPE_MISMATCH_THIS); } else { ExpFatalError(scan, TOK_ERR_SCOPE_MISMATCH); } } } #endif /* * accounting */ d = RUNE_NEXT(d, d_Node); pscan = &scan->ex_Next; } /* * Make sure the caller knows it's a var-args function even if * we didn't supply any additional args. Otherwise the backend * may not generate the correct form for calls to the target. */ if (varargs == 0 && (sg2->sg_Flags & SGF_VARARGS)) { sg2 = DupSemGroup(sg2->sg_Parent, NULL, sg2, 1); varargs = 1; } /* * Resolve the varargs sg2 after building it.
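 *
 * Illustrative sketch (hypothetical call, not from the original
 * source): for a varargs target, a call like f("x", 1, 2.5)
 * duplicates the args SemGroup above and appends one
 * DOP_ARGS_STORAGE declaration per extra argument, so the
 * duplicated group must be re-resolved before its size and
 * alignment are meaningful.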
*/ if (varargs) { ResolveSemGroup(sg2, 0); } /* * If we made a var-args call, adjust the expression's type */ if (varargs) { dassert(type->ty_Op == TY_ARGS); exp->ex_Type = ResolveType(TypeToVarType(type, sg2), NULL, 0); } if (isconst) exp->ex_Flags |= EXF_PROBCONST; exp->ex_Flags |= EXF_RESOLVED; return(exp); } /* * resolveBracketedExp() - resolve a bracketed expression. * * Resolve a bracketed expression. Bracketed expressions require * an array type to normalize against. * * The bracketed expressions may contain subclasses of the superclasses * expected by itype. */ Exp * resolveBracketedExp(SemGroup *isg, SemGroup *sg, Exp *exp, Type *itype, int flags) { Exp **pscan; Exp *scan; int isconst = 1; Type *type; Type *stype; flags &= ~RESOLVE_AUTOCAST; /* not applicable to this function */ /* * Expression dup()ing */ if (exp->ex_Flags & EXF_DUPEXP) { #if DUPEXP_DEBUG static int count; fprintf(stderr, "DUPEXPC %d\n", ++count); #endif exp = DupExp(sg, exp); } /* * Expression type is the hinted type. */ if (itype && (exp->ex_Flags & EXF_REQ_ARRAY) == 0) exp->ex_Type = itype; /* * We need a type to normalize against. */ if (exp->ex_Type == NULL) { dassert_exp(exp, 0); /* NOT REACHED */ } /* * Normalize the bracketed expression based on the array * type. We have to resolve the type before we start the * scan in order to ensure that d_Offset is properly assigned. */ type = ResolveType(exp->ex_Type, NULL, 0); if (type->ty_Op != TY_ARYOF) { dassert_exp(exp, 0); /* NOT REACHED */ } type = type->ty_AryType.et_Type; /* element type */ /* * Scan the bracketed expression and match each element against * the element type. */ pscan = &exp->ex_Lhs; while ((scan = *pscan) != NULL) { Type *dtype; int sflags; /* * Unlink the expression from the compound list temporarily * so we can safely resolve it. Either cast the expression * to the compound element, or create a compound element * (e.g. varargs call) to match the expression. * * Due to the resolver moving things around, the elements of * a compound expression are sometimes resolved multiple times. */ *pscan = scan->ex_Next; scan->ex_Next = NULL; dtype = type; /* * HACK! XXX YYY */ if (SimilarType(dtype, &PointerType) && (dtype->ty_SQFlags & SF_LVALUE) == SF_LVALUE ) { dtype = NULL; sflags = flags & ~RESOLVE_AUTOCAST; } else { sflags = flags | RESOLVE_AUTOCAST; } /* * LValueStor needs a RS, set ADDRUSED to make sure * it's available to the generator. */ if (dtype->ty_SQFlags & SF_LVALUE) scan->ex_Flags |= EXF_ADDRUSED; if ((scan->ex_Flags & EXF_RESOLVED) == 0) { scan = ResolveExp(isg, sg, scan, dtype, sflags); } else { /* * Since we have already resolved the * expression we need to do the same sanity * checking that it would do to cast. */ dassert_exp(scan, (dtype->ty_SQFlags & SF_LVALUE) == 0 || (scan->ex_Type->ty_SQFlags & SF_LVALUE)); if (!SimilarType(dtype, scan->ex_Type)) { scan = resolveExpCast(isg, sg, scan, dtype, flags); } } /* * Relink and check if constant */ scan->ex_Next = *pscan; *pscan = scan; if ((scan->ex_Flags & (EXF_CONST | EXF_PROBCONST)) == 0) isconst = 0; stype = scan->ex_Type; /* * If the declaration requires an LVALUE, assert that * we have an lvalue. Otherwise set the direct-store * request (also see InterpCompoundExp). */ if (dtype->ty_SQFlags & SF_LVALUE) { if ((stype->ty_SQFlags & SF_LVALUE) == 0) fprintf(stderr, "argument must be an lvalue\n"); dassert_exp(scan, stype->ty_SQFlags & SF_LVALUE); } #if 0 /* * XXX not applicable? * * Check content locking state against scan.
Only matters * when passing a pointer as an lvalue since only pointers * can be content-locked. * * We don't have to worry if we are passing a pointer as an * rvalue since the code generator will fix up the locking in * that case. */ if ((dtype->ty_SQFlags & SF_LVALUE) && (stype->ty_Op == TY_PTRTO || stype->ty_Op == TY_REFTO)) { int scope1; int scope2; #if 0 scope1 = d->d_ScopeFlags & (SCOPE_UNTRACKED | SCOPE_UNLOCKED | SCOPE_HARD); #endif scope1 = SCOPE_UNLOCKED; if (scope1 != scope2) { fprintf(stderr, "scopes: %08x, %08x\n", scope1, scope2); ExpFatalError(scan, TOK_ERR_SCOPE_MISMATCH); } } #endif pscan = &scan->ex_Next; } if (isconst) exp->ex_Flags |= EXF_PROBCONST; exp->ex_Flags |= EXF_RESOLVED; return(exp); } /* * resolveExpCast() - Cast the expression to the specified type and return * the cast expression. * * Note that expression nodes depend on their ex_Type being correct, * and also expressions may be shared, so be careful not to modify the * ex_Type (or anything else) in the existing expression. * * This code is somewhat different than resolveExpOper() and friends. * The Exp argument has already been resolved so do not resolve it * again, and the cast type already has SF_LVALUE set or cleared as * appropriate (had better be cleared!) * * As with operators we have to locate the cast declaration matching * the cast we want to do. */ static Exp * resolveExpCast(SemGroup *isg, SemGroup *sg, Exp *exp, Type *ltype, int flags) { Type *rtype; Declaration *d; int didagain = 0; int oflags = flags; flags &= ~RESOLVE_AUTOCAST; again: rtype = exp->ex_Type; dassert(rtype && ltype); /* * XXX attempt to cast from subclass to superclass? */ /* * XXX look in our local semantic hierarchy for a compatible cast ? */ dassert(ltype->ty_Op != TY_UNRESOLVED); dassert(rtype->ty_Op != TY_UNRESOLVED); /* * Look in the right hand (source) type for the cast */ d = findCast(rtype, ltype, rtype, flags); /* * If that fails then look in the left hand (destination) type for * the cast. */ if (d == NULL) { d = findCast(ltype, ltype, rtype, flags); } #if 1 if (d == NULL && (rtype->ty_Op == TY_PTRTO || rtype->ty_Op == TY_REFTO || rtype->ty_Op == TY_CPTRTO)) { d = findCast(&PointerType, ltype, rtype, flags); } #endif if (d == NULL) { /* * We could not find a specific cast operator. There are * some inherent casts that we can do. We run through these * in an attempt to come up with matching types. */ if (ltype->ty_Op != rtype->ty_Op && (ltype->ty_Op == TY_PTRTO || ltype->ty_Op == TY_CPTRTO || ltype->ty_Op == TY_ARYOF) && (rtype->ty_Op == TY_PTRTO || rtype->ty_Op == TY_CPTRTO || rtype->ty_Op == TY_ARYOF)) { /* * Pointers, C pointers, or arrays can be cast to * pointers, C pointers, or arrays of the same type. * * Cast the right hand type to an equivalent * pointer/cpointer/array of the right hand type * and re-resolve the cast. */ exp = ExpToCastExp(exp, ResolveType(ChangeType(rtype, ltype->ty_Op), NULL, 0)); return(resolveExpCast(isg, sg, exp, ltype, flags)); } else if (MatchType(ltype, rtype) <= SG_COMPAT_PART) { /* * If the types are compatible (casting rtype->ltype), * we can cast trivially. */ exp = ExpToCastExp(exp, ltype); } else if (MatchType(&NumericType, ltype) <= SG_COMPAT_SUBCLASS && MatchType(&NumericType, rtype) <= SG_COMPAT_SUBCLASS) { /* * Casting from one numeric type to another must be * supported by the interpreter/compiler. */ exp = ExpToCastExp(exp, ltype); } else if (SimilarType(&VoidType, ltype)) { /* * Casting anything to void is allowed (throwing the * object away). E.g. statement-expressions.
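 *
 * Illustrative sketch (not from the original source): an
 * expression used as a standalone statement, e.g. a call whose
 * non-void return value is never consumed, lands here and is
 * trivially cast to void so the result can be thrown away.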
*/ exp = ExpToCastExp(exp, ltype); } else if (SimilarType(&VoidPtrType, ltype)) { /* * Casting a pointer to a (void *) is trivial, but is * only allowed if the underlying structure does not * contain any pointers. * * NOTE: Generally only used when a pointer is being * cast to an integer. Rune does not allow * casting back to other pointer types. * * XXX validate integral # of objects fit in pointer * range. */ if (rtype->ty_PtrType.et_Type->ty_Flags & TF_HASLVPTR) ExpFatalError(exp, TOK_ERR_LIMITED_VOIDP_CAST); exp = ExpToCastExp(exp, ltype); } else if (SimilarType(&VoidRefType, ltype)) { /* * Casting a pointer to a (void @) is trivial. * * NOTE: Generally only used when a pointer is being * cast to an integer. Rune does not allow * casting back to other pointer types. * * XXX validate integral # of objects fit in pointer * range. */ if (rtype->ty_PtrType.et_Type->ty_Flags & TF_HASLVPTR) ExpFatalError(exp, TOK_ERR_LIMITED_VOIDP_CAST); exp = ExpToCastExp(exp, ltype); } else if (SimilarType(rtype, &VoidPtrType)) { /* * Casting from a void pointer may not be trivial * but we leave it up to the interpreter/compiler. * * Only allow if the target does not contain any * pointers or if the right-hand-side is NULL. * * XXX validate integral # of objects fit in pointer * range. */ switch(ltype->ty_Op) { case TY_REFTO: case TY_PTRTO: if ((exp->ex_Flags & EXF_NULL) == 0 && (ltype->ty_PtrType.et_Type->ty_Flags & TF_HASLVPTR)) { ExpFatalError(exp, TOK_ERR_LIMITED_VOIDP_CAST); } break; default: break; } exp = ExpToCastExp(exp, ltype); } else if (SimilarType(rtype, &CVoidPtrType)) { switch(ltype->ty_Op) { case TY_CPTRTO: if ((exp->ex_Flags & EXF_NULL) == 0 && (ltype->ty_PtrType.et_Type->ty_Flags & TF_HASLVPTR)) { ExpFatalError(exp, TOK_ERR_LIMITED_VOIDP_CAST); } break; default: break; } } else if (SimilarType(ltype, &BoolType) && (rtype->ty_Op == TY_PTRTO || rtype->ty_Op == TY_REFTO || rtype->ty_Op == TY_CPTRTO)) { /* * Any pointer can be cast to a boolean, which * tests against NULL. */ exp = ExpToCastExp(exp, ltype); } else if (ltype->ty_Op == rtype->ty_Op && (ltype->ty_Op == TY_PTRTO || ltype->ty_Op == TY_CPTRTO || ltype->ty_Op == TY_ARYOF)) { /* * We allow casts of pointers to similar numeric * types if they are the same size, though this is * really rather a hack. This is mainly to handle * the signed<->unsigned cast case. 
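 *
 * Illustrative sketch (not from the original source): a cast
 * between pointers to same-size numeric types, e.g. from a
 * pointer-to-uint32 to a pointer-to-int32, is accepted below,
 * while pointers to numeric types of different sizes fall
 * through to the "dissimilar numeric types" error.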
XXX */ int ok = 0; switch(ltype->ty_Op) { case TY_PTRTO: if ((ltype->ty_PtrType.et_Type->ty_SQFlags & SF_CONST) == 0 && (rtype->ty_PtrType.et_Type->ty_SQFlags & SF_CONST) != 0) { ExpFatalError(exp, TOK_ERR_READONLY); } if (MatchType(&NumericType, ltype->ty_PtrType.et_Type) <= SG_COMPAT_SUBCLASS && MatchType(&NumericType, rtype->ty_PtrType.et_Type) <= SG_COMPAT_SUBCLASS && ltype->ty_Bytes == rtype->ty_Bytes ) { exp = ExpToCastExp(exp, ltype); ok = 1; } break; case TY_CPTRTO: if ((ltype->ty_CPtrType.et_Type->ty_SQFlags & SF_CONST) == 0 && (rtype->ty_CPtrType.et_Type->ty_SQFlags & SF_CONST) != 0) { ExpFatalError(exp, TOK_ERR_READONLY); } if (MatchType(&NumericType, ltype->ty_CPtrType.et_Type) <= SG_COMPAT_SUBCLASS && MatchType(&NumericType, rtype->ty_CPtrType.et_Type) <= SG_COMPAT_SUBCLASS && ltype->ty_Bytes == rtype->ty_Bytes ) { exp = ExpToCastExp(exp, ltype); ok = 1; } break; case TY_ARYOF: if ((ltype->ty_AryType.et_Type->ty_SQFlags & SF_CONST) == 0 && (rtype->ty_AryType.et_Type->ty_SQFlags & SF_CONST) != 0) { ExpFatalError(exp, TOK_ERR_READONLY); } if (MatchType(&NumericType, ltype->ty_AryType.et_Type) <= SG_COMPAT_SUBCLASS && MatchType(&NumericType, rtype->ty_AryType.et_Type) <= SG_COMPAT_SUBCLASS && ltype->ty_Bytes == rtype->ty_Bytes ) { exp = ExpToCastExp(exp, ltype); ok = 1; } break; } if (ok == 0) { fprintf(stderr, "Unable to resolve cast from pointers " "to dissimilar numeric types " "%s to %s\n", TypeToStr(rtype, NULL), TypeToStr(ltype, NULL)); dassert_exp(exp, 0); } } else if (didagain == 0 && (oflags & RESOLVE_AUTOCAST) && (exp->ex_Flags2 & EX2F_WASCOMP) && ltype->ty_Op == TY_COMPOUND && rtype->ty_Op != TY_COMPOUND) { /* * The expression parser might have optimized-out * the TOK_COMPOUND wrapper around single-element * parenthesized expressions. Add it back in if * the cast target expects a compound expression. * * XXX Currently hack a SetDupExp() to avoid * re-resolving the already-resolved component. */ exp = ExpToCompoundExp(exp, TOK_COMPOUND); exp = resolveCompoundExp(isg, sg, exp, ltype, flags); didagain = 1; goto again; } else if (didagain == 0 && (oflags & RESOLVE_AUTOCAST) && (exp->ex_Flags2 & EX2F_WASCOMP) && ltype->ty_Op == TY_CLASS && rtype->ty_Op == TY_CLASS && ltype != &VoidType && (ltype->ty_Flags & (TF_ISBOOL | TF_ISINTEGER | TF_ISFLOATING)) == 0 && (rtype->ty_Flags & (TF_ISBOOL | TF_ISINTEGER | TF_ISFLOATING))) { /* * The expression parser might have optimized-out * the TOK_COMPOUND wrapper around single-element * parenthesized expressions used in a class iterator * (in an assignment). Add it back in if the * ltype is a non-core class and rtype is a core * class. * * XXX Currently hack a SetDupExp() to avoid * re-resolving the already-resolved component. */ exp = ExpToCompoundExp(exp, TOK_COMPOUND); exp = resolveCompoundExp(isg, sg, exp, ltype, flags); didagain = 1; goto again; } else { fprintf(stderr, "Unable to resolve cast from %s to %s\n", TypeToStr(rtype, NULL), TypeToStr(ltype, NULL)); dassert_exp(exp, 0); } } else if (d->d_ScopeFlags & SCOPE_INTERNAL) { /* * We found a cast operator and it is an internal operator */ exp = ExpToCastExp(exp, ltype); exp->ex_Decl = d; } else { /* * We found a cast operator and it is a Rune cast procedure. We * must convert the cast to a procedure call. If we want * resolveCompoundExp() to be able to generate a compatible * procedure (in a subclass) we have to tell it about the * procedure. 
*/ Exp *sexp; sexp = ExpToCompoundExp(exp, TOK_COMPOUND); if (d->d_ProcDecl.ed_ProcBody == NULL) sexp->ex_Decl = d; sexp = resolveCompoundExp(isg, sg, sexp, d->d_ProcDecl.ed_Type->ty_ProcType.et_ArgsType, flags); exp = AllocExp(NULL); exp->ex_Lhs = AllocExp(NULL); exp->ex_Lhs->ex_Token = TOK_DECL; exp->ex_Lhs->ex_Id = StrTableDup(d->d_Id); exp->ex_Lhs->ex_Decl = d; exp->ex_Lhs->ex_Type = d->d_ProcDecl.ed_Type; exp->ex_Lhs->ex_Flags |= EXF_RESOLVED; exp->ex_Rhs = sexp; exp->ex_Flags |= EXF_BINARY; exp->ex_Token = TOK_CALL; /* XXX use ltype or procedure's rettype? */ exp->ex_Type = ltype; LexDupRef(&sexp->ex_LexRef, &exp->ex_LexRef); LexDupRef(&sexp->ex_LexRef, &exp->ex_Lhs->ex_LexRef); ResolveDecl(d, 0); /* * Additional work to inline the procedure */ resolveDynamicProcedure(isg, sg, exp, flags); resolveProcedureInline(isg, sg, exp, flags); } exp->ex_Flags |= EXF_RESOLVED; return(exp); } static Declaration * findCast(Type *btype, Type *ltype, Type *rtype, int flags) { SemGroup *sg; Declaration *d; flags &= ~RESOLVE_AUTOCAST; /* not applicable to this function */ dassert(rtype->ty_Op != TY_UNRESOLVED); dassert(ltype->ty_Op != TY_UNRESOLVED); /* * Locate the base type. If the base type does not have a SemGroup * there are no casts. (XXX put system operators here) */ sg = BaseType(&btype); dassert(btype->ty_Op != TY_UNRESOLVED); if (sg == NULL) return(NULL); /* * Look for the cast in the SemGroup */ RUNE_FOREACH(d, &sg->sg_DeclList, d_Node) { if (d->d_Op == DOP_PROC && (d->d_ScopeFlags & SCOPE_CAST)) { ResolveType(d->d_ProcDecl.ed_Type, NULL, 0); if (MatchCastTypes(d, ltype, rtype)) return(d); } } /* * Failed. If the base type is a compound type, look for the * cast in the SemGroup for each element making up the compound * type. e.g. so (mycustomtype, double) would find the cast * in mycustomtype. */ if (btype->ty_Op == TY_COMPOUND) { RUNE_FOREACH(d, &sg->sg_DeclList, d_Node) { Declaration *d2; if (d->d_Op & DOPF_STORAGE) { ResolveType(d->d_StorDecl.ed_Type, NULL, 0); d2 = findCast(d->d_StorDecl.ed_Type, ltype, rtype, flags); } else if (d->d_Op == DOP_TYPEDEF) { ResolveType(d->d_StorDecl.ed_Type, NULL, 0); d2 = findCast(d->d_TypedefDecl.ed_Type, ltype, rtype, flags); } else { d2 = NULL; } if (d2) return(d2); } } return(NULL); } /* * resolveExpOper() - resolve an operator * * This is complex enough that it is broken out into its own procedure. * Normally we just look the operator up but we have to special case * pointer arithmetic because we will not know until now that we * have to do it. * * itype is a return-type hint only. resolveExpOper() can ignore it * if it wishes. We currently use it to detect cast-to-void, such as * when an expression like "++i" is used in a for() loop or as a * standalone statement. This allows us to optimize the case. */ static Exp * resolveExpOper(SemGroup *isg, SemGroup *sg, Exp *exp, Type *itype, int flags) { Declaration *d; int isPointerOp = 0; flags &= ~RESOLVE_AUTOCAST; /* not applicable to this function */ dassert_exp(exp, exp->ex_Id != NULL); if (exFlags & EXF_BINARY) { exLhs = ResolveExp(isg, sg, exLhs, NULL, flags); exRhs = ResolveExp(isg, sg, exRhs, NULL, flags); } else if (exFlags & EXF_UNARY) { exLhs = ResolveExp(isg, sg, exLhs, NULL, flags); } else { dassert_exp(exp, 0); } /* * If the lhs is a pointer look the operator up in the Pointer * class first. Operators in the Pointer class are special-cased. * A second pointer argument or a pointer return value must match * the lhs pointer.
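 *
 * Illustrative sketch (not from the original source): for an
 * expression such as 'ptr + 3' the '+' operator is looked up in
 * the Pointer class first; internal Pointer operators nominally
 * return a void reference, which is rewritten below to the lhs
 * pointer type so the result matches the operand.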
* * If this fails, or if the ltype is not a pointer, then look * the operator up normally. */ if (exLhs->ex_Type->ty_Op == TY_PTRTO || exLhs->ex_Type->ty_Op == TY_REFTO || exLhs->ex_Type->ty_Op == TY_CPTRTO ) { Type *ltype; Type *rtype; if (exFlags & EXF_BINARY) { rtype = exRhs->ex_Type; ltype = exLhs->ex_Type; } else { dassert(exFlags & EXF_UNARY); rtype = NULL; ltype = exLhs->ex_Type; } d = findOper(&PointerType, exp->ex_Id, ltype, rtype, flags); if (d) isPointerOp = 1; else d = findExpOper(exp, flags); } else { d = findExpOper(exp, flags); } /* * Fall through to finish up resolving the operator. We just set * ex_Decl for internal operators, and construct a call for * non-internal procedural operators. */ if (d) { Declaration *d2; Type *type; SemGroup *sg2; int count = 0; dassert_exp(exp, d != NULL); dassert_exp(exp, d->d_Op == DOP_PROC); dassert_exp(exp, d->d_ProcDecl.ed_Type->ty_Op == TY_PROC); type = d->d_ProcDecl.ed_Type; exType = type->ty_ProcType.et_RetType; /* * Special case for internal Pointer ops. The return type is * the left-hand type (we may still optimize it to void later). */ if (isPointerOp && (d->d_ScopeFlags & SCOPE_INTERNAL) && SimilarType(&VoidRefType, exType) ) { if (exType->ty_SQFlags & SF_LVALUE) exType = ADD_LVALUE(exLhs->ex_Type); else exType = DEL_LVALUE(exLhs->ex_Type); } type = d->d_ProcDecl.ed_Type->ty_ProcType.et_ArgsType; dassert(type->ty_Op == TY_ARGS); sg2 = type->ty_ArgsType.et_SemGroup; /* * Assert that LVALUE requirements are met. XXX MatchType() * code should disallow the non-lvalue-cast-to-lvalue case * so we don't have to do a check here. */ RUNE_FOREACH(d2, &sg2->sg_DeclList, d_Node) { if ((d2->d_Op & DOPF_STORAGE) && d2->d_Op != DOP_GLOBAL_STORAGE) { if (count == 0) { if ((d2->d_ScopeFlags & SCOPE_LVALUE) && (exLhs->ex_Type->ty_SQFlags & SF_LVALUE) == 0 ) { fprintf(stderr, "lhs of exp must be " "lvalue\n"); dassert_exp(exp, 0); } } else if (count == 1) { if ((d2->d_ScopeFlags & SCOPE_LVALUE) && (exRhs->ex_Type->ty_SQFlags & SF_LVALUE) == 0 ) { fprintf(stderr, "rhs of exp must be " "lvalue\n"); dassert_exp(exp, 0); } } ++count; } } if (d->d_ScopeFlags & SCOPE_INTERNAL) { /* * Internal operator. Optimize any cast to void * by having the internal function deal with it. * (since we aren't setting exType the optimization * currently doesn't do anything, see ST_Exp) */ exDecl = d; if (itype == &VoidType) { /*exType = itype;*/ exFlags |= EXF_RET_VOID; } } else { /* * Normal procedural operator. Convert the left and * right hand sides to a compound expression and * convert exp to a TOK_CALL. NOTE! ex_Rhs may be * NULL (unary op). * * The compound expression may need to rewrite a * subclass procedure, which it can do if the * procedure's body has not yet been created (or * duplicated from the superclass). ex_Decl must * be set in this case. * * Note that the expression structure may be shared. * The conversion is permanent so that is ok. * * XXX keep the type intact? 
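 *
 * Illustrative sketch (hypothetical operator procedure, not
 * from the original source): for 'a + b' where '+' resolves to
 * a Rune operator procedure rather than an internal one, the
 * code below rebuilds the node as a TOK_CALL whose RHS is the
 * TOK_COMPOUND argument list (a, b), exactly as if the operator
 * procedure had been called directly.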
*/ exLhs->ex_Next = exRhs; exRhs = exLhs; exRhs = ExpToCompoundExp(exRhs, TOK_COMPOUND); if (d->d_ProcDecl.ed_ProcBody == NULL) exRhs->ex_Decl = d; exRhs = resolveCompoundExp(isg, sg, exRhs, type, flags); exLhs = AllocExp(NULL); LexDupRef(&exp->ex_LexRef, &exLhs->ex_LexRef); exLhs->ex_Token = TOK_ID; exLhs->ex_Id = StrTableDup(d->d_Id); exLhs->ex_Decl = d; exLhs->ex_Type = d->d_ProcDecl.ed_Type; exLhs->ex_Flags |= EXF_RESOLVED; exp->ex_Token = TOK_CALL; exFlags = EXF_BINARY; ResolveDecl(d, 0); /* * Additional work to inline the procedure */ resolveDynamicProcedure(isg, sg, exp, flags); resolveProcedureInline(isg, sg, exp, flags); } } if (d == NULL) { fprintf(stderr, "Unable to resolve operator: %s\n", exp->ex_Id); dassert_exp(exp, 0); } /* * Flag a pure operator whose arguments are constants as probably * being constant. */ if (d->d_ScopeFlags & SCOPE_PURE) { if ((exLhs->ex_Flags & (EXF_CONST | EXF_PROBCONST)) && (exRhs == NULL || (exRhs->ex_Flags & (EXF_CONST | EXF_PROBCONST)))) { exFlags |= EXF_PROBCONST; } } exp->ex_Flags |= EXF_RESOLVED; return exp; } /* * Helper, visibility must be properly set immediately, prior to any * circularity, to guarantee that search functions work without deferral. */ static void resvis_set(resvis_t *vis, int visibility) { while (vis) { *vis->visp = visibility; vis = vis->next; } } /* * ResolveType() - Resolve a type (always returns its argument) * * Resolve a type. Always returns consistent visibility information * to the caller, even if the resolution remains in-progress. Thus * all modifications to the resvis chain occur on the front-end of * any recursion. * * Flags, Size and Alignment information might take several passes for * classes (due to chains of DF_DYNAMICREF'd processes), or arrays (due * to the * array size not being immediately resolvable). */ Type * ResolveType(Type *type, resvis_t *vis, int retry) { SemGroup *sg = NULL; int ok = 0; int dummy_vis; resvis_t myvis; myvis.next = vis; myvis.visp = &dummy_vis; /* * Detect circular loop. */ if (type->ty_Flags & TF_RESOLVED) { resvis_set(vis, type->ty_Visibility); return(type); } if (type->ty_Flags & TF_RESOLVING) { if (retry == 0) { resvis_set(vis, type->ty_Visibility); return(type); } } type->ty_Flags |= TF_RESOLVING; /* * Remember that visibility data must be set at the head of any * recursion chain. */ loop_unresolved: switch(type->ty_Op) { case TY_CLASS: /* * NOTE: Special case, PointerType fields not in classes XXX * (force alignment and bytes)? */ dassert(type->ty_SQList == &type->ty_ClassType.et_SemGroup->sg_ClassList); /* visibility already determined by resolveSuperClass? */ dassert(type->ty_Visibility != 0); resvis_set(vis, type->ty_Visibility); /* * The superclass (if any) cannot depend on our subclass, * so resolve it first. Note that resolveSuperClass() * does not do everything because it has to be called * in the ResolveClasses() stage, so finish it up here * with a real resolve. */ if (type->ty_ClassType.et_Super) { Type **superp = &type->ty_ClassType.et_Super; if ((*superp)->ty_Op == TY_UNRESOLVED) resolveSuperClass(*superp); ResolveType(*superp, NULL, 0); } /* * DEPENDENCY - SG must resolve for us to resolve. * (if we can't resolve this it is likely an embedded * object loop).
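 *
 * Illustrative sketch (hypothetical class, not from the
 * original source): a class that embeds itself by value, e.g.
 *
 *	class Node { Node inner; }
 *
 * can never complete here and is reported as an embedded object
 * loop, whereas a self-referencing pointer member is fine
 * because TY_PTRTO resolves even while its target type is still
 * incomplete.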
*/ sg = type->ty_ClassType.et_SemGroup; ResolveSemGroup(sg, 0); if (sg->sg_Flags & SGF_RESOLVED) { if (type != &PointerType) { type->ty_Bytes = sg->sg_Bytes; type->ty_AlignMask = sg->sg_AlignMask; } ok = 1; } #if 0 /* * Fixup type ty_SQFlags here XXX removed * Any hard class type must be given the SF_HARD storage * qualifier. */ if (sg->sg_Stmt->u.ClassStmt.es_Decl->d_ScopeFlags & SCOPE_HARD) type->ty_SQFlags |= SF_HARD; #endif break; case TY_CPTRTO: /* * NOTE: Do not set TF_HASLVPTR, C pointers are not tracked. * * Always complete, even if the target type is incomplete. * (allow circular references). */ type->ty_Bytes = sizeof(void *); type->ty_AlignMask = CPOINTER_ALIGN; myvis.visp = &type->ty_Visibility; ResolveType(type->ty_CPtrType.et_Type, &myvis, 0); ok = 1; break; case TY_PTRTO: /* * Set TF_HASLVPTR, pointers are tracked. * * Always complete, even if the target type is incomplete. * (allow circular references). */ type->ty_Bytes = sizeof(PointerStor); type->ty_AlignMask = POINTERSTOR_ALIGN; type->ty_Flags |= TF_HASLVPTR; myvis.visp = &type->ty_Visibility; ResolveType(type->ty_PtrType.et_Type, &myvis, 0); #if 0 /* * Pointers to hard types are also hard. XXX remove. */ if (type->ty_PtrType.et_Type->ty_SQFlags & SF_HARD) type->ty_SQFlags |= SF_HARD; #endif ok = 1; break; case TY_REFTO: /* * Set TF_HASLVPTR, references are tracked. * * Always complete, even if the target type is incomplete. * (allow circular references). */ type->ty_Bytes = sizeof(PointerStor); type->ty_AlignMask = POINTERSTOR_ALIGN; type->ty_Flags |= TF_HASLVPTR; myvis.visp = &type->ty_Visibility; ResolveType(type->ty_RefType.et_Type, &myvis, 0); ok = 1; #if 0 /* * References to hard types are also hard. XXX remove */ if (type->ty_PtrType.et_Type->ty_SQFlags & SF_HARD) type->ty_SQFlags |= SF_HARD; #endif break; case TY_ARYOF: /* * Inherit TF_HASLVPTR (if array type is or contains something * which needs to be tracked). * * The array size must resolve sufficiently for us to resolve. */ { Exp *exp = type->ty_AryType.et_ArySize; Type *atype = type->ty_AryType.et_Type; myvis.visp = &type->ty_Visibility; ResolveType(atype, &myvis, 0); exp = resolveConstExp(NULL, type->ty_AryType.et_SemGroup, exp, 0); if ((exp->ex_Flags & EXF_RESOLVED) && (atype->ty_Flags & TF_RESOLVED)) { type->ty_AryType.et_ArySize = exp; type->ty_AryType.et_Count = resolveGetConstExpInt64(exp); type->ty_AlignMask = type->ty_AryType.et_Type->ty_AlignMask; type->ty_Bytes = type->ty_AryType.et_Type->ty_Bytes * type->ty_AryType.et_Count; type->ty_Flags |= type->ty_AryType.et_Type->ty_Flags & (TF_HASLVPTR | TF_HASCONSTRUCT | TF_HASDESTRUCT | TF_HASGCONSTRUCT | TF_HASGDESTRUCT | TF_HASASS); ok = 1; } } break; case TY_COMPOUND: /* * All elements of a compound type must resolve for the * compound type to resolve. * * NOTE: TF_HASLVPTR inherited as appropriate after switch. */ sg = type->ty_CompType.et_SemGroup; ResolveSemGroup(sg, 0); if (sg->sg_Flags & SGF_RESOLVED) { type->ty_Bytes = sg->sg_Bytes; type->ty_AlignMask = sg->sg_AlignMask; type->ty_Visibility = SCOPE_ALL_VISIBLE; ok = 1; } break; case TY_VAR: /* * All elements of a compound type must resolve for the * compound type to resolve. * * NOTE: TF_HASLVPTR inherited as appropriate after switch. 
*/ sg = type->ty_VarType.et_SemGroup; ResolveSemGroup(sg, 0); if (sg->sg_Flags & SGF_RESOLVED) { type->ty_Bytes = sg->sg_Bytes; type->ty_AlignMask = sg->sg_AlignMask; type->ty_Visibility = SCOPE_ALL_VISIBLE; ok = 1; } break; case TY_ARGS: /* * All elements of a compound type must resolve for the * compound type to resolve. * * NOTE: TF_HASLVPTR inherited as appropriate after switch. */ sg = type->ty_ArgsType.et_SemGroup; ResolveSemGroup(sg, 0); if (sg->sg_Flags & SGF_RESOLVED) { type->ty_Bytes = sg->sg_Bytes; type->ty_AlignMask = sg->sg_AlignMask; type->ty_Visibility = SCOPE_ALL_VISIBLE; ok = 1; } break; case TY_PROC: /* * We mark the type as resolved regardless of the state * of the underlying argument and return types. * * NOTE: Storage not tracked. */ type->ty_Bytes = 0; type->ty_AlignMask = 0; type->ty_Visibility = SCOPE_ALL_VISIBLE; resvis_set(vis, type->ty_Visibility); ResolveType(type->ty_ProcType.et_ArgsType, NULL, 0); ResolveType(type->ty_ProcType.et_RetType, NULL, 0); ok = 1; break; case TY_STORAGE: /* * Raw storage must always resolve. * * NOTE: Base storage is not tracked. */ type->ty_Bytes = type->ty_StorType.et_Bytes; /* XXX check pwr of 2 */ if (type->ty_Bytes) type->ty_AlignMask = type->ty_Bytes - 1; type->ty_Visibility = SCOPE_ALL_VISIBLE; resvis_set(vis, type->ty_Visibility); ok = 1; break; case TY_UNRESOLVED: /* * We loop until the type is no longer TY_UNRESOLVED. * * NOTE: resolveSuperClass() is not really a recursive * function so we don't have to pre-set visibility. */ resolveSuperClass(type); /* visibility set by resolveSuperClass() */ goto loop_unresolved; break; case TY_DYNAMIC: /* * A Dynamic type is basically unknown at compile-time. * Always resolve. * * NOTE: Tracking unknown (must be handled at run-time). */ type->ty_Visibility = SCOPE_ALL_VISIBLE; resvis_set(vis, type->ty_Visibility); ok = 1; break; case TY_IMPORT: /* * TY_IMPORT types cannot be directly referenced by the * program. They are implicitly used as a placeholder * for a module's global storage at run-time. * * NOTE: Storage is persistent, so wrapper is not tracked. */ type->ty_Visibility = SCOPE_ALL_VISIBLE;/* XXX */ resvis_set(vis, type->ty_Visibility); ok = 1; break; default: dpanic("Unknown type %d (type=%p)", type->ty_Op, type); break; } if (ok) { type->ty_Flags &= ~TF_RESOLVING; type->ty_Flags |= TF_RESOLVED; if (sg) { if (sg->sg_Flags & SGF_ISINTEGER) type->ty_Flags |= TF_ISINTEGER; if (sg->sg_Flags & SGF_ISUNSIGNED) type->ty_Flags |= TF_ISUNSIGNED; if (sg->sg_Flags & SGF_ISFLOATING) type->ty_Flags |= TF_ISFLOATING; if (sg->sg_Flags & SGF_ISBOOL) type->ty_Flags |= TF_ISBOOL; if (sg->sg_Flags & SGF_HASASS) type->ty_Flags |= TF_HASASS; if (sg->sg_SRBase) type->ty_Flags |= TF_HASLVPTR; /* XXX TF_VARARGS */ if (sg->sg_Flags & SGF_VARARGS) type->ty_Flags |= TF_HASLVPTR; if (sg->sg_CBase) type->ty_Flags |= TF_HASCONSTRUCT; if (sg->sg_DBase) type->ty_Flags |= TF_HASDESTRUCT; /* * Combine constructor/destructor hint flags for * globals because we have just one linked list * for global constructors and destructors (no * need to optimize heavily). */ if (sg->sg_GBase) type->ty_Flags |= TF_HASGCONSTRUCT | TF_HASGDESTRUCT; dassert(type->ty_Visibility != 0); } } else { /* * NOTE: visibility is always set prior to any deferral * or circularity. */ deferType(type); } /* * Resolve the default expression for the type, if any. We do not * require the expression to complete. * * XXX qualified types just copy the exp. 
bad bad YYY * * YYY ResolveExp() no ISG (import sem group) */ if (type->ty_AssExp) { type->ty_Flags |= TF_HASASS; type->ty_AssExp = ResolveExp(NULL, sg, type->ty_AssExp, DEL_LVALUE(type), RESOLVE_AUTOCAST); } /* * ty_DynamicVector is nominally used when a Rune binary is run, but * we also need to set up enough of it such that mixed interpretation * and execution, or even just straight interpretation, works. This * is because the interpreter calls into libruntime. */ type->ty_DynamicVector = DefaultDynamicVector; #if 0 /* * XXX messes up later Storage/StorageAlign * * Internal types may be implied during resolution, be sure to * completely resolve its alignment too. * * (If not internal we have to wait because there might be recursive * dependencies on the type). */ if (type->ty_Flags & TF_ISINTERNAL) { runesize_t dummy = 0; resolveTypeAlign(type, &dummy, 0); } #endif return(type); } /* * resolveSuperClass() - resolve an unresolved dotted id sequence into a class * * Unresolved type identifier sequences must be resolved. We are also * responsible for setting the visibility of the type's elements. */ void resolveSuperClass(Type *super) { string_t *dottedId; SemGroup *sg; Declaration *d; int visibility = SCOPE_ALL_VISIBLE; int eno = 0; dassert_type(super, super->ty_Op == TY_UNRESOLVED); dottedId = super->ty_UnresType.et_DottedId; sg = super->ty_UnresType.et_SemGroup; d = FindDeclPath(NULL, super->ty_UnresType.et_ImportSemGroup, sg, super, dottedId, FDC_NULL, &visibility, -1, &eno); if (d == NULL) { errorDottedId(dottedId, "Unable to resolve class"); dpanic("unable to continue"); } /* * Resolve the unresolved type. Note that this occurs during class * resolution and we can't call ResolveType() here without getting into * a loop, so we do not yet know storage requirements (ty_Bytes and * ty_Align). */ switch(d->d_Op) { case DOP_CLASS: sg = d->d_ClassDecl.ed_SemGroup; super->ty_Op = TY_CLASS; super->ty_ClassType.et_SemGroup = sg; super->ty_ClassType.et_Super = d->d_ClassDecl.ed_Super; super->ty_Visibility = visibility; if (super->ty_SQList) RUNE_REMOVE(super->ty_SQList, super, ty_Node); super->ty_SQList = &sg->sg_ClassList; RUNE_INSERT_TAIL(super->ty_SQList, super, ty_Node); dassert(visibility); /* can't resolve super here */ /* * XXX should we move the class from the unresolved list to * the new SemGroup's actual list? */ break; case DOP_TYPEDEF: /* * Adjust super instead of allocating a new super, so all * other references to super using this class path get * resolved too. */ { dassert_type(super, d->d_TypedefDecl.ed_Type != super); TypeToQualType( d->d_TypedefDecl.ed_Type, super, super->ty_SQFlags | d->d_TypedefDecl.ed_Type->ty_SQFlags, super->ty_AssExp ); } super->ty_Visibility = visibility; /* can't resolve super here */ break; default: errorDottedId(dottedId, "identifier is not a class or typedef"); dassert_type(super, 0); } } /* * Resolve the declarations in a non-stack semantic group. The sg is being * referenced by someone, who resolves it with this call. This may take multiple * passes. We: * * - Resolve all real storage elements, referenced or not, so the * structure has a consistent size. Size and Alignment become * valid when primary resolution via SGF_RESOLVED / SGF_GRESOLVED * completes. * * - Most procedures are only resolved on-demand and are not resolved * here. However, access to the SG implies that all constructors * and destructors must be active, so we resolve those. * * - We must also resolve any DF_DYNAMICREF'd procedures, which are * dynamic method calls in sub-classes.
The flag is set on the * method in the subclass when a method call is made in any * super-class. * * (Any newly added DF_DYNAMICREF'd procedures will be resolved by * the code setting the flag if it finds that the SG is undergoing * resolution or already resolved). * * - We supply a dynamic index for all procedures, whether they are * referenced or not, and leave the index NULL if they are not. * This allows us to resolve the indices & extent of the dynamic * index array even if late procedures are added. * * NOTE! This code does not resolve declarations related to executable * semantic groups, such as sub-blocks within a procedure, but it * does have to resolve procedure definitions found in Class's * and such. * * NOTE! This code handles the last stage of subclass refinement, by * checking the validity of the refinement and setting sg_Compat * properly. */ static void ResolveSemGroup(SemGroup *sg, int retry) { Declaration *d; Type *type; int dyncount; int ok; if ((sg->sg_Flags & (SGF_RESOLVED | SGF_GRESOLVED)) == (SGF_RESOLVED | SGF_GRESOLVED)) { return; } if (sg->sg_Flags & (SGF_RESOLVING | SGF_GRESOLVING)) { if (retry == 0) return; } if (sg->sg_Flags & SGF_RESOLVED) goto section2; sg->sg_Flags |= SGF_RESOLVING; sg->sg_Bytes = 0; ok = 1; /* * index 0 - reserved for dynamic initialization * index 1 - reserved for dynamic destructor */ dyncount = 2; /* * SECTION1 - INSTANTIATED OBJECT RESOLUTION & PROCEDURE RESOLUTION * * Handle SCOPE_REFINE and DF_DYNAMICREF flagging. * We resolve non-global elements with real storage. */ RUNE_FOREACH(d, &sg->sg_DeclList, d_Node) { /* * DF_DYNAMICREF requires that the declaration be resolved * because it might be used in a dynamic method call, even * if it was not directly referenced. So if the SemGroup * (i.e. class) is referenced at all, so too must the method. */ if (d->d_Flags & DF_DYNAMICREF) { if ((d->d_Flags & (DF_RESOLVED | DF_RESOLVING)) == 0) { ResolveDecl(d, 0); } } /* * Process all procedures and any non-global instantiated * storage. */ switch(d->d_Op) { case DOP_CLASS: case DOP_TYPEDEF: case DOP_ALIAS: case DOP_IMPORT: break; case DOP_PROC: /* * Assign the dynamic index. There may be multiple * entries for the same d_Id, they are ordered such * that refinements use the same DynIndex as in * the superclass which is what allows dynamic method * calls to work properly. All non-refined subclass * elements are ordered after all refined/non-refined * superclass elements (replacing the superclass * element and using the same DynIndex when refined). * * We must assign d_DynIndex regardless of whether * the procedure is used or not to guarantee a * consistent index between super-class and sub-class. */ if ((d->d_ScopeFlags & SCOPE_INTERNAL) == 0 && (d->d_ProcDecl.ed_Type->ty_SQFlags & (SF_METHOD | SF_GMETHOD))) { d->d_DynIndex = dyncount; ++dyncount; } /* * Only process referenced procedures, plus any * that were flagged (see above), plus any * constructors or destructors.
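* * For the d_DynIndex assignment above, an illustrative (hypothetical) * layout: indices 0 and 1 are reserved, so methods m1 and m2 in a class * A receive DynIndex 2 and 3. A subclass B refining m1 reuses DynIndex 2 * for its refinement, while a new method m3 in B is assigned index 4. * A dynamic call through an A reference thus lands on B's refinement * of m1.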
*/ if ((d->d_Flags & (DF_RESOLVED | DF_RESOLVING)) == 0) { if (d->d_ScopeFlags & (SCOPE_CONSTRUCTOR | SCOPE_DESTRUCTOR)) { ResolveDecl(d, 0); } } if ((d->d_Flags & (DF_RESOLVED | DF_RESOLVING)) == 0) break; if (d->d_ScopeFlags & SCOPE_GLOBAL) { if ((d->d_Flags & DF_ONGLIST) == 0 && (d->d_ScopeFlags & (SCOPE_CONSTRUCTOR | SCOPE_DESTRUCTOR))) { d->d_GNext = d->d_MyGroup->sg_GBase; d->d_MyGroup->sg_GBase = d; d->d_Flags |= DF_ONGLIST; sg->sg_Flags |= SGF_GABICALL; } } else { if ((d->d_Flags & DF_ONCLIST) == 0 && (d->d_ScopeFlags & SCOPE_CONSTRUCTOR)) { d->d_CNext = d->d_MyGroup->sg_CBase; d->d_MyGroup->sg_CBase = d; d->d_Flags |= DF_ONCLIST; sg->sg_Flags |= SGF_ABICALL; } if ((d->d_Flags & DF_ONDLIST) == 0 && (d->d_ScopeFlags & SCOPE_DESTRUCTOR)) { d->d_DNext = d->d_MyGroup->sg_DBase; d->d_MyGroup->sg_DBase = d; d->d_Flags |= DF_ONDLIST; sg->sg_Flags |= SGF_ABICALL; } } break; case DOP_STACK_STORAGE: /* * can't happen. Stack storage is only used in * executable contexts. */ dassert_decl(d, 0); case DOP_ARGS_STORAGE: case DOP_GROUP_STORAGE: ResolveDecl(d, 0); if ((d->d_Flags & DF_RESOLVED) == 0) { ok = 0; break; } #if 0 if (ok == 0) /* save some time */ break; #endif /* * Update SG size, alignment, set d_Offset * and d_Storage within the SG. */ if (sg->sg_AlignMask < d->d_AlignMask) sg->sg_AlignMask = d->d_AlignMask; sg->sg_Bytes = BASEALIGN(sg->sg_Bytes, d->d_AlignMask); d->d_Offset = sg->sg_Bytes; /* * Set d_Storage based on scope and intended default * for d_Op. */ if (d->d_Op == DOP_ARGS_STORAGE) { if (d->d_ScopeFlags & SCOPE_UNTRACKED) d->d_Storage = GENSTAT_NONE; else if (d->d_ScopeFlags & SCOPE_UNLOCKED) d->d_Storage = GENSTAT_REFD; else if (d->d_ScopeFlags & SCOPE_SOFT) d->d_Storage = GENSTAT_LOCK; else if (d->d_ScopeFlags & SCOPE_HARD) d->d_Storage = GENSTAT_LOCKH; else d->d_Storage = GENSTAT_ARGDEF; } else { d->d_Storage = GENSTAT_MEMDEF; } sg->sg_Bytes += d->d_Bytes; type = d->d_StorDecl.ed_Type; if (d->d_StorDecl.ed_AssExp) sg->sg_Flags |= SGF_HASASS; if (type->ty_Flags & TF_HASASS) sg->sg_Flags |= SGF_HASASS; if (type->ty_Flags & TF_HASLVPTR) sg->sg_Flags |= SGF_HASLVPTR; if (type->ty_Flags & TF_HASCONSTRUCT) sg->sg_Flags |= SGF_ABICALL; if (type->ty_Flags & TF_HASDESTRUCT) sg->sg_Flags |= SGF_ABICALL; if (type->ty_Flags & TF_HASGCONSTRUCT) sg->sg_Flags |= SGF_GABICALL; if (type->ty_Flags & TF_HASGDESTRUCT) sg->sg_Flags |= SGF_GABICALL; break; case DOP_GLOBAL_STORAGE: /* handled in pass2 */ break; default: dassert_semgrp(sg, 0); break; } /* * Finish up any refinements. (Affects 'ok'? no for now) */ if (d->d_ScopeFlags & SCOPE_REFINE) { if (d->d_Flags & (DF_RESOLVING | DF_RESOLVED)) { ResolveDecl(d->d_Super, 0); ResolveDecl(d, 0); RefineDeclaration(sg, d->d_Super, d); } } } if (ok) { sg->sg_Bytes = BASEALIGN(sg->sg_Bytes, sg->sg_AlignMask); sg->sg_Flags &= ~SGF_RESOLVING; sg->sg_Flags |= SGF_RESOLVED; /* * If no dynamic methods and no dynamic initialization or * destruction required, set dyncount to 0. */ if (dyncount == 2 && (sg->sg_Flags & SGF_HASASS) == 0 && sg->sg_SRBase == NULL && sg->sg_CBase == NULL && sg->sg_DBase == NULL) { dyncount = 0; } sg->sg_DynCount = dyncount; sg->sg_Flags &= ~SGF_RESOLVING; } /* * SECTION2 - GLOBAL RESOLUTION */ section2: if (sg->sg_Flags & SGF_GRESOLVED) goto section3; sg->sg_Flags |= SGF_GRESOLVING; sg->sg_GlobalBytes = 0; ok = 1; RUNE_FOREACH(d, &sg->sg_DeclList, d_Node) { switch(d->d_Op) { case DOP_CLASS: case DOP_TYPEDEF: case DOP_ALIAS: case DOP_IMPORT: case DOP_PROC: break; case DOP_STACK_STORAGE: /* * can't happen.
Stack storage is only used in * executable contexts. */ dassert_decl(d, 0); case DOP_ARGS_STORAGE: case DOP_GROUP_STORAGE: /* * Non-globals were handled in section1 */ break; case DOP_GLOBAL_STORAGE: /* * Global storage is handled in section2 * * NOTE: We only process referenced global storage. * This will include global elements referenced * by constructors, which are always run even * if not specifically referenced. */ ResolveDecl(d, 0); #if 1 if ((d->d_Flags & (DF_RESOLVING | DF_RESOLVED)) == 0) break; if ((d->d_Flags & DF_RESOLVED) == 0) { ok = 0; break; } #endif #if 0 if (ok == 0) /* save some time */ break; #endif if (sg->sg_GlobalAlignMask < d->d_AlignMask) sg->sg_GlobalAlignMask = d->d_AlignMask; sg->sg_GlobalBytes = (sg->sg_GlobalBytes + d->d_AlignMask) & ~d->d_AlignMask; d->d_Offset = sg->sg_GlobalBytes; d->d_Storage = GENSTAT_MEMDEF; sg->sg_GlobalBytes += d->d_Bytes; if (d->d_StorDecl.ed_AssExp) sg->sg_Flags |= SGF_GHASASS; type = d->d_StorDecl.ed_Type; if (type->ty_Flags & TF_HASASS) sg->sg_Flags |= SGF_GHASASS; if (type->ty_Flags & TF_HASLVPTR) sg->sg_Flags |= SGF_GHASLVPTR; if (type->ty_Flags & TF_HASCONSTRUCT) sg->sg_Flags |= SGF_ABICALL; if (type->ty_Flags & TF_HASDESTRUCT) sg->sg_Flags |= SGF_ABICALL; if (type->ty_Flags & TF_HASGCONSTRUCT) sg->sg_Flags |= SGF_ABICALL; if (type->ty_Flags & TF_HASGDESTRUCT) sg->sg_Flags |= SGF_ABICALL; break; default: dassert_semgrp(sg, 0); break; } /* * Finish up any refinements. (Affects 'ok'? no for now) */ if (d->d_ScopeFlags & SCOPE_REFINE) { if (d->d_Flags & (DF_RESOLVING | DF_RESOLVED)) { ResolveDecl(d->d_Super, 0); ResolveDecl(d, 0); RefineDeclaration(sg, d->d_Super, d); } } } /* * Final alignment */ if (ok) { sg->sg_GlobalBytes = (sg->sg_GlobalBytes + sg->sg_GlobalAlignMask) & ~sg->sg_GlobalAlignMask; sg->sg_Flags &= ~SGF_GRESOLVING; sg->sg_Flags |= SGF_GRESOLVED; } /* * SECTION3 - Final rollup (future) */ section3: if ((sg->sg_Flags & (SGF_RESOLVED | SGF_GRESOLVED)) != (SGF_RESOLVED | SGF_GRESOLVED)) { deferSG(sg); } } /* * findExpOper() - Find operator declaration matching expression * * Locate the operator declaration (a DOP_PROCDEF) that matches * the expression, returning NULL if no match could be found. The expression's * left and right hand sides must already be resolved. * * NOTE! A temporary 'copy' Exp may be passed, not all fields are valid. */ static Declaration *testIConstantForType(Declaration *d, Type *type, Exp *exp); static Declaration *testFConstantForType(Declaration *d, Type *type, Exp *exp); static Declaration * findExpOper(Exp *exp, int flags) { Type *ltype; Type *rtype; Declaration *d; flags &= ~RESOLVE_AUTOCAST; /* not applicable to this function */ if (exp->ex_Flags & EXF_BINARY) { rtype = exp->ex_Rhs->ex_Type; ltype = exp->ex_Lhs->ex_Type; } else { dassert(exp->ex_Flags & EXF_UNARY); rtype = NULL; ltype = exp->ex_Lhs->ex_Type; } /* * XXX look in our local semantic hierarchy for a compatible operator ? */ /* * Attempt to find a matching operator from the left hand side * type. */ d = findOper(ltype, exp->ex_Id, ltype, rtype, flags); if (d || (exp->ex_Flags & EXF_BINARY) == 0) return(d); /* * Attempt to find a matching binary operator from the right hand * side type. */ d = findOper(rtype, exp->ex_Id, ltype, rtype, flags); /* * If that fails but either the left or right-hand sides are * constants, see if we can find an operator by casting the * constant to the non-constant.
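* * Worked example (hypothetical operands): for 'i8val + 100', where * i8val's type is an 8-bit integer and the integer constant parsed to a * wider default type, both direct lookups fail. The code below then * retries with both sides typed as the non-constant side's type, * findOper(ltype, exp->ex_Id, ltype, ltype, flags), and * testIConstantForType() verifies that 100 actually fits in that type * before the match is accepted.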
*/ if (d == NULL) { if (exp->ex_Rhs->ex_Token == TOK_INTEGER && exp->ex_Lhs->ex_Token != TOK_INTEGER && exp->ex_Lhs->ex_Token != TOK_FLOAT && (ltype->ty_Flags & TF_ISINTEGER)) { d = findOper(ltype, exp->ex_Id, ltype, ltype, flags); if (d) d = testIConstantForType(d, ltype, exp->ex_Rhs); } else if (exp->ex_Lhs->ex_Token == TOK_INTEGER && exp->ex_Rhs->ex_Token != TOK_INTEGER && exp->ex_Rhs->ex_Token != TOK_FLOAT && (rtype->ty_Flags & TF_ISINTEGER)) { d = findOper(rtype, exp->ex_Id, rtype, rtype, flags); if (d) d = testIConstantForType(d, rtype, exp->ex_Lhs); } else if (exp->ex_Rhs->ex_Token == TOK_FLOAT && exp->ex_Lhs->ex_Token != TOK_INTEGER && exp->ex_Lhs->ex_Token != TOK_FLOAT && (ltype->ty_Flags & TF_ISFLOATING)) { d = findOper(ltype, exp->ex_Id, ltype, ltype, flags); if (d) d = testFConstantForType(d, ltype, exp->ex_Rhs); } else if (exp->ex_Lhs->ex_Token == TOK_FLOAT && exp->ex_Rhs->ex_Token != TOK_INTEGER && exp->ex_Rhs->ex_Token != TOK_FLOAT && (rtype->ty_Flags & TF_ISFLOATING)) { d = findOper(rtype, exp->ex_Id, rtype, rtype, flags); if (d) d = testFConstantForType(d, rtype, exp->ex_Lhs); } } return(d); } /* * Calculate whether the constant can be safely cast. If it can, * cast the constant and return d. Otherwise complain and return * NULL. */ static Declaration * testIConstantForType(Declaration *d, Type *type, Exp *exp) { int64_t v = resolveGetConstExpInt64(exp); if (type->ty_Flags & TF_ISUNSIGNED) { switch(type->ty_Bytes) { case 1: if (v != (int64_t)(uint8_t)v) d = NULL; break; case 2: if (v != (int64_t)(uint16_t)v) d = NULL; break; case 4: if (v != (int64_t)(uint32_t)v) d = NULL; break; case 8: break; default: break; } } else { switch(type->ty_Bytes) { case 1: if (v != (int64_t)(int8_t)v) d = NULL; break; case 2: if (v != (int64_t)(int16_t)v) d = NULL; break; case 4: if (v != (int64_t)(int32_t)v) d = NULL; break; case 8: break; default: break; } } /* * If successful change the constant's type and reset the * interpreter to re-evaluate it. */ if (d) { exp->ex_Type = type; exp->ex_Run = RunUnresolvedExp; exp->ex_Run64 = Run64DefaultExp; } else { ExpPrintError(exp, TOK_ERR_AUTOCAST_VALUE); } return d; } static Declaration * testFConstantForType(Declaration *d, Type *type, Exp *exp) { float128_t v = resolveGetConstExpFloat128(exp); switch(type->ty_Bytes) { case 4: if (v != (float32_t)v) d = NULL; break; case 8: if (v != (float64_t)v) d = NULL; break; case 16: break; } /* * If successful change the constant's type and reset the * interpreter to re-evaluate it. */ if (d) { exp->ex_Type = type; exp->ex_Run = RunUnresolvedExp; exp->ex_Run64 = Run64DefaultExp; } else { ExpPrintError(exp, TOK_ERR_AUTOCAST_VALUE); } return d; } static Declaration * findOper(Type *btype, string_t id, Type *ltype, Type *rtype, int flags) { SemGroup *sg; Declaration *d; int args = (rtype != NULL) ? 2 : 1; flags &= ~RESOLVE_AUTOCAST; /* not applicable to this function */ /* * Locate the base type. If the base type does not have a SemGroup * there are no operators. (XXX put system operators here) */ sg = BaseType(&btype); if (sg == NULL) return(NULL); /* * Look for the operator in the SemGroup */ for (d = FindOperId(sg, id, args); d; d = d->d_ONext) { ResolveDecl(d, 0); if (d->d_MyGroup == sg && d->d_Op == DOP_PROC && d->d_ProcDecl.ed_OperId == id && MatchOperatorTypes(d, ltype, rtype) ) { return(d); } } /* * Failed. If the base type is a compound type, look for the * operator in the SemGroup for each element making up the compound * type. e.g. so (mycustomtype, double) would find the operator * in mycustomtype. 
*/ if (btype->ty_Op == TY_COMPOUND) { RUNE_FOREACH(d, &sg->sg_DeclList, d_Node) { Declaration *d2; if (d->d_Op & DOPF_STORAGE) { d2 = findOper(d->d_StorDecl.ed_Type, id, ltype, rtype, flags); } else if (d->d_Op == DOP_TYPEDEF) { d2 = findOper(d->d_TypedefDecl.ed_Type, id, ltype, rtype, flags); } else { d2 = NULL; } if (d2) return(d2); } } return(NULL); } static void errorDottedId(string_t *ary, const char *ctl, ...) { va_list va; int i; va_start(va, ctl); vfprintf(stderr, ctl, va); va_end(va); fprintf(stderr, ": %s", ary[0]); for (i = 1; ary[i]; ++i) fprintf(stderr, ".%s", ary[i]); fprintf(stderr, "\n"); } /* * Resolve the alignment requirements for SemGroups related to * statements, including the alignment requirements needed for * temporary expression space. */ static void ResolveAlignment(Stmt *st, int flags) { SemGroup *sg = st->st_MyGroup; Stmt *scan; if (st->st_Flags & STF_ALIGNRESOLVED) return; st->st_Flags |= STF_ALIGNRESOLVED; /* * If this is an executable semantic layer or an import layer then * assign storage to declarations up-front. Of the various * DOP_*_STORAGE ops, we should only see DOP_STACK_STORAGE and * DOP_GLOBAL_STORAGE. * * Note: if this is the root ST_Import STF_SEMANTIC is *NOT* set and * sg will be NULL. */ if ((st->st_Flags & STF_SEMANTIC) && st->st_Op != ST_Class) { Declaration *d; /* * Pre-scan for alignment. Don't try to propagate the * alignment to the parent for now as that would require * recalculating the parent(s). */ RUNE_FOREACH(d, &sg->sg_DeclList, d_Node) { switch(d->d_Op) { case DOP_STACK_STORAGE: case DOP_ARGS_STORAGE: case DOP_GROUP_STORAGE: if (sg->sg_AlignMask < d->d_AlignMask) sg->sg_AlignMask = d->d_AlignMask; break; case DOP_GLOBAL_STORAGE: if (sg->sg_GlobalAlignMask < d->d_AlignMask) sg->sg_GlobalAlignMask = d->d_AlignMask; break; default: break; } } } switch(st->st_Op) { case ST_Import: break; case ST_Module: case ST_Class: break; case ST_Typedef: /* XXX needed? */ if (st->st_TypedefStmt.es_Decl->d_Flags & DF_RESOLVED) { resolveDeclAlign(st->st_TypedefStmt.es_Decl, &sg->sg_TmpAlignMask, flags); } break; case ST_Decl: /* * NOTE: Don't calculate for declarations that belong in * a different context. */ { Declaration *d; int i; d = st->st_DeclStmt.es_Decl; for (i = 0; i < st->st_DeclStmt.es_DeclCount; ++i) { if (st->st_MyGroup == d->d_MyGroup && (d->d_Flags & DF_RESOLVED)) { resolveDeclAlign(d, &sg->sg_TmpAlignMask, flags); } d = RUNE_NEXT(d, d_Node); } } break; case ST_Block: break; case ST_Proc: break; case ST_Nop: break; case ST_Loop: { if (st->st_LoopStmt.es_BCond) { resolveExpAlign(st->st_LoopStmt.es_BCond, &sg->sg_TmpAlignMask, flags); } if (st->st_LoopStmt.es_ACond) { resolveExpAlign(st->st_LoopStmt.es_ACond, &sg->sg_TmpAlignMask, flags); } if (st->st_LoopStmt.es_AExp) { resolveExpAlign(st->st_LoopStmt.es_AExp, &sg->sg_TmpAlignMask, flags); } } break; case ST_BreakCont: break; case ST_Bad: break; case ST_IfElse: resolveExpAlign(st->st_IfStmt.es_Exp, &sg->sg_TmpAlignMask, flags); break; case ST_Return: if (st->st_RetStmt.es_Exp) resolveExpAlign(st->st_RetStmt.es_Exp, &sg->sg_TmpAlignMask, flags); break; case ST_Result: if (st->st_ResStmt.es_Exp) resolveExpAlign(st->st_ResStmt.es_Exp, &sg->sg_TmpAlignMask, flags); break; case ST_Switch: /* * The switch expression's temporary data must be saved while * we are executing the sub-statements (the cases). 
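* * e.g. (sketch): in 'switch (f(x)) { case g(y): ... }' the temporary * holding f(x)'s value must stay live while each case expression such * as g(y) is evaluated, so the cases' temporaries are placed beyond it * rather than unioned with it (see the ST_Switch handling in * ResolveStorage()).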
*/ resolveExpAlign(st->st_SwStmt.es_Exp, &sg->sg_TmpAlignMask, flags); break; case ST_Case: if (st->st_CaseStmt.es_Exp) resolveExpAlign(st->st_CaseStmt.es_Exp, &sg->sg_TmpAlignMask, flags); break; case ST_Exp: resolveExpAlign(st->st_ExpStmt.es_Exp, &sg->sg_TmpAlignMask, flags); break; case ST_ThreadSched: break; default: dassert_stmt(st, 0); } /* * Calculate storage requirements for substatements. offset * acts as our base. We union the storage for the substatements * together. Note that often scan->sg_MyGroup == sg. */ RUNE_FOREACH(scan, &st->st_List, st_Node) { if (scan->st_Op == ST_Class) { if (scan->u.ClassStmt.es_Decl->d_Flags & DF_RESOLVED) ResolveAlignment(scan, flags); } else if (scan->st_Op == ST_Decl && scan->st_DeclStmt.es_Decl->d_MyGroup != st->st_MyGroup) { /* * Do nothing */ ; } else if (scan->st_Op == ST_Decl && (scan->st_DeclStmt.es_Decl->d_Flags & DF_RESOLVED)) { /* * See prior comments, skip declarations that * were moved to another context * * (already resolved so can use junk offsets) */ resolveDeclAlign(scan->st_DeclStmt.es_Decl, &sg->sg_TmpAlignMask, flags); } else if (scan->st_Op == ST_Proc && scan->st_ProcStmt.es_Decl->d_ProcDecl.ed_OrigBody == scan ) { /* Do not resolve template procedures! */ } else if (scan->st_Flags & STF_SEMTOP) { ResolveAlignment(scan, flags); } else { ResolveAlignment(scan, flags); } } /* * If this is a new semantic level call resolveStorageSemGroup() to * do the final cleanup of SemGroup issues. This will redundantly * calculate temporary space requirements. Also, due to type/class * references the temporary space for a class may have already been * resolved. Since a class can only contain declarations it had * better match what we calculate here. * * Note that for non-Class executable SemGroup's TmpBytes is * incorporated in a downward fashion while sg_Bytes is incorporated * in an upward fashion. It can become quite confusing. Don't ask * me why I did it that way. */ if (st->st_Flags & STF_SEMANTIC) { if ((sg->sg_Flags & SGF_TMPRESOLVED) == 0) { resolveSemGroupAlign(sg, flags); } } /* * Propagate alignment requirements upward. */ if ((st->st_Flags & (STF_SEMANTIC|STF_SEMTOP)) == STF_SEMANTIC) { if (sg->sg_Parent->sg_AlignMask < sg->sg_AlignMask) sg->sg_Parent->sg_AlignMask = sg->sg_AlignMask; if (sg->sg_Parent->sg_TmpAlignMask < sg->sg_TmpAlignMask) sg->sg_Parent->sg_TmpAlignMask = sg->sg_TmpAlignMask; } } /* * ResolveStorage() - Final storage resolution pass * * This pass carefully scans the SemGroup hierarchy and assigns * offsets to declarations. * * PROCEDURES - all the various 'executable' semantic layers in * a procedure are collapsed together for efficiency, so we only * have to manage one context. This means that the d_Offset * assigned to declarations in sub-blocks may exceed the sg_ size * of the sub-block's SemGroup. We do not attempt to resolve * procedure body templates (d_ProcDecl.ed_OrigBody). * * CLASSES - are given offsets in their SemGroup's relative to 0, if * not already resolved. * * IMPORTS - are given offsets in their SemGroup's relative to 0 * * COMPOUND TYPES - (such as procedure arguments) are given offsets * in their SemGroup's relative to 0. * * TEMPORARY STORAGE - expressions may require temporary storage * for intermediate results. That space is reserved here. * * We specifically do not resolve unrelated storage. 
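* * A minimal sketch (hypothetical caller) of the base/limit mechanic used * below: sibling expressions union their temporary space by sharing the * same base, while the limit accumulates the maximum extent actually * reserved: * *	runesize_t limit = base; *	resolveStorageExp(exp1, base, &limit);	(siblings share base) *	resolveStorageExp(exp2, base, &limit);	(limit = max extent) *	sg->sg_TmpBytes = limit;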
*/ static void ResolveStorage(Stmt *st, int flags) { runesize_t base; runesize_t limit; runesize_t gbase; runesize_t glimit; SemGroup *sg = st->st_MyGroup; Stmt *scan; Type *type; #if 0 if (st->st_Op != ST_Class) { dassert((st->st_Flags & STF_RESOLVING) == 0); if (st->st_Flags & STF_RESOLVED) { return; } st->st_Flags |= STF_RESOLVING; } #endif if ((st->st_Flags & STF_ALIGNRESOLVED) == 0) return; dassert((st->st_Flags & STF_TMPRESOLVED) == 0); if (st->st_Flags & STF_TMPRESOLVED) return; st->st_Flags |= STF_TMPRESOLVED; /* * If this is an executable semantic layer or an import layer then * assign storage to declarations up-front. Of the various * DOP_*_STORAGE ops, we should only see DOP_STACK_STORAGE and * DOP_GLOBAL_STORAGE. * * Note: if this is the root ST_Import STF_SEMANTIC is *NOT* set and * sg will be NULL. */ if ((st->st_Flags & STF_SEMANTIC) && st->st_Op != ST_Class) { Declaration *d; dassert((sg->sg_Flags & (SGF_FRESOLVED|SGF_FRESOLVING)) == 0); sg->sg_Flags |= SGF_FRESOLVING; /* * The base offset for sub-semantic-blocks must match the * alignment they require in order to allow us to do an * aligned BZEROing of the space. We do not include the * temporary space here (it does not need to be BZERO'd). * * NOTE: sg_TmpAlignMask is taken into account when the * top-level frame is allocated. */ if (st->st_Flags & STF_SEMTOP) { dassert(sg->sg_Bytes == 0); base = 0; } else { base = BASEALIGN(sg->sg_Parent->sg_Bytes, sg->sg_AlignMask); } sg->sg_BlkOffset = base; /* * Classify storage (note: class decls are handled elsewhere) */ RUNE_FOREACH(d, &sg->sg_DeclList, d_Node) { /* * Set d_Storage based on scope and intended default * for d_Op. */ if (d->d_ScopeFlags & SCOPE_UNTRACKED) { d->d_Storage = GENSTAT_NONE; } else if (d->d_ScopeFlags & SCOPE_UNLOCKED) { d->d_Storage = GENSTAT_REFD; } else if (d->d_ScopeFlags & SCOPE_SOFT) { d->d_Storage = GENSTAT_LOCK; } else if (d->d_ScopeFlags & SCOPE_HARD) { d->d_Storage = GENSTAT_LOCKH; } else { switch(d->d_Op) { case DOP_STACK_STORAGE: d->d_Storage = GENSTAT_STKDEF; break; case DOP_ARGS_STORAGE: d->d_Storage = GENSTAT_ARGDEF; break; case DOP_GROUP_STORAGE: d->d_Storage = GENSTAT_MEMDEF; break; case DOP_GLOBAL_STORAGE: d->d_Storage = GENSTAT_MEMDEF; break; } } switch(d->d_Op) { case DOP_STACK_STORAGE: case DOP_ARGS_STORAGE: case DOP_GROUP_STORAGE: type = d->d_StorDecl.ed_Type; base = BASEALIGN(base, d->d_AlignMask); d->d_Offset = base; base += d->d_Bytes; if (d->d_StorDecl.ed_AssExp) sg->sg_Flags |= SGF_HASASS; if (type->ty_Flags & TF_HASASS) sg->sg_Flags |= SGF_HASASS; if (type->ty_Flags & TF_HASLVPTR) sg->sg_Flags |= SGF_HASLVPTR; if (type->ty_Flags & TF_HASCONSTRUCT) sg->sg_Flags |= SGF_ABICALL; if (type->ty_Flags & TF_HASDESTRUCT) sg->sg_Flags |= SGF_ABICALL; if (type->ty_Flags & TF_HASGCONSTRUCT) sg->sg_Flags |= SGF_ABICALL; if (type->ty_Flags & TF_HASGDESTRUCT) sg->sg_Flags |= SGF_ABICALL; break; case DOP_GLOBAL_STORAGE: type = d->d_StorDecl.ed_Type; sg->sg_GlobalBytes = BASEALIGN( sg->sg_GlobalBytes, d->d_AlignMask); d->d_Offset = sg->sg_GlobalBytes; sg->sg_GlobalBytes += d->d_Bytes; if (d->d_StorDecl.ed_AssExp) sg->sg_Flags |= SGF_GHASASS; if (type->ty_Flags & TF_HASASS) sg->sg_Flags |= SGF_GHASASS; if (type->ty_Flags & TF_HASLVPTR) sg->sg_Flags |= SGF_GHASLVPTR; if (type->ty_Flags & TF_HASCONSTRUCT) sg->sg_Flags |= SGF_ABICALL; if (type->ty_Flags & TF_HASDESTRUCT) sg->sg_Flags |= SGF_ABICALL; if (type->ty_Flags & TF_HASGCONSTRUCT) sg->sg_Flags |= SGF_ABICALL; if (type->ty_Flags & TF_HASGDESTRUCT) sg->sg_Flags |= SGF_ABICALL; break; default:
break; } } /* * The byte size of the block does not have to be aligned, * but aligning it (within reason) might provide a benefit. */ sg->sg_Bytes = base; limit = base; #if 0 if (sg->sg_AlignMask < 256) { sg->sg_Bytes = BASEALIGN(base, sg->sg_AlignMask); } if (sg->sg_GlobalAlignMask < 256) { sg->sg_GlobalBytes = BASEALIGN(sg->sg_GlobalBytes, sg->sg_GlobalAlignMask); } #endif sg->sg_BlkBytes = sg->sg_Bytes - sg->sg_BlkOffset; sg->sg_Flags |= SGF_FRESOLVED; sg->sg_Flags &= ~SGF_FRESOLVING; } /* * Figure out how much temporary space we need to be able to execute * statements and expressions. Temporary space, like the main * procedural space, must be inherited from and consolidated into * the top-level SemGroup. */ if (sg) { base = sg->sg_TmpBytes; gbase = sg->sg_GlobalTmpBytes; } else { /* * Root ST_Import; avoid compiler warnings */ base = 0; gbase = 0; } limit = base; glimit = gbase; switch(st->st_Op) { case ST_Import: if (st->st_ImportStmt.es_DLL) { void (*func)(void) = dlsym(st->st_ImportStmt.es_DLL, "resolveStorage"); if (func) func(); } break; case ST_Module: case ST_Class: break; case ST_Typedef: if (st->st_TypedefStmt.es_Decl->d_Flags & DF_RESOLVED) { resolveDeclStorage(st->st_TypedefStmt.es_Decl, base, &limit, gbase, &glimit); } break; case ST_Decl: /* * Temporary space for declarations is handled here. * * Resolve declarations, skipping any whose context was * moved to a class (e.g. a declaration at the top level * of a file like Fd.setfd(...) also exists in the Fd class). */ { Declaration *d; int i; d = st->st_DeclStmt.es_Decl; if (d->d_Op == DOP_GLOBAL_STORAGE) st->st_DeclStmt.es_TmpOffset = gbase; else st->st_DeclStmt.es_TmpOffset = base; for (i = 0; i < st->st_DeclStmt.es_DeclCount; ++i) { #if 1 if (st->st_MyGroup != d->d_MyGroup) { /*printf("SKIPB %s\n", d->d_Id)*/; /* resolveDeclStorage(d, base, &limit, gbase, &glimit); */ } else if (d->d_Flags & DF_RESOLVED) { resolveDeclStorage(d, base, &limit, gbase, &glimit); } #endif else { resolveDeclStorage(d, base, &limit, gbase, &glimit); } d = RUNE_NEXT(d, d_Node); } } break; case ST_Block: break; case ST_Proc: break; case ST_Nop: break; case ST_Loop: { if (st->st_LoopStmt.es_BCond) { resolveStorageExp(st->st_LoopStmt.es_BCond, base, &limit); } if (st->st_LoopStmt.es_ACond) { resolveStorageExp(st->st_LoopStmt.es_ACond, base, &limit); } if (st->st_LoopStmt.es_AExp) { resolveStorageExp(st->st_LoopStmt.es_AExp, base, &limit); } } break; case ST_BreakCont: break; case ST_Bad: break; case ST_IfElse: resolveStorageExp(st->st_IfStmt.es_Exp, base, &limit); break; case ST_Return: if (st->st_RetStmt.es_Exp) resolveStorageExp(st->st_RetStmt.es_Exp, base, &limit); break; case ST_Result: if (st->st_ResStmt.es_Exp) resolveStorageExp(st->st_ResStmt.es_Exp, base, &limit); break; case ST_Switch: /* * The switch expression's temporary data must be saved while * we are executing the sub-statements (the cases). */ { runesize_t xlimit = base; resolveStorageExp(st->st_SwStmt.es_Exp, base, &xlimit); base = xlimit; if (limit < xlimit) limit = xlimit; } break; case ST_Case: if (st->st_CaseStmt.es_Exp) resolveStorageExp(st->st_CaseStmt.es_Exp, base, &limit); break; case ST_Exp: resolveStorageExp(st->st_ExpStmt.es_Exp, base, &limit); break; case ST_ThreadSched: break; default: dassert_stmt(st, 0); } /* * Calculate storage requirements for substatements. (base) may * have been adjusted if this statement level's temporary storage * needs to be retained (aka switch() expression). * * Note that often scan->sg_MyGroup == sg.
*/ RUNE_FOREACH(scan, &st->st_List, st_Node) { dassert(scan->st_Op != ST_Proc); if (scan->st_Op == ST_Class) { ResolveStorage(scan, flags); } else if (scan->st_Op == ST_Decl) { /* * Ignore declarations here, they will be handled in * the semgroup scan in the next loop */ } else if (scan->st_Op == ST_Proc) { /* Do not resolve template procedures! */ fprintf(stderr, "STORAGE %s\n", scan->st_ProcStmt.es_Decl->d_Id); if (scan->st_ProcStmt.es_Decl->d_ProcDecl.ed_OrigBody == scan) { } else { } } else if (scan->st_Flags & STF_SEMTOP) { assert(scan->st_MyGroup != sg); ResolveStorage(scan, flags); } else { /* * This is a bit of a mess. The baseline * sg_TmpBytes needs to be set so calculated * temporary offsets are relative to it, and * then restored. Otherwise we might blow * away the SGF_TMPRESOLVED SemGroup * * XXX */ runesize_t save_offset; runesize_t save_goffset; save_offset = scan->st_MyGroup->sg_TmpBytes; save_goffset = scan->st_MyGroup->sg_GlobalTmpBytes; scan->st_MyGroup->sg_TmpBytes = base; scan->st_MyGroup->sg_GlobalTmpBytes = gbase; ResolveStorage(scan, flags); if (scan->st_MyGroup->sg_TmpBytes < save_offset) scan->st_MyGroup->sg_TmpBytes = save_offset; if (scan->st_MyGroup->sg_GlobalTmpBytes < save_goffset) { scan->st_MyGroup->sg_GlobalTmpBytes = save_goffset; } if (limit < scan->st_MyGroup->sg_TmpBytes) limit = scan->st_MyGroup->sg_TmpBytes; if (glimit < scan->st_MyGroup->sg_GlobalTmpBytes) glimit = scan->st_MyGroup->sg_GlobalTmpBytes; } } /* * If this is a new semantic level call resolveStorageSemGroup() to * do the final cleanup of SemGroup issues. This will redundantly * calculate temporary space requirements. Also, due to type/class * references the temporary space for a class may have already been * resolved. Since a class can only contain declarations it had * better match what we calculate here. * * Note that for non-Class executable SemGroup's TmpBytes is * incorporated in a downward fashion while sg_Bytes is incorporated * in an upward fashion. It can become quite confusing. Don't ask * me why I did it that way. */ if (st->st_Flags & STF_SEMANTIC) { if ((sg->sg_Flags & SGF_TMPRESOLVED) == 0) { resolveStorageSemGroup(sg, limit, &limit, glimit, &glimit); } else { dassert(sg->sg_TmpBytes == limit && sg->sg_GlobalTmpBytes == glimit); } } else if (sg) { sg->sg_TmpBytes = limit; sg->sg_GlobalTmpBytes = glimit; } /* else this is the Root st_Import */ if ((st->st_Flags & (STF_SEMANTIC|STF_SEMTOP)) == STF_SEMANTIC) { dassert(sg->sg_Parent->sg_Bytes <= sg->sg_Bytes); sg->sg_Parent->sg_Bytes = sg->sg_Bytes; } } /* * resolveDeclStorage() - resolve the storage reservation required to * process an expression. * * This is an expression tree traversal storage resolution procedure. * We have to traverse through declarations to get to default assignments * and such. * * If a declaration has no assigned default the underlying type may * itself have an assigned default which must be dealt with. 
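* * Illustrative (hypothetical declarations): 'int x = 4;' hangs the * default off the Declaration (d_StorDecl.ed_AssExp), while a typedef'd * type can carry its own default in ty_AssExp; both expression trees may * require temporary space, which is reserved by the align/storage * traversals below.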
*/ static void resolveDeclAlign(Declaration *d, runesize_t *expalignp, int flags) { if (flags & RESOLVE_CLEAN) { if ((d->d_Flags & DF_ALIGNRESOLVE) == 0) return; d->d_Flags &= ~(DF_ALIGNRESOLVE | DF_TMPRESOLVED); } else { if (d->d_Flags & DF_ALIGNRESOLVE) { if (*expalignp < d->d_AlignMask) *expalignp = d->d_AlignMask; return; } d->d_Flags |= DF_ALIGNRESOLVE; } switch(d->d_Op) { case DOP_CLASS: /* recursion already dealt with */ break; case DOP_ARGS_STORAGE: case DOP_STACK_STORAGE: case DOP_GROUP_STORAGE: { Type *type = d->d_StorDecl.ed_Type; resolveTypeAlign(type, expalignp, flags); if (d->d_StorDecl.ed_AssExp) { resolveExpAlign(d->d_StorDecl.ed_AssExp, expalignp, flags); } } break; case DOP_GLOBAL_STORAGE: { Type *type = d->d_StorDecl.ed_Type; resolveTypeAlign(type, expalignp, flags); if (d->d_StorDecl.ed_AssExp) { resolveExpAlign(d->d_StorDecl.ed_AssExp, expalignp, flags); } } break; case DOP_ALIAS: /* * Never try to resolve storage considerations for an * alias's assignment in the declaration itself. The * run-time context depends on who and how many other * parts of the program reference the alias and the expression * tree will be duplicated for each. */ #if 0 resolveStorageExpExp(d->d_AliasDecl.ed_AssExp, expalignp); #endif break; case DOP_TYPEDEF: /* XXX what about ty_AssExp ? should be in global space */ break; case DOP_IMPORT: /* recursion already dealt with */ break; case DOP_PROC: /* * Resolution of procedure declarations might have been * deferred (see TOK_ID in ResolveExp()). */ /*ResolveDecl(d, 0);*/ { Stmt *st; if ((st = d->d_ProcDecl.ed_ProcBody) != NULL) { ResolveAlignment(st, 0); } } break; default: dassert_decl(d, 0); } } static void resolveDynamicDeclAlign(Declaration *d, runesize_t *expalignp, int flags) { Declaration *scan; for (scan = d->d_SubBase; scan; scan = scan->d_SubNext) { if (scan->d_MyGroup && (scan->d_MyGroup->sg_Flags & (SGF_RESOLVING | SGF_RESOLVED))) { resolveDeclAlign(scan, expalignp, flags); } } for (scan = d->d_SubBase; scan; scan = scan->d_SubNext) { if (scan->d_SubBase) resolveDynamicDeclAlign(scan, expalignp, flags); } } static void resolveDeclStorage(Declaration *d, runesize_t base, runesize_t *limitp, runesize_t gbase, runesize_t *glimitp) { dassert(d->d_Flags & DF_ALIGNRESOLVE); if (d->d_Flags & DF_TMPRESOLVED) return; d->d_Flags |= DF_TMPRESOLVED; switch(d->d_Op) { case DOP_CLASS: /* recursion already dealt with */ break; case DOP_ARGS_STORAGE: case DOP_STACK_STORAGE: case DOP_GROUP_STORAGE: { Type *type = d->d_StorDecl.ed_Type; resolveStorageType(type, 0, base, limitp); if (d->d_StorDecl.ed_AssExp) { resolveStorageExp(d->d_StorDecl.ed_AssExp, base, limitp); } } break; case DOP_GLOBAL_STORAGE: { Type *type = d->d_StorDecl.ed_Type; resolveStorageType(type, 1, gbase, glimitp); if (d->d_StorDecl.ed_AssExp) { resolveStorageExp(d->d_StorDecl.ed_AssExp, gbase, glimitp); } } break; case DOP_ALIAS: /* * Never try to resolve storage considerations for an * alias's assignment in the declaration itself. The * run-time context depends on who and how many other * parts of the program reference the alias and the expression * tree will be duplicated for each. */ #if 0 if (d->d_ScopeFlags & SCOPE_GLOBAL) resolveStorageExp(d->d_AliasDecl.ed_AssExp, NULL, NULL); else resolveStorageExp(d->d_AliasDecl.ed_AssExp, NULL, NULL); #endif break; case DOP_TYPEDEF: /* XXX what about ty_AssExp ? 
should be in global space */ break; case DOP_IMPORT: /* recursion already dealt with */ break; case DOP_PROC: { Stmt *st; if ((st = d->d_ProcDecl.ed_ProcBody) != NULL) { ResolveStorage(st, 0); } } break; default: dassert_decl(d, 0); } #if 0 /* * Make this temporary for now so we can re-run it. */ d->d_Flags &= ~DF_TMPRESOLVED; #endif } static void resolveDynamicDeclStorage(Declaration *d, runesize_t base, runesize_t *limitp, runesize_t gbase, runesize_t *glimitp) { Declaration *scan; for (scan = d->d_SubBase; scan; scan = scan->d_SubNext) { if (scan->d_MyGroup && (scan->d_MyGroup->sg_Flags & (SGF_RESOLVING | SGF_RESOLVED))) { resolveDeclStorage(scan, base, limitp, gbase, glimitp); } } for (scan = d->d_SubBase; scan; scan = scan->d_SubNext) { if (scan->d_SubBase) { resolveDynamicDeclStorage(scan, base, limitp, gbase, glimitp); } } } /* * resolveStorageExpOnly() * * Resolve temporary storage for this exp structure, do not recurse * sub-expressions. Any type-temporary storage is tacked onto the * end of this expression's temporary area. * * We do not need to assign storage for expressions which return * lvalues, because they will simply return a pointer into * non-temporary storage. */ static void resolveStorageExpOnly(Exp *exp, runesize_t base, runesize_t *limitp) { Type *type; /* * Stop if the expression resolves to a type rather than a value, * e.g. when you do something like switch (typeof(int)) { ... } * Types are handled as thin pointers. */ exp->ex_Flags |= EXF_TMPRESOLVED; if (exp->ex_Flags & EXF_RET_TYPE) { exp->ex_TmpOffset = BASEALIGN(base, CPOINTER_ALIGN); SIZELIMIT(base, (runesize_t)sizeof(void *), limitp); } if (exp->ex_Decl) { Declaration *d; d = exp->ex_Decl; if (d->d_Flags & DF_RESOLVED) { resolveDeclStorage(d, base, limitp, base, limitp); } } /* * Assign temporary offset. This offset does not overlap temporary * space reserved for sub-expressions. * * We must have an assigned type. Expression sequences like: * 'module.blah' are collapsed into 'blah' long before we get * here, or they should be. We should not encounter any * TOK_TCMV_ID expression tokens. Structural id's (the right hand * side of X.Y) are resolved by their parent expression node and * no typing or temporary space is required. * * Expressions that return lvalues do not need temporary space. */ type = exp->ex_Type; if (type == NULL) { switch(exp->ex_Token) { case TOK_STRUCT_ID: case TOK_SEMGRP_ID: break; default: printf("EXP %p %04x %p\n", exp, exp->ex_Token, exp->ex_Decl); dassert_exp(exp, 0); break; } exp->ex_TmpOffset = -3; } else if (type->ty_SQFlags & SF_LVALUE) { /* * Expressive elements which return lvalues do not get * temporary space. Note that this also prevents lvalues * such as large arrays (int ary[999999999]) from reserving * unnecessary stack space. * * NOTE: SF_LVALUE is unrelated to SCOPE_LVALUE. SCOPE_LVALUE * applies to SemGroup storage (LValueStor). SF_LVALUE * merely flags the type for an expression as expecting * or not expecting an lvalue. */ #if 0 /* * XXX removeme, LValueStor only applies to semgroups */ runesize_t lvmask = sizeof(LValueStor) - 1; *offset = (*offset + lvmask) & ~lvmask; exp->ex_TmpOffset = *offset; *offset = *offset + (lvmask + 1); #endif exp->ex_TmpOffset = -2; } else { /* * Reserve temporary space for potential intermediate * results. * * Compound expressions may need extra space to default-init * the compound value, it is expected to be available to the * generator right after the nominal type in the TmpOffset. * XXX also make available to the interpreter?
* * Procedure calls also may need extra space to default-init * the return value. XXX also make available to the * interpreter? */ base = BASEALIGN(base, type->ty_AlignMask); /* * It may be convenient to use a larger alignment for arrays, * which would allow (e.g.) %xmm registers to be used on * 64-bit arrays for moves. Limit to 16-byte alignment for * now. * * (See also resolveExpAlign()) */ if (type->ty_Op == TY_ARYOF || type->ty_Op == TY_COMPOUND || type->ty_Op == TY_ARGS) { if (type->ty_Bytes >= 16) { base = BASEALIGN(base, 15); } else if (type->ty_Bytes >= 8) { base = BASEALIGN(base, 7); } else if (type->ty_Bytes >= 4) { base = BASEALIGN(base, 3); } } /* * Temporary storage for this exp */ exp->ex_TmpOffset = base; SIZELIMIT(base, type->ty_Bytes, limitp); /* * A compound expression's type may need additional temporary * storage. NOTE: The type might not yet be changed to * TY_COMPOUND, but single-element compounds will use the * same temporary space as a non-compound. * * A procedure call may need additional temporary storage. * * (base was adjusted above and is exp->ex_TmpOffset) */ if (exp->ex_Token == TOK_COMPOUND) { /* * NOTE: type might not yet be changed to compound, * but single-element compound will use the * same temporary space. */ resolveStorageType(type, 0, base + type->ty_Bytes, limitp); } else if (exp->ex_Token == TOK_CALL) { resolveStorageType(type, 0, base + type->ty_TmpBytes, limitp); } } dassert(exp->ex_TmpOffset != -1); } /* * Calculate the overlapping temporary space for sub-expression trees. */ static void resolveStorageExpSub(Exp *exp, runesize_t base, runesize_t *limitp) { if (exp->ex_Type) resolveStorageType(exp->ex_Type, 0, base, limitp); #if 1 /* * Make sure resolved declarations have resolved temporary * storage for assigned expressions. XXX pure test */ if (exp->ex_Token == TOK_ID || exp->ex_Token == TOK_CLASSID) { Declaration *d; d = exp->ex_Decl; if (d && (d->d_Flags & DF_RESOLVED)) { resolveDeclStorage(d, base, limitp, base, limitp); } /* note: UNARY can be set for aliases */ } #endif /* * Calculate the overlapping temporary space for sub-trees. */ if (exp->ex_Flags & EXF_BINARY) { /* * Ensure lhs's NON-RECURSIVE temporary storage on-return * does not interfere with rhs's, or vice-versa. * * To do this, offset the rhs storage by the non-recursive * lhs storage. */ resolveStorageExp(exp->ex_Lhs, base, limitp); if (exp->ex_Lhs->ex_TmpOffset >= 0) { resolveStorageExp(exp->ex_Rhs, exp->ex_Lhs->ex_TmpOffset + exp->ex_Lhs->ex_Type->ty_Bytes, limitp); } else { resolveStorageExp(exp->ex_Rhs, base, limitp); } #if 0 runesize_t xoffset; runesize_t roffset; roffset = *offset; xoffset = roffset; resolveStorageExp(exp->ex_Lhs, &xoffset); if (*offset < xoffset) *offset = xoffset; if (exp->ex_Lhs->ex_TmpOffset >= 0) { xoffset = exp->ex_Lhs->ex_TmpOffset + exp->ex_Lhs->ex_Type->ty_Bytes; } else { xoffset = roffset; } resolveStorageExp(exp->ex_Rhs, &xoffset); if (*offset < xoffset) *offset = xoffset; #endif } else if (exp->ex_Flags & EXF_UNARY) { resolveStorageExp(exp->ex_Lhs, base, limitp); dassert_exp(exp, exp->ex_Lhs->ex_Next == NULL); } else if (exp->ex_Flags & EXF_COMPOUND) { /* * each element will be copied into the compound storage * in turn, so we can union the temporary storage required * for each element.
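* * e.g. (sketch): for the compound '(a + b, c + d)' the temporary used * to evaluate 'a + b' is dead once that element has been copied into * the compound storage, so 'c + d' may reuse the same offsets; hence * the union (same base) rather than a sum.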
*/ Exp *scan; for (scan = exp->ex_Lhs; scan; scan = scan->ex_Next) { dassert_exp(scan, scan->ex_Type != NULL); resolveStorageExp(scan, base, limitp); } } if (exp->ex_Token == TOK_CALL) { resolveDynamicProcedureStorage(exp, base, limitp, base, limitp); } else if (exp->ex_Token == TOK_INLINE_CALL) { Stmt *st = exp->ex_AuxStmt; SemGroup *sg = st->st_MyGroup; #if 0 runesize_t obytes = sg->sg_Parent->sg_Bytes; #endif /*dassert((exp->ex_Flags & EXF_DUPEXP) == 0);*/ dassert(exp->ex_Flags & EXF_BINARY); dassert((st->st_Flags & (STF_SEMTOP|STF_SEMANTIC)) == STF_SEMANTIC); #if 0 printf("%p Resolve inline storage %ld %s (%p)\n", exp, *offset, exp->ex_Lhs->ex_Decl->d_Id, st); printf("ST %p\n", st); #endif dassert((st->st_Flags & STF_TMPRESOLVED) == 0); dassert((sg->sg_Flags & SGF_TMPRESOLVED) == 0); sg->sg_TmpBytes = BASEALIGN(*limitp, sg->sg_TmpAlignMask); /* sg->sg_Bytes set automatically using parent */ dassert(sg->sg_Parent); ResolveStorage(st, 0); #if 0 printf("%p End resolve (%ld, %ld) (%ld, %ld) (%ld, %ld)\n", exp, *offset, sg->sg_TmpBytes, obytes, sg->sg_Bytes, sg->sg_BlkOffset, sg->sg_BlkBytes); #endif dassert(*limitp <= sg->sg_TmpBytes); *limitp = sg->sg_TmpBytes; /* sg->sg_Parent->sg_Bytes set automatically */ resolveDynamicProcedureStorage(exp, base, limitp, base, limitp); } } /* * [re]resolve temporary storage requirements. * * Currently we do not overlap exp's temporary space with that of the * sub-expression. * * WARNING! This may be called more than once if an expression requires * resolve-time interpretation to generate a constant. In this * case the ex_TmpOffset for the sub-chain may be regenerated from 0, * and then just the top-level (post-constant-resolved) * ex_TmpOffset will be restored by the caller. */ static void resolveStorageExp(Exp *exp, runesize_t base, runesize_t *limitp) { resolveStorageExpOnly(exp, base, limitp); if ((exp->ex_Flags & EXF_RET_TYPE) == 0) { if (exp->ex_TmpOffset >= 0) { resolveStorageExpSub(exp, exp->ex_TmpOffset + exp->ex_Type->ty_Bytes, limitp); } else { resolveStorageExpSub(exp, base, limitp); } } } static void resolveExpAlign(Exp *exp, runesize_t *expalignp, int flags) { Type *type; if (exp->ex_Flags & EXF_RET_TYPE) { if (*expalignp < CPOINTER_ALIGN) *expalignp = CPOINTER_ALIGN; return; } type = exp->ex_Type; if (type == NULL) { /* * Do nothing */ } else { if (type->ty_SQFlags & SF_LVALUE) { if (*expalignp < LVALUESTOR_ALIGN) *expalignp = LVALUESTOR_ALIGN; } else { if (*expalignp < type->ty_AlignMask) *expalignp = type->ty_AlignMask; } resolveTypeAlign(type, expalignp, flags); /* * It may be convenient to use a larger alignment for arrays, * which would allow (e.g.) %xmm registers to be used on * 64-bit arrays for moves. Limit to 16-byte alignment for * now. * * (See also resolveStorageExpOnly()) */ if (type->ty_Op == TY_ARYOF || type->ty_Op == TY_COMPOUND || type->ty_Op == TY_ARGS) { #if 0 if (type->ty_Bytes >= 64) { if (*expalignp < 63) *expalignp = 63; } else if (type->ty_Bytes >= 32) { if (*expalignp < 31) *expalignp = 31; } else #endif if (type->ty_Bytes >= 16) { if (*expalignp < 15) *expalignp = 15; } else if (type->ty_Bytes >= 8) { if (*expalignp < 7) *expalignp = 7; } else if (type->ty_Bytes >= 4) { if (*expalignp < 3) *expalignp = 3; } } } if (exp->ex_Decl) { Declaration *d; d = exp->ex_Decl; if (d->d_Flags & DF_RESOLVED) { resolveDeclAlign(d, expalignp, flags); } } #if 0 /* * This typically only occurs when the resolver needs to * evaluate a constant expression. The declaration is * typically not resolved at that time.
*/ if (exp->ex_Token == TOK_ID || exp->ex_Token == TOK_CLASSID) { Declaration *d; d = exp->ex_Decl; if (d && (d->d_Flags & DF_RESOLVED)) { resolveDeclAlign(d, expalignp, flags); } /* note: UNARY can be set for aliases */ } #endif /* * Recurse through for an inline call, then roll-up the alignment * requirement(s) for the target procedure. We handle the * 'arguments' and 'return value' alignment in EXF_BINARY below. */ if (exp->ex_Token == TOK_CALL) { resolveDynamicProcedureAlign(exp, expalignp, flags); } else if (exp->ex_Token == TOK_INLINE_CALL) { SemGroup *asg; ResolveAlignment(exp->ex_AuxStmt, flags); asg = exp->ex_AuxStmt->st_MyGroup; if (*expalignp < asg->sg_TmpAlignMask) *expalignp = asg->sg_TmpAlignMask; resolveDynamicProcedureAlign(exp, expalignp, flags); } /* * Nominal code */ if (exp->ex_Flags & EXF_BINARY) { resolveExpAlign(exp->ex_Lhs, expalignp, flags); resolveExpAlign(exp->ex_Rhs, expalignp, flags); } else if (exp->ex_Flags & EXF_UNARY) { resolveExpAlign(exp->ex_Lhs, expalignp, flags); } else if (exp->ex_Flags & EXF_COMPOUND) { Exp *scan; for (scan = exp->ex_Lhs; scan; scan = scan->ex_Next) { resolveExpAlign(scan, expalignp, flags); } } } /* * resolveStorageType() - temporary space required to initialize type defaults * * Figure out the temporary space required to initialize a type's * defaults. Note that the space will be figured independently * for any SemGroup's. */ static void resolveTypeAlign(Type *type, runesize_t *expalignp, int flags) { SemGroup *sg = NULL; Type *subtype1 = NULL; Type *subtype2 = NULL; dassert(type->ty_Flags & TF_RESOLVED); if (flags & RESOLVE_CLEAN) { if ((type->ty_Flags & TF_ALIGNRESOLVED) == 0) return; type->ty_Flags &= ~(TF_ALIGNRESOLVED | TF_TMPRESOLVED); } else { if (type->ty_Flags & TF_ALIGNRESOLVED) { if (*expalignp < type->ty_TmpAlignMask) *expalignp = type->ty_TmpAlignMask; return; } type->ty_Flags |= TF_ALIGNRESOLVED; } switch(type->ty_Op) { case TY_CLASS: sg = type->ty_ClassType.et_SemGroup; break; case TY_ARYOF: subtype1 = type->ty_AryType.et_Type; break; case TY_COMPOUND: sg = type->ty_CompType.et_SemGroup; break; case TY_PROC: subtype1 = type->ty_ProcType.et_ArgsType; subtype2 = type->ty_ProcType.et_RetType; break; case TY_IMPORT: sg = type->ty_ImportType.et_SemGroup; break; case TY_ARGS: sg = type->ty_ArgsType.et_SemGroup; break; case TY_VAR: sg = type->ty_VarType.et_SemGroup; break; case TY_CPTRTO: /* has nothing to do with initializing the pointer */ /* subtype1 = type->ty_CPtrType.et_Type; */ break; case TY_PTRTO: /* has nothing to do with initializing the pointer */ /* subtype1 = type->ty_PtrType.et_Type; */ break; case TY_REFTO: /* has nothing to do with initializing the pointer */ /* subtype1 = type->ty_RefType.et_Type; */ break; case TY_STORAGE: case TY_DYNAMIC: /* * nothing to be done here.
*/ break; case TY_UNRESOLVED: /* should be no unresolved types at this stage */ default: dassert_type(type, 0); } if (subtype1) { resolveTypeAlign(subtype1, &subtype1->ty_TmpAlignMask, flags); if (subtype1->ty_AssExp) { resolveExpAlign(subtype1->ty_AssExp, &subtype1->ty_TmpAlignMask, flags); } if (type->ty_TmpAlignMask < subtype1->ty_TmpAlignMask) type->ty_TmpAlignMask = subtype1->ty_TmpAlignMask; } if (subtype2) { resolveTypeAlign(subtype2, &subtype2->ty_TmpAlignMask, flags); if (subtype2->ty_AssExp) { resolveExpAlign(subtype2->ty_AssExp, &subtype2->ty_TmpAlignMask, flags); } if (type->ty_TmpAlignMask < subtype2->ty_TmpAlignMask) type->ty_TmpAlignMask = subtype2->ty_TmpAlignMask; } if (type->ty_AssExp) { resolveExpAlign(type->ty_AssExp, &type->ty_TmpAlignMask, flags); } if (sg) { dassert(sg->sg_Flags & SGF_RESOLVED); /*ResolveSemGroup(sg, 0);*/ resolveSemGroupAlign(sg, flags); if (type->ty_TmpAlignMask < sg->sg_TmpAlignMask) type->ty_TmpAlignMask = sg->sg_TmpAlignMask; } if (*expalignp < type->ty_TmpAlignMask) *expalignp = type->ty_TmpAlignMask; } static void resolveStorageType(Type *type, int isglob, runesize_t base, runesize_t *limitp) { SemGroup *sg = NULL; Type *subtype1 = NULL; Type *subtype2 = NULL; dassert(type->ty_Flags & TF_ALIGNRESOLVED); if (type->ty_Flags & TF_TMPRESOLVED) { base = BASEALIGN(base, type->ty_TmpAlignMask); SIZELIMIT(base, type->ty_TmpBytes, limitp); return; } type->ty_Flags |= TF_TMPRESOLVED; switch(type->ty_Op) { case TY_CLASS: sg = type->ty_ClassType.et_SemGroup; break; case TY_ARYOF: subtype1 = type->ty_AryType.et_Type; break; case TY_COMPOUND: sg = type->ty_CompType.et_SemGroup; break; case TY_PROC: subtype1 = type->ty_ProcType.et_ArgsType; subtype2 = type->ty_ProcType.et_RetType; break; case TY_IMPORT: sg = type->ty_ImportType.et_SemGroup; break; case TY_ARGS: sg = type->ty_ArgsType.et_SemGroup; break; case TY_VAR: sg = type->ty_VarType.et_SemGroup; break; case TY_CPTRTO: /* has nothing to do with initializing the pointer */ /* subtype1 = type->ty_CPtrType.et_Type; */ break; case TY_PTRTO: /* has nothing to do with initializing the pointer */ /* subtype1 = type->ty_PtrType.et_Type; */ break; case TY_REFTO: /* has nothing to do with initializing the pointer */ /* subtype1 = type->ty_RefType.et_Type; */ break; case TY_STORAGE: case TY_DYNAMIC: /* * nothing to be done here. */ break; case TY_UNRESOLVED: /* should be no unresolved types at this stage */ default: dassert_type(type, 0); } if (subtype1) { resolveStorageType(subtype1, 0, 0, &subtype1->ty_TmpBytes); if (subtype1->ty_AssExp) { /* XXX base is 0? */ resolveStorageExp(subtype1->ty_AssExp, 0, &subtype1->ty_TmpBytes); } base = BASEALIGN(base, subtype1->ty_TmpAlignMask); SIZELIMIT(base, subtype1->ty_TmpBytes, limitp); if (type->ty_TmpAlignMask < subtype1->ty_TmpAlignMask) type->ty_TmpAlignMask = subtype1->ty_TmpAlignMask; } if (subtype2) { resolveStorageType(subtype2, 0, 0, &subtype2->ty_TmpBytes); if (subtype2->ty_AssExp) { /* XXX base is 0? */ resolveStorageExp(subtype2->ty_AssExp, 0, &subtype2->ty_TmpBytes); } base = BASEALIGN(base, subtype2->ty_TmpAlignMask); SIZELIMIT(base, subtype2->ty_TmpBytes, limitp); if (type->ty_TmpAlignMask < subtype2->ty_TmpAlignMask) type->ty_TmpAlignMask = subtype2->ty_TmpAlignMask; } if (type->ty_AssExp) { /* XXX base is 0? 

static void
resolveStorageType(Type *type, int isglob, runesize_t base,
		   runesize_t *limitp)
{
	SemGroup *sg = NULL;
	Type *subtype1 = NULL;
	Type *subtype2 = NULL;

	dassert(type->ty_Flags & TF_ALIGNRESOLVED);
	if (type->ty_Flags & TF_TMPRESOLVED) {
		base = BASEALIGN(base, type->ty_TmpAlignMask);
		SIZELIMIT(base, type->ty_TmpBytes, limitp);
		return;
	}
	type->ty_Flags |= TF_TMPRESOLVED;

	switch(type->ty_Op) {
	case TY_CLASS:
		sg = type->ty_ClassType.et_SemGroup;
		break;
	case TY_ARYOF:
		subtype1 = type->ty_AryType.et_Type;
		break;
	case TY_COMPOUND:
		sg = type->ty_CompType.et_SemGroup;
		break;
	case TY_PROC:
		subtype1 = type->ty_ProcType.et_ArgsType;
		subtype2 = type->ty_ProcType.et_RetType;
		break;
	case TY_IMPORT:
		sg = type->ty_ImportType.et_SemGroup;
		break;
	case TY_ARGS:
		sg = type->ty_ArgsType.et_SemGroup;
		break;
	case TY_VAR:
		sg = type->ty_VarType.et_SemGroup;
		break;
	case TY_CPTRTO:
		/* has nothing to do with initializing the pointer */
		/* subtype1 = type->ty_CPtrType.et_Type; */
		break;
	case TY_PTRTO:
		/* has nothing to do with initializing the pointer */
		/* subtype1 = type->ty_PtrType.et_Type; */
		break;
	case TY_REFTO:
		/* has nothing to do with initializing the pointer */
		/* subtype1 = type->ty_RefType.et_Type; */
		break;
	case TY_STORAGE:
	case TY_DYNAMIC:
		/*
		 * nothing to be done here.
		 */
		break;
	case TY_UNRESOLVED:
		/* should be no unresolved types at this stage */
	default:
		dassert_type(type, 0);
	}

	if (subtype1) {
		resolveStorageType(subtype1, 0, 0, &subtype1->ty_TmpBytes);
		if (subtype1->ty_AssExp) {
			/* XXX base is 0? */
			resolveStorageExp(subtype1->ty_AssExp, 0,
					  &subtype1->ty_TmpBytes);
		}
		base = BASEALIGN(base, subtype1->ty_TmpAlignMask);
		SIZELIMIT(base, subtype1->ty_TmpBytes, limitp);
		if (type->ty_TmpAlignMask < subtype1->ty_TmpAlignMask)
			type->ty_TmpAlignMask = subtype1->ty_TmpAlignMask;
	}
	if (subtype2) {
		resolveStorageType(subtype2, 0, 0, &subtype2->ty_TmpBytes);
		if (subtype2->ty_AssExp) {
			/* XXX base is 0? */
			resolveStorageExp(subtype2->ty_AssExp, 0,
					  &subtype2->ty_TmpBytes);
		}
		base = BASEALIGN(base, subtype2->ty_TmpAlignMask);
		SIZELIMIT(base, subtype2->ty_TmpBytes, limitp);
		if (type->ty_TmpAlignMask < subtype2->ty_TmpAlignMask)
			type->ty_TmpAlignMask = subtype2->ty_TmpAlignMask;
	}
	if (type->ty_AssExp) {
		/* XXX base is 0? */
		resolveStorageExp(type->ty_AssExp, 0, &type->ty_TmpBytes);
	}
	if (sg) {
		dassert(sg->sg_Flags & SGF_RESOLVED);
		resolveStorageSemGroup(sg, 0, NULL, 0, NULL);
		if (isglob) {
			/* XXX */
			base = BASEALIGN(base, sg->sg_GlobalAlignMask);
			base = BASEALIGN(base, sg->sg_TmpAlignMask);
			SIZELIMIT(base, sg->sg_GlobalTmpBytes, limitp);
		} else {
			base = BASEALIGN(base, sg->sg_TmpAlignMask);
			SIZELIMIT(base, sg->sg_TmpBytes, limitp);
		}

		/*
		 * Re-resolve the type flags.  XXX mostly fixed once I
		 * handled CBase/DBase/GBase in resolveSemGroup1().
		 */
		if (sg->sg_Flags & SGF_HASASS)
			type->ty_Flags |= TF_HASASS;
		if (sg->sg_SRBase)
			type->ty_Flags |= TF_HASLVPTR;
		if (sg->sg_Flags & SGF_VARARGS)
			type->ty_Flags |= TF_HASLVPTR;	/* XXX TF_VARARGS */
		if (sg->sg_CBase)
			type->ty_Flags |= TF_HASCONSTRUCT;
		if (sg->sg_DBase)
			type->ty_Flags |= TF_HASDESTRUCT;
	}
}

/*
 * resolveSemGroupAlign() / resolveStorageSemGroup() - resolve temporary
 * storage requirements for SemGroups related to classes and compound
 * types.  Temporary storage requirements are calculated on a
 * SemGroup-by-SemGroup basis and are not aggregated into any parent.
 *
 * In the final pass we also reverse the constructor and destructor lists
 * (sg_CBase and sg_DBase), and the pointer/lvalue list (sg_SRBase).  These
 * lists were originally constructed by prepending and are thus in the
 * wrong order.
 */
static void
resolveSemGroupAlign(SemGroup *sg, int flags)
{
	Declaration *d;

	/*
	 * NOTE: SGF_RESOLVED might not be set, indicating that we were
	 *	 able to pick out individual declarations in (global)
	 *	 SGs without having to resolve the whole group.  This
	 *	 allows unused declarations to be omitted by the code
	 *	 generator.
	 */
	if (flags & RESOLVE_CLEAN) {
		if ((sg->sg_Flags & SGF_ALIGNRESOLVED) == 0)
			return;
		sg->sg_Flags &= ~(SGF_ALIGNRESOLVED | SGF_TMPRESOLVED);
	} else {
		if (sg->sg_Flags & SGF_ALIGNRESOLVED)
			return;
		sg->sg_Flags |= SGF_ALIGNRESOLVED;
	}

	RUNE_FOREACH(d, &sg->sg_DeclList, d_Node) {
#if 0
		if ((d->d_ScopeFlags & (SCOPE_CONSTRUCTOR |
					SCOPE_DESTRUCTOR))) {
			if ((sg->sg_Flags & SGF_RESOLVED) == 0 &&
			    (sg->sg_Type == SG_MODULE ||
			     sg->sg_Type == SG_CLASS)) {
				ResolveSemGroup(sg, 0);
			}
		}
#endif
		if ((d->d_Flags & DF_RESOLVED) == 0)
			continue;
		resolveDeclAlign(d, &sg->sg_TmpAlignMask, flags);

		if (d->d_ScopeFlags & SCOPE_GLOBAL) {
			if (sg->sg_GlobalAlignMask < d->d_AlignMask)
				sg->sg_GlobalAlignMask = d->d_AlignMask;
		} else {
			if (sg->sg_AlignMask < d->d_AlignMask)
				sg->sg_AlignMask = d->d_AlignMask;
		}
	}
}
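
/*
 * Illustrative sketch (disabled): the BASEALIGN/SIZELIMIT arithmetic
 * used by resolveStorageType() above.  An offset is aligned up with a
 * mask (alignment minus one, power of two), then the high-water mark
 * '*limitp' is extended to cover the allocation.  align_up() and
 * extend_limit() are hypothetical stand-ins for the macros.
 */
#if 0
#include <stdio.h>

typedef unsigned long toysize_t;

static toysize_t
align_up(toysize_t base, toysize_t alignmask)
{
	return ((base + alignmask) & ~alignmask);
}

static void
extend_limit(toysize_t base, toysize_t bytes, toysize_t *limitp)
{
	if (base + bytes > *limitp)
		*limitp = base + bytes;
}

int
main(void)
{
	toysize_t base = 0;
	toysize_t limit = 0;

	base = align_up(base, 0);	/* 1-byte temp at offset 0 */
	extend_limit(base, 1, &limit);
	base += 1;

	base = align_up(base, 7);	/* 8-byte temp: offset 1 -> 8 */
	extend_limit(base, 8, &limit);

	printf("tmp bytes needed: %lu\n", limit);	/* prints 16 */
	return 0;
}
#endif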

static void
resolveStorageSemGroup(SemGroup *sg, runesize_t base, runesize_t *limitp,
		       runesize_t gbase, runesize_t *glimitp)
{
	Declaration *d;
	Declaration *d2;
	runesize_t dummy_limit = 0;
	runesize_t dummy_glimit = 0;

	if (limitp == NULL) {
		limitp = &dummy_limit;
		glimitp = &dummy_glimit;
	}
#if 0
	if ((sg->sg_Flags & SGF_RESOLVED) == 0) {
		ResolveSemGroup(sg, 0);
	}
#endif
	dassert(sg->sg_Flags & SGF_ALIGNRESOLVED);
	if (sg->sg_Flags & SGF_TMPRESOLVED)
		return;
	sg->sg_Flags |= SGF_TMPRESOLVED;

	/*
	 * Final pass
	 */
	RUNE_FOREACH(d, &sg->sg_DeclList, d_Node) {
		if (d->d_Flags & DF_RESOLVED) {
			resolveDeclStorage(d, base, limitp, gbase, glimitp);
		}
	}

	/*
	 * Reverse the prepend-constructed lists so they wind up in
	 * declaration order.
	 */
	if ((d2 = sg->sg_CBase) != NULL) {
		sg->sg_CBase = NULL;
		while ((d = d2) != NULL) {
			d2 = d->d_CNext;
			d->d_CNext = sg->sg_CBase;
			sg->sg_CBase = d;
		}
	}
	if ((d2 = sg->sg_DBase) != NULL) {
		sg->sg_DBase = NULL;
		while ((d = d2) != NULL) {
			d2 = d->d_DNext;
			d->d_DNext = sg->sg_DBase;
			sg->sg_DBase = d;
		}
	}
	if ((d2 = sg->sg_GBase) != NULL) {
		sg->sg_GBase = NULL;
		while ((d = d2) != NULL) {
			d2 = d->d_GNext;
			d->d_GNext = sg->sg_GBase;
			sg->sg_GBase = d;
		}
	}
	if ((d2 = sg->sg_SRBase) != NULL) {
		sg->sg_SRBase = NULL;
		while ((d = d2) != NULL) {
			d2 = d->d_SRNext;
			d->d_SRNext = sg->sg_SRBase;
			sg->sg_SRBase = d;
		}
	}
	sg->sg_TmpBytes = *limitp;
	sg->sg_GlobalTmpBytes = *glimitp;
}

/*
 * Return non-zero if the method procedure wants the 'this' variable
 * to be an lvalue pointer rather than an lvalue target object.  In this
 * situation we do not want to indirect the pointer.
 */
static int
methodProcThisIsPointer(Type *type)
{
	Declaration *d;
	SemGroup *sg;
	int r;

	dassert(type->ty_Op == TY_PROC);
	dassert(type->ty_ProcType.et_ArgsType->ty_Op == TY_ARGS);
	sg = type->ty_ProcType.et_ArgsType->ty_CompType.et_SemGroup;
	d = RUNE_FIRST(&sg->sg_DeclList);
	dassert(d->d_Id == String_This);

	switch (d->d_StorDecl.ed_Type->ty_Op) {
	case TY_REFTO:
		r = 1;
		break;
	case TY_PTRTO:
		r = 1;
		break;
	case TY_CPTRTO:
		fprintf(stderr,
			"methodProcThisIsPointer(): method call through "
			"C pointer is illegal\n");
		dassert_decl(d, 0);
		r = 1;
		break;
	default:
		r = 0;
		break;
	}
	return r;
}

#if 0

/*
 * Calculate SG dependencies
 */
#define SGDEP_HSIZE	1024
#define SGDEP_HMASK	(SGDEP_HSIZE - 1)

static SemGroup *SGCurrentDep;
static SGDepend *SGDepHash[SGDEP_HSIZE];

static SGDepend **
resolveSGDependHash(SemGroup *src, SemGroup *dst)
{
	intptr_t hv;

	hv = ((intptr_t)src >> 7) ^ ((intptr_t)dst >> 5);
	hv ^= hv >> 16;

	return (&SGDepHash[hv & SGDEP_HMASK]);
}

static SemGroup *
resolvePushSGDepend(SemGroup *sg __unused)
{
	SemGroup *last;
	SGDepend *dep;
	SGDepend **depp;

	depp = resolveSGDependHash(SGCurrentDep, sg);
	for (dep = *depp; dep; dep = dep->hnext) {
		if (dep->src == SGCurrentDep && dep->dst == sg)
			break;
	}
	if (dep == NULL) {
		dep = zalloc(sizeof(SGDepend));
		dep->hnext = *depp;
		dep->src = SGCurrentDep;
		dep->dst = sg;
		*depp = dep;
		if (SGCurrentDep) {
			dep->next = SGCurrentDep->sg_DepFirst;
			SGCurrentDep->sg_DepFirst = dep;
		}
	}
	last = SGCurrentDep;
	SGCurrentDep = sg;

	return last;
}

static void
resolvePopSGDepend(SemGroup *dep)
{
	SGCurrentDep = dep;
}

#endif
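
/*
 * Illustrative sketch (disabled): the in-place reversal performed on
 * sg_CBase/sg_DBase/sg_GBase/sg_SRBase in resolveStorageSemGroup()
 * above.  The lists are built by prepending during resolution (O(1)
 * per insert), so one final pop/push pass restores declaration order.
 * ToyNode/toyReverse are hypothetical stand-ins.
 */
#if 0
#include <stdio.h>

typedef struct ToyNode {
	struct ToyNode	*next;
	int		id;
} ToyNode;

static ToyNode *
toyReverse(ToyNode *head)
{
	ToyNode *out = NULL;
	ToyNode *d;

	while ((d = head) != NULL) {	/* pop from the input list ... */
		head = d->next;
		d->next = out;		/* ... push onto the output list */
		out = d;
	}
	return out;
}

int
main(void)
{
	ToyNode n1 = { NULL, 1 };
	ToyNode n2 = { &n1, 2 };
	ToyNode n3 = { &n2, 3 };	/* prepended 1,2,3 -> list is 3,2,1 */
	ToyNode *d;

	for (d = toyReverse(&n3); d; d = d->next)
		printf("%d ", d->id);	/* prints "1 2 3 " */
	printf("\n");
	return 0;
}
#endif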

/*
 * If we are resolving to a dynamic method call, we need to flag all
 * matching, not-yet-resolved subclass decls for (d) to ensure that they
 * get resolved if their related class is used at all, since the dynamic
 * method call might wind up calling any one of them.
 */
static void resolveDynamicDecl(Declaration *d);
static void resolveDynamicDeclAlign(Declaration *d, runesize_t *expalignp,
			int flags);
static void resolveDynamicDeclStorage(Declaration *d,
			runesize_t base, runesize_t *limitp,
			runesize_t gbase, runesize_t *glimitp);

static void
resolveDynamicProcedure(SemGroup *isg __unused, SemGroup *sg __unused,
			Exp *exp, int flags __unused)
{
	Declaration *d;
	Type *type;
	Exp *lhs;

	lhs = exp->ex_Lhs;
	type = lhs->ex_Lhs->ex_Type;
	d = lhs->ex_Decl;

	if (lhs->ex_Token != TOK_STRIND || type->ty_Op != TY_REFTO)
		return;
	type = type->ty_RefType.et_Type;
	dassert_exp(exp, type->ty_Op == TY_CLASS);
	resolveDynamicDecl(d);
}

static void
resolveDynamicProcedureAlign(Exp *exp, runesize_t *expalignp, int flags)
{
	Declaration *d;
	Type *type;
	Exp *lhs;

	lhs = exp->ex_Lhs;
	type = lhs->ex_Lhs->ex_Type;
	d = lhs->ex_Decl;

	if (lhs->ex_Token != TOK_STRIND || type->ty_Op != TY_REFTO)
		return;
	type = type->ty_RefType.et_Type;
	dassert_exp(exp, type->ty_Op == TY_CLASS);
	resolveDynamicDeclAlign(d, expalignp, flags);
}

static void
resolveDynamicProcedureStorage(Exp *exp, runesize_t base, runesize_t *limitp,
			       runesize_t gbase, runesize_t *glimitp)
{
	Declaration *d;
	Type *type;
	Exp *lhs;

	lhs = exp->ex_Lhs;
	type = lhs->ex_Lhs->ex_Type;
	d = lhs->ex_Decl;

	if (lhs->ex_Token != TOK_STRIND || type->ty_Op != TY_REFTO)
		return;
	type = type->ty_RefType.et_Type;
	dassert_exp(exp, type->ty_Op == TY_CLASS);
	resolveDynamicDeclStorage(d, base, limitp, gbase, glimitp);
}

static void
resolveDynamicDecl(Declaration *d)
{
	Declaration *scan;

	for (scan = d->d_SubBase; scan; scan = scan->d_SubNext) {
		scan->d_Flags |= DF_DYNAMICREF;
		if (scan->d_MyGroup &&
		    (scan->d_MyGroup->sg_Flags &
		     (SGF_RESOLVING | SGF_RESOLVED))) {
			ResolveDecl(scan, 0);
		}
	}
	for (scan = d->d_SubBase; scan; scan = scan->d_SubNext) {
		if (scan->d_SubBase)
			resolveDynamicDecl(scan);
	}
}
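
/*
 * Illustrative sketch (disabled): the subclass walk performed by
 * resolveDynamicDecl() above.  Overriding declarations form a
 * first-child (d_SubBase) / next-sibling (d_SubNext) tree; a dynamic
 * call site marks every override so none is pruned as unused.  ToyDecl
 * and TOY_DYNAMICREF are hypothetical stand-ins.
 */
#if 0
#include <stdio.h>

#define TOY_DYNAMICREF	0x0001

typedef struct ToyDecl {
	struct ToyDecl	*subbase;	/* first overriding decl */
	struct ToyDecl	*subnext;	/* next sibling override */
	int		flags;
} ToyDecl;

static void
toyMarkDynamic(ToyDecl *d)
{
	ToyDecl *scan;

	/* mark all direct overrides first ... */
	for (scan = d->subbase; scan; scan = scan->subnext)
		scan->flags |= TOY_DYNAMICREF;

	/* ... then recurse into overrides that are themselves overridden */
	for (scan = d->subbase; scan; scan = scan->subnext) {
		if (scan->subbase)
			toyMarkDynamic(scan);
	}
}

int
main(void)
{
	ToyDecl grandchild = { NULL, NULL, 0 };
	ToyDecl child2 = { NULL, NULL, 0 };
	ToyDecl child1 = { &grandchild, &child2, 0 };
	ToyDecl base = { &child1, NULL, 0 };

	toyMarkDynamic(&base);
	printf("%d %d %d\n", child1.flags, child2.flags,
	       grandchild.flags);	/* prints "1 1 1" */
	return 0;
}
#endif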

/*
 * Handle everything required to inline a procedure.  Small procedures
 * are automatically inlined unless 'noinline' is specified.  'inline'
 * must be specified to inline large procedures.  We can only inline
 * when we know the exact procedure in question, so ref-based method
 * calls tend to prevent inlining.
 */
typedef struct xinline {
	struct xinline *prev;
	struct xinline *next;
	Declaration *d;
} xinline_t;

xinline_t XInlineTop;
xinline_t *XInlineBot = &XInlineTop;

static void
resolveProcedureInline(SemGroup *isg __unused, SemGroup *sg,
		       Exp *exp, int flags)
{
	Declaration *d;
	Exp *lhs;
	Stmt *st;
	xinline_t *xin;

	lhs = exp->ex_Lhs;
	d = lhs->ex_Decl;

	/*
	 * Do not inline internal or clang procedures, procedures marked
	 * 'noinline', or threaded procedures.  Do not inline a call which
	 * will probably resolve to a constant (and be optimized into one
	 * directly; inlining would slow things down in that situation).
	 */
	if (d->d_ScopeFlags & (SCOPE_INTERNAL | SCOPE_CLANG | SCOPE_NOINLINE))
		return;
	if (d->d_ScopeFlags & SCOPE_THREAD)
		return;
	if (exp->ex_Flags & EXF_PROBCONST)
		return;

	/*
	 * XXX optimize this if the reference type is known explicitly,
	 * otherwise we can't inline since it requires a dynamic call.
	 */
	if (lhs->ex_Token == TOK_STRIND &&
	    lhs->ex_Lhs->ex_Type->ty_Op == TY_REFTO)
		return;

	/*
	 * For now do not try to combine global data because each inline
	 * will get its own instantiation, which is not what the programmer
	 * expects.
	 */
	st = d->d_ProcDecl.ed_ProcBody;
	if (st == NULL)
		return;
	if (st->st_MyGroup->sg_GlobalBytes ||
	    st->st_MyGroup->sg_GlobalTmpBytes)
		return;

	/*
	 * XXX we should be able to allow var-args inlines, why doesn't
	 * this work?
	 */
	if (d->d_ProcDecl.ed_Type->ty_ProcType.et_ArgsType->
	    ty_CompType.et_SemGroup->sg_Flags & SGF_VARARGS)
		return;

	/*
	 * Do not inline the same procedure recursively, or if we can
	 * optimize the procedure call into a constant by interpreting
	 * it once.
	 */
	if (d->d_Flags & DF_INLINING)
		return;
	if (exp->ex_Flags & EXF_CONST)
		return;

	/*
	 * Do not inline if we do not know the precise procedure at
	 * resolve-time.
	 */
	if (d->d_Op != DOP_PROC || lhs->ex_Type->ty_Op == TY_REFTO)
		return;

	xin = zalloc(sizeof(*xin));
	xin->prev = XInlineBot;
	xin->d = d;
	XInlineBot->next = xin;
	XInlineBot = xin;

	/*
	 * We inline the procedure by duplicating the procedure body and
	 * converting the procedure-call exp into a TOK_INLINE_CALL.
	 * Recursive inlining is disallowed via DF_INLINING above.
	 *
	 * Set EXF_PARSE_TYPE on ex_Lhs to retain ex_Lhs->ex_Type across
	 * any further duplication for the TOK_INLINE_CALL switch.
	 */
	d->d_Flags |= DF_INLINING;

#if 1
	dassert((exp->ex_Flags & EXF_DUPEXP) == 0);

	exp->ex_Lhs->ex_Flags |= EXF_PARSE_TYPE;
	st = d->d_ProcDecl.ed_ProcBody;
	if (st->st_MyGroup->sg_Complexity < RuneInlineComplexity) {
		SemGroup *altsg;

		if (DebugOpt) {
			xinline_t *xscan;

			printf("InlineTest: %5d",
			       st->st_MyGroup->sg_Complexity);
			for (xscan = XInlineTop.next;
			     xscan;
			     xscan = xscan->next) {
				printf(".%s", xscan->d->d_Id);
			}
			printf("\n");
		}
		altsg = st->st_MyGroup->sg_Parent;
		dassert(st->st_Flags & STF_SEMANTIC);
		st = DupStmt(st->st_MyGroup, NULL, st);
		st->st_ProcStmt.es_Decl = d;
		st->st_ProcStmt.es_Scope = d->d_Scope;
		st->st_Flags |= STF_INLINED_PROC;
		exp->ex_Token = TOK_INLINE_CALL;
		exp->ex_AuxStmt = st;

		/*
		 * XXX sg_AltContext is actually what we want to have
		 * priority for searches, not sg_Parent!
		 */
		ResolveStmt(d->d_ImportSemGroup, st, flags);
		st->st_MyGroup->sg_AltContext = altsg;
		st->st_MyGroup->sg_Flags |= SGF_ALTPRIORITY;

		/*
		 * Link the inlined procedure's semantic context with our
		 * own so stack storage is properly calculated.  We must
		 * clear STF_SEMTOP here or the alignment recursion will
		 * restart at 0.
		 */
		dassert(st->st_Flags & STF_SEMTOP);
		dassert(st->st_Flags & STF_SEMANTIC);
		st->st_Flags &= ~STF_SEMTOP;
		st->st_MyGroup->sg_Parent = sg;
		/* ResolveExp(isg, sg, exp, exp->ex_Type, flags); */
	}
#endif
	d->d_Flags &= ~DF_INLINING;

	XInlineBot->next = NULL;
	XInlineBot = xin->prev;
	zfree(xin, sizeof(*xin));
}
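
/*
 * Illustrative sketch (disabled): the XInlineTop/XInlineBot stack used
 * above to track the chain of procedures currently being inlined (for
 * the DebugOpt trace).  A static sentinel means push/pop never handle
 * an empty-list special case.  ToyFrame and the toy* names are
 * hypothetical stand-ins; heap allocation is replaced by stack locals.
 */
#if 0
#include <stdio.h>

typedef struct ToyFrame {
	struct ToyFrame	*prev;
	struct ToyFrame	*next;
	const char	*name;
} ToyFrame;

static ToyFrame ToyTop;		/* sentinel, never popped */
static ToyFrame *ToyBot = &ToyTop;

static void
toyPush(ToyFrame *f, const char *name)
{
	f->prev = ToyBot;
	f->next = NULL;
	f->name = name;
	ToyBot->next = f;
	ToyBot = f;
}

static void
toyPop(ToyFrame *f)
{
	ToyBot = f->prev;
	ToyBot->next = NULL;
}

static void
toyTrace(void)
{
	ToyFrame *scan;

	for (scan = ToyTop.next; scan; scan = scan->next)
		printf(".%s", scan->name);
	printf("\n");
}

int
main(void)
{
	ToyFrame a, b;

	toyPush(&a, "outer");
	toyPush(&b, "inner");
	toyTrace();			/* prints ".outer.inner" */
	toyPop(&b);
	toyPop(&a);
	return 0;
}
#endif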