From 215848eb9b96d3af170bda049ec37d9f78e9a06d Mon Sep 17 00:00:00 2001
From: Leon Sha
Date: Mon, 27 Jul 2009 11:30:57 +0800
Subject: [PATCH 01/19] Bug 506201 - regexp.cpp fails to compile on
 opensolaris: goto bypasses variable init. r=gal

---
 js/src/jsregexp.cpp | 4 ----
 1 file changed, 4 deletions(-)

diff --git a/js/src/jsregexp.cpp b/js/src/jsregexp.cpp
index 91a41e49da76..960593e235a1 100644
--- a/js/src/jsregexp.cpp
+++ b/js/src/jsregexp.cpp
@@ -3166,10 +3166,6 @@ CompileRegExpToNative(JSContext* cx, JSRegExp* re, Fragment* fragment)
         goto out;
     }
     rv = rc.compile(cx);
-    static int fail = 0; // TODO: remove
-    if (!rv)
-        ++fail;
-    debug_only_printf(LC_TMRegexp, "## Fail? %d, Total %d\n", (int)!rv, fail);
 out:
     JS_ARENA_RELEASE(&cx->tempPool, mark);
     return rv;

From 1d5548f39bd0f52f865c54f4803a2efcfc80e960 Mon Sep 17 00:00:00 2001
From: "wes@page.ca"
Date: Mon, 27 Jul 2009 12:40:59 +0800
Subject: [PATCH 02/19] Bug 504594 - Addresses in nanojit-debug output are
 wrong on SPARC. r=graydon.

---
 js/src/nanojit/NativeSparc.h | 184 +++++++++++++++++------------------
 1 file changed, 92 insertions(+), 92 deletions(-)

diff --git a/js/src/nanojit/NativeSparc.h b/js/src/nanojit/NativeSparc.h
index 13e0b8dee323..7c7b43866b90 100644
--- a/js/src/nanojit/NativeSparc.h
+++ b/js/src/nanojit/NativeSparc.h
@@ -214,7 +214,7 @@ namespace nanojit
         int offset = (c->_address) - ((int)_nIns) + 4; \
         int i = 0x40000000 | ((offset >> 2) & 0x3FFFFFFF); \
         IMM32(i); \
-        verbose_only(asm_output("call %s",(c->_name));) \
+        asm_output("call %s",(c->_name)); \
     } while (0)

 #define Format_2_1(rd, op2, imm22) do { \
@@ -292,548 +292,548 @@ namespace nanojit

 #define ADDCC(rs1, rs2, rd) \
     do { \
-    asm_output("addcc %s, %s, %s", gpn(rs1), gpn(rs2), gpn(rd)); \
     Format_3_1(2, rd, 0x10, rs1, 0, rs2); \
+    asm_output("addcc %s, %s, %s", gpn(rs1), gpn(rs2), gpn(rd)); \
     } while (0)

 #define ADD(rs1, rs2, rd) \
     do { \
-    asm_output("add %s, %s, %s", gpn(rs1), gpn(rs2), gpn(rd)); \
     Format_3_1(2, rd, 0, rs1, 0, rs2); \
+    asm_output("add %s, %s, %s", gpn(rs1), gpn(rs2), gpn(rd)); \
     } while (0)

 #define AND(rs1, rs2, rd) \
     do { \
-    asm_output("and %s, %s, %s", gpn(rs1), gpn(rs2), gpn(rd)); \
     Format_3_1(2, rd, 0x1, rs1, 0, rs2); \
+    asm_output("and %s, %s, %s", gpn(rs1), gpn(rs2), gpn(rd)); \
     } while (0)

 #define BA(a, dsp22) \
     do { \
-    asm_output("ba %p", _nIns + dsp22 - 1); \
     Format_2_2(a, 0x8, 0x2, dsp22); \
+    asm_output("ba %p", _nIns + dsp22 - 1); \
     } while (0)

 #define BE(a, dsp22) \
     do { \
-    asm_output("be %p", _nIns + dsp22 - 1); \
     Format_2_2(a, 0x1, 0x2, dsp22); \
+    asm_output("be %p", _nIns + dsp22 - 1); \
     } while (0)

 #define BG(a, dsp22) \
     do { \
-    asm_output("bg %p", _nIns + dsp22 - 1); \
     Format_2_2(a, 0xA, 0x2, dsp22); \
+    asm_output("bg %p", _nIns + dsp22 - 1); \
     } while (0)

 #define BGU(a, dsp22) \
     do { \
-    asm_output("bgu %p", _nIns + dsp22 - 1); \
     Format_2_2(a, 0xC, 0x2, dsp22); \
+    asm_output("bgu %p", _nIns + dsp22 - 1); \
     } while (0)

 #define BGE(a, dsp22) \
     do { \
-    asm_output("bge %p", _nIns + dsp22 - 1); \
     Format_2_2(a, 0xB, 0x2, dsp22); \
+    asm_output("bge %p", _nIns + dsp22 - 1); \
     } while (0)

 #define BL(a, dsp22) \
     do { \
-    asm_output("bl %p", _nIns + dsp22 - 1); \
     Format_2_2(a, 0x3, 0x2, dsp22); \
+    asm_output("bl %p", _nIns + dsp22 - 1); \
     } while (0)

 #define BLE(a, dsp22) \
     do { \
-    asm_output("ble %p", _nIns + dsp22 - 1); \
     Format_2_2(a, 0x2, 0x2, dsp22); \
+    asm_output("ble %p", _nIns + dsp22 - 1); \
     } while (0)

 #define BLEU(a, dsp22) \
     do { \
-    asm_output("bleu %p", _nIns + dsp22 - 1); \
     Format_2_2(a, 0x4, 0x2, dsp22); \
+ asm_output("bleu %p", _nIns + dsp22 - 1); \ } while (0) #define BCC(a, dsp22) \ do { \ - asm_output("bcc %p", _nIns + dsp22 - 1); \ Format_2_2(a, 0xd, 0x2, dsp22); \ + asm_output("bcc %p", _nIns + dsp22 - 1); \ } while (0) #define BCS(a, dsp22) \ do { \ - asm_output("bcs %p", _nIns + dsp22 - 1); \ Format_2_2(a, 0x5, 0x2, dsp22); \ + asm_output("bcs %p", _nIns + dsp22 - 1); \ } while (0) #define BVC(a, dsp22) \ do { \ - asm_output("bvc %p", _nIns + dsp22 - 1); \ Format_2_2(a, 0xf, 0x2, dsp22); \ + asm_output("bvc %p", _nIns + dsp22 - 1); \ } while (0) #define BVS(a, dsp22) \ do { \ - asm_output("bvc %p", _nIns + dsp22 - 1); \ Format_2_2(a, 0x7, 0x2, dsp22); \ + asm_output("bvc %p", _nIns + dsp22 - 1); \ } while (0) #define BNE(a, dsp22) \ do { \ - asm_output("bne %p", _nIns + dsp22 - 1); \ Format_2_2(a, 0x9, 0x2, dsp22); \ + asm_output("bne %p", _nIns + dsp22 - 1); \ } while (0) #define FABSS(rs2, rd) \ do { \ - asm_output("fabs %s, %s", gpn(rs2+32), gpn(rd+32)); \ Format_3_8(2, rd, 0x34, 0, 0x9, rs2); \ + asm_output("fabs %s, %s", gpn(rs2+32), gpn(rd+32)); \ } while (0) #define FADDD(rs1, rs2, rd) \ do { \ - asm_output("faddd %s, %s, %s", gpn(rs1+32), gpn(rs2+32), gpn(rd+32)); \ Format_3_8(2, rd, 0x34, rs1, 0x42, rs2); \ + asm_output("faddd %s, %s, %s", gpn(rs1+32), gpn(rs2+32), gpn(rd+32)); \ } while (0) #define FBE(a, dsp22) \ do { \ - asm_output("fbe %p", _nIns + dsp22 - 1); \ Format_2_2(a, 0x9, 0x6, dsp22); \ + asm_output("fbe %p", _nIns + dsp22 - 1); \ } while(0) #define FBNE(a, dsp22) \ do { \ - asm_output("fbne %p", _nIns + dsp22 - 1); \ Format_2_2(a, 0x1, 0x6, dsp22); \ + asm_output("fbne %p", _nIns + dsp22 - 1); \ } while(0) #define FBUE(a, dsp22) \ do { \ - asm_output("fbue %p", _nIns + dsp22 - 1); \ Format_2_2(a, 0xA, 0x6, dsp22); \ + asm_output("fbue %p", _nIns + dsp22 - 1); \ } while(0) #define FBG(a, dsp22) \ do { \ - asm_output("fng %p", _nIns + dsp22 - 1); \ Format_2_2(a, 0x6, 0x6, dsp22); \ + asm_output("fng %p", _nIns + dsp22 - 1); \ } while(0) #define FBUG(a, dsp22) \ do { \ - asm_output("fbug %p", _nIns + dsp22 - 1); \ Format_2_2(a, 0x5, 0x6, dsp22); \ + asm_output("fbug %p", _nIns + dsp22 - 1); \ } while(0) #define FBGE(a, dsp22) \ do { \ - asm_output("fbge %p", _nIns + dsp22 - 1); \ Format_2_2(a, 0xB, 0x6, dsp22); \ + asm_output("fbge %p", _nIns + dsp22 - 1); \ } while(0) #define FBUGE(a, dsp22) \ do { \ - asm_output("fbuge %p", _nIns + dsp22 - 1); \ Format_2_2(a, 0xC, 0x6, dsp22); \ + asm_output("fbuge %p", _nIns + dsp22 - 1); \ } while(0) #define FBL(a, dsp22) \ do { \ - asm_output("fbl %p", _nIns + dsp22 - 1); \ Format_2_2(a, 0x4, 0x6, dsp22); \ + asm_output("fbl %p", _nIns + dsp22 - 1); \ } while(0) #define FBLE(a, dsp22) \ do { \ - asm_output("fble %p", _nIns + dsp22 - 1); \ Format_2_2(a, 0xD, 0x6, dsp22); \ + asm_output("fble %p", _nIns + dsp22 - 1); \ } while(0) #define FCMPD(rs1, rs2) \ do { \ - asm_output("fcmpd %s, %s", gpn(rs1+32), gpn(rs2+32)); \ Format_3_9(2, 0, 0, 0x35, rs1, 0x52, rs2); \ + asm_output("fcmpd %s, %s", gpn(rs1+32), gpn(rs2+32)); \ } while (0) #define FSUBD(rs1, rs2, rd) \ do { \ - asm_output("fsubd %s, %s, %s", gpn(rs1+32), gpn(rs2+32), gpn(rd+32)); \ Format_3_8(2, rd, 0x34, rs1, 0x46, rs2); \ + asm_output("fsubd %s, %s, %s", gpn(rs1+32), gpn(rs2+32), gpn(rd+32)); \ } while (0) #define FMULD(rs1, rs2, rd) \ do { \ - asm_output("fmuld %s, %s, %s", gpn(rs1+32), gpn(rs2+32), gpn(rd+32)); \ Format_3_8(2, rd, 0x34, rs1, 0x4a, rs2); \ + asm_output("fmuld %s, %s, %s", gpn(rs1+32), gpn(rs2+32), gpn(rd+32)); \ } while (0) #define FDIVD(rs1, rs2, 
rd) \ do { \ - asm_output("fdivd %s, %s, %s", gpn(rs1+32), gpn(rs2+32), gpn(rd+32)); \ Format_3_8(2, rd, 0x34, rs1, 0x4e, rs2); \ + asm_output("fdivd %s, %s, %s", gpn(rs1+32), gpn(rs2+32), gpn(rd+32)); \ } while (0) #define FMOVD(rs2, rd) \ do { \ - asm_output("fmovd %s, %s", gpn(rs2+32), gpn(rd+32)); \ Format_3_8(2, rd, 0x34, 0, 0x2, rs2); \ + asm_output("fmovd %s, %s", gpn(rs2+32), gpn(rd+32)); \ } while (0) #define FNEGD(rs2, rd) \ do { \ - asm_output("fnegd %s, %s", gpn(rs2+32), gpn(rd+32)); \ Format_3_8(2, rd, 0x34, 0, 0x6, rs2); \ + asm_output("fnegd %s, %s", gpn(rs2+32), gpn(rd+32)); \ } while (0) #define FITOD(rs2, rd) \ do { \ - asm_output("fitod %s, %s", gpn(rs2+32), gpn(rd+32)); \ Format_3_8(2, rd, 0x34, 0, 0xc8, rs2); \ + asm_output("fitod %s, %s", gpn(rs2+32), gpn(rd+32)); \ } while (0) #define JMPL(rs1, rs2, rd) \ do { \ - asm_output("jmpl [%s + %s]", gpn(rs1), gpn(rs2)); \ Format_3_1(2, rd, 0x38, rs1, 0, rs2); \ + asm_output("jmpl [%s + %s]", gpn(rs1), gpn(rs2)); \ } while (0) #define JMPLI(rs1, simm13, rd) \ do { \ - asm_output("jmpl [%s + %d]", gpn(rs1), simm13); \ Format_3_1I(2, rd, 0x38, rs1, simm13); \ + asm_output("jmpl [%s + %d]", gpn(rs1), simm13); \ } while (0) #define LDF(rs1, rs2, rd) \ do { \ - asm_output("ld [%s + %s], %s", gpn(rs1), gpn(rs2), gpn(rd+32)); \ Format_3_1(3, rd, 0x20, rs1, 0, rs2); \ + asm_output("ld [%s + %s], %s", gpn(rs1), gpn(rs2), gpn(rd+32)); \ } while (0) #define LDFI(rs1, simm13, rd) \ do { \ - asm_output("ld [%s + %d], %s", gpn(rs1), simm13, gpn(rd+32)); \ Format_3_1I(3, rd, 0x20, rs1, simm13); \ + asm_output("ld [%s + %d], %s", gpn(rs1), simm13, gpn(rd+32)); \ } while (0) #define LDUB(rs1, rs2, rd) \ do { \ - asm_output("ld [%s + %s], %s", gpn(rs1), gpn(rs2), gpn(rd)); \ Format_3_1(3, rd, 0x1, rs1, 0, rs2); \ + asm_output("ld [%s + %s], %s", gpn(rs1), gpn(rs2), gpn(rd)); \ } while (0) #define LDUBI(rs1, simm13, rd) \ do { \ - asm_output("ld [%s + %d], %s", gpn(rs1), simm13, gpn(rd)); \ Format_3_1I(3, rd, 0x1, rs1, simm13); \ + asm_output("ld [%s + %d], %s", gpn(rs1), simm13, gpn(rd)); \ } while (0) #define LDUH(rs1, rs2, rd) \ do { \ - asm_output("ld [%s + %s], %s", gpn(rs1), gpn(rs2), gpn(rd)); \ Format_3_1(3, rd, 0x2, rs1, 0, rs2); \ + asm_output("ld [%s + %s], %s", gpn(rs1), gpn(rs2), gpn(rd)); \ } while (0) #define LDUHI(rs1, simm13, rd) \ do { \ - asm_output("ld [%s + %d], %s", gpn(rs1), simm13, gpn(rd)); \ Format_3_1I(3, rd, 0x2, rs1, simm13); \ + asm_output("ld [%s + %d], %s", gpn(rs1), simm13, gpn(rd)); \ } while (0) #define LDSW(rs1, rs2, rd) \ do { \ - asm_output("ld [%s + %s], %s", gpn(rs1), gpn(rs2), gpn(rd)); \ Format_3_1(3, rd, 0x8, rs1, 0, rs2); \ + asm_output("ld [%s + %s], %s", gpn(rs1), gpn(rs2), gpn(rd)); \ } while (0) #define LDSWI(rs1, simm13, rd) \ do { \ - asm_output("ld [%s + %d], %s", gpn(rs1), simm13, gpn(rd)); \ Format_3_1I(3, rd, 0x8, rs1, simm13); \ + asm_output("ld [%s + %d], %s", gpn(rs1), simm13, gpn(rd)); \ } while (0) #define MOVE(rs, cc2, cc1, cc0, rd) \ do { \ - asm_output("move %s, %s", gpn(rs), gpn(rd)); \ Format_4_2(rd, 0x2c, cc2, 1, cc1, cc0, rs); \ + asm_output("move %s, %s", gpn(rs), gpn(rd)); \ } while (0) #define MOVNE(rs, cc2, cc1, cc0, rd) \ do { \ - asm_output("movne %s, %s", gpn(rs), gpn(rd)); \ Format_4_2(rd, 0x2c, cc2, 9, cc1, cc0, rs); \ + asm_output("movne %s, %s", gpn(rs), gpn(rd)); \ } while (0) #define MOVL(rs, cc2, cc1, cc0, rd) \ do { \ - asm_output("movl %s, %s", gpn(rs), gpn(rd)); \ Format_4_2(rd, 0x2c, cc2, 3, cc1, cc0, rs); \ + asm_output("movl %s, %s", gpn(rs), gpn(rd)); \ } 
while (0) #define MOVLE(rs, cc2, cc1, cc0, rd) \ do { \ - asm_output("movle %s, %s", gpn(rs), gpn(rd)); \ Format_4_2(rd, 0x2c, cc2, 2, cc1, cc0, rs); \ + asm_output("movle %s, %s", gpn(rs), gpn(rd)); \ } while (0) #define MOVG(rs, cc2, cc1, cc0, rd) \ do { \ - asm_output("movg %s, %s", gpn(rs), gpn(rd)); \ Format_4_2(rd, 0x2c, cc2, 0xa, cc1, cc0, rs); \ + asm_output("movg %s, %s", gpn(rs), gpn(rd)); \ } while (0) #define MOVGE(rs, cc2, cc1, cc0, rd) \ do { \ - asm_output("movge %s, %s", gpn(rs), gpn(rd)); \ Format_4_2(rd, 0x2c, cc2, 0xb, cc1, cc0, rs); \ + asm_output("movge %s, %s", gpn(rs), gpn(rd)); \ } while (0) #define MOVCS(rs, cc2, cc1, cc0, rd) \ do { \ - asm_output("movcs %s, %s", gpn(rs), gpn(rd)); \ Format_4_2(rd, 0x2c, cc2, 5, cc1, cc0, rs); \ + asm_output("movcs %s, %s", gpn(rs), gpn(rd)); \ } while (0) #define MOVLEU(rs, cc2, cc1, cc0, rd) \ do { \ - asm_output("movleu %s, %s", gpn(rs), gpn(rd)); \ Format_4_2(rd, 0x2c, cc2, 4, cc1, cc0, rs); \ + asm_output("movleu %s, %s", gpn(rs), gpn(rd)); \ } while (0) #define MOVGU(rs, cc2, cc1, cc0, rd) \ do { \ - asm_output("movgu %s, %s", gpn(rs), gpn(rd)); \ Format_4_2(rd, 0x2c, cc2, 0xc, cc1, cc0, rs); \ + asm_output("movgu %s, %s", gpn(rs), gpn(rd)); \ } while (0) #define MOVCC(rs, cc2, cc1, cc0, rd) \ do { \ - asm_output("movcc %s, %s", gpn(rs), gpn(rd)); \ Format_4_2(rd, 0x2c, cc2, 0xd, cc1, cc0, rs); \ + asm_output("movcc %s, %s", gpn(rs), gpn(rd)); \ } while (0) #define MOVVC(rs, cc2, cc1, cc0, rd) \ do { \ - asm_output("movvc %s, %s", gpn(rs), gpn(rd)); \ Format_4_2(rd, 0x2c, cc2, 0xf, cc1, cc0, rs); \ + asm_output("movvc %s, %s", gpn(rs), gpn(rd)); \ } while (0) #define MOVEI(simm11, cc2, cc1, cc0, rd) \ do { \ - asm_output("move %d, %s", simm11, gpn(rd)); \ Format_4_2I(rd, 0x2c, cc2, 1, cc1, cc0, simm11); \ + asm_output("move %d, %s", simm11, gpn(rd)); \ } while (0) #define MOVFEI(simm11, cc2, cc1, cc0, rd) \ do { \ - asm_output("move %d, %s", simm11, gpn(rd)); \ Format_4_2I(rd, 0x2c, cc2, 9, cc1, cc0, simm11); \ + asm_output("move %d, %s", simm11, gpn(rd)); \ } while (0) #define MOVNEI(simm11, cc2, cc1, cc0, rd) \ do { \ - asm_output("move %d, %s", simm11, gpn(rd)); \ Format_4_2I(rd, 0x2c, cc2, 9, cc1, cc0, simm11); \ + asm_output("move %d, %s", simm11, gpn(rd)); \ } while (0) #define MOVLI(simm11, cc2, cc1, cc0, rd) \ do { \ - asm_output("move %d, %s", simm11, gpn(rd)); \ Format_4_2I(rd, 0x2c, cc2, 3, cc1, cc0, simm11); \ + asm_output("move %d, %s", simm11, gpn(rd)); \ } while (0) #define MOVFLI(simm11, cc2, cc1, cc0, rd) \ do { \ - asm_output("move %d, %s", simm11, gpn(rd)); \ Format_4_2I(rd, 0x2c, cc2, 4, cc1, cc0, simm11); \ + asm_output("move %d, %s", simm11, gpn(rd)); \ } while (0) #define MOVLEI(simm11, cc2, cc1, cc0, rd) \ do { \ - asm_output("movle %d, %s", simm11, gpn(rd)); \ Format_4_2I(rd, 0x2c, cc2, 2, cc1, cc0, simm11); \ + asm_output("movle %d, %s", simm11, gpn(rd)); \ } while (0) #define MOVFLEI(simm11, cc2, cc1, cc0, rd) \ do { \ - asm_output("movle %d, %s", simm11, gpn(rd)); \ Format_4_2I(rd, 0x2c, cc2, 0xd, cc1, cc0, simm11); \ + asm_output("movle %d, %s", simm11, gpn(rd)); \ } while (0) #define MOVGI(simm11, cc2, cc1, cc0, rd) \ do { \ - asm_output("movg %d, %s", simm11, gpn(rd)); \ Format_4_2I(rd, 0x2c, cc2, 0xa, cc1, cc0, simm11); \ + asm_output("movg %d, %s", simm11, gpn(rd)); \ } while (0) #define MOVFGI(simm11, cc2, cc1, cc0, rd) \ do { \ - asm_output("movg %d, %s", simm11, gpn(rd)); \ Format_4_2I(rd, 0x2c, cc2, 6, cc1, cc0, simm11); \ + asm_output("movg %d, %s", simm11, gpn(rd)); \ } while (0) 
 #define MOVGEI(simm11, cc2, cc1, cc0, rd) \
     do { \
-    asm_output("movge %d, %s", simm11, gpn(rd)); \
     Format_4_2I(rd, 0x2c, cc2, 0xb, cc1, cc0, simm11); \
+    asm_output("movge %d, %s", simm11, gpn(rd)); \
     } while (0)

 #define MOVFGEI(simm11, cc2, cc1, cc0, rd) \
     do { \
-    asm_output("movge %d, %s", simm11, gpn(rd)); \
     Format_4_2I(rd, 0x2c, cc2, 0xb, cc1, cc0, simm11); \
+    asm_output("movge %d, %s", simm11, gpn(rd)); \
     } while (0)

 #define MOVLEUI(simm11, cc2, cc1, cc0, rd) \
     do { \
-    asm_output("movleu %d, %s", simm11, gpn(rd)); \
     Format_4_2I(rd, 0x2c, cc2, 4, cc1, cc0, simm11); \
+    asm_output("movleu %d, %s", simm11, gpn(rd)); \
     } while (0)

 #define MOVGUI(simm11, cc2, cc1, cc0, rd) \
     do { \
-    asm_output("movgu %d, %s", simm11, gpn(rd)); \
     Format_4_2I(rd, 0x2c, cc2, 0xc, cc1, cc0, simm11); \
+    asm_output("movgu %d, %s", simm11, gpn(rd)); \
     } while (0)

 #define MOVCCI(simm11, cc2, cc1, cc0, rd) \
     do { \
-    asm_output("movcc %d, %s", simm11, gpn(rd)); \
     Format_4_2I(rd, 0x2c, cc2, 0xd, cc1, cc0, simm11); \
+    asm_output("movcc %d, %s", simm11, gpn(rd)); \
     } while (0)

 #define MOVVSI(simm11, cc2, cc1, cc0, rd) \
     do { \
-    asm_output("movvs %d, %s", simm11, gpn(rd)); \
     Format_4_2I(rd, 0x2c, cc2, 7, cc1, cc0, simm11); \
+    asm_output("movvs %d, %s", simm11, gpn(rd)); \
     } while (0)

 #define MULX(rs1, rs2, rd) \
     do { \
-    asm_output("mul %s, %s, %s", gpn(rs1), gpn(rs2), gpn(rd)); \
     Format_3_1(2, rd, 0x9, rs1, 0, rs2); \
+    asm_output("mul %s, %s, %s", gpn(rs1), gpn(rs2), gpn(rd)); \
     } while (0)

 #define NOP() \
     do { \
-    asm_output("nop"); \
     Format_2_1(0, 0x4, 0); \
+    asm_output("nop"); \
     } while (0)

 #define ORI(rs1, simm13, rd) \
     do { \
-    asm_output("or %s, %d, %s", gpn(rs1), simm13, gpn(rd)); \
     Format_3_1I(2, rd, 0x2, rs1, simm13); \
+    asm_output("or %s, %d, %s", gpn(rs1), simm13, gpn(rd)); \
     } while (0)

 #define OR(rs1, rs2, rd) \
     do { \
-    asm_output("or %s, %s, %s", gpn(rs1), gpn(rs2), gpn(rd)); \
     Format_3_1(2, rd, 0x2, rs1, 0, rs2); \
+    asm_output("or %s, %s, %s", gpn(rs1), gpn(rs2), gpn(rd)); \
     } while (0)

 #define ORN(rs1, rs2, rd) \
     do { \
-    asm_output("orn %s, %s, %s", gpn(rs1), gpn(rs2), gpn(rd)); \
     Format_3_1(2, rd, 0x6, rs1, 0, rs2); \
+    asm_output("orn %s, %s, %s", gpn(rs1), gpn(rs2), gpn(rd)); \
     } while (0)

 #define ANDCC(rs1, rs2, rd) \
     do { \
-    asm_output("andcc %s, %s, %s", gpn(rs1), gpn(rs2), gpn(rd)); \
     Format_3_1(2, rd, 0x11, rs1, 0, rs2); \
+    asm_output("andcc %s, %s, %s", gpn(rs1), gpn(rs2), gpn(rd)); \
     } while (0)

 #define RESTORE(rs1, rs2, rd) \
     do { \
-    asm_output("restore"); \
     Format_3_1(2, rd, 0x3D, rs1, 0, rs2); \
+    asm_output("restore"); \
     } while (0)

 #define SAVEI(rs1, simm13, rd) \
     do { \
-    asm_output("save %s, %d, %s", gpn(rs1), simm13, gpn(rd)); \
     Format_3_1I(2, rd, 0x3C, rs1, simm13); \
+    asm_output("save %s, %d, %s", gpn(rs1), simm13, gpn(rd)); \
     } while (0)

 #define SAVE(rs1, rs2, rd) \
     do { \
-    asm_output("save %s, %s, %s", gpn(rs1), gpn(rs2), gpn(rd)); \
     Format_3_1(2, rd, 0x3C, rs1, 0, rs2); \
+    asm_output("save %s, %s, %s", gpn(rs1), gpn(rs2), gpn(rd)); \
     } while (0)

 #define SETHI(imm22, rd) \
     do { \
-    asm_output("sethi %p, %s", imm22, gpn(rd)); \
     Format_2_1(rd, 0x4, (imm22 >> 10)); \
+    asm_output("sethi %p, %s", imm22, gpn(rd)); \
     } while (0)

 #define SLL(rs1, rs2, rd) \
     do { \
-    asm_output("sll %s, %s, %s", gpn(rs1), gpn(rs2), gpn(rd)); \
     Format_3_5(2, rd, 0x25, rs1, 0, rs2); \
+    asm_output("sll %s, %s, %s", gpn(rs1), gpn(rs2), gpn(rd)); \
     } while (0)

 #define SRA(rs1, rs2, rd) \
     do { \
-    asm_output("sra %s, %s, %s", gpn(rs1), gpn(rs2), gpn(rd)); \
     Format_3_5(2, rd, 0x27, rs1, 0, rs2); \
+    asm_output("sra %s, %s, %s", gpn(rs1), gpn(rs2), gpn(rd)); \
     } while (0)

 #define SRL(rs1, rs2, rd) \
     do { \
-    asm_output("srl %s, %s, %s", gpn(rs1), gpn(rs2), gpn(rd)); \
     Format_3_5(2, rd, 0x26, rs1, 0, rs2); \
+    asm_output("srl %s, %s, %s", gpn(rs1), gpn(rs2), gpn(rd)); \
     } while (0)

 #define STF(rd, rs1, rs2) \
     do { \
-    asm_output("st %s, [%s + %s]", gpn(rd+32), gpn(rs1), gpn(rs2)); \
     Format_3_1(3, rd, 0x24, rs1, 0, rs2); \
+    asm_output("st %s, [%s + %s]", gpn(rd+32), gpn(rs1), gpn(rs2)); \
     } while (0)

 #define STFI(rd, simm13, rs1) \
     do { \
-    asm_output("st %s, [%s + %d]", gpn(rd+32), gpn(rs1), simm13); \
     Format_3_1I(3, rd, 0x24, rs1, simm13); \
+    asm_output("st %s, [%s + %d]", gpn(rd+32), gpn(rs1), simm13); \
     } while (0)

 #define STW(rd, rs2, rs1) \
     do { \
-    asm_output("st %s, [%s + %s]", gpn(rd), gpn(rs1), gpn(rs2)); \
     Format_3_1(3, rd, 0x4, rs1, 0, rs2); \
+    asm_output("st %s, [%s + %s]", gpn(rd), gpn(rs1), gpn(rs2)); \
     } while (0)

 #define STWI(rd, simm13, rs1) \
     do { \
-    asm_output("st %s, [%s + %d]", gpn(rd), gpn(rs1), simm13); \
     Format_3_1I(3, rd, 0x4, rs1, simm13); \
+    asm_output("st %s, [%s + %d]", gpn(rd), gpn(rs1), simm13); \
     } while (0)

 #define SUBCC(rs1, rs2, rd) \
     do { \
-    asm_output("subcc %s, %s, %s", gpn(rs1), gpn(rs2), gpn(rd)); \
     Format_3_1(2, rd, 0x14, rs1, 0, rs2); \
+    asm_output("subcc %s, %s, %s", gpn(rs1), gpn(rs2), gpn(rd)); \
     } while (0)

 #define SUB(rs1, rs2, rd) \
     do { \
-    asm_output("sub %s, %s, %s", gpn(rs1), gpn(rs2), gpn(rd)); \
     Format_3_1(2, rd, 0x4, rs1, 0, rs2); \
+    asm_output("sub %s, %s, %s", gpn(rs1), gpn(rs2), gpn(rd)); \
     } while (0)

 #define SUBI(rs1, simm13, rd) \
     do { \
-    asm_output("sub %s, %d, %s", gpn(rs1), simm13, gpn(rd)); \
     Format_3_1I(2, rd, 0x4, rs1, simm13); \
+    asm_output("sub %s, %d, %s", gpn(rs1), simm13, gpn(rd)); \
     } while (0)

 #define XOR(rs1, rs2, rd) \
     do { \
-    asm_output("xor %s, %s, %s", gpn(rs1), gpn(rs2), gpn(rd)); \
     Format_3_1(2, rd, 0x3, rs1, 0, rs2); \
+    asm_output("xor %s, %s, %s", gpn(rs1), gpn(rs2), gpn(rd)); \
     } while (0)

 // Returns true if imm below 13-bit unsigned immediate)

From 1f1e1452e4a60c55cef0bca7f558b4a2488f6080 Mon Sep 17 00:00:00 2001
From: Jason Orendorff
Date: Fri, 17 Jul 2009 00:48:39 -0500
Subject: [PATCH 03/19] Bug 505249 - lirasm - Style cleanup and new tokenizer.
 r=graydon.

---
 js/src/lirasm/lirasm.cpp | 1067 ++++++++++++++++++++------------------
 1 file changed, 575 insertions(+), 492 deletions(-)

diff --git a/js/src/lirasm/lirasm.cpp b/js/src/lirasm/lirasm.cpp
index 060460898f68..f3c22fc77ed5 100644
--- a/js/src/lirasm/lirasm.cpp
+++ b/js/src/lirasm/lirasm.cpp
@@ -44,7 +44,6 @@
 #include
 #include
 #include
-#include

 #ifdef AVMPLUS_UNIX
 #include
 #endif
@@ -54,6 +53,8 @@
 #include
 #include
+#include
+#include

 #include "nanojit/nanojit.h"
 #include "jstracer.h"
@@ -106,6 +107,88 @@ const int PTRRET =
 #endif
     ;

+enum LirTokenType {
+    NAME, NUMBER, PUNCT, NEWLINE
+};
+
+struct LirToken {
+    LirTokenType type;
+    string data;
+    int lineno;
+};
+
+inline bool
+startsWith(const string &s, const string &prefix)
+{
+    return s.size() >= prefix.size() && s.compare(0, prefix.length(), prefix) == 0;
+}
+
+// LIR files must be ASCII, for simplicity.
+class LirTokenStream {
+public:
+    LirTokenStream(istream &in) : mIn(in), mLineno(0) {}
+
+    bool get(LirToken &token) {
+        if (mLine.empty()) {
+            if (!getline(mIn, mLine))
+                return false;
+            mLine += '\n';
+            mLineno++;
+        }
+        mLine.erase(0, mLine.find_first_not_of(" \t\v\r"));
+        char c = mLine[0];
+        size_t e = mLine.find_first_not_of("0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ_$.+-");
+        if (startsWith(mLine, "->")) {
+            mLine.erase(0, 2);
+            token.type = PUNCT;
+            token.data = "->";
+        } else if (e > 0) {
+            string s = mLine.substr(0, e);
+            mLine.erase(0, e);
+            if (e > 1 && s[0] == '0' && (s[1] == 'x' || s[1] == 'X'))
+                token.type = NUMBER;
+            else if (isdigit(s[0]) || (e > 1 && s[0] == '.' && isdigit(s[1])))
+                token.type = NUMBER;
+            else
+                token.type = NAME;
+            token.data = s;
+        } else if (strchr(":,=[]()", c)) {
+            token.type = PUNCT;
+            token.data = c;
+            mLine.erase(0, 1);
+        } else if (c == ';' || c == '\n') {
+            token.type = NEWLINE;
+            token.data.clear();
+            mLine.clear();
+        } else {
+            cerr << mLineno << ": error: Unrecognized character in file." << endl;
+            return false;
+        }
+
+        token.lineno = mLineno;
+        return true;
+    }
+
+    bool eat(LirTokenType type, const char *exact = NULL) {
+        LirToken token;
+        return (get(token) && token.type == type && (exact == NULL || token.data == exact));
+    }
+
+    bool getName(string &name) {
+        LirToken t;
+        if (get(t) && t.type == NAME) {
+            name = t.data;
+            return true;
+        }
+        return false;
+    }
+
+private:
+    istream &mIn;
+    string mLine;
+    int mLineno;
+};
+
 class LirasmFragment {
 public:
     union {
@@ -122,23 +205,46 @@ typedef map<string, LirasmFragment> Fragments;
 class Lirasm {
 public:
+    Lirasm(bool verbose);
+    ~Lirasm();
+
+    void assemble(istream &in);
+    void lookupFunction(const string &name, CallInfo *&ci);
+
     Fragmento *mFragmento;
     LirBuffer *mLirbuf;
     LogControl mLogc;
     bool mVerbose;
-    avmplus::AvmCore s_core;
     Fragments mFragments;
-    vector<CallInfo*> mCallInfos;
+    map<string, pair<LOpcode, size_t> > mOpMap;

-    Lirasm(bool verbose);
-    ~Lirasm();
+    void bad(const string &msg) {
+        cerr << "error: " << msg << endl;
+        exit(1);
+    }
+
+private:
+    void handlePatch(LirTokenStream &in);
+
+    avmplus::AvmCore mCore;
 };

-class LirasmAssembler {
+class FragmentAssembler {
+public:
+    FragmentAssembler(Lirasm &parent, const string &fragmentName);
+    ~FragmentAssembler();
+
+    void assembleFragment(LirTokenStream &in, bool implied, const LirToken *firstToken);
+
 private:
+    // Prohibit copying.
+    FragmentAssembler(const FragmentAssembler &);
+    FragmentAssembler & operator=(const FragmentAssembler &);
+
+    Lirasm *mParent;
+    const string mFragName;
     Fragment *mFragment;
-    string mFragName;
+    vector<CallInfo*> mCallInfos;
     map<string, LIns*> mLabels;
     LirWriter *mLir;
     LirBufWriter *mBufWriter;
@@ -146,37 +252,26 @@ private:
     LirWriter *mExprFilter;
     LirWriter *mVerboseWriter;
     multimap<string, LIns *> mFwdJumps;
-    map<string, pair<LOpcode, size_t> > op_map;
     size_t mLineno;
     LOpcode mOpcode;
     size_t mOpcount;
-    bool mInFrag;
-
     char mReturnTypeBits;
     vector<string> mTokens;

-    void lookupFunction(const char*, CallInfo *&);
+    void tokenizeLine(LirTokenStream &in, LirToken &token);
     void need(size_t);
-    istream& read_and_tokenize_line(istream&);
-    void tokenize(string const &tok_sep);
-    LIns *ref(string const &);
+    LIns *ref(const string &);
     LIns *do_skip(size_t);
-    LIns *assemble_call(string &);
+    LIns *assemble_call(const string &);
     LIns *assemble_general();
     LIns *assemble_guard();
     LIns *assemble_jump();
     LIns *assemble_load();
-    void bad(string const &msg);
-    void beginFragment();
+    void bad(const string &msg);
+    void extract_any_label(string &lab, char lab_delim);
     void endFragment();
-    void extract_any_label(string &op, string &lab, char lab_delim);
-    void patch();
-
-public:
-    LirasmAssembler(Lirasm &);
-    void assemble(istream &);
 };

 Function functions[] = {
@@ -186,66 +281,6 @@ Function functions[] = {
     FN(free, I32 | (PTRARG<<2))
 };

-void
-LirasmAssembler::lookupFunction(const char *name, CallInfo *&ci)
-{
-    const size_t nfuns = sizeof(functions) / sizeof(functions[0]);
-    for (size_t i = 0; i < nfuns; i++)
-        if (strcmp(name, functions[i].name) == 0) {
-            *ci = functions[i].callInfo;
-            return;
-        }
-
-    Fragments::const_iterator func = mParent->mFragments.find(name);
-    if (func != mParent->mFragments.end()) {
-        if (func->second.mReturnType == RT_FLOAT) {
-            CallInfo target = {(uintptr_t) func->second.rfloat, ARGSIZE_F, 0,
-                               0, nanojit::ABI_FASTCALL, func->first.c_str()};
-            *ci = target;
-
-        } else {
-            CallInfo target = {(uintptr_t) func->second.rint, ARGSIZE_LO, 0,
-                               0, nanojit::ABI_FASTCALL, func->first.c_str()};
-            *ci = target;
-        }
-    } else {
-        ci = NULL;
-    }
-}
-
-istream &
-LirasmAssembler::read_and_tokenize_line(istream &in)
-{
-    char buf[1024];
-    string tok_sep(" \n\t");
-
-    mTokens.clear();
-
-    if (in.getline(buf,sizeof(buf))) {
-        ++mLineno;
-        string line(buf);
-
-        size_t comment = line.find("//");
-        if (comment != string::npos)
-            line.resize(comment);
-
-        line += '\n';
-
-        size_t start = 0;
-        size_t end = 0;
-        while((start = line.find_first_not_of(tok_sep, end)) != string::npos &&
-              (end = line.find_first_of(tok_sep, start)) != string::npos) {
-            string ss = line.substr(start, (end-start));
-            if (ss == "=") {
-                mTokens[mTokens.size()-1] += ss;
-                continue;
-            }
-            mTokens.push_back(ss);
-        }
-    }
-    return in;
-}
-
 template<typename out, typename in> out
 lexical_cast(in arg)
 {
@@ -258,18 +293,19 @@ lexical_cast(in arg)
 }

 int32_t
-imm(string const &s)
+imm(const string &s)
 {
     stringstream tmp(s);
     int32_t ret;
     if ((s.find("0x") == 0 || s.find("0X") == 0) &&
-        (tmp >> hex >> ret && tmp.eof()))
+        (tmp >> hex >> ret && tmp.eof())) {
         return ret;
+    }
     return lexical_cast<int32_t>(s);
 }

 uint64_t
-quad(string const &s)
+quad(const string &s)
 {
     stringstream tmp1(s), tmp2(s);
     union {
@@ -292,223 +328,9 @@ pop_front(vector<t> &vec)
         cerr << "pop_front of empty vector" << endl;
         exit(1);
     }
-    t tmp = vec[0];
-    vec.erase(vec.begin());
-    return tmp;
-}
-
-void
-LirasmAssembler::bad(string const &msg)
-{
-    cerr << "instruction " << mLineno << ": " << msg << endl;
-    exit(1);
-}
-
-void
-LirasmAssembler::need(size_t n)
-{
-    if (mTokens.size() != n)
-        bad("need " + lexical_cast<string>(n)
-            + " tokens, have " + lexical_cast<string>(mTokens.size()));
-}
-
-LIns*
-LirasmAssembler::ref(string const &lab)
-{
-    if (mLabels.find(lab) == mLabels.end())
-        bad("unknown label '" + lab + "'");
-    return mLabels.find(lab)->second;
-}
-
-LIns*
-LirasmAssembler::do_skip(size_t i)
-{
-    LIns *s = mLir->insSkip(i);
-    memset(s->payload(), 0xba, i);
-    return s;
-}
-
-LIns*
-LirasmAssembler::assemble_jump()
-{
-    LIns *target = NULL;
-    LIns *condition = NULL;
-
-    if (mOpcode == LIR_j) {
-        need(1);
-    } else {
-        need(2);
-        string cond = pop_front(mTokens);
-        condition = ref(cond);
-    }
-    string name = pop_front(mTokens);
-    if (mLabels.find(name) != mLabels.end()) {
-        target = ref(name);
-        return mLir->insBranch(mOpcode, condition, target);
-    } else {
-        LIns *ins = mLir->insBranch(mOpcode, condition, target);
-        mFwdJumps.insert(make_pair(name, ins));
-        return ins;
-    }
-}
-
-LIns*
-LirasmAssembler::assemble_load()
-{
-    // Support implicit immediate-as-second-operand modes
-    // since, unlike sti/stqi, no immediate-displacement
-    // load opcodes were defined in LIR.
-    need(2);
-    if (mTokens[1].find("0x") == 0 ||
-        mTokens[1].find("0x") == 0 ||
-        mTokens[1].find_first_of("0123456789") == 0) {
-        return mLir->insLoad(mOpcode,
-                             ref(mTokens[0]),
-                             imm(mTokens[1]));
-    }
-    bad("immediate offset required for load");
-    return NULL; // not reached
-}
-
-LIns*
-LirasmAssembler::assemble_call(string &op)
-{
-    CallInfo *ci = new CallInfo();
-    mParent->mCallInfos.push_back(ci);
-    LIns* args[MAXARGS];
-
-    // Assembler syntax for a call:
-    //
-    //   call 0x1234 fastcall a b c
-    //
-    //   requires at least 2 args,
-    //   fn address immediate and ABI token.
-
-    if (mTokens.size() < 2)
-        bad("need at least address and ABI code for " + op);
-
-    string func = pop_front(mTokens);
-    string abi = pop_front(mTokens);
-
-    AbiKind _abi;
-    if (abi == "fastcall")
-        _abi = ABI_FASTCALL;
-    else if (abi == "stdcall")
-        _abi = ABI_STDCALL;
-    else if (abi == "thiscall")
-        _abi = ABI_THISCALL;
-    else if (abi == "cdecl")
-        _abi = ABI_CDECL;
-    else
-        bad("call abi name '" + abi + "'");
-    ci->_abi = _abi;
-
-    if (mTokens.size() > MAXARGS)
-        bad("too many args to " + op);
-
-    if (func.find("0x") == 0) {
-        ci->_address = imm(func);
-
-        ci->_cse = 0;
-        ci->_fold = 0;
-
-#ifdef DEBUG
-        ci->_name = "fn";
-#endif
-
-    } else {
-        lookupFunction(func.c_str(), ci);
-        if (ci == NULL)
-            bad("invalid function reference " + func);
-        if (_abi != ci->_abi)
-            bad("invalid calling convention for " + func);
-    }
-
-    ci->_argtypes = 0;
-
-    for (size_t i = 0; i < mTokens.size(); ++i) {
-        args[i] = ref(mTokens[mTokens.size() - (i+1)]);
-        ci->_argtypes |= args[i]->isQuad() ? ARGSIZE_F : ARGSIZE_LO;
-        ci->_argtypes <<= 2;
-    }
-
-    // Select return type from opcode.
-    // FIXME: callh needs special treatment currently
-    // missing from here.
-    if (mOpcode == LIR_call)
-        ci->_argtypes |= ARGSIZE_LO;
-    else
-        ci->_argtypes |= ARGSIZE_F;
-
-    return mLir->insCall(ci, args);
-}
-
-LIns*
-LirasmAssembler::assemble_guard()
-{
-    LIns *exitIns = do_skip(sizeof(LasmSideExit));
-    LasmSideExit* exit = (LasmSideExit*) exitIns->payload();
-    memset(exit, 0, sizeof(LasmSideExit));
-    exit->from = mFragment;
-    exit->target = NULL;
-    exit->line = mLineno;
-
-    LIns *guardRec = do_skip(sizeof(GuardRecord));
-    GuardRecord *rec = (GuardRecord*) guardRec->payload();
-    memset(rec, 0, sizeof(GuardRecord));
-    rec->exit = exit;
-    exit->addGuard(rec);
-
-    need(mOpcount);
-
-    if (mOpcode != LIR_loop)
-        mReturnTypeBits |= RT_GUARD;
-
-    LIns *ins_cond;
-    if (mOpcode == LIR_xt || mOpcode == LIR_xf)
-        ins_cond = ref(pop_front(mTokens));
-    else
-        ins_cond = NULL;
-
-    if (!mTokens.empty())
-        bad("too many arguments");
-
-    return mLir->insGuard(mOpcode, ins_cond, guardRec);
-}
-
-LIns*
-LirasmAssembler::assemble_general()
-{
-    if (mOpcount == 0) {
-        // 0-ary ops may, or may not, have an immediate
-        // thing wedged in them; depends on the op. We
-        // are lax and set it if it's provided.
-        LIns *ins = mLir->ins0(mOpcode);
-        if (mTokens.size() > 0) {
-            assert(mTokens.size() == 1);
-            ins->initLInsI(mOpcode, imm(mTokens[0]));
-        }
-        return ins;
-    } else {
-        need(mOpcount);
-        if (mOpcount == 1) {
-            if (mOpcode == LIR_ret)
-                mReturnTypeBits |= RT_INT32;
-            if (mOpcode == LIR_fret)
-                mReturnTypeBits |= RT_FLOAT;
-
-            return mLir->ins1(mOpcode,
-                              ref(mTokens[0]));
-        } else if (mOpcount == 2) {
-            return mLir->ins2(mOpcode,
-                              ref(mTokens[0]),
-                              ref(mTokens[1]));
-        } else {
-            bad("too many operands");
-        }
-    }
-    // Never get here.
-    return NULL;
+    t tmp = vec[0];
+    vec.erase(vec.begin());
+    return tmp;
 }

 void
@@ -575,26 +397,8 @@ dump_srecords(ostream &out, Fragment *frag)
     }
 }

-void
-LirasmAssembler::extract_any_label(string &op,
-                                   string &lab,
-                                   char lab_delim)
-{
-    if (op.size() > 1 &&
-        op[op.size()-1] == lab_delim &&
-        !mTokens.empty()) {
-
-        lab = op;
-        op = pop_front(mTokens);
-        lab.erase(lab.size()-1);
-
-        if (mLabels.find(lab) != mLabels.end())
-            bad("duplicate label");
-    }
-}
-
-void
-LirasmAssembler::beginFragment()
+FragmentAssembler::FragmentAssembler(Lirasm &parent, const string &fragmentName)
+    : mParent(&parent), mFragName(fragmentName)
 {
     mFragment = new (&gc) Fragment(NULL);
     mFragment->lirbuf = mParent->mLirbuf;
@@ -615,24 +419,249 @@ LirasmAssembler::beginFragment()
     }
 #endif

-    mInFrag = true;
     mReturnTypeBits = 0;
     mLir->ins0(LIR_start);
+
+    mLineno = 0;
+}
+
+FragmentAssembler::~FragmentAssembler()
+{
+    delete mVerboseWriter;
+    delete mExprFilter;
+    delete mCseFilter;
+    delete mBufWriter;
+
+    for (size_t i = 0; i < mCallInfos.size(); ++i)
+        delete mCallInfos[i];
 }

 void
-LirasmAssembler::endFragment()
+FragmentAssembler::bad(const string &msg)
 {
-    mInFrag = false;
+    cerr << "instruction " << mLineno << ": " << msg << endl;
+    exit(1);
+}

-    if (mReturnTypeBits == 0)
+void
+FragmentAssembler::need(size_t n)
+{
+    if (mTokens.size() != n) {
+        bad("need " + lexical_cast<string>(n) +
+            " tokens, have " + lexical_cast<string>(mTokens.size()));
+    }
+}
+
+LIns *
+FragmentAssembler::ref(const string &lab)
+{
+    if (mLabels.find(lab) == mLabels.end())
+        bad("unknown label '" + lab + "'");
+    return mLabels.find(lab)->second;
+}
+
+LIns *
+FragmentAssembler::do_skip(size_t i)
+{
+    LIns *s = mLir->insSkip(i);
+    memset(s->payload(), 0xba, i);
+    return s;
+}
+
+LIns *
+FragmentAssembler::assemble_jump()
+{
+    LIns *target = NULL;
+    LIns *condition = NULL;
+
+    if (mOpcode == LIR_j) {
+        need(1);
+    } else {
+        need(2);
+        string cond = pop_front(mTokens);
+        condition = ref(cond);
+    }
+    string name = pop_front(mTokens);
+    if (mLabels.find(name) != mLabels.end()) {
+        target = ref(name);
+        return mLir->insBranch(mOpcode, condition, target);
+    } else {
+        LIns *ins = mLir->insBranch(mOpcode, condition, target);
+        mFwdJumps.insert(make_pair(name, ins));
+        return ins;
+    }
+}
+
+LIns *
+FragmentAssembler::assemble_load()
+{
+    // Support implicit immediate-as-second-operand modes
+    // since, unlike sti/stqi, no immediate-displacement
+    // load opcodes were defined in LIR.
+    need(2);
+    if (mTokens[1].find("0x") == 0 ||
+        mTokens[1].find("0x") == 0 ||
+        mTokens[1].find_first_of("0123456789") == 0) {
+        return mLir->insLoad(mOpcode,
+                             ref(mTokens[0]),
+                             imm(mTokens[1]));
+    }
+    bad("immediate offset required for load");
+    return NULL; // not reached
+}
+
+LIns *
+FragmentAssembler::assemble_call(const string &op)
+{
+    CallInfo *ci = new CallInfo();
+    mCallInfos.push_back(ci);
+    LIns *args[MAXARGS];
+
+    // Assembler syntax for a call:
+    //
+    //   call 0x1234 fastcall a b c
+    //
+    //   requires at least 2 args,
+    //   fn address immediate and ABI token.
+
+    if (mTokens.size() < 2)
+        bad("need at least address and ABI code for " + op);
+
+    string func = pop_front(mTokens);
+    string abi = pop_front(mTokens);
+
+    AbiKind _abi;
+    if (abi == "fastcall")
+        _abi = ABI_FASTCALL;
+    else if (abi == "stdcall")
+        _abi = ABI_STDCALL;
+    else if (abi == "thiscall")
+        _abi = ABI_THISCALL;
+    else if (abi == "cdecl")
+        _abi = ABI_CDECL;
+    else
+        bad("call abi name '" + abi + "'");
+    ci->_abi = _abi;
+
+    if (mTokens.size() > MAXARGS)
+        bad("too many args to " + op);
+
+    if (func.find("0x") == 0) {
+        ci->_address = imm(func);
+
+        ci->_cse = 0;
+        ci->_fold = 0;
+
+#ifdef DEBUG
+        ci->_name = "fn";
+#endif
+    } else {
+        mParent->lookupFunction(func, ci);
+        if (ci == NULL)
+            bad("invalid function reference " + func);
+        if (_abi != ci->_abi)
+            bad("invalid calling convention for " + func);
+    }
+
+    ci->_argtypes = 0;
+
+    for (size_t i = 0; i < mTokens.size(); ++i) {
+        args[i] = ref(mTokens[mTokens.size() - (i+1)]);
+        ci->_argtypes |= args[i]->isQuad() ? ARGSIZE_F : ARGSIZE_LO;
+        ci->_argtypes <<= 2;
+    }
+
+    // Select return type from opcode.
+    // FIXME: callh needs special treatment currently
+    // missing from here.
+    if (mOpcode == LIR_call)
+        ci->_argtypes |= ARGSIZE_LO;
+    else
+        ci->_argtypes |= ARGSIZE_F;
+
+    return mLir->insCall(ci, args);
+}
+
+LIns *
+FragmentAssembler::assemble_guard()
+{
+    LIns *exitIns = do_skip(sizeof(LasmSideExit));
+    LasmSideExit* exit = (LasmSideExit*) exitIns->payload();
+    memset(exit, 0, sizeof(LasmSideExit));
+    exit->from = mFragment;
+    exit->target = NULL;
+    exit->line = mLineno;
+
+    LIns *guardRec = do_skip(sizeof(GuardRecord));
+    GuardRecord *rec = (GuardRecord*) guardRec->payload();
+    memset(rec, 0, sizeof(GuardRecord));
+    rec->exit = exit;
+    exit->addGuard(rec);
+
+    need(mOpcount);
+
+    if (mOpcode != LIR_loop)
+        mReturnTypeBits |= RT_GUARD;
+
+    LIns *ins_cond;
+    if (mOpcode == LIR_xt || mOpcode == LIR_xf)
+        ins_cond = ref(pop_front(mTokens));
+    else
+        ins_cond = NULL;
+
+    if (!mTokens.empty())
+        bad("too many arguments");
+
+    return mLir->insGuard(mOpcode, ins_cond, guardRec);
+}
+
+LIns *
+FragmentAssembler::assemble_general()
+{
+    if (mOpcount == 0) {
+        // 0-ary ops may, or may not, have an immediate
+        // thing wedged in them; depends on the op. We
+        // are lax and set it if it's provided.
+        LIns *ins = mLir->ins0(mOpcode);
+        if (mTokens.size() > 0) {
+            assert(mTokens.size() == 1);
+            ins->initLInsI(mOpcode, imm(mTokens[0]));
+        }
+        return ins;
+    } else {
+        need(mOpcount);
+        if (mOpcount == 1) {
+            if (mOpcode == LIR_ret)
+                mReturnTypeBits |= RT_INT32;
+            if (mOpcode == LIR_fret)
+                mReturnTypeBits |= RT_FLOAT;
+
+            return mLir->ins1(mOpcode,
+                              ref(mTokens[0]));
+        } else if (mOpcount == 2) {
+            return mLir->ins2(mOpcode,
+                              ref(mTokens[0]),
+                              ref(mTokens[1]));
+        } else {
+            bad("too many operands");
+        }
+    }
+    // Never get here.
+    return NULL;
+}
+
+void
+FragmentAssembler::endFragment()
+{
+    if (mReturnTypeBits == 0) {
         cerr << "warning: no return type in fragment '"
              << mFragName << "'" << endl;
-    if (mReturnTypeBits != RT_INT32 && mReturnTypeBits != RT_FLOAT
-        && mReturnTypeBits != RT_GUARD)
+    }
+    if (mReturnTypeBits != RT_INT32 && mReturnTypeBits != RT_FLOAT &&
+        mReturnTypeBits != RT_GUARD) {
         cerr << "warning: multiple return types in fragment '"
              << mFragName << "'" << endl;
-
+    }
     LIns *exitIns = do_skip(sizeof(SideExit));
     SideExit* exit = (SideExit*) exitIns->payload();
     memset(exit, 0, sizeof(SideExit));
@@ -645,15 +674,15 @@ LirasmAssembler::endFragment()
     if (mParent->mFragmento->assm()->error() != nanojit::None) {
         cerr << "error during assembly: ";
         switch (mParent->mFragmento->assm()->error()) {
-        case nanojit::OutOMem: cerr << "OutOMem"; break;
-        case nanojit::StackFull: cerr << "StackFull"; break;
-        case nanojit::RegionFull: cerr << "RegionFull"; break;
-        case nanojit::MaxLength: cerr << "MaxLength"; break;
-        case nanojit::MaxExit: cerr << "MaxExit"; break;
-        case nanojit::MaxXJump: cerr << "MaxXJump"; break;
-        case nanojit::UnknownPrim: cerr << "UnknownPrim"; break;
-        case nanojit::UnknownBranch: cerr << "UnknownBranch"; break;
-        case nanojit::None: cerr << "None"; break;
+          case nanojit::OutOMem: cerr << "OutOMem"; break;
+          case nanojit::StackFull: cerr << "StackFull"; break;
+          case nanojit::RegionFull: cerr << "RegionFull"; break;
+          case nanojit::MaxLength: cerr << "MaxLength"; break;
+          case nanojit::MaxExit: cerr << "MaxExit"; break;
+          case nanojit::MaxXJump: cerr << "MaxXJump"; break;
+          case nanojit::UnknownPrim: cerr << "UnknownPrim"; break;
+          case nanojit::UnknownBranch: cerr << "UnknownBranch"; break;
+          case nanojit::None: cerr << "None"; break;
         }
         cerr << endl;
         std::exit(1);
@@ -663,170 +692,156 @@
     f = &mParent->mFragments[mFragName];

     switch (mReturnTypeBits) {
-    case RT_FLOAT:
-    default:
-        f->rfloat = reinterpret_cast(mFragment->code());
-        f->mReturnType = RT_FLOAT;
-        break;
-    case RT_INT32:
-        f->rint = reinterpret_cast(mFragment->code());
-        f->mReturnType = RT_INT32;
-        break;
     case RT_GUARD:
         f->rguard = reinterpret_cast(mFragment->code());
        f->mReturnType = RT_GUARD;
         break;
+    case RT_FLOAT:
+        f->rfloat = reinterpret_cast(mFragment->code());
+        f->mReturnType = RT_FLOAT;
+        break;
+    default:
+        f->rint = reinterpret_cast(mFragment->code());
+        f->mReturnType = RT_INT32;
+        break;
     }

-    delete mVerboseWriter;
-    delete mExprFilter;
-    delete mCseFilter;
-    delete mBufWriter;
-    for (size_t i = 0; i < mParent->mCallInfos.size(); ++i)
-        delete mParent->mCallInfos[i];
-    mParent->mCallInfos.clear();
-
     mParent->mFragments[mFragName].mLabels = mLabels;
-    mLabels.clear();
 }

 void
-LirasmAssembler::assemble(istream &in)
+FragmentAssembler::tokenizeLine(LirTokenStream &in, LirToken &token)
 {
-#define OPDEF(op, number, args, repkind) \
-    op_map[#op] = make_pair(LIR_##op, args);
-#define OPDEF64(op, number, args, repkind) \
-    op_map[#op] = make_pair(LIR_##op, args);
-#include "nanojit/LIRopcode.tbl"
-#undef OPDEF
-#undef OPDEF64
+    mTokens.clear();
+    mTokens.push_back(token.data);

-    op_map["alloc"] = op_map["ialloc"];
-    op_map["param"] = op_map["iparam"];
+    while (in.get(token)) {
+        if (token.type == NEWLINE)
+            break;
+        mTokens.push_back(token.data);
+    }
+}

-    bool singleFrag = false;
-    bool first = true;
-    while(read_and_tokenize_line(in)) {
+void
+FragmentAssembler::extract_any_label(string &lab, char lab_delim)
+{
+    if (mTokens.size() > 2 && mTokens[1].size() == 1 && mTokens[1][0] == lab_delim) {
+        lab = pop_front(mTokens);
+        pop_front(mTokens);  // remove punctuation

-        if (mParent->mLirbuf->outOMem()) {
-            cerr << "lirbuf out of memory" << endl;
-            exit(1);
+        if (mLabels.find(lab) != mLabels.end())
+            bad("duplicate label");
+    }
+}
+
+void
+FragmentAssembler::assembleFragment(LirTokenStream &in, bool implied, const LirToken *firstToken)
+{
+    LirToken token;
+    while (true) {
+        if (firstToken) {
+            token = *firstToken;
+            firstToken = NULL;
+        } else if (!in.get(token)) {
+            if (!implied)
+                bad("unexpected end of file in fragment '" + mFragName + "'");
+            break;
         }
-
-        if (mTokens.empty())
+        if (token.type == NEWLINE)
             continue;
+        if (token.type != NAME)
+            bad("unexpected token '" + token.data + "'");

-        string op = pop_front(mTokens);
-
-        if (op == ".patch") {
-            tokenize(".");
-            patch();
-            continue;
+        string op = token.data;
+        if (op == ".begin")
+            bad("nested fragments are not supported");
+        if (op == ".end") {
+            if (implied)
+                bad(".end without .begin");
+            if (!in.eat(NEWLINE))
+                bad("extra junk after .end");
+            break;
         }

-        if (!singleFrag) {
-            if (op == ".begin") {
-                if (mTokens.size() != 1)
-                    bad("missing fragment name");
-                if (mInFrag)
-                    bad("nested fragments are not supported");
-
-                mFragName = pop_front(mTokens);
-
-                beginFragment();
-                first = false;
-                continue;
-            } else if (op == ".end") {
-                if (!mInFrag)
-                    bad("expecting .begin before .end");
-                if (!mTokens.empty())
-                    bad("too many tokens");
-                endFragment();
-                continue;
-            }
-        }
-        if (first) {
-            first = false;
-            singleFrag = true;
-            mFragName = "main";
-
-            beginFragment();
-        }
+        mLineno = token.lineno;
+        tokenizeLine(in, token);

         string lab;
         LIns *ins = NULL;
-        extract_any_label(op, lab, ':');
+        extract_any_label(lab, ':');

         /* Save label and do any back-patching of deferred forward-jumps. */
         if (!lab.empty()) {
             ins = mLir->ins0(LIR_label);
-            typedef multimap<string,LIns*> mulmap;
+            typedef multimap<string, LIns *> mulmap;
             typedef mulmap::const_iterator ci;
-            pair<ci,ci> range = mFwdJumps.equal_range(lab);
+            pair<ci, ci> range = mFwdJumps.equal_range(lab);
             for (ci i = range.first; i != range.second; ++i) {
                 i->second->setTarget(ins);
             }
             mFwdJumps.erase(lab);
             lab.clear();
         }
-        extract_any_label(op, lab, '=');
+        extract_any_label(lab, '=');

-        if (op_map.find(op) == op_map.end())
+        assert(!mTokens.empty());
+        op = pop_front(mTokens);
+        if (mParent->mOpMap.find(op) == mParent->mOpMap.end())
             bad("unknown instruction '" + op + "'");

-        pair<LOpcode, size_t> entry = op_map[op];
+        pair<LOpcode, size_t> entry = mParent->mOpMap[op];
         mOpcode = entry.first;
         mOpcount = entry.second;

         switch (mOpcode) {
         // A few special opcode cases.
-          case LIR_j:
-          case LIR_jt:
-          case LIR_jf:
-          case LIR_ji:
+        case LIR_j:
+        case LIR_jt:
+        case LIR_jf:
+        case LIR_ji:
             ins = assemble_jump();
             break;

-          case LIR_int:
+        case LIR_int:
             need(1);
             ins = mLir->insImm(imm(mTokens[0]));
             break;

-          case LIR_quad:
+        case LIR_quad:
             need(1);
             ins = mLir->insImmq(quad(mTokens[0]));
             break;

-          case LIR_sti:
-          case LIR_stqi:
+        case LIR_sti:
+        case LIR_stqi:
             need(3);
             ins = mLir->insStorei(ref(mTokens[0]),
                                   ref(mTokens[1]),
                                   imm(mTokens[2]));
             break;

-          case LIR_ld:
-          case LIR_ldc:
-          case LIR_ldq:
-          case LIR_ldqc:
-          case LIR_ldcb:
-          case LIR_ldcs:
+        case LIR_ld:
+        case LIR_ldc:
+        case LIR_ldq:
+        case LIR_ldqc:
+        case LIR_ldcb:
+        case LIR_ldcs:
             ins = assemble_load();
             break;

-          case LIR_iparam:
+        case LIR_iparam:
             need(2);
             ins = mLir->insParam(imm(mTokens[0]),
                                  imm(mTokens[1]));
             break;

-          case LIR_ialloc:
+        case LIR_ialloc:
             need(1);
             ins = mLir->insAlloc(imm(mTokens[0]));
             break;

-          case LIR_skip:
+        case LIR_skip:
             need(1);
             {
                 int32_t count = imm(mTokens[0]);
@@ -836,20 +851,21 @@
             }
             break;

-          case LIR_xt:
-          case LIR_xf:
-          case LIR_x:
-          case LIR_xbarrier:
-          case LIR_loop:
+        case LIR_xt:
+        case LIR_xf:
+        case LIR_x:
+        case LIR_xbarrier:
+        case LIR_loop:
             ins = assemble_guard();
             break;

-          case LIR_call:
-          case LIR_callh:
-          case LIR_fcall:
+        case LIR_call:
+        case LIR_callh:
+        case LIR_fcall:
             ins = assemble_call(op);
             break;
-          default:
+
+        default:
             ins = assemble_general();
             break;
         }

         assert(ins);
         if (!lab.empty())
             mLabels.insert(make_pair(lab, ins));
-    }

-    if (mInFrag && singleFrag)
-        endFragment();
-    if (mInFrag)
-        bad("unexpected EOF");
-    if (mParent->mLirbuf->outOMem()) {
-        cerr << "lirbuf out of memory" << endl;
-        exit(1);
-    }
-}
-
-bool
-has_flag(vector<string> &args, string const &flag)
-{
-    for (vector<string>::iterator i = args.begin();
-         i != args.end(); ++i) {
-        if (*i == flag) {
-            args.erase(i);
-            return true;
+        if (mParent->mLirbuf->outOMem()) {
+            cerr << "lirbuf out of memory" << endl;
+            exit(1);
         }
     }
-    return false;
+    endFragment();
 }
-
 Lirasm::Lirasm(bool verbose)
 {
     mVerbose = verbose;
     nanojit::AvmCore::config.tree_opt = true;
     mLogc.lcbits = 0;
-    mFragmento = new (&gc) Fragmento(&s_core, &mLogc, 32);
+    mFragmento = new (&gc) Fragmento(&mCore, &mLogc, 32);
     mFragmento->labels = NULL;
     mLirbuf = new (&gc) LirBuffer(mFragmento);
 #ifdef DEBUG
     if (mVerbose) {
         mLogc.lcbits = LC_Assembly;
-        mFragmento->labels = new (&gc) LabelMap(&s_core);
+        mFragmento->labels = new (&gc) LabelMap(&mCore);
         mLirbuf->names = new (&gc) LirNameMap(&gc, mFragmento->labels);
     }
 #endif
+
+    // Populate the mOpMap table.
+#define OPDEF(op, number, args, repkind) \
+    mOpMap[#op] = make_pair(LIR_##op, args);
+#define OPDEF64(op, number, args, repkind) \
+    mOpMap[#op] = make_pair(LIR_##op, args);
+#include "nanojit/LIRopcode.tbl"
+#undef OPDEF
+#undef OPDEF64
+
+    // TODO - These should alias to the appropriate platform-specific LIR opcode.
+ mOpMap["alloc"] = mOpMap["ialloc"]; + mOpMap["param"] = mOpMap["iparam"]; } Lirasm::~Lirasm() @@ -912,49 +924,120 @@ Lirasm::~Lirasm() delete mFragmento; } -LirasmAssembler::LirasmAssembler(Lirasm &lasm) -{ - mParent = &lasm; - mInFrag = false; - mLineno = 0; -} - void -LirasmAssembler::tokenize(string const &tok_sep) +Lirasm::lookupFunction(const string &name, CallInfo *&ci) { - vector::iterator i; - for (i = mTokens.begin(); i < mTokens.end(); i++) - { - string line = *i; - size_t start = 0; - size_t end = 0; - while((start = line.find_first_not_of(tok_sep, end)) != string::npos && - (end = line.find_first_of(tok_sep, start)) != string::npos) { - const string ss = line.substr(start, (end-start)); - i->erase(start, end-start+1); - mTokens.insert(i++, ss); - mTokens.insert(i++, tok_sep); + const size_t nfuns = sizeof(functions) / sizeof(functions[0]); + for (size_t i = 0; i < nfuns; i++) { + if (name == functions[i].name) { + *ci = functions[i].callInfo; + return; } } + + Fragments::const_iterator func = mFragments.find(name); + if (func != mFragments.end()) { + if (func->second.mReturnType == RT_FLOAT) { + CallInfo target = {(uintptr_t) func->second.rfloat, ARGSIZE_F, 0, + 0, nanojit::ABI_FASTCALL, func->first.c_str()}; + *ci = target; + + } else { + CallInfo target = {(uintptr_t) func->second.rint, ARGSIZE_LO, 0, + 0, nanojit::ABI_FASTCALL, func->first.c_str()}; + *ci = target; + } + } else { + ci = NULL; + } } void -LirasmAssembler::patch() +Lirasm::assemble(istream &in) { - if (mTokens[1] != "." || mTokens[3] != "->") + LirTokenStream ts(in); + bool first = true; + + LirToken token; + while (ts.get(token)) { + if (mLirbuf->outOMem()) { + cerr << "lirbuf out of memory" << endl; + exit(1); + } + if (token.type == NEWLINE) + continue; + if (token.type != NAME) + bad("unexpected token '" + token.data + "'"); + + const string &op = token.data; + if (op == ".patch") { + handlePatch(ts); + } else if (op == ".begin") { + string name; + if (!ts.getName(name)) + bad("expected fragment name after .begin"); + if (!ts.eat(NEWLINE)) + bad("extra junk after .begin " + name); + + FragmentAssembler assembler(*this, name); + assembler.assembleFragment(ts, false, NULL); + first = false; + } else if (op == ".end") { + bad(".end without .begin"); + } else if (first) { + FragmentAssembler assembler(*this, "main"); + assembler.assembleFragment(ts, true, &token); + break; + } else { + bad("unexpected stray opcode '" + op + "'"); + } + } + + if (mLirbuf->outOMem()) { + cerr << "lirbuf out of memory" << endl; + exit(1); + } +} + +void +Lirasm::handlePatch(LirTokenStream &in) +{ + string src, fragName, guardName, destName; + + if (!in.getName(src) || !in.eat(PUNCT, "->") || !in.getName(destName)) bad("incorrect syntax"); + + // Break the src at '.'. This is awkward but the syntax looks nice. 
+    size_t j = src.find('.');
+    if (j == string::npos || j == 0 || j == src.size() - 1)
+        bad("incorrect syntax");
+    fragName = src.substr(0, j);
+    guardName = src.substr(j + 1);
+
     Fragments::iterator i;
-    if ((i=mParent->mFragments.find(mTokens[0])) == mParent->mFragments.end())
+    if ((i=mFragments.find(fragName)) == mFragments.end())
         bad("invalid fragment reference");
     LirasmFragment *frag = &i->second;
-    if (frag->mLabels.find(mTokens[2]) == frag->mLabels.end())
+    if (frag->mLabels.find(guardName) == frag->mLabels.end())
         bad("invalid guard reference");
-    LIns *ins = frag->mLabels.find(mTokens[2])->second;
-    if ((i=mParent->mFragments.find(mTokens[4])) == mParent->mFragments.end())
+    LIns *ins = frag->mLabels.find(guardName)->second;
+    if ((i=mFragments.find(destName)) == mFragments.end())
         bad("invalid guard reference");

     ins->record()->exit->target = i->second.fragptr;
-    mParent->mFragmento->assm()->patch(ins->record()->exit);
+    mFragmento->assm()->patch(ins->record()->exit);
+}
+
+bool
+has_flag(vector<string> &args, const string &flag)
+{
+    for (vector<string>::iterator i = args.begin(); i != args.end(); ++i) {
+        if (*i == flag) {
+            args.erase(i);
+            return true;
+        }
+    }
+    return false;
 }

 int
@@ -996,7 +1079,7 @@ main(int argc, char **argv)
     }

     Lirasm lasm(verbose);
-    LirasmAssembler(lasm).assemble(in);
+    lasm.assemble(in);

     Fragments::const_iterator i;
     if (execute) {

From 58fc4b29f6fb0181e5185c009bb90e9969dc860b Mon Sep 17 00:00:00 2001
From: Boris Zbarsky
Date: Mon, 27 Jul 2009 16:14:12 -0400
Subject: [PATCH 04/19] Bug 496908. Make JSVAL_IS_* functions, not macros.
 r=brendan,jwalden

---
 js/src/jsapi.h | 159 +++++++++++++++++++++++++++++++++++--------------
 1 file changed, 114 insertions(+), 45 deletions(-)

diff --git a/js/src/jsapi.h b/js/src/jsapi.h
index d03b13d83a6e..8d6de69f2301 100644
--- a/js/src/jsapi.h
+++ b/js/src/jsapi.h
@@ -54,35 +54,122 @@ JS_BEGIN_EXTERN_C
 /*
  * Type tags stored in the low bits of a jsval.
  */
-#define JSVAL_OBJECT            0x0     /* untagged reference to object */
-#define JSVAL_INT               0x1     /* tagged 31-bit integer value */
-#define JSVAL_DOUBLE            0x2     /* tagged reference to double */
-#define JSVAL_STRING            0x4     /* tagged reference to string */
-#define JSVAL_BOOLEAN           0x6     /* tagged boolean value */
+typedef enum jsvaltag {
+    JSVAL_OBJECT  = 0x0,    /* untagged reference to object */
+    JSVAL_INT     = 0x1,    /* tagged 31-bit integer value */
+    JSVAL_DOUBLE  = 0x2,    /* tagged reference to double */
+    JSVAL_STRING  = 0x4,    /* tagged reference to string */
+    JSVAL_BOOLEAN = 0x6     /* tagged boolean value */
+} jsvaltag;
+
+#define JSVAL_OBJECT ((jsvaltag)0x0)
+#define JSVAL_INT ((jsvaltag)0x1)
+#define JSVAL_DOUBLE ((jsvaltag)0x2)
+#define JSVAL_STRING ((jsvaltag)0x4)
+#define JSVAL_BOOLEAN ((jsvaltag)0x6)

 /* Type tag bitfield length and derived macros. */
 #define JSVAL_TAGBITS           3
 #define JSVAL_TAGMASK           JS_BITMASK(JSVAL_TAGBITS)
-#define JSVAL_TAG(v)            ((v) & JSVAL_TAGMASK)
-#define JSVAL_SETTAG(v,t)       ((v) | (t))
-#define JSVAL_CLRTAG(v)         ((v) & ~(jsval)JSVAL_TAGMASK)
 #define JSVAL_ALIGN             JS_BIT(JSVAL_TAGBITS)

+/* Not a function, because we have static asserts that use it */
+#define JSVAL_TAG(v)            ((jsvaltag)((v) & JSVAL_TAGMASK))
+
+/* Not a function, because we have static asserts that use it */
+#define JSVAL_SETTAG(v, t)      ((v) | (t))
+
+static JS_ALWAYS_INLINE jsval
+JSVAL_CLRTAG(jsval v)
+{
+    return v & ~(jsval)JSVAL_TAGMASK;
+}
+
+/*
+ * Well-known JS values. The extern'd variables are initialized when the
+ * first JSContext is created by JS_NewContext (see below).
+ */
+#define JSVAL_NULL              ((jsval) 0)
+#define JSVAL_ZERO              INT_TO_JSVAL(0)
+#define JSVAL_ONE               INT_TO_JSVAL(1)
+#define JSVAL_FALSE             PSEUDO_BOOLEAN_TO_JSVAL(JS_FALSE)
+#define JSVAL_TRUE              PSEUDO_BOOLEAN_TO_JSVAL(JS_TRUE)
+#define JSVAL_VOID              PSEUDO_BOOLEAN_TO_JSVAL(2)
+
+/*
+ * A pseudo-boolean is a 29-bit (for 32-bit jsval) or 61-bit (for 64-bit jsval)
+ * value other than 0 or 1 encoded as a jsval whose tag is JSVAL_BOOLEAN.
+ *
+ * JSVAL_VOID happens to be defined as a jsval encoding a pseudo-boolean, but
+ * embedders MUST NOT rely on this. All other possible pseudo-boolean values
+ * are implementation-reserved and MUST NOT be constructed by any embedding of
+ * SpiderMonkey.
+ */
+#define JSVAL_TO_PSEUDO_BOOLEAN(v)      ((JSBool) ((v) >> JSVAL_TAGBITS))
+#define PSEUDO_BOOLEAN_TO_JSVAL(b)                                            \
+    JSVAL_SETTAG((jsval) (b) << JSVAL_TAGBITS, JSVAL_BOOLEAN)
+
 /* Predicates for type testing. */
-#define JSVAL_IS_OBJECT(v)      (JSVAL_TAG(v) == JSVAL_OBJECT)
-#define JSVAL_IS_NUMBER(v)      (JSVAL_IS_INT(v) || JSVAL_IS_DOUBLE(v))
-#define JSVAL_IS_INT(v)         ((v) & JSVAL_INT)
-#define JSVAL_IS_DOUBLE(v)      (JSVAL_TAG(v) == JSVAL_DOUBLE)
-#define JSVAL_IS_STRING(v)      (JSVAL_TAG(v) == JSVAL_STRING)
-#define JSVAL_IS_BOOLEAN(v)     (((v) & ~((jsval)1 << JSVAL_TAGBITS)) ==      \
-                                 JSVAL_BOOLEAN)
-#define JSVAL_IS_NULL(v)        ((v) == JSVAL_NULL)
-#define JSVAL_IS_VOID(v)        ((v) == JSVAL_VOID)
-#define JSVAL_IS_PRIMITIVE(v)   (!JSVAL_IS_OBJECT(v) || JSVAL_IS_NULL(v))
+static JS_ALWAYS_INLINE JSBool
+JSVAL_IS_OBJECT(jsval v)
+{
+    return JSVAL_TAG(v) == JSVAL_OBJECT;
+}
+
+static JS_ALWAYS_INLINE JSBool
+JSVAL_IS_INT(jsval v)
+{
+    return v & JSVAL_INT;
+}
+
+static JS_ALWAYS_INLINE JSBool
+JSVAL_IS_DOUBLE(jsval v)
+{
+    return JSVAL_TAG(v) == JSVAL_DOUBLE;
+}
+
+static JS_ALWAYS_INLINE JSBool
+JSVAL_IS_NUMBER(jsval v)
+{
+    return JSVAL_IS_INT(v) || JSVAL_IS_DOUBLE(v);
+}
+
+static JS_ALWAYS_INLINE JSBool
+JSVAL_IS_STRING(jsval v)
+{
+    return JSVAL_TAG(v) == JSVAL_STRING;
+}
+
+static JS_ALWAYS_INLINE JSBool
+JSVAL_IS_BOOLEAN(jsval v)
+{
+    return (v & ~((jsval)1 << JSVAL_TAGBITS)) == JSVAL_BOOLEAN;
+}
+
+static JS_ALWAYS_INLINE JSBool
+JSVAL_IS_NULL(jsval v)
+{
+    return v == JSVAL_NULL;
+}
+
+static JS_ALWAYS_INLINE JSBool
+JSVAL_IS_VOID(jsval v)
+{
+    return v == JSVAL_VOID;
+}
+
+static JS_ALWAYS_INLINE JSBool
+JSVAL_IS_PRIMITIVE(jsval v)
+{
+    return !JSVAL_IS_OBJECT(v) || JSVAL_IS_NULL(v);
+}

 /* Objects, strings, and doubles are GC'ed.
  */
-#define JSVAL_IS_GCTHING(v)     (!((v) & JSVAL_INT) &&                        \
-                                 JSVAL_TAG(v) != JSVAL_BOOLEAN)
+static JS_ALWAYS_INLINE JSBool
+JSVAL_IS_GCTHING(jsval v)
+{
+    return !(v & JSVAL_INT) && JSVAL_TAG(v) != JSVAL_BOOLEAN;
+}

 static JS_ALWAYS_INLINE void *
 JSVAL_TO_GCTHING(jsval v)
@@ -145,36 +232,18 @@ STRING_TO_JSVAL(JSString *str)
 #define JSVAL_INT_POW2(n)       ((jsval)1 << (n))
 #define JSVAL_INT_MIN           (-JSVAL_INT_POW2(30))
 #define JSVAL_INT_MAX           (JSVAL_INT_POW2(30) - 1)
+
+/* Not a function, because we have static asserts that use it */
 #define INT_FITS_IN_JSVAL(i)    ((jsuint)(i) - (jsuint)JSVAL_INT_MIN <=       \
                                  (jsuint)(JSVAL_INT_MAX - JSVAL_INT_MIN))
+
+/* Not a function, because we have static asserts that use it */
+/* FIXME: Bug 506721, since that means we can't assert JSVAL_IS_INT(v) */
 #define JSVAL_TO_INT(v)         ((jsint)(v) >> 1)
+
+/* Not a function, because we have static asserts that use it */
+/* FIXME: Bug 506721, since that means we can't assert INT_FITS_IN_JSVAL(i) */
 #define INT_TO_JSVAL(i)         (((jsval)(i) << 1) | JSVAL_INT)

-/*
- * A pseudo-boolean is a 29-bit (for 32-bit jsval) or 61-bit (for 64-bit jsval)
- * value other than 0 or 1 encoded as a jsval whose tag is JSVAL_BOOLEAN.
- *
- * JSVAL_VOID happens to be defined as a jsval encoding a pseudo-boolean, but
- * embedders MUST NOT rely on this. All other possible pseudo-boolean values
- * are implementation-reserved and MUST NOT be constructed by any embedding of
- * SpiderMonkey.
- */
-#define JSVAL_TO_PSEUDO_BOOLEAN(v)      ((JSBool) ((v) >> JSVAL_TAGBITS))
-#define PSEUDO_BOOLEAN_TO_JSVAL(b)                                            \
-    JSVAL_SETTAG((jsval) (b) << JSVAL_TAGBITS, JSVAL_BOOLEAN)
-
-/*
- * Well-known JS values. The extern'd variables are initialized when the
- * first JSContext is created by JS_NewContext (see below).
- */
-#define JSVAL_NULL              ((jsval) 0)
-#define JSVAL_ZERO              INT_TO_JSVAL(0)
-#define JSVAL_ONE               INT_TO_JSVAL(1)
-#define JSVAL_FALSE             PSEUDO_BOOLEAN_TO_JSVAL(JS_FALSE)
-#define JSVAL_TRUE              PSEUDO_BOOLEAN_TO_JSVAL(JS_TRUE)
-#define JSVAL_VOID              PSEUDO_BOOLEAN_TO_JSVAL(2)
-
-
 /* Convert between boolean and jsval, asserting that inputs are valid. */
 static JS_ALWAYS_INLINE JSBool
 JSVAL_TO_BOOLEAN(jsval v)

From 10702b702ba6e36c8958e79946aa51ea5515750f Mon Sep 17 00:00:00 2001
From: Jason Orendorff
Date: Mon, 27 Jul 2009 16:29:02 -0500
Subject: [PATCH 05/19] Bug 505932 - Shape regeneration does not touch most
 empty scopes. r=brendan.

---
 js/src/jscntxt.h   | 13 ++++++++++++-
 js/src/jsgc.cpp    |  1 +
 js/src/jsobj.cpp   | 39 ++++++++++++++++++++++++++++-----------
 js/src/jsscope.cpp |  4 ++--
 js/src/jsscope.h   | 10 +++++++++-
 5 files changed, 52 insertions(+), 15 deletions(-)

diff --git a/js/src/jscntxt.h b/js/src/jscntxt.h
index 091a2529a39d..b079cb014d19 100644
--- a/js/src/jscntxt.h
+++ b/js/src/jscntxt.h
@@ -391,7 +391,18 @@ struct JSRuntime {
     JSPackedBool gcPoke;
     JSPackedBool gcRunning;
     JSPackedBool gcRegenShapes;
-    uint8 gcPadding;
+
+    /*
+     * During gc, if rt->gcRegenShapes &&
+     *   (scope->flags & JSScope::SHAPE_REGEN) == rt->gcRegenShapesScopeFlag,
+     * then the scope's shape has already been regenerated during this GC.
+     * To avoid having to sweep JSScopes, the bit's meaning toggles with each
+     * shape-regenerating GC.
+     *
+     * FIXME Once scopes are GC'd (bug 505004), this will be obsolete.
+ */ + uint8 gcRegenShapesScopeFlag; + #ifdef JS_GC_ZEAL jsrefcount gcZeal; #endif diff --git a/js/src/jsgc.cpp b/js/src/jsgc.cpp index b2ee47f5a0c1..f6a76120c4d4 100644 --- a/js/src/jsgc.cpp +++ b/js/src/jsgc.cpp @@ -3519,6 +3519,7 @@ js_GC(JSContext *cx, JSGCInvocationKind gckind) */ if (rt->shapeGen & SHAPE_OVERFLOW_BIT) { rt->gcRegenShapes = true; + rt->gcRegenShapesScopeFlag ^= JSScope::SHAPE_REGEN; rt->shapeGen = 0; rt->protoHazardShape = 0; } diff --git a/js/src/jsobj.cpp b/js/src/jsobj.cpp index c14b66e837aa..9ca39003a927 100644 --- a/js/src/jsobj.cpp +++ b/js/src/jsobj.cpp @@ -5773,23 +5773,40 @@ js_TraceObject(JSTracer *trc, JSObject *obj) #endif sprop = scope->lastProp; - if (sprop) { - JS_ASSERT(scope->has(sprop)); + uint8 regenFlag = cx->runtime->gcRegenShapesScopeFlag; + if (IS_GC_MARKING_TRACER(trc) && + cx->runtime->gcRegenShapes && + scope->hasRegenFlag(regenFlag)) { + /* + * Either scope has its own shape, which must be regenerated, or it + * must have the same shape as its lastProp. + */ + uint32 shape; - /* Regenerate property cache shape ids if GC'ing. */ - if (IS_GC_MARKING_TRACER(trc) && cx->runtime->gcRegenShapes) { + if (sprop) { if (!(sprop->flags & SPROP_FLAG_SHAPE_REGEN)) { sprop->shape = js_RegenerateShapeForGC(cx); sprop->flags |= SPROP_FLAG_SHAPE_REGEN; } - - uint32 shape = sprop->shape; - if (scope->hasOwnShape()) { - shape = js_RegenerateShapeForGC(cx); - JS_ASSERT(shape != sprop->shape); - } - scope->shape = shape; + shape = sprop->shape; } + if (!sprop || scope->hasOwnShape()) { + shape = js_RegenerateShapeForGC(cx); + JS_ASSERT_IF(sprop, shape != sprop->shape); + } + scope->shape = shape; + scope->flags ^= JSScope::SHAPE_REGEN; + + /* Also regenerate the shapes of empty scopes, in case they are not shared. */ + for (JSScope *empty = scope->emptyScope; + empty && empty->hasRegenFlag(regenFlag); + empty = empty->emptyScope) { + empty->shape = js_RegenerateShapeForGC(cx); + empty->flags ^= JSScope::SHAPE_REGEN; + } + } + if (sprop) { + JS_ASSERT(scope->has(sprop)); /* Trace scope's property tree ancestor line. */ do { diff --git a/js/src/jsscope.cpp b/js/src/jsscope.cpp index c343533dde35..7da51cd43f71 100644 --- a/js/src/jsscope.cpp +++ b/js/src/jsscope.cpp @@ -196,7 +196,7 @@ JSScope::create(JSContext *cx, JSObjectOps *ops, JSClass *clasp, JSObject *obj) scope->object = obj; scope->nrefs = 1; scope->freeslot = JSSLOT_FREE(clasp); - scope->flags = 0; + scope->flags = cx->runtime->gcRegenShapesScopeFlag; js_LeaveTraceIfGlobalObject(cx, obj); scope->initMinimal(cx); @@ -226,7 +226,7 @@ JSScope::createEmptyScope(JSContext *cx, JSClass *clasp) */ scope->nrefs = 2; scope->freeslot = JSSLOT_FREE(clasp); - scope->flags = 0; + scope->flags = OWN_SHAPE | cx->runtime->gcRegenShapesScopeFlag; scope->initMinimal(cx); #ifdef JS_THREADSAFE diff --git a/js/src/jsscope.h b/js/src/jsscope.h index 481da6a4a5b3..fe8b5798e4f1 100644 --- a/js/src/jsscope.h +++ b/js/src/jsscope.h @@ -283,7 +283,13 @@ struct JSScope { SEALED = 0x0002, BRANDED = 0x0004, INDEXED_PROPERTIES = 0x0008, - OWN_SHAPE = 0x0010 + OWN_SHAPE = 0x0010, + + /* + * This flag toggles with each shape-regenerating GC cycle. + * See JSRuntime::gcRegenShapesScopeFlag. 
+ */ + SHAPE_REGEN = 0x0020 }; bool hadMiddleDelete() { return flags & MIDDLE_DELETE; } @@ -312,6 +318,8 @@ struct JSScope { bool hasOwnShape() { return flags & OWN_SHAPE; } void setOwnShape() { flags |= OWN_SHAPE; } + bool hasRegenFlag(uint8 regenFlag) { return (flags & SHAPE_REGEN) == regenFlag; } + bool owned() { return object != NULL; } }; From fad2d36065c720615f020d189602247670f80ea8 Mon Sep 17 00:00:00 2001 From: Jason Orendorff Date: Mon, 27 Jul 2009 16:38:39 -0500 Subject: [PATCH 06/19] lirasm - Rename a parameter, as graydon requested in review for bug 505249. --- js/src/lirasm/lirasm.cpp | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/js/src/lirasm/lirasm.cpp b/js/src/lirasm/lirasm.cpp index f3c22fc77ed5..84f73113f730 100644 --- a/js/src/lirasm/lirasm.cpp +++ b/js/src/lirasm/lirasm.cpp @@ -234,7 +234,7 @@ public: FragmentAssembler(Lirasm &parent, const string &fragmentName); ~FragmentAssembler(); - void assembleFragment(LirTokenStream &in, bool implied, const LirToken *firstToken); + void assembleFragment(LirTokenStream &in, bool implicitBegin, const LirToken *firstToken); private: // Prohibit copying. @@ -735,7 +735,7 @@ FragmentAssembler::extract_any_label(string &lab, char lab_delim) } void -FragmentAssembler::assembleFragment(LirTokenStream &in, bool implied, const LirToken *firstToken) +FragmentAssembler::assembleFragment(LirTokenStream &in, bool implicitBegin, const LirToken *firstToken) { LirToken token; while (true) { @@ -743,7 +743,7 @@ FragmentAssembler::assembleFragment(LirTokenStream &in, bool implied, const LirT token = *firstToken; firstToken = NULL; } else if (!in.get(token)) { - if (!implied) + if (!implicitBegin) bad("unexpected end of file in fragment '" + mFragName + "'"); break; } @@ -756,7 +756,7 @@ FragmentAssembler::assembleFragment(LirTokenStream &in, bool implied, const LirT if (op == ".begin") bad("nested fragments are not supported"); if (op == ".end") { - if (implied) + if (implicitBegin) bad(".end without .begin"); if (!in.eat(NEWLINE)) bad("extra junk after .end"); From 04786e0f3d14dc471aa8eb7b3031c6616420dd5f Mon Sep 17 00:00:00 2001 From: Jason Orendorff Date: Mon, 27 Jul 2009 16:47:34 -0500 Subject: [PATCH 07/19] Bug 503408 - Trace native setters. r=brendan. 
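A note on one mechanism this patch introduces: propagateFailureToBuiltinStatus
folds a native setter's JSBool result into cx->builtinStatus without a branch.
A minimal standalone C sketch of the same bit trick, assuming (as the patch's
static asserts imply) JS_TRUE == 1, JS_FALSE == 0, and JSBUILTIN_ERROR ==
1 << 1; the function and enum names here are illustrative, not from the tree:

    #include <assert.h>

    enum { JSBUILTIN_ERROR = 1 << 1 };  /* assumed value, per the static asserts */

    /* ((ok & 1) ^ 1) is 1 exactly when ok is JS_FALSE, and shifting it left
       by 1 (the log2 of JSBUILTIN_ERROR) turns that into the error bit. */
    static unsigned
    propagate_failure(unsigned status, unsigned ok)
    {
        return status | (((ok & 1) ^ 1) << 1);
    }

    int
    main(void)
    {
        assert(propagate_failure(0, 1) == 0);               /* success: status untouched */
        assert(propagate_failure(0, 0) == JSBUILTIN_ERROR); /* failure: error bit set */
        return 0;
    }

The LIR_and/LIR_xor/LIR_lsh/LIR_or sequence emitted in jstracer.cpp below is
the jitted form of this same expression.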
--- js/src/jsbuiltins.cpp | 2 - js/src/jsinterp.cpp | 1 - js/src/jsinterp.h | 4 + js/src/jstracer.cpp | 423 ++++++++++++++++++++++++++++-------------- js/src/jstracer.h | 25 ++- js/src/trace-test.js | 15 ++ 6 files changed, 327 insertions(+), 143 deletions(-) diff --git a/js/src/jsbuiltins.cpp b/js/src/jsbuiltins.cpp index 961e11331276..fe088a03aad8 100644 --- a/js/src/jsbuiltins.cpp +++ b/js/src/jsbuiltins.cpp @@ -242,8 +242,6 @@ JSBool FASTCALL js_AddProperty(JSContext* cx, JSObject* obj, JSScopeProperty* sprop) { JS_ASSERT(OBJ_IS_NATIVE(obj)); - JS_ASSERT(SPROP_HAS_STUB_SETTER(sprop)); - JS_LOCK_OBJ(cx, obj); JSScope* scope = OBJ_SCOPE(obj); diff --git a/js/src/jsinterp.cpp b/js/src/jsinterp.cpp index 6f60b4b0821c..25506179975c 100644 --- a/js/src/jsinterp.cpp +++ b/js/src/jsinterp.cpp @@ -4802,7 +4802,6 @@ js_Interpret(JSContext *cx) } JS_UNLOCK_SCOPE(cx, scope); PCMETER(cache->setpcmisses++); - atom = NULL; } } diff --git a/js/src/jsinterp.h b/js/src/jsinterp.h index 3848d5498b1a..23e77a7171ef 100644 --- a/js/src/jsinterp.h +++ b/js/src/jsinterp.h @@ -243,6 +243,10 @@ struct JSPropCacheEntry { jsuword kshape; /* key shape if pc, else obj for atom */ jsuword vcap; /* value capability, see above */ jsuword vword; /* value word, see PCVAL_* below */ + + bool adding() const { + return PCVCAP_TAG(vcap) == 0 && kshape != PCVCAP_SHAPE(vcap); + } }; /* diff --git a/js/src/jstracer.cpp b/js/src/jstracer.cpp index 5d4da83d8742..dba2449d3bee 100644 --- a/js/src/jstracer.cpp +++ b/js/src/jstracer.cpp @@ -2950,7 +2950,8 @@ TraceRecorder::snapshot(ExitType exitType) bool resumeAfter = (pendingTraceableNative && JSTN_ERRTYPE(pendingTraceableNative) == FAIL_STATUS); if (resumeAfter) { - JS_ASSERT(*pc == JSOP_CALL || *pc == JSOP_APPLY || *pc == JSOP_NEW); + JS_ASSERT(*pc == JSOP_CALL || *pc == JSOP_APPLY || *pc == JSOP_NEW || + *pc == JSOP_SETPROP || *pc == JSOP_SETNAME); pc += cs.length; regs->pc = pc; MUST_FLOW_THROUGH("restore_pc"); @@ -5410,11 +5411,11 @@ LeaveTree(InterpState& state, VMSideExit* lr) * js_ExecuteTree. We are about to return to the interpreter. Adjust * the top stack frame to resume on the next op. */ - JS_ASSERT(*cx->fp->regs->pc == JSOP_CALL || - *cx->fp->regs->pc == JSOP_APPLY || - *cx->fp->regs->pc == JSOP_NEW); - uintN argc = GET_ARGC(cx->fp->regs->pc); - cx->fp->regs->pc += JSOP_CALL_LENGTH; + jsbytecode *pc = cx->fp->regs->pc; + JS_ASSERT(*pc == JSOP_CALL || *pc == JSOP_APPLY || *pc == JSOP_NEW || + *pc == JSOP_SETPROP || *pc == JSOP_SETNAME); + uintN argc = (js_CodeSpec[*pc].format & JOF_INVOKE) ? GET_ARGC(pc) : 0; + cx->fp->regs->pc += js_CodeSpec[*pc].length; cx->fp->regs->sp -= argc + 1; JS_ASSERT_IF(!cx->fp->imacpc, cx->fp->slots + cx->fp->script->nfixed + @@ -7581,6 +7582,41 @@ TraceRecorder::map_is_native(JSObjectMap* map, LIns* map_ins, LIns*& ops_ins, si return true; } +JS_REQUIRES_STACK JSRecordingStatus +TraceRecorder::guardNativePropertyOp(JSObject* aobj, LIns* map_ins) +{ + /* + * Interpreter calls to PROPERTY_CACHE_TEST guard on native object ops + * which is required to use native objects (those whose maps are scopes), + * or even more narrow conditions required because the cache miss case + * will call a particular object-op (js_GetProperty, js_SetProperty). + * + * We parameterize using offsetof and guard on match against the hook at + * the given offset in js_ObjectOps. TraceRecorder::record_JSOP_SETPROP + * guards the js_SetProperty case. 
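+     * Concretely, for a JOF_SET op the guard admits aobj when its ops are
+     * js_ObjectOps itself or the setProperty hook at that offset matches
+     * js_ObjectOps', since a cache miss on this path calls only that hook.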
+ */ + uint32 format = js_CodeSpec[*cx->fp->regs->pc].format; + uint32 mode = JOF_MODE(format); + + // No need to guard native-ness of global object. + JS_ASSERT(OBJ_IS_NATIVE(globalObj)); + if (aobj != globalObj) { + size_t op_offset = offsetof(JSObjectOps, objectMap); + if (mode == JOF_PROP || mode == JOF_VARPROP) { + op_offset = (format & JOF_SET) + ? offsetof(JSObjectOps, setProperty) + : offsetof(JSObjectOps, getProperty); + } else { + JS_ASSERT(mode == JOF_NAME); + } + + LIns* ops_ins; + if (!map_is_native(aobj->map, map_ins, ops_ins, op_offset)) + ABORT_TRACE("non-native map"); + } + return JSRS_CONTINUE; +} + JS_REQUIRES_STACK JSRecordingStatus TraceRecorder::test_property_cache(JSObject* obj, LIns* obj_ins, JSObject*& obj2, jsuword& pcval) { @@ -7598,33 +7634,8 @@ TraceRecorder::test_property_cache(JSObject* obj, LIns* obj_ins, JSObject*& obj2 } LIns* map_ins = map(obj_ins); - LIns* ops_ins; - // Interpreter calls to PROPERTY_CACHE_TEST guard on native object ops - // which is required to use native objects (those whose maps are scopes), - // or even more narrow conditions required because the cache miss case - // will call a particular object-op (js_GetProperty, js_SetProperty). - // - // We parameterize using offsetof and guard on match against the hook at - // the given offset in js_ObjectOps. TraceRecorder::record_JSOP_SETPROP - // guards the js_SetProperty case. - uint32 format = js_CodeSpec[*pc].format; - uint32 mode = JOF_MODE(format); - - // No need to guard native-ness of global object. - JS_ASSERT(OBJ_IS_NATIVE(globalObj)); - if (aobj != globalObj) { - size_t op_offset = offsetof(JSObjectOps, objectMap); - if (mode == JOF_PROP || mode == JOF_VARPROP) { - JS_ASSERT(!(format & JOF_SET)); - op_offset = offsetof(JSObjectOps, getProperty); - } else { - JS_ASSERT(mode == JOF_NAME); - } - - if (!map_is_native(aobj->map, map_ins, ops_ins, op_offset)) - ABORT_TRACE("non-native map"); - } + CHECK_STATUS(guardNativePropertyOp(aobj, map_ins)); JSAtom* atom; JSPropCacheEntry* entry; @@ -7692,31 +7703,60 @@ TraceRecorder::test_property_cache(JSObject* obj, LIns* obj_ins, JSObject*& obj2 JS_ASSERT(cx->requestDepth); #endif - // Emit guard(s), common code for both hit and miss cases. + return guardPropertyCacheHit(obj_ins, map_ins, aobj, obj2, entry, pcval); +} + +JS_REQUIRES_STACK JSRecordingStatus +TraceRecorder::guardPropertyCacheHit(LIns* obj_ins, + LIns* map_ins, + JSObject* aobj, + JSObject* obj2, + JSPropCacheEntry* entry, + jsuword& pcval) +{ + uint32 vshape = PCVCAP_SHAPE(entry->vcap); + // Check for first-level cache hit and guard on kshape if possible. // Otherwise guard on key object exact match. 
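        // For vcap tags 0 and 1 the entry is keyed by the object's shape,
        // so the kshape guard suffices; for deeper scope/proto hits kshape
        // instead holds the key object's address (see the JS_ASSERT below),
        // so the else branch guards on object identity.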
if (PCVCAP_TAG(entry->vcap) <= 1) { if (aobj != globalObj) { LIns* shape_ins = addName(lir->insLoad(LIR_ld, map_ins, offsetof(JSScope, shape)), "shape"); - guard(true, addName(lir->ins2i(LIR_eq, shape_ins, entry->kshape), "guard(kshape)(test_property_cache)"), + guard(true, + addName(lir->ins2i(LIR_eq, shape_ins, entry->kshape), "guard_kshape"), BRANCH_EXIT); } + + if (entry->adding()) { + if (aobj == globalObj) + ABORT_TRACE("adding a property to the global object"); + + LIns *vshape_ins = addName( + lir->insLoad(LIR_ld, + addName(lir->insLoad(LIR_ldp, cx_ins, offsetof(JSContext, runtime)), + "runtime"), + offsetof(JSRuntime, protoHazardShape)), + "protoHazardShape"); + guard(true, + addName(lir->ins2i(LIR_eq, vshape_ins, vshape), "guard_protoHazardShape"), + MISMATCH_EXIT); + } } else { #ifdef DEBUG - JSOp op = js_GetOpcode(cx, cx->fp->script, pc); + JSOp op = js_GetOpcode(cx, cx->fp->script, cx->fp->regs->pc); JSAtom *pcatom; if (op == JSOP_LENGTH) { pcatom = cx->runtime->atomState.lengthAtom; } else { ptrdiff_t pcoff = (JOF_TYPE(js_CodeSpec[op].format) == JOF_SLOTATOM) ? SLOTNO_LEN : 0; - GET_ATOM_FROM_BYTECODE(cx->fp->script, pc, pcoff, pcatom); + GET_ATOM_FROM_BYTECODE(cx->fp->script, cx->fp->regs->pc, pcoff, pcatom); } JS_ASSERT(entry->kpc == (jsbytecode *) pcatom); JS_ASSERT(entry->kshape == jsuword(aobj)); #endif if (aobj != globalObj && !obj_ins->isconstp()) { - guard(true, addName(lir->ins2i(LIR_eq, obj_ins, entry->kshape), "guard(kobj)"), + guard(true, + addName(lir->ins2i(LIR_eq, obj_ins, entry->kshape), "guard_kobj"), BRANCH_EXIT); } } @@ -7724,26 +7764,25 @@ TraceRecorder::test_property_cache(JSObject* obj, LIns* obj_ins, JSObject*& obj2 // For any hit that goes up the scope and/or proto chains, we will need to // guard on the shape of the object containing the property. if (PCVCAP_TAG(entry->vcap) >= 1) { - jsuword vcap = entry->vcap; - uint32 vshape = PCVCAP_SHAPE(vcap); JS_ASSERT(OBJ_SHAPE(obj2) == vshape); LIns* obj2_ins; if (PCVCAP_TAG(entry->vcap) == 1) { // Duplicate the special case in PROPERTY_CACHE_TEST. 
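            // (A tag of 1 means the hit is on the immediate prototype, so
            // obj2 is loaded from the proto fslot and guarded non-null
            // rather than embedded as a constant pointer.)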
- obj2_ins = stobj_get_fslot(obj_ins, JSSLOT_PROTO); + obj2_ins = addName(stobj_get_fslot(obj_ins, JSSLOT_PROTO), "proto"); guard(false, lir->ins_eq0(obj2_ins), BRANCH_EXIT); } else { obj2_ins = INS_CONSTPTR(obj2); } map_ins = map(obj2_ins); + LIns* ops_ins; if (!map_is_native(obj2->map, map_ins, ops_ins)) ABORT_TRACE("non-native map"); LIns* shape_ins = addName(lir->insLoad(LIR_ld, map_ins, offsetof(JSScope, shape)), - "shape"); + "obj2_shape"); guard(true, - addName(lir->ins2i(LIR_eq, shape_ins, vshape), "guard(vshape)(test_property_cache)"), + addName(lir->ins2i(LIR_eq, shape_ins, vshape), "guard_vshape"), BRANCH_EXIT); } @@ -7801,16 +7840,6 @@ TraceRecorder::stobj_get_slot(LIns* obj_ins, unsigned slot, LIns*& dslots_ins) return stobj_get_dslot(obj_ins, slot - JS_INITIAL_NSLOTS, dslots_ins); } -JSRecordingStatus -TraceRecorder::native_set(LIns* obj_ins, JSScopeProperty* sprop, LIns*& dslots_ins, LIns* v_ins) -{ - if (SPROP_HAS_STUB_SETTER(sprop) && sprop->slot != SPROP_INVALID_SLOT) { - stobj_set_slot(obj_ins, sprop->slot, dslots_ins, v_ins); - return JSRS_CONTINUE; - } - ABORT_TRACE("unallocated or non-stub sprop"); -} - JSRecordingStatus TraceRecorder::native_get(LIns* obj_ins, LIns* pobj_ins, JSScopeProperty* sprop, LIns*& dslots_ins, LIns*& v_ins) @@ -8703,6 +8732,83 @@ TraceRecorder::newArray(JSObject* ctor, uint32 argc, jsval* argv, jsval* rval) return JSRS_CONTINUE; } +JS_REQUIRES_STACK void +TraceRecorder::propagateFailureToBuiltinStatus(LIns* ok_ins, LIns*& status_ins) +{ + /* + * Check the boolean return value (ok_ins) of a native JSNative, + * JSFastNative, or JSPropertyOp hook for failure. On failure, set the + * JSBUILTIN_ERROR bit of cx->builtinStatus. + * + * If the return value (ok_ins) is true, status' == status. Otherwise + * status' = status | JSBUILTIN_ERROR. We calculate (rval&1)^1, which is 1 + * if rval is JS_FALSE (error), and then shift that by 1, which is the log2 + * of JSBUILTIN_ERROR. + */ + JS_STATIC_ASSERT(((JS_TRUE & 1) ^ 1) << 1 == 0); + JS_STATIC_ASSERT(((JS_FALSE & 1) ^ 1) << 1 == JSBUILTIN_ERROR); + status_ins = lir->ins2(LIR_or, + status_ins, + lir->ins2i(LIR_lsh, + lir->ins2i(LIR_xor, + lir->ins2i(LIR_and, ok_ins, 1), + 1), + 1)); + lir->insStorei(status_ins, lirbuf->state, (int) offsetof(InterpState, builtinStatus)); +} + +JS_REQUIRES_STACK void +TraceRecorder::emitNativePropertyOp(JSScope* scope, JSScopeProperty* sprop, LIns* obj_ins, + bool setflag, LIns* boxed_ins) +{ + JS_ASSERT(!(sprop->attrs & (setflag ? JSPROP_SETTER : JSPROP_GETTER))); + JS_ASSERT(setflag ? !SPROP_HAS_STUB_SETTER(sprop) : !SPROP_HAS_STUB_GETTER(sprop)); + + // Take snapshot for js_DeepBail and store it in cx->bailExit. + VMSideExit* exit = snapshot(DEEP_BAIL_EXIT); + lir->insStorei(INS_CONSTPTR(exit), cx_ins, offsetof(JSContext, bailExit)); + + // Tell nanojit not to discard or defer stack writes before this call. + LIns* guardRec = createGuardRecord(exit); + lir->insGuard(LIR_xbarrier, guardRec, guardRec); + + // It is unsafe to pass the address of an object slot as the out parameter, + // because the getter or setter could end up resizing the object's dslots. + // Instead, use a word of stack and root it in nativeVp. + LIns* vp_ins = lir->insAlloc(sizeof(jsval)); + lir->insStorei(vp_ins, cx_ins, offsetof(JSContext, nativeVp)); + lir->insStorei(INS_CONST(1), cx_ins, offsetof(JSContext, nativeVpLen)); + if (setflag) + lir->insStorei(boxed_ins, vp_ins, 0); + + CallInfo* ci = (CallInfo*) lir->insSkip(sizeof(struct CallInfo))->payload(); + ci->_address = uintptr_t(setflag ? 
sprop->setter : sprop->getter); + ci->_argtypes = ARGSIZE_LO | ARGSIZE_LO << 2 | ARGSIZE_LO << 4 | ARGSIZE_LO << 6 | ARGSIZE_LO << 8; + ci->_cse = ci->_fold = 0; + ci->_abi = ABI_CDECL; +#ifdef DEBUG + ci->_name = "JSPropertyOp"; +#endif + LIns* args[] = { vp_ins, INS_CONSTWORD(SPROP_USERID(sprop)), obj_ins, cx_ins }; + LIns* ok_ins = lir->insCall(ci, args); + + // Unroot the vp. + lir->insStorei(INS_CONSTPTR(NULL), cx_ins, offsetof(JSContext, nativeVp)); + + // Guard that the call succeeded and builtinStatus is still 0. + // If the native op succeeds but we deep-bail here, the result value is + // lost! Therefore this can only be used for setters of shared properties. + // In that case we ignore the result value anyway. + LIns* status_ins = lir->insLoad(LIR_ld, + lirbuf->state, + (int) offsetof(InterpState, builtinStatus)); + propagateFailureToBuiltinStatus(ok_ins, status_ins); + guard(true, lir->ins_eq0(status_ins), STATUS_EXIT); + + // Re-load the value--but this is currently unused, so commented out. + //boxed_ins = lir->insLoad(LIR_ldp, vp_ins, 0); +} + JS_REQUIRES_STACK JSRecordingStatus TraceRecorder::emitNativeCall(JSTraceableNative* known, uintN argc, LIns* args[]) { @@ -9261,30 +9367,90 @@ TraceRecorder::record_JSOP_SETPROP() return JSRS_CONTINUE; } +/* Emit a specialized, inlined copy of js_NativeSet. */ JS_REQUIRES_STACK JSRecordingStatus -TraceRecorder::record_SetPropHit(JSPropCacheEntry* entry, JSScopeProperty* sprop) +TraceRecorder::nativeSet(JSObject* obj, LIns* obj_ins, JSScopeProperty* sprop, + jsval v, LIns* v_ins) +{ + JSScope* scope = OBJ_SCOPE(obj); + uint32 slot = sprop->slot; + + /* + * We do not trace assignment to properties that have both a nonstub setter + * and a slot, for several reasons. + * + * First, that would require sampling rt->propertyRemovals before and after + * (see js_NativeSet), and even more code to handle the case where the two + * samples differ. A mere guard is not enough, because you can't just bail + * off trace in the middle of a property assignment without storing the + * value and making the stack right. + * + * If obj is the global object, there are two additional problems. We would + * have to emit still more code to store the result in the object (not the + * native global frame) if the setter returned successfully after + * deep-bailing. And we would have to cope if the run-time type of the + * setter's return value differed from the record-time type of v, in which + * case unboxing would fail and, having called a native setter, we could + * not just retry the instruction in the interpreter. + */ + JS_ASSERT(SPROP_HAS_STUB_SETTER(sprop) || slot == SPROP_INVALID_SLOT); + + // Box the value to be stored, if necessary. + LIns* boxed_ins = NULL; + if (!SPROP_HAS_STUB_SETTER(sprop) || (slot != SPROP_INVALID_SLOT && obj != globalObj)) { + boxed_ins = v_ins; + box_jsval(v, boxed_ins); + } + + // Call the setter, if any. + if (!SPROP_HAS_STUB_SETTER(sprop)) + emitNativePropertyOp(scope, sprop, obj_ins, true, boxed_ins); + + // Store the value, if this property has a slot. + if (slot != SPROP_INVALID_SLOT) { + JS_ASSERT(SPROP_HAS_VALID_SLOT(sprop, scope)); + JS_ASSERT(!(sprop->attrs & JSPROP_SHARED)); + if (obj == globalObj) { + if (!lazilyImportGlobalSlot(slot)) + ABORT_TRACE("lazy import of global slot failed"); + + // If we called a native setter, unbox the result. 
+ if (!SPROP_HAS_STUB_SETTER(sprop)) { + v_ins = boxed_ins; + unbox_jsval(STOBJ_GET_SLOT(obj, slot), v_ins, snapshot(BRANCH_EXIT)); + } + set(&STOBJ_GET_SLOT(obj, slot), v_ins); + } else { + LIns* dslots_ins = NULL; + stobj_set_slot(obj_ins, slot, dslots_ins, boxed_ins); + } + } + + return JSRS_CONTINUE; +} + +JS_REQUIRES_STACK JSRecordingStatus +TraceRecorder::setProp(jsval &l, JSPropCacheEntry* entry, JSScopeProperty* sprop, + jsval &v, LIns*& v_ins) { if (entry == JS_NO_PROP_CACHE_FILL) ABORT_TRACE("can't trace uncacheable property set"); - if (PCVCAP_TAG(entry->vcap) >= 1) - ABORT_TRACE("can't trace inherited property set"); + JS_ASSERT_IF(PCVCAP_TAG(entry->vcap) >= 1, sprop->attrs & JSPROP_SHARED); + if (!SPROP_HAS_STUB_SETTER(sprop) && sprop->slot != SPROP_INVALID_SLOT) + ABORT_TRACE("can't trace set of property with setter and slot"); + if (sprop->attrs & JSPROP_SETTER) + ABORT_TRACE("can't trace JavaScript function setter"); - jsbytecode* pc = cx->fp->regs->pc; - JS_ASSERT(entry->kpc == pc); - - jsval& r = stackval(-1); - jsval& l = stackval(-2); + // These two cases are actually errors and can't be cached. + JS_ASSERT(!(sprop->attrs & JSPROP_GETTER)); // getter without setter + JS_ASSERT(!(sprop->attrs & JSPROP_READONLY)); JS_ASSERT(!JSVAL_IS_PRIMITIVE(l)); JSObject* obj = JSVAL_TO_OBJECT(l); LIns* obj_ins = get(&l); JSScope* scope = OBJ_SCOPE(obj); - JS_ASSERT(scope->owned()); - JS_ASSERT(scope->has(sprop)); - - if (!isValidSlot(scope, sprop)) - return JSRS_STOP; + JS_ASSERT_IF(entry->vcap == PCVCAP_MAKE(entry->kshape, 0, 0), scope->has(sprop)); /* * Setting a function-valued property might need to rebrand the object; we @@ -9292,57 +9458,54 @@ TraceRecorder::record_SetPropHit(JSPropCacheEntry* entry, JSScopeProperty* sprop * separating functions into the trace-time type TT_FUNCTION will save the * day! */ - if (scope->branded() && VALUE_IS_FUNCTION(cx, r)) + if (scope->branded() && VALUE_IS_FUNCTION(cx, v)) ABORT_TRACE("can't trace function-valued property set in branded scope"); - if (obj == globalObj) { - JS_ASSERT(SPROP_HAS_VALID_SLOT(sprop, scope)); - uint32 slot = sprop->slot; - if (!lazilyImportGlobalSlot(slot)) - ABORT_TRACE("lazy import of global slot failed"); + // Find obj2. If entry->adding(), the TAG bits are all 0. + JSObject* obj2 = obj; + for (jsuword i = PCVCAP_TAG(entry->vcap) >> PCVCAP_PROTOBITS; i; i--) + obj2 = OBJ_GET_PARENT(cx, obj2); + for (jsuword j = PCVCAP_TAG(entry->vcap) & PCVCAP_PROTOMASK; j; j--) + obj2 = OBJ_GET_PROTO(cx, obj2); + scope = OBJ_SCOPE(obj2); + JS_ASSERT_IF(entry->adding(), obj2 == obj); - LIns* r_ins = get(&r); - set(&STOBJ_GET_SLOT(obj, slot), r_ins); - - JS_ASSERT(*pc != JSOP_INITPROP); - if (pc[JSOP_SETPROP_LENGTH] != JSOP_POP) - set(&l, r_ins); - return JSRS_CONTINUE; - } - - // The global object's shape is guarded at trace entry, all others need a guard here. + // Guard before anything else. 
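+    // The map and property-cache guards must precede any effect of the set
+    // itself, so a guard failure can side-exit to the interpreter with the
+    // assignment still entirely undone.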
LIns* map_ins = map(obj_ins); - LIns* ops_ins; - if (!map_is_native(obj->map, map_ins, ops_ins, offsetof(JSObjectOps, setProperty))) - ABORT_TRACE("non-native map"); + CHECK_STATUS(guardNativePropertyOp(obj, map_ins)); + jsuword pcval; + CHECK_STATUS(guardPropertyCacheHit(obj_ins, map_ins, obj, obj2, entry, pcval)); + JS_ASSERT(scope->object == obj2); + JS_ASSERT(scope->has(sprop)); + JS_ASSERT_IF(obj2 != obj, sprop->attrs & JSPROP_SHARED); - LIns* shape_ins = addName(lir->insLoad(LIR_ld, map_ins, offsetof(JSScope, shape)), "shape"); - guard(true, - addName(lir->ins2i(LIR_eq, shape_ins, entry->kshape), "guard(kshape)(record_SetPropHit)"), - BRANCH_EXIT); - - uint32 vshape = PCVCAP_SHAPE(entry->vcap); - if (entry->kshape != vshape) { - LIns *vshape_ins = lir->insLoad(LIR_ld, - lir->insLoad(LIR_ldp, cx_ins, offsetof(JSContext, runtime)), - offsetof(JSRuntime, protoHazardShape)); - guard(true, - addName(lir->ins2i(LIR_eq, vshape_ins, vshape), "guard(vshape)(record_SetPropHit)"), - MISMATCH_EXIT); + // Add a property to the object if necessary. + if (entry->adding()) { + JS_ASSERT(!(sprop->attrs & JSPROP_SHARED)); + if (obj == globalObj) + ABORT_TRACE("adding a property to the global object"); LIns* args[] = { INS_CONSTPTR(sprop), obj_ins, cx_ins }; LIns* ok_ins = lir->insCall(&js_AddProperty_ci, args); guard(false, lir->ins_eq0(ok_ins), OOM_EXIT); } - LIns* dslots_ins = NULL; - LIns* v_ins = get(&r); - LIns* boxed_ins = v_ins; - box_jsval(r, boxed_ins); - CHECK_STATUS(native_set(obj_ins, sprop, dslots_ins, boxed_ins)); + v_ins = get(&v); + return nativeSet(obj, obj_ins, sprop, v, v_ins); +} +JS_REQUIRES_STACK JSRecordingStatus +TraceRecorder::record_SetPropHit(JSPropCacheEntry* entry, JSScopeProperty* sprop) +{ + jsval& r = stackval(-1); + jsval& l = stackval(-2); + LIns* v_ins; + CHECK_STATUS(setProp(l, entry, sprop, r, v_ins)); + + jsbytecode* pc = cx->fp->regs->pc; if (*pc != JSOP_INITPROP && pc[JSOP_SETPROP_LENGTH] != JSOP_POP) set(&l, v_ins); + return JSRS_CONTINUE; } @@ -10097,22 +10260,24 @@ TraceRecorder::record_NativeCallComplete() jsbytecode* pc = cx->fp->regs->pc; JS_ASSERT(pendingTraceableNative); - JS_ASSERT(*pc == JSOP_CALL || *pc == JSOP_APPLY || *pc == JSOP_NEW); + JS_ASSERT(*pc == JSOP_CALL || *pc == JSOP_APPLY || *pc == JSOP_NEW || *pc == JSOP_SETPROP); jsval& v = stackval(-1); LIns* v_ins = get(&v); - /* At this point the generated code has already called the native function - and we can no longer fail back to the original pc location (JSOP_CALL) - because that would cause the interpreter to re-execute the native - function, which might have side effects. - - Instead, the snapshot() call below sees that we are currently parked on - a traceable native's JSOP_CALL instruction, and it will advance the pc - to restore by the length of the current opcode. If the native's return - type is jsval, snapshot() will also indicate in the type map that the - element on top of the stack is a boxed value which doesn't need to be - boxed if the type guard generated by unbox_jsval() fails. */ + /* + * At this point the generated code has already called the native function + * and we can no longer fail back to the original pc location (JSOP_CALL) + * because that would cause the interpreter to re-execute the native + * function, which might have side effects. + * + * Instead, the snapshot() call below sees that we are currently parked on + * a traceable native's JSOP_CALL instruction, and it will advance the pc + * to restore by the length of the current opcode. 
If the native's return + * type is jsval, snapshot() will also indicate in the type map that the + * element on top of the stack is a boxed value which doesn't need to be + * boxed if the type guard generated by unbox_jsval() fails. + */ if (JSTN_ERRTYPE(pendingTraceableNative) == FAIL_STATUS) { // Keep cx->bailExit null when it's invalid. @@ -10145,27 +10310,9 @@ TraceRecorder::record_NativeCallComplete() } set(&v, v_ins); - /* - * If this is a generic traceable native invocation, propagate the boolean return - * value of the native into builtinStatus. If the return value (v_ins) - * is true, status' == status. Otherwise status' = status | JSBUILTIN_ERROR. - * We calculate (rval&1)^1, which is 1 if rval is JS_FALSE (error), and then - * shift that by 1 which is JSBUILTIN_ERROR. - */ - JS_STATIC_ASSERT((1 - JS_TRUE) << 1 == 0); - JS_STATIC_ASSERT((1 - JS_FALSE) << 1 == JSBUILTIN_ERROR); - status = lir->ins2(LIR_or, - status, - lir->ins2i(LIR_lsh, - lir->ins2i(LIR_xor, - lir->ins2i(LIR_and, ok_ins, 1), - 1), - 1)); - lir->insStorei(status, lirbuf->state, (int) offsetof(InterpState, builtinStatus)); + propagateFailureToBuiltinStatus(ok_ins, status); } - guard(true, - lir->ins_eq0(status), - STATUS_EXIT); + guard(true, lir->ins_eq0(status), STATUS_EXIT); } JSRecordingStatus ok = JSRS_CONTINUE; @@ -10303,9 +10450,8 @@ TraceRecorder::prop(JSObject* obj, LIns* obj_ins, uint32& slot, LIns*& v_ins) return JSRS_CONTINUE; } - /* Insist if setting on obj being the directly addressed object. */ - uint32 setflags = (cs.format & (JOF_SET | JOF_INCDEC | JOF_FOR)); - LIns* dslots_ins = NULL; + uint32 setflags = (cs.format & (JOF_INCDEC | JOF_FOR)); + JS_ASSERT(!(cs.format & JOF_SET)); /* Don't trace getter or setter calls, our caller wants a direct slot. */ if (PCVAL_IS_SPROP(pcval)) { @@ -10364,11 +10510,12 @@ TraceRecorder::prop(JSObject* obj, LIns* obj_ins, uint32& slot, LIns*& v_ins) * obj_ins the last proto-load. 
*/ while (obj != obj2) { - obj_ins = stobj_get_slot(obj_ins, JSSLOT_PROTO, dslots_ins); + obj_ins = stobj_get_fslot(obj_ins, JSSLOT_PROTO); obj = STOBJ_GET_PROTO(obj); } } + LIns* dslots_ins = NULL; v_ins = stobj_get_slot(obj_ins, slot, dslots_ins); unbox_jsval(STOBJ_GET_SLOT(obj, slot), v_ins, snapshot(BRANCH_EXIT)); diff --git a/js/src/jstracer.h b/js/src/jstracer.h index 82a1b13cdb43..61dca54ecd8c 100644 --- a/js/src/jstracer.h +++ b/js/src/jstracer.h @@ -687,6 +687,15 @@ class TraceRecorder : public avmplus::GCObject { nanojit::LIns*& ops_ins, size_t op_offset = 0); JS_REQUIRES_STACK JSRecordingStatus test_property_cache(JSObject* obj, nanojit::LIns* obj_ins, JSObject*& obj2, jsuword& pcval); + JS_REQUIRES_STACK JSRecordingStatus guardNativePropertyOp(JSObject* aobj, + nanojit::LIns* map_ins); + JS_REQUIRES_STACK JSRecordingStatus guardPropertyCacheHit(nanojit::LIns* obj_ins, + nanojit::LIns* map_ins, + JSObject* aobj, + JSObject* obj2, + JSPropCacheEntry* entry, + jsuword& pcval); + void stobj_set_fslot(nanojit::LIns *obj_ins, unsigned slot, nanojit::LIns* v_ins, const char *name); void stobj_set_dslot(nanojit::LIns *obj_ins, unsigned slot, nanojit::LIns*& dslots_ins, @@ -704,8 +713,6 @@ class TraceRecorder : public avmplus::GCObject { stobj_get_fslot(obj_ins, JSSLOT_PRIVATE), lir->insImmPtr((void*) ~mask)); } - JSRecordingStatus native_set(nanojit::LIns* obj_ins, JSScopeProperty* sprop, - nanojit::LIns*& dslots_ins, nanojit::LIns* v_ins); JSRecordingStatus native_get(nanojit::LIns* obj_ins, nanojit::LIns* pobj_ins, JSScopeProperty* sprop, nanojit::LIns*& dslots_ins, nanojit::LIns*& v_ins); @@ -722,6 +729,13 @@ class TraceRecorder : public avmplus::GCObject { JS_REQUIRES_STACK JSRecordingStatus getProp(jsval& v); JS_REQUIRES_STACK JSRecordingStatus getThis(nanojit::LIns*& this_ins); + JS_REQUIRES_STACK JSRecordingStatus nativeSet(JSObject* obj, nanojit::LIns* obj_ins, + JSScopeProperty* sprop, + jsval v, nanojit::LIns* v_ins); + JS_REQUIRES_STACK JSRecordingStatus setProp(jsval &l, JSPropCacheEntry* entry, + JSScopeProperty* sprop, + jsval &v, nanojit::LIns*& v_ins); + JS_REQUIRES_STACK void box_jsval(jsval v, nanojit::LIns*& v_ins); JS_REQUIRES_STACK void unbox_jsval(jsval v, nanojit::LIns*& v_ins, VMSideExit* exit); JS_REQUIRES_STACK bool guardClass(JSObject* obj, nanojit::LIns* obj_ins, JSClass* clasp, @@ -748,8 +762,15 @@ class TraceRecorder : public avmplus::GCObject { jsval* rval); JS_REQUIRES_STACK JSRecordingStatus interpretedFunctionCall(jsval& fval, JSFunction* fun, uintN argc, bool constructing); + JS_REQUIRES_STACK void propagateFailureToBuiltinStatus(nanojit::LIns *ok_ins, + nanojit::LIns *&status_ins); JS_REQUIRES_STACK JSRecordingStatus emitNativeCall(JSTraceableNative* known, uintN argc, nanojit::LIns* args[]); + JS_REQUIRES_STACK void emitNativePropertyOp(JSScope* scope, + JSScopeProperty* sprop, + nanojit::LIns* obj_ins, + bool setflag, + nanojit::LIns* boxed_ins); JS_REQUIRES_STACK JSRecordingStatus callTraceableNative(JSFunction* fun, uintN argc, bool constructing); JS_REQUIRES_STACK JSRecordingStatus callNative(uintN argc, JSOp mode); diff --git a/js/src/trace-test.js b/js/src/trace-test.js index df5f7746560f..5525267faef4 100644 --- a/js/src/trace-test.js +++ b/js/src/trace-test.js @@ -5497,6 +5497,21 @@ testOwnPropertyWithInOperator.jitstats = { }; test(testEliminatedGuardWithinAnchor); +function testNativeSetter() { + var re = /foo/; + var N = RUNLOOP + 10; + for (var i = 0; i < N; i++) + re.lastIndex = i; + assertEq(re.lastIndex, N - 1); +} 
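+// N exceeds RUNLOOP so that, after the warm-up iterations that trigger
+// recording, the remaining iterations run the native lastIndex setter
+// from the compiled trace.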
+testNativeSetter.jitstats = { + recorderStarted: 1, + recorderAborted: 0, + traceTriggered: 1, + sideExitIntoInterpreter: 1 +}; +test(testNativeSetter); + /***************************************************************************** * * * _____ _ _ _____ ______ _____ _______ * From f9b40174d451439d0a38f8929ad12c7811f3c474 Mon Sep 17 00:00:00 2001 From: David Mandelin Date: Mon, 27 Jul 2009 18:13:53 -0700 Subject: [PATCH 08/19] Bug 495329: Trace JSOP_BINDNAME/JSOP_SETNAME for closures, r=brendan --- js/src/jsbuiltins.h | 5 +++++ js/src/jsfun.cpp | 19 +++++++++++++++++-- js/src/jsfun.h | 20 +++++++++++++++++++- js/src/jstracer.cpp | 41 ++++++++++++++++++++++++++++++++++++----- 4 files changed, 77 insertions(+), 8 deletions(-) diff --git a/js/src/jsbuiltins.h b/js/src/jsbuiltins.h index baef4f07be6e..d576e32424fc 100644 --- a/js/src/jsbuiltins.h +++ b/js/src/jsbuiltins.h @@ -187,6 +187,7 @@ struct JSTraceableNative { #define _JS_CTYPE_JSVAL _JS_JSVAL_CTYPE( _JS_PTR, "","v", INFALLIBLE) #define _JS_CTYPE_JSVAL_RETRY _JS_JSVAL_CTYPE( _JS_PTR, --, --, FAIL_COOKIE) #define _JS_CTYPE_JSVAL_FAIL _JS_JSVAL_CTYPE( _JS_PTR, --, --, FAIL_STATUS) +#define _JS_CTYPE_JSID _JS_CTYPE(jsid, _JS_PTR, --, --, INFALLIBLE) #define _JS_CTYPE_BOOL _JS_CTYPE(JSBool, _JS_I32, "","i", INFALLIBLE) #define _JS_CTYPE_BOOL_RETRY _JS_CTYPE(JSBool, _JS_I32, --, --, FAIL_VOID) #define _JS_CTYPE_BOOL_FAIL _JS_CTYPE(JSBool, _JS_I32, --, --, FAIL_STATUS) @@ -444,6 +445,10 @@ JS_DECLARE_CALLINFO(js_ArrayCompPush) JS_DECLARE_CALLINFO(js_AllocFlatClosure) JS_DECLARE_CALLINFO(js_PutArguments) +/* Defined in jsfun.cpp. */ +JS_DECLARE_CALLINFO(js_SetCallVar) +JS_DECLARE_CALLINFO(js_SetCallArg) + /* Defined in jsnum.cpp. */ JS_DECLARE_CALLINFO(js_NumberToString) diff --git a/js/src/jsfun.cpp b/js/src/jsfun.cpp index c3076d055a0a..90fc581a95c0 100644 --- a/js/src/jsfun.cpp +++ b/js/src/jsfun.cpp @@ -1133,7 +1133,7 @@ js_GetCallArg(JSContext *cx, JSObject *obj, jsid id, jsval *vp) return CallPropertyOp(cx, obj, id, vp, JSCPK_ARG, JS_FALSE); } -static JSBool +JSBool SetCallArg(JSContext *cx, JSObject *obj, jsid id, jsval *vp) { return CallPropertyOp(cx, obj, id, vp, JSCPK_ARG, JS_TRUE); @@ -1154,12 +1154,27 @@ js_GetCallVarChecked(JSContext *cx, JSObject *obj, jsid id, jsval *vp) return CheckForEscapingClosure(cx, obj, vp); } -static JSBool +JSBool SetCallVar(JSContext *cx, JSObject *obj, jsid id, jsval *vp) { return CallPropertyOp(cx, obj, id, vp, JSCPK_VAR, JS_TRUE); } +JSBool JS_FASTCALL +js_SetCallArg(JSContext *cx, JSObject *obj, jsid id, jsval v) +{ + return CallPropertyOp(cx, obj, id, &v, JSCPK_ARG, JS_TRUE); +} + +JSBool JS_FASTCALL +js_SetCallVar(JSContext *cx, JSObject *obj, jsid id, jsval v) +{ + return CallPropertyOp(cx, obj, id, &v, JSCPK_VAR, JS_TRUE); +} + +JS_DEFINE_CALLINFO_4(extern, BOOL, js_SetCallArg, CONTEXT, OBJECT, JSID, JSVAL, 0, 0) +JS_DEFINE_CALLINFO_4(extern, BOOL, js_SetCallVar, CONTEXT, OBJECT, JSID, JSVAL, 0, 0) + static JSBool call_resolve(JSContext *cx, JSObject *obj, jsval idval, uintN flags, JSObject **objp) diff --git a/js/src/jsfun.h b/js/src/jsfun.h index 6be8fb2a0976..9a651cf2958c 100644 --- a/js/src/jsfun.h +++ b/js/src/jsfun.h @@ -280,7 +280,25 @@ extern JSBool js_GetCallArg(JSContext *cx, JSObject *obj, jsid id, jsval *vp); extern JSBool -js_GetCallVar(JSContext *cx, JSObject *obj, jsval id, jsval *vp); +js_GetCallVar(JSContext *cx, JSObject *obj, jsid id, jsval *vp); + +extern JSBool +SetCallArg(JSContext *cx, JSObject *obj, jsid id, jsval *vp); + +extern JSBool +SetCallVar(JSContext *cx, 
JSObject *obj, jsid id, jsval *vp); + +/* + * js_SetCallArg and js_SetCallVar are extern fastcall copies of the setter + * functions. These versions are required in order to set call vars from traces. + * The normal versions must not be fastcall because they are stored in the + * property ops map. + */ +extern JSBool JS_FASTCALL +js_SetCallArg(JSContext *cx, JSObject *obj, jsid id, jsval v); + +extern JSBool JS_FASTCALL +js_SetCallVar(JSContext *cx, JSObject *obj, jsid id, jsval v); /* * Slower version of js_GetCallVar used when call_resolve detects an attempt to diff --git a/js/src/jstracer.cpp b/js/src/jstracer.cpp index dba2449d3bee..8ecbd8a80bcb 100644 --- a/js/src/jstracer.cpp +++ b/js/src/jstracer.cpp @@ -9452,6 +9452,29 @@ TraceRecorder::setProp(jsval &l, JSPropCacheEntry* entry, JSScopeProperty* sprop JS_ASSERT_IF(entry->vcap == PCVCAP_MAKE(entry->kshape, 0, 0), scope->has(sprop)); + // Fast path for CallClass. This is about 20% faster than the general case. + if (OBJ_GET_CLASS(cx, obj) == &js_CallClass) { + const CallInfo* ci = NULL; + if (sprop->setter == SetCallArg) + ci = &js_SetCallArg_ci; + else if (sprop->setter == SetCallVar) + ci = &js_SetCallVar_ci; + else + ABORT_TRACE("can't trace special CallClass setter"); + + LIns* v_ins = get(&v); + box_jsval(v, v_ins); + LIns* args[] = { + v_ins, + INS_CONST(SPROP_USERID(sprop)), + obj_ins, + cx_ins + }; + LIns* call_ins = lir->insCall(ci, args); + guard(false, addName(lir->ins_eq0(call_ins), "guard(set upvar)"), STATUS_EXIT); + return JSRS_CONTINUE; + } + /* * Setting a function-valued property might need to rebrand the object; we * don't trace that case. There's no need to guard on that, though, because @@ -11067,8 +11090,14 @@ TraceRecorder::record_JSOP_BINDNAME() } } - if (obj != globalObj) - ABORT_TRACE("JSOP_BINDNAME must return global object on trace"); + /* + * If obj is a js_CallClass object, then we are tracing a reference to an + * upvar in a heavyweight function. We cannot reach this point of the trace + * with a different call object because of the guard on the function call, + * so we can assume the result of the bindname is constant on this trace. + */ + if (obj != globalObj && OBJ_GET_CLASS(cx, obj) != &js_CallClass) + ABORT_TRACE("Can only trace JSOP_BINDNAME with global or call object"); // The trace is specialized to this global object. Furthermore, // we know it is the sole 'global' object on the scope chain: we @@ -11076,7 +11105,7 @@ TraceRecorder::record_JSOP_BINDNAME() // reached it starting from the function closure or the current // scopeChain, so there is nothing inner to it. So this must be // the right base object. - stack(0, INS_CONSTPTR(globalObj)); + stack(0, INS_CONSTPTR(obj)); return JSRS_CONTINUE; } @@ -11087,10 +11116,12 @@ TraceRecorder::record_JSOP_SETNAME() JS_ASSERT(!JSVAL_IS_PRIMITIVE(l)); /* - * Trace cases that are global code or in lightweight functions scoped by - * the global object only. + * Trace only cases that are global code, in lightweight functions + * scoped by the global object only, or in call objects. 
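+     * For a call object the store itself is emitted later, by the CallClass
+     * fast path in TraceRecorder::setProp, via js_SetCallArg or js_SetCallVar.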
*/ JSObject* obj = JSVAL_TO_OBJECT(l); + if (OBJ_GET_CLASS(cx, obj) == &js_CallClass) + return JSRS_CONTINUE; if (obj != cx->fp->scopeChain || obj != globalObj) ABORT_TRACE("JSOP_SETNAME left operand is not the global object"); From 5c1ca3e00a8aeee5671b21d479a76bbe8ba4ff55 Mon Sep 17 00:00:00 2001 From: David Mandelin Date: Mon, 27 Jul 2009 18:40:12 -0700 Subject: [PATCH 09/19] Bug 506821: clean up trailing whitespace, r=brendan --- js/src/jsapi.cpp | 12 ++++++------ js/src/jsapi.h | 6 +++--- js/src/jsarray.cpp | 4 ++-- js/src/jsbuiltins.cpp | 2 +- js/src/jsbuiltins.h | 6 +++--- js/src/jscntxt.cpp | 8 ++++---- js/src/jsfun.h | 2 +- js/src/jsinterp.cpp | 8 ++++---- js/src/jsinterp.h | 6 +++--- js/src/jsinttypes.h | 2 +- js/src/jsmath.cpp | 2 +- js/src/json.cpp | 22 +++++++++++----------- js/src/jsparse.cpp | 2 +- js/src/jspubtd.h | 4 ++-- js/src/jsregexp.cpp | 6 +++--- js/src/jsscan.cpp | 14 +++++++------- js/src/jsscope.cpp | 4 ++-- js/src/jsstdint.h | 2 +- js/src/jstracer.cpp | 34 +++++++++++++++++----------------- js/src/jstracer.h | 12 ++++++------ js/src/prmjtime.cpp | 12 ++++++------ js/src/resource.h | 2 +- 22 files changed, 86 insertions(+), 86 deletions(-) diff --git a/js/src/jsapi.cpp b/js/src/jsapi.cpp index 93d3a2e542a6..f93cd95bf3c3 100644 --- a/js/src/jsapi.cpp +++ b/js/src/jsapi.cpp @@ -1328,7 +1328,7 @@ JS_InitStandardClasses(JSContext *cx, JSObject *obj) /* Define a top-level property 'undefined' with the undefined value. */ atom = cx->runtime->atomState.typeAtoms[JSTYPE_VOID]; if (!OBJ_DEFINE_PROPERTY(cx, obj, ATOM_TO_JSID(atom), JSVAL_VOID, - JS_PropertyStub, JS_PropertyStub, JSPROP_PERMANENT, + JS_PropertyStub, JS_PropertyStub, JSPROP_PERMANENT, NULL)) { return JS_FALSE; } @@ -1535,7 +1535,7 @@ JS_ResolveStandardClass(JSContext *cx, JSObject *obj, jsval id, if (idstr == ATOM_TO_STRING(atom)) { *resolved = JS_TRUE; return OBJ_DEFINE_PROPERTY(cx, obj, ATOM_TO_JSID(atom), JSVAL_VOID, - JS_PropertyStub, JS_PropertyStub, + JS_PropertyStub, JS_PropertyStub, JSPROP_PERMANENT, NULL); } @@ -1630,7 +1630,7 @@ JS_EnumerateStandardClasses(JSContext *cx, JSObject *obj) atom = rt->atomState.typeAtoms[JSTYPE_VOID]; if (!AlreadyHasOwnProperty(cx, obj, atom) && !OBJ_DEFINE_PROPERTY(cx, obj, ATOM_TO_JSID(atom), JSVAL_VOID, - JS_PropertyStub, JS_PropertyStub, JSPROP_PERMANENT, + JS_PropertyStub, JS_PropertyStub, JSPROP_PERMANENT, NULL)) { return JS_FALSE; } @@ -3001,7 +3001,7 @@ DefinePropertyById(JSContext *cx, JSObject *obj, jsid id, jsval value, attrs, flags, tinyid, NULL); } return OBJ_DEFINE_PROPERTY(cx, obj, id, value, getter, setter, attrs, - NULL); + NULL); } static JSBool @@ -3720,7 +3720,7 @@ JS_HasUCProperty(JSContext *cx, JSObject *obj, JSProperty *prop; CHECK_REQUEST(cx); - ok = LookupUCProperty(cx, obj, name, namelen, + ok = LookupUCProperty(cx, obj, name, namelen, JSRESOLVE_QUALIFIED | JSRESOLVE_DETECTING, &obj2, &prop); if (ok) { @@ -5197,7 +5197,7 @@ JS_SetOperationCallback(JSContext *cx, JSOperationCallback callback) { #ifdef JS_THREADSAFE JS_ASSERT(CURRENT_THREAD_IS_ME(cx->thread)); -#endif +#endif JSOperationCallback old = cx->operationCallback; cx->operationCallback = callback; return old; diff --git a/js/src/jsapi.h b/js/src/jsapi.h index 8d6de69f2301..2a6a422331d0 100644 --- a/js/src/jsapi.h +++ b/js/src/jsapi.h @@ -2298,14 +2298,14 @@ JS_CallFunctionValue(JSContext *cx, JSObject *obj, jsval fval, uintN argc, * These functions allow setting an operation callback that will be called * from the thread the context is associated with some time after any thread * 
triggered the callback using JS_TriggerOperationCallback(cx). - * + * * In a threadsafe build the engine internally triggers operation callbacks * under certain circumstances (i.e. GC and title transfer) to force the - * context to yield its current request, which the engine always + * context to yield its current request, which the engine always * automatically does immediately prior to calling the callback function. * The embedding should thus not rely on callbacks being triggered through * the external API only. - * + * * Important note: Additional callbacks can occur inside the callback handler * if it re-enters the JS engine. The embedding must ensure that the callback * is disconnected before attempting such re-entry. diff --git a/js/src/jsarray.cpp b/js/src/jsarray.cpp index 2d121e3ddc4b..d3bff3916b4d 100644 --- a/js/src/jsarray.cpp +++ b/js/src/jsarray.cpp @@ -1368,7 +1368,7 @@ array_toSource(JSContext *cx, uintN argc, jsval *vp) JSBool ok = JS_TRUE; /* - * This object will take responsibility for the jschar buffer until the + * This object will take responsibility for the jschar buffer until the * buffer is transferred to the returned JSString. */ JSTempVector buf(cx); @@ -1520,7 +1520,7 @@ array_toString_sub(JSContext *cx, JSObject *obj, JSBool locale, } /* - * This object will take responsibility for the jschar buffer until the + * This object will take responsibility for the jschar buffer until the * buffer is transferred to the returned JSString. */ JSTempVector buf(cx); diff --git a/js/src/jsbuiltins.cpp b/js/src/jsbuiltins.cpp index fe088a03aad8..1f1408bf460c 100644 --- a/js/src/jsbuiltins.cpp +++ b/js/src/jsbuiltins.cpp @@ -129,7 +129,7 @@ js_BoxInt32(JSContext* cx, int32 i) if (!js_NewDoubleInRootedValue(cx, d, &v)) return JSVAL_ERROR_COOKIE; return v; -} +} JS_DEFINE_CALLINFO_2(extern, JSVAL, js_BoxInt32, CONTEXT, INT32, 1, 1) jsdouble FASTCALL diff --git a/js/src/jsbuiltins.h b/js/src/jsbuiltins.h index d576e32424fc..96a1c904f34f 100644 --- a/js/src/jsbuiltins.h +++ b/js/src/jsbuiltins.h @@ -86,7 +86,7 @@ struct JSTraceableNative { const nanojit::CallInfo *builtin; const char *prefix; const char *argtypes; - uintN flags; /* JSTNErrType | JSTN_UNBOX_AFTER | JSTN_MORE | + uintN flags; /* JSTNErrType | JSTN_UNBOX_AFTER | JSTN_MORE | JSTN_CONSTRUCTOR */ }; @@ -117,7 +117,7 @@ struct JSTraceableNative { #endif /* - * Supported types for builtin functions. + * Supported types for builtin functions. * * Types with -- for the two string fields are not permitted as argument types * in JS_DEFINE_TRCINFO. @@ -165,7 +165,7 @@ struct JSTraceableNative { * trace. If an exception is pending, it is thrown; otherwise, we assume the * builtin had no side effects and retry the current bytecode in the * interpreter. - * + * * So a builtin must not return a value indicating failure after causing side * effects (such as reporting an error), without setting an exception pending. * The operation would be retried, despite the first attempt's observable diff --git a/js/src/jscntxt.cpp b/js/src/jscntxt.cpp index 9ce2db55eb7d..6bcd70f97f3f 100644 --- a/js/src/jscntxt.cpp +++ b/js/src/jscntxt.cpp @@ -819,7 +819,7 @@ js_NextActiveContext(JSRuntime *rt, JSContext *cx) return cx; #else return js_ContextIterator(rt, JS_FALSE, &iter); -#endif +#endif } #ifdef JS_THREADSAFE @@ -1738,10 +1738,10 @@ JSBool js_InvokeOperationCallback(JSContext *cx) { JS_ASSERT(cx->operationCallbackFlag); - + /* * Reset the callback flag first, then yield. 
If another thread is racing - * us here we will accumulate another callback request which will be + * us here we will accumulate another callback request which will be * serviced at the next opportunity. */ cx->operationCallbackFlag = 0; @@ -1755,7 +1755,7 @@ js_InvokeOperationCallback(JSContext *cx) */ if (cx->runtime->gcIsNeeded) js_GC(cx, GC_NORMAL); -#ifdef JS_THREADSAFE +#ifdef JS_THREADSAFE else JS_YieldRequest(cx); #endif diff --git a/js/src/jsfun.h b/js/src/jsfun.h index 9a651cf2958c..cbf76932d6c8 100644 --- a/js/src/jsfun.h +++ b/js/src/jsfun.h @@ -289,7 +289,7 @@ extern JSBool SetCallVar(JSContext *cx, JSObject *obj, jsid id, jsval *vp); /* - * js_SetCallArg and js_SetCallVar are extern fastcall copies of the setter + * js_SetCallArg and js_SetCallVar are extern fastcall copies of the setter * functions. These versions are required in order to set call vars from traces. * The normal versions must not be fastcall because they are stored in the * property ops map. diff --git a/js/src/jsinterp.cpp b/js/src/jsinterp.cpp index 25506179975c..d77602e4782f 100644 --- a/js/src/jsinterp.cpp +++ b/js/src/jsinterp.cpp @@ -1534,7 +1534,7 @@ js_Execute(JSContext *cx, JSObject *chain, JSScript *script, js_LeaveTrace(cx); #ifdef JS_TRACER - /* + /* * The JIT requires that the scope chain here is equal to its global * object. Disable the JIT for this call if this condition is not true. */ @@ -2095,7 +2095,7 @@ js_GetUpvar(JSContext *cx, uintN level, uintN cookie) } else if (slot == CALLEE_UPVAR_SLOT) { vp = &fp->argv[-2]; slot = 0; - } else { + } else { slot -= fp->fun->nargs; JS_ASSERT(slot < fp->script->nslots); vp = fp->slots; @@ -2132,7 +2132,7 @@ js_TraceOpcode(JSContext *cx) fp->script, cx->tracePrevPc); /* - * If there aren't that many elements on the stack, then + * If there aren't that many elements on the stack, then * we have probably entered a new frame, and printing output * would just be misleading. */ @@ -2717,7 +2717,7 @@ js_Interpret(JSContext *cx) * 'op=x; DO_OP()' to let another opcode's implementation finish * their work, and many opcodes share entry points with a run of * consecutive BEGIN_CASEs. - * + * * Take care to trace OP only when it is the opcode fetched from * the instruction stream, so the trace matches what one would * expect from looking at the code. (We do omit POPs after SETs; diff --git a/js/src/jsinterp.h b/js/src/jsinterp.h index 23e77a7171ef..5190a06114a0 100644 --- a/js/src/jsinterp.h +++ b/js/src/jsinterp.h @@ -90,7 +90,7 @@ struct JSStackFrame { * variables on the stack initially, note when they are closed * over, and copy those that are out to the heap when we leave * their dynamic scope. - * + * * The bytecode compiler produces a tree of block objects * accompanying each JSScript representing those lexical blocks in * the script that have let-bound variables associated with them. @@ -102,7 +102,7 @@ struct JSStackFrame { * When we are in the static scope of such a block, blockChain * points to its compiler-allocated block object; otherwise, it is * NULL. 
- * + * * scopeChain is the current scope chain, including 'call' and * 'block' objects for those function calls and lexical blocks * whose static scope we are currently executing in, and 'with' @@ -158,7 +158,7 @@ static JS_INLINE uintN GlobalVarCount(JSStackFrame *fp) { uintN n; - + JS_ASSERT(!fp->fun); n = fp->script->nfixed; if (fp->script->regexpsOffset != 0) diff --git a/js/src/jsinttypes.h b/js/src/jsinttypes.h index e239551f90c3..7e06b3edf0d5 100644 --- a/js/src/jsinttypes.h +++ b/js/src/jsinttypes.h @@ -43,7 +43,7 @@ * Types: * JSInt, JSUint (for = 8, 16, 32, and 64) * JSIntPtr, JSUIntPtr - * + * * JSInt and JSUint are signed and unsigned types known to be * bits long. Note that neither JSInt8 nor JSUInt8 is necessarily * equivalent to a plain "char". diff --git a/js/src/jsmath.cpp b/js/src/jsmath.cpp index daed88393b99..dd6fd51665c1 100644 --- a/js/src/jsmath.cpp +++ b/js/src/jsmath.cpp @@ -231,7 +231,7 @@ static inline jsdouble JS_FASTCALL math_ceil_kernel(jsdouble x) { #ifdef __APPLE__ - if (x < 0 && x > -1.0) + if (x < 0 && x > -1.0) return js_copysign(0, -1); #endif return ceil(x); diff --git a/js/src/json.cpp b/js/src/json.cpp index 7f7c2f2dfe43..43bf3ab03259 100644 --- a/js/src/json.cpp +++ b/js/src/json.cpp @@ -78,7 +78,7 @@ js_json_parse(JSContext *cx, uintN argc, jsval *vp) jsval *argv = vp + 2; jsval reviver = JSVAL_NULL; JSAutoTempValueRooter(cx, 1, &reviver); - + if (!JS_ConvertArguments(cx, argc, argv, "S / v", &s, &reviver)) return JS_FALSE; @@ -523,7 +523,7 @@ Str(JSContext *cx, jsid id, JSObject *holder, StringifyContext *scx, jsval *vp, char numBuf[DTOSTR_STANDARD_BUFFER_SIZE], *numStr; jsdouble d = JSVAL_IS_INT(*vp) ? jsdouble(JSVAL_TO_INT(*vp)) : *JSVAL_TO_DOUBLE(*vp); - numStr = JS_dtostr(numBuf, sizeof numBuf, DTOSTR_STANDARD, 0, d); + numStr = JS_dtostr(numBuf, sizeof numBuf, DTOSTR_STANDARD, 0, d); if (!numStr) { JS_ReportOutOfMemory(cx); return JS_FALSE; @@ -546,7 +546,7 @@ Str(JSContext *cx, jsid id, JSObject *holder, StringifyContext *scx, jsval *vp, return ok; } - + *vp = JSVAL_VOID; return JS_TRUE; } @@ -640,7 +640,7 @@ static JSBool Walk(JSContext *cx, jsid id, JSObject *holder, jsval reviver, jsval *vp) { JS_CHECK_RECURSION(cx, return JS_FALSE); - + if (!OBJ_GET_PROPERTY(cx, holder, id, vp)) return JS_FALSE; @@ -649,7 +649,7 @@ Walk(JSContext *cx, jsid id, JSObject *holder, jsval reviver, jsval *vp) if (!JSVAL_IS_PRIMITIVE(*vp) && !js_IsCallable(obj = JSVAL_TO_OBJECT(*vp), cx)) { jsval propValue = JSVAL_NULL; JSAutoTempValueRooter tvr(cx, 1, &propValue); - + if(OBJ_IS_ARRAY(cx, obj)) { jsuint length = 0; if (!js_GetLengthProperty(cx, obj, &length)) @@ -713,7 +713,7 @@ Walk(JSContext *cx, jsid id, JSObject *holder, jsval reviver, jsval *vp) static JSBool Revive(JSContext *cx, jsval reviver, jsval *vp) { - + JSObject *obj = js_NewObject(cx, &js_ObjectClass, NULL, NULL); if (!obj) return JS_FALSE; @@ -820,7 +820,7 @@ PushState(JSContext *cx, JSONParser *jp, JSONParserState state) if (*jp->statep == JSON_PARSE_STATE_FINISHED) { // extra input JS_ReportErrorNumber(cx, js_GetErrorMessage, NULL, JSMSG_JSON_BAD_PARSE); - return JS_FALSE; + return JS_FALSE; } jp->statep++; @@ -993,10 +993,10 @@ HandleNumber(JSContext *cx, JSONParser *jp, const jschar *buf, uint32 len) return JS_FALSE; } - jsval numVal; + jsval numVal; if (!JS_NewNumberValue(cx, val, &numVal)) return JS_FALSE; - + return PushPrimitive(cx, jp, numVal); } @@ -1248,7 +1248,7 @@ js_ConsumeJSONText(JSContext *cx, JSONParser *jp, const jschar *data, uint32 len JS_ReportErrorNumber(cx, 
js_GetErrorMessage, NULL, JSMSG_JSON_BAD_PARSE); return JS_FALSE; } - + if (++(jp->numHex) == 4) { js_FastAppendChar(&jp->buffer, jp->hexChar); jp->hexChar = 0; @@ -1265,7 +1265,7 @@ js_ConsumeJSONText(JSContext *cx, JSONParser *jp, const jschar *data, uint32 len i--; if (!PopState(cx, jp)) return JS_FALSE; - + if (!HandleData(cx, jp, JSON_DATA_KEYWORD)) return JS_FALSE; } diff --git a/js/src/jsparse.cpp b/js/src/jsparse.cpp index 8d87d9df77e5..161dfbc94ae2 100644 --- a/js/src/jsparse.cpp +++ b/js/src/jsparse.cpp @@ -3802,7 +3802,7 @@ CheckDestructuring(JSContext *cx, BindData *data, /* * This is a greatly pared down version of CheckDestructuring that extends the * pn_pos.end source coordinate of each name in a destructuring binding such as - * + * * var [x, y] = [function () y, 42]; * * to cover its corresponding initializer, so that the initialized binding does diff --git a/js/src/jspubtd.h b/js/src/jspubtd.h index 6c28b136a792..c2068d65d07c 100644 --- a/js/src/jspubtd.h +++ b/js/src/jspubtd.h @@ -106,8 +106,8 @@ typedef enum JSAccessMode { JSACC_PROTO = 0, /* XXXbe redundant w.r.t. id */ JSACC_PARENT = 1, /* XXXbe redundant w.r.t. id */ - /* - * enum value #2 formerly called JSACC_IMPORT, + /* + * enum value #2 formerly called JSACC_IMPORT, * gap preserved for ABI compatibility. */ diff --git a/js/src/jsregexp.cpp b/js/src/jsregexp.cpp index 960593e235a1..05762c296b49 100644 --- a/js/src/jsregexp.cpp +++ b/js/src/jsregexp.cpp @@ -1997,7 +1997,7 @@ CompileRegExpToAST(JSContext* cx, JSTokenStream* ts, + GetCompactIndexWidth(len); return JS_TRUE; } - + return ParseRegExp(&state); } @@ -2410,7 +2410,7 @@ class RegExpNativeCompiler { LIns *branch = lir->insBranch(LIR_jt, test, 0); extras[i].match = branch; } - + fails.pushBack(lir->insBranch(LIR_jf, lir->ins2(LIR_eq, text_ch, lir->insImm(ch)), 0)); for (int i = 0; i < nextras; ++i) @@ -2418,7 +2418,7 @@ class RegExpNativeCompiler { return lir->ins2(LIR_piadd, pos, lir->insImm(2)); } - JS_INLINE bool hasCases(jschar ch) + JS_INLINE bool hasCases(jschar ch) { return JS_TOLOWER(ch) != JS_TOUPPER(ch); } diff --git a/js/src/jsscan.cpp b/js/src/jsscan.cpp index 4cab27e92684..37e50a80d85a 100644 --- a/js/src/jsscan.cpp +++ b/js/src/jsscan.cpp @@ -308,7 +308,7 @@ GetChar(JSTokenStream *ts) ts->flags |= TSF_EOF; return EOF; } - + /* Fill ts->userbuf so that \r and \r\n convert to \n. */ crflag = (ts->flags & TSF_CRFLAG) != 0; len = js_fgets(cbuf, JS_LINE_LIMIT - crflag, ts->file); @@ -336,7 +336,7 @@ GetChar(JSTokenStream *ts) ts->listener(ts->filename, ts->lineno, ts->userbuf.ptr, len, &ts->listenerTSData, ts->listenerData); } - + nl = ts->saveEOL; if (!nl) { /* @@ -362,7 +362,7 @@ GetChar(JSTokenStream *ts) } } } - + /* * If there was a line terminator, copy thru it into linebuf. * Else copy JS_LINE_LIMIT-1 bytes into linebuf. @@ -378,7 +378,7 @@ GetChar(JSTokenStream *ts) js_strncpy(ts->linebuf.base, ts->userbuf.ptr, len); ts->userbuf.ptr += len; olen = len; - + /* * Make sure linebuf contains \n for EOL (don't do this in * userbuf because the user's string might be readonly). @@ -420,11 +420,11 @@ GetChar(JSTokenStream *ts) ts->linebuf.base[len-1] = '\n'; } } - + /* Reset linebuf based on adjusted segment length. */ ts->linebuf.limit = ts->linebuf.base + len; ts->linebuf.ptr = ts->linebuf.base; - + /* Update position of linebuf within physical userbuf line. 
*/ if (!(ts->flags & TSF_NLFLAG)) ts->linepos += ts->linelen; @@ -434,7 +434,7 @@ GetChar(JSTokenStream *ts) ts->flags |= TSF_NLFLAG; else ts->flags &= ~TSF_NLFLAG; - + /* Update linelen from original segment length. */ ts->linelen = olen; } diff --git a/js/src/jsscope.cpp b/js/src/jsscope.cpp index 7da51cd43f71..929f01f72715 100644 --- a/js/src/jsscope.cpp +++ b/js/src/jsscope.cpp @@ -1594,7 +1594,7 @@ JSScope::replacingShapeChange(JSContext *cx, JSScopeProperty *sprop, JSScopeProp { if (shape == sprop->shape) shape = newsprop->shape; - else + else generateOwnShape(cx); } @@ -1604,7 +1604,7 @@ JSScope::sealingShapeChange(JSContext *cx) generateOwnShape(cx); } -void +void JSScope::shadowingShapeChange(JSContext *cx, JSScopeProperty *sprop) { generateOwnShape(cx); diff --git a/js/src/jsstdint.h b/js/src/jsstdint.h index 1ce569aea1b5..247149bc592d 100644 --- a/js/src/jsstdint.h +++ b/js/src/jsstdint.h @@ -40,7 +40,7 @@ /* * This header provides definitions for the types we use, * even on systems that lack . - * + * * NOTE: This header should only be included in private SpiderMonkey * code; public headers should use only the JS{Int,Uint}N types; see * the comment for them in "jsinttypes.h". diff --git a/js/src/jstracer.cpp b/js/src/jstracer.cpp index 8ecbd8a80bcb..f693fcd16847 100644 --- a/js/src/jstracer.cpp +++ b/js/src/jstracer.cpp @@ -1757,7 +1757,7 @@ TraceRecorder::isGlobal(jsval* p) const (size_t(p - globalObj->dslots) < (STOBJ_NSLOTS(globalObj) - JS_INITIAL_NSLOTS))); } -/* +/* * Return the offset in the native stack for the given jsval. More formally, * |p| must be the address of a jsval that is represented in the native stack * area. The return value is the offset, from InterpState::stackBase, in bytes, @@ -2240,7 +2240,7 @@ js_GetUpvarStackOnTrace(JSContext* cx, uint32 upvarLevel, int32 slot, uint32 cal } /* - * Generic function to read upvars from Call objects of active heavyweight functions. + * Generic function to read upvars from Call objects of active heavyweight functions. * callee Callee Function object in which the upvar is accessed. * scopeIndex Number of parent steps to make from |callee| to find upvar definition. * This must be at least 1 because |callee| is a Function and we must reach a Call. @@ -2249,7 +2249,7 @@ js_GetUpvarStackOnTrace(JSContext* cx, uint32 upvarLevel, int32 slot, uint32 cal */ template uint32 JS_INLINE -js_GetFromClosure(JSContext* cx, JSObject* callee, uint32 scopeIndex, uint32 slot, uint32 callDepth, +js_GetFromClosure(JSContext* cx, JSObject* callee, uint32 scopeIndex, uint32 slot, uint32 callDepth, double* result) { JS_ASSERT(scopeIndex >= 1); @@ -2305,7 +2305,7 @@ private: }; uint32 JS_FASTCALL -js_GetClosureArg(JSContext* cx, JSObject* callee, uint32 scopeIndex, uint32 slot, uint32 callDepth, +js_GetClosureArg(JSContext* cx, JSObject* callee, uint32 scopeIndex, uint32 slot, uint32 callDepth, double* result) { return js_GetFromClosure(cx, callee, scopeIndex, slot, callDepth, result); @@ -2320,7 +2320,7 @@ private: }; uint32 JS_FASTCALL -js_GetClosureVar(JSContext* cx, JSObject* callee, uint32 scopeIndex, uint32 slot, uint32 callDepth, +js_GetClosureVar(JSContext* cx, JSObject* callee, uint32 scopeIndex, uint32 slot, uint32 callDepth, double* result) { return js_GetFromClosure(cx, callee, scopeIndex, slot, callDepth, result); @@ -4788,14 +4788,14 @@ js_AttemptToExtendTree(JSContext* cx, VMSideExit* anchor, VMSideExit* exitedFrom if (e2->numGlobalSlots < e1->numGlobalSlots) { /* * Watch out for an extremely rare case (bug 502714). 
The sequence of events is: - * + * * 1) Inner tree compiles not knowing about global X (which has type A). * 2) Inner tree learns about global X and specializes it to a different type * (type B). * 3) Outer tree records inner tree with global X as type A, exiting as B. * 4) Outer tree now has a nesting guard with typeof(X)=B. * 5) Inner tree takes its original exit that does not know about X. - * + * * In this case, the nesting guard fails, and now it is illegal to use the nested * typemap entry for X. The correct entry is in the inner guard's TreeInfo, * analogous to the solution for bug 476653. @@ -6523,9 +6523,9 @@ TraceRecorder::frameIfInRange(JSObject* obj, unsigned* depthp) const return NULL; } -JS_DEFINE_CALLINFO_6(extern, UINT32, js_GetClosureVar, CONTEXT, OBJECT, UINT32, +JS_DEFINE_CALLINFO_6(extern, UINT32, js_GetClosureVar, CONTEXT, OBJECT, UINT32, UINT32, UINT32, DOUBLEPTR, 0, 0) -JS_DEFINE_CALLINFO_6(extern, UINT32, js_GetClosureArg, CONTEXT, OBJECT, UINT32, +JS_DEFINE_CALLINFO_6(extern, UINT32, js_GetClosureArg, CONTEXT, OBJECT, UINT32, UINT32, UINT32, DOUBLEPTR, 0, 0) /* @@ -6622,13 +6622,13 @@ TraceRecorder::scopeChainProp(JSObject* obj, jsval*& vp, LIns*& ins, bool& track LIns* callee_ins = get(&cx->fp->argv[-2]); LIns* outp = lir->insAlloc(sizeof(double)); - LIns* args[] = { + LIns* args[] = { outp, INS_CONST(callDepth), INS_CONST(slot), - INS_CONST(scopeIndex), + INS_CONST(scopeIndex), callee_ins, - cx_ins + cx_ins }; const CallInfo* ci; if (sprop->getter == js_GetCallArg) @@ -9700,7 +9700,7 @@ TraceRecorder::record_JSOP_GETELEM() typemap_ins = lir->ins2(LIR_add, fip_ins, INS_CONST(sizeof(FrameInfo) + 2/*callee,this*/ * sizeof(JSTraceType))); } - LIns* typep_ins = lir->ins2(LIR_add, typemap_ins, + LIns* typep_ins = lir->ins2(LIR_add, typemap_ins, lir->ins2(LIR_mul, idx_ins, INS_CONST(sizeof(JSTraceType)))); LIns* type_ins = lir->insLoad(LIR_ldcb, typep_ins, 0); guard(true, @@ -9709,7 +9709,7 @@ TraceRecorder::record_JSOP_GETELEM() BRANCH_EXIT); // Read the value out of the native stack area. - guard(true, lir->ins2(LIR_ult, idx_ins, INS_CONST(afp->argc)), + guard(true, lir->ins2(LIR_ult, idx_ins, INS_CONST(afp->argc)), snapshot(BRANCH_EXIT)); size_t stackOffset = -treeInfo->nativeStackBase + nativeStackOffset(&afp->argv[0]); LIns* args_addr_ins = lir->ins2(LIR_add, lirbuf->sp, INS_CONST(stackOffset)); @@ -9717,7 +9717,7 @@ TraceRecorder::record_JSOP_GETELEM() lir->ins2(LIR_mul, idx_ins, INS_CONST(sizeof(double)))); v_ins = stackLoad(argi_addr_ins, type); } else { - guard(false, lir->ins2(LIR_ult, idx_ins, INS_CONST(afp->argc)), + guard(false, lir->ins2(LIR_ult, idx_ins, INS_CONST(afp->argc)), snapshot(BRANCH_EXIT)); v_ins = INS_VOID(); } @@ -10225,7 +10225,7 @@ TraceRecorder::record_JSOP_APPLY() aobj = JSVAL_TO_OBJECT(vp[3]); aobj_ins = get(&vp[3]); - /* + /* * We trace dense arrays and arguments objects. The code we generate for apply * uses imacros to handle a specific number of arguments. */ @@ -11093,7 +11093,7 @@ TraceRecorder::record_JSOP_BINDNAME() /* * If obj is a js_CallClass object, then we are tracing a reference to an * upvar in a heavyweight function. We cannot reach this point of the trace - * with a different call object because of the guard on the function call, + * with a different call object because of the guard on the function call, * so we can assume the result of the bindname is constant on this trace. 
*/ if (obj != globalObj && OBJ_GET_CLASS(cx, obj) != &js_CallClass) diff --git a/js/src/jstracer.h b/js/src/jstracer.h index 61dca54ecd8c..523ea7047457 100644 --- a/js/src/jstracer.h +++ b/js/src/jstracer.h @@ -281,7 +281,7 @@ typedef int8_t JSTraceType; /* * This indicates an invalid type or error. Note that it should not be used in typemaps, - * because it is the wrong size. It can only be used as a uint32, for example as the + * because it is the wrong size. It can only be used as a uint32, for example as the * return value from a function that returns a type as a uint32. */ const uint32 TT_INVALID = uint32(-1); @@ -332,7 +332,7 @@ public: _(TIMEOUT) \ _(DEEP_BAIL) \ _(STATUS) - + enum ExitType { #define MAKE_EXIT_CODE(x) x##_EXIT, @@ -408,7 +408,7 @@ struct FrameInfo { * stack frame for the caller *before* the slots covered by spdist. * This may be negative if the caller is the top level script. * The key fact is that if we let 'cpos' be the start of the caller's - * native stack frame, then (cpos + spoffset) points to the first + * native stack frame, then (cpos + spoffset) points to the first * non-argument slot in the callee's native stack frame. */ int32 spoffset; @@ -548,7 +548,7 @@ struct JSRecordingStatus JSRS_ERROR = { JSRS_ERROR_code }; #define STATUS_ABORTS_RECORDING(s) ((s) == JSRS_STOP || (s) == JSRS_ERROR) #else enum JSRecordingStatus { - JSRS_ERROR, // Error; propagate to interpreter. + JSRS_ERROR, // Error; propagate to interpreter. JSRS_STOP, // Abort recording. JSRS_CONTINUE, // Continue recording. JSRS_IMACRO // Entered imacro; continue recording. @@ -995,13 +995,13 @@ js_LogTraceVisState(TraceVisState s, TraceVisExitReason r) } } -static inline void +static inline void js_EnterTraceVisState(TraceVisState s, TraceVisExitReason r) { js_LogTraceVisState(s, r); } -static inline void +static inline void js_ExitTraceVisState(TraceVisExitReason r) { js_LogTraceVisState(S_EXITLAST, r); diff --git a/js/src/prmjtime.cpp b/js/src/prmjtime.cpp index 61b99b65bdcc..986f480a30be 100644 --- a/js/src/prmjtime.cpp +++ b/js/src/prmjtime.cpp @@ -109,7 +109,7 @@ PRMJ_LocalGMTDifference() #if defined(XP_WIN) && !defined(WINCE) /* Windows does not follow POSIX. Updates to the - * TZ environment variable are not reflected + * TZ environment variable are not reflected * immediately on that platform as they are * on UNIX systems without this call. */ @@ -170,8 +170,8 @@ static const JSInt64 win2un = JSLL_INIT(0x19DB1DE, 0xD53E8000); #if defined(HAVE_GETSYSTEMTIMEASFILETIME) inline void LowResTime(LPFILETIME lpft) -{ - GetSystemTimeAsFileTime(lpft); +{ + GetSystemTimeAsFileTime(lpft); } #elif defined(HAVE_SYSTEMTIMETOFILETIME) inline void @@ -229,9 +229,9 @@ NowCalibrate() LowResTime(&ft); } while (memcmp(&ftStart,&ft, sizeof(ft)) == 0); timeEndPeriod(1); - + #ifdef WINCE - calibration.granularity = (FILETIME2INT64(ft) - + calibration.granularity = (FILETIME2INT64(ft) - FILETIME2INT64(ftStart))/10; #endif /* @@ -581,7 +581,7 @@ PRMJ_DSTOffset(JSInt64 local_time) #if defined(XP_WIN) && !defined(WINCE) /* Windows does not follow POSIX. Updates to the - * TZ environment variable are not reflected + * TZ environment variable are not reflected * immediately on that platform as they are * on UNIX systems without this call. 
*/ diff --git a/js/src/resource.h b/js/src/resource.h index 9301810e4448..59dbde3775e9 100644 --- a/js/src/resource.h +++ b/js/src/resource.h @@ -4,7 +4,7 @@ // // Next default values for new objects -// +// #ifdef APSTUDIO_INVOKED #ifndef APSTUDIO_READONLY_SYMBOLS #define _APS_NEXT_RESOURCE_VALUE 101 From dce181923ffc352aacb7a81f865b1ec06e26e862 Mon Sep 17 00:00:00 2001 From: David Mandelin Date: Mon, 27 Jul 2009 18:49:27 -0700 Subject: [PATCH 10/19] Fix incompatible enum warnings in static asserts --- js/src/jstracer.cpp | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/js/src/jstracer.cpp b/js/src/jstracer.cpp index f693fcd16847..a2c45c0b627d 100644 --- a/js/src/jstracer.cpp +++ b/js/src/jstracer.cpp @@ -515,9 +515,9 @@ static inline JSTraceType getPromotedType(jsval v) } uint8_t tag = JSVAL_TAG(v); JS_ASSERT(tag == JSVAL_DOUBLE || tag == JSVAL_STRING || tag == JSVAL_BOOLEAN); - JS_STATIC_ASSERT(TT_DOUBLE == JSVAL_DOUBLE); - JS_STATIC_ASSERT(TT_STRING == JSVAL_STRING); - JS_STATIC_ASSERT(TT_PSEUDOBOOLEAN == JSVAL_BOOLEAN); + JS_STATIC_ASSERT(static_cast(TT_DOUBLE) == JSVAL_DOUBLE); + JS_STATIC_ASSERT(static_cast(TT_STRING) == JSVAL_STRING); + JS_STATIC_ASSERT(static_cast(TT_PSEUDOBOOLEAN) == JSVAL_BOOLEAN); return JSTraceType(tag); } @@ -535,9 +535,9 @@ static inline JSTraceType getCoercedType(jsval v) } uint8_t tag = JSVAL_TAG(v); JS_ASSERT(tag == JSVAL_DOUBLE || tag == JSVAL_STRING || tag == JSVAL_BOOLEAN); - JS_STATIC_ASSERT(TT_DOUBLE == JSVAL_DOUBLE); - JS_STATIC_ASSERT(TT_STRING == JSVAL_STRING); - JS_STATIC_ASSERT(TT_PSEUDOBOOLEAN == JSVAL_BOOLEAN); + JS_STATIC_ASSERT(static_cast(TT_DOUBLE) == JSVAL_DOUBLE); + JS_STATIC_ASSERT(static_cast(TT_STRING) == JSVAL_STRING); + JS_STATIC_ASSERT(static_cast(TT_PSEUDOBOOLEAN) == JSVAL_BOOLEAN); return JSTraceType(tag); } @@ -2895,8 +2895,8 @@ TraceRecorder::determineSlotType(jsval* vp) m = TT_OBJECT; } else { JS_ASSERT(JSVAL_TAG(*vp) == JSVAL_STRING || JSVAL_TAG(*vp) == JSVAL_BOOLEAN); - JS_STATIC_ASSERT(TT_STRING == JSVAL_STRING); - JS_STATIC_ASSERT(TT_PSEUDOBOOLEAN == JSVAL_BOOLEAN); + JS_STATIC_ASSERT(static_cast(TT_STRING) == JSVAL_STRING); + JS_STATIC_ASSERT(static_cast(TT_PSEUDOBOOLEAN) == JSVAL_BOOLEAN); m = JSTraceType(JSVAL_TAG(*vp)); } JS_ASSERT(m != TT_INT32 || isInt32(*vp)); From d1254f2c2c17c11b6f40c2a2d570e91aeba53890 Mon Sep 17 00:00:00 2001 From: Andreas Gal Date: Mon, 27 Jul 2009 21:10:12 -0700 Subject: [PATCH 11/19] When finalizing, deallocate memory in a separate thread (505612, r=bent,brendan,waldo). 
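The scheme, as the jscntxt.h and jsgc.cpp hunks below show: while sweeping, each context batches the pointers it would otherwise free inline into a per-thread JSFreePointerListTask, and once the sweep finishes it hands the whole batch to a JSBackgroundThread owned by the runtime, so the free() calls run off the requesting thread. The sketch below illustrates only that handoff pattern; it is written against standard C++ threading primitives for brevity, whereas the jstask.h/jstask.cpp code this patch adds is built on NSPR, and both classes here are simplified stand-ins rather than the patch's exact interfaces.

    #include <condition_variable>
    #include <cstdlib>
    #include <mutex>
    #include <thread>
    #include <vector>

    // Batch of pointers queued for deallocation; stands in for the
    // patch's JSFreePointerListTask.
    struct FreePointerListTask {
        std::vector<void*> pointers;
        void add(void* p) { pointers.push_back(p); }
        void run() {                        // executes on the background thread
            for (void* p : pointers)
                std::free(p);
            pointers.clear();
        }
    };

    // One-slot background worker; stands in for JSBackgroundThread.
    class BackgroundThread {
        std::mutex lock;
        std::condition_variable wakeup;
        FreePointerListTask* pending;
        bool shutdown;
        std::thread worker;

        void loop() {
            std::unique_lock<std::mutex> guard(lock);
            for (;;) {
                while (!pending && !shutdown)
                    wakeup.wait(guard);
                if (pending) {
                    FreePointerListTask* task = pending;
                    pending = nullptr;      // worker counts as idle again
                    guard.unlock();
                    task->run();            // free the batch outside the lock
                    delete task;
                    guard.lock();
                } else {
                    return;                 // shutdown requested, queue drained
                }
            }
        }

      public:
        BackgroundThread()
          : pending(nullptr), shutdown(false),
            worker(&BackgroundThread::loop, this) {}

        bool busy() {                       // gate checked before making a batch
            std::lock_guard<std::mutex> guard(lock);
            return pending != nullptr;
        }

        void schedule(FreePointerListTask* task) {
            std::lock_guard<std::mutex> guard(lock);
            pending = task;                 // caller checked busy(), no overwrite
            wakeup.notify_one();
        }

        void cancel() {                     // drain and stop before teardown
            {
                std::lock_guard<std::mutex> guard(lock);
                shutdown = true;
            }
            wakeup.notify_one();
            worker.join();
        }
    };

The protocol matches the patch: the context allocates a batch only when the worker is idle (createDeallocatorTask checks busy()), cx->free() appends pointers to it during the sweep, submitDeallocatorTask() schedules the batch, and JS_DestroyRuntime() calls cancel() before teardown.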
--- js/src/Makefile.in | 2 + js/src/jsapi.cpp | 84 ++++++++++++----------------- js/src/jsarena.cpp | 12 ++--- js/src/jsarray.cpp | 28 +++++----- js/src/jsatom.cpp | 2 +- js/src/jscntxt.cpp | 66 +++++++++++------------ js/src/jscntxt.h | 104 ++++++++++++++++++++++++++++++++++-- js/src/jsdate.cpp | 2 +- js/src/jsdbgapi.cpp | 30 +++++------ js/src/jsdhash.cpp | 12 ++--- js/src/jsdtoa.cpp | 6 +-- js/src/jsemit.cpp | 23 ++++---- js/src/jsexn.cpp | 24 ++++----- js/src/jsfile.cpp | 127 ++++++++++++++++++++++---------------------- js/src/jsfun.cpp | 24 ++++----- js/src/jsgc.cpp | 30 +++++++---- js/src/jsgc.h | 23 ++++++++ js/src/jshash.cpp | 4 +- js/src/jsinterp.cpp | 8 +-- js/src/jsiter.cpp | 6 +-- js/src/jslock.cpp | 10 ++-- js/src/jsnum.cpp | 24 ++++----- js/src/jsobj.cpp | 42 +++++++-------- js/src/json.cpp | 7 ++- js/src/jsopcode.cpp | 86 +++++++++++++++--------------- js/src/jsparse.cpp | 4 +- js/src/jsprf.cpp | 20 +++---- js/src/jspubtd.h | 1 - js/src/jsregexp.cpp | 57 ++++++++++---------- js/src/jsscan.cpp | 26 ++++----- js/src/jsscope.cpp | 30 +++++------ js/src/jsscript.cpp | 32 +++++------ js/src/jsstr.cpp | 97 +++++++++++++++++---------------- js/src/jstask.cpp | 126 +++++++++++++++++++++++++++++++++++++++++++ js/src/jstask.h | 84 +++++++++++++++++++++++++++++ js/src/jstracer.cpp | 6 +-- js/src/jsutil.cpp | 2 +- js/src/jsutil.h | 25 ++++++++- js/src/jsxdrapi.cpp | 30 +++++------ js/src/jsxml.cpp | 46 ++++++++-------- 40 files changed, 859 insertions(+), 513 deletions(-) create mode 100644 js/src/jstask.cpp create mode 100644 js/src/jstask.h diff --git a/js/src/Makefile.in b/js/src/Makefile.in index 1653c69c0e45..9e8d426ebf00 100644 --- a/js/src/Makefile.in +++ b/js/src/Makefile.in @@ -144,6 +144,7 @@ CPPSRCS = \ jsscope.cpp \ jsscript.cpp \ jsstr.cpp \ + jstask.cpp \ jsutil.cpp \ jsxdrapi.cpp \ jsxml.cpp \ @@ -200,6 +201,7 @@ INSTALLED_HEADERS = \ jsscript.h \ jsstaticcheck.h \ jsstr.h \ + jstask.h \ jstracer.h \ jstypes.h \ jsutil.h \ diff --git a/js/src/jsapi.cpp b/js/src/jsapi.cpp index f93cd95bf3c3..00e8a6ec37b3 100644 --- a/js/src/jsapi.cpp +++ b/js/src/jsapi.cpp @@ -79,6 +79,7 @@ #include "jsscope.h" #include "jsscript.h" #include "jsstr.h" +#include "jstask.h" #include "jstracer.h" #include "jsdbgapi.h" #include "prmjtime.h" @@ -448,7 +449,7 @@ JS_AddArgumentFormatter(JSContext *cx, const char *format, goto out; mpp = &map->next; } - map = (JSArgumentFormatMap *) JS_malloc(cx, sizeof *map); + map = (JSArgumentFormatMap *) cx->malloc(sizeof *map); if (!map) return JS_FALSE; map->format = format; @@ -471,7 +472,7 @@ JS_RemoveArgumentFormatter(JSContext *cx, const char *format) while ((map = *mpp) != NULL) { if (map->length == length && !strcmp(map->format, format)) { *mpp = map->next; - JS_free(cx, map); + cx->free(map); return; } mpp = &map->next; @@ -773,7 +774,7 @@ JS_NewRuntime(uint32 maxbytes) } #endif /* DEBUG */ - rt = (JSRuntime *) malloc(sizeof(JSRuntime)); + rt = (JSRuntime *) js_malloc(sizeof(JSRuntime)); if (!rt) return NULL; @@ -817,6 +818,9 @@ JS_NewRuntime(uint32 maxbytes) rt->debuggerLock = JS_NEW_LOCK(); if (!rt->debuggerLock) goto bad; + rt->deallocatorThread = new JSBackgroundThread(); + if (!rt->deallocatorThread || !rt->deallocatorThread->init()) + goto bad; #endif if (!js_InitPropertyTree(rt)) goto bad; @@ -886,9 +890,13 @@ JS_DestroyRuntime(JSRuntime *rt) JS_DESTROY_CONDVAR(rt->titleSharingDone); if (rt->debuggerLock) JS_DESTROY_LOCK(rt->debuggerLock); + if (rt->deallocatorThread) { + rt->deallocatorThread->cancel(); + delete rt->deallocatorThread; + } 
#endif js_FinishPropertyTree(rt); - free(rt); + js_free(rt); } JS_PUBLIC_API(void) @@ -1653,7 +1661,7 @@ NewIdArray(JSContext *cx, jsint length) JSIdArray *ida; ida = (JSIdArray *) - JS_malloc(cx, offsetof(JSIdArray, vector) + length * sizeof(jsval)); + cx->malloc(offsetof(JSIdArray, vector) + length * sizeof(jsval)); if (ida) ida->length = length; return ida; @@ -1831,41 +1839,19 @@ JS_ComputeThis(JSContext *cx, jsval *vp) JS_PUBLIC_API(void *) JS_malloc(JSContext *cx, size_t nbytes) { - void *p; - - JS_ASSERT(nbytes != 0); - if (nbytes == 0) - nbytes = 1; - - p = malloc(nbytes); - if (!p) { - JS_ReportOutOfMemory(cx); - return NULL; - } - cx->updateMallocCounter(nbytes); - - return p; + return cx->malloc(nbytes); } JS_PUBLIC_API(void *) JS_realloc(JSContext *cx, void *p, size_t nbytes) { - void *orig = p; - p = realloc(p, nbytes); - if (!p) { - JS_ReportOutOfMemory(cx); - return NULL; - } - if (!orig) - cx->updateMallocCounter(nbytes); - return p; + return cx->realloc(p, nbytes); } JS_PUBLIC_API(void) JS_free(JSContext *cx, void *p) { - if (p) - free(p); + return cx->free(p); } JS_PUBLIC_API(char *) @@ -1875,7 +1861,7 @@ JS_strdup(JSContext *cx, const char *s) void *p; n = strlen(s) + 1; - p = JS_malloc(cx, n); + p = cx->malloc(n); if (!p) return NULL; return (char *)memcpy(p, s, n); @@ -2260,7 +2246,7 @@ DumpNotify(JSTracer *trc, void *thing, uint32 kind) edgeNameSize = strlen(edgeName) + 1; node = (JSHeapDumpNode *) - JS_malloc(cx, offsetof(JSHeapDumpNode, edgeName) + edgeNameSize); + cx->malloc(offsetof(JSHeapDumpNode, edgeName) + edgeNameSize); if (!node) { dtrc->ok = JS_FALSE; return; @@ -2412,7 +2398,7 @@ JS_DumpHeap(JSContext *cx, FILE *fp, void* startThing, uint32 startKind, for (;;) { next = node->next; parent = node->parent; - JS_free(cx, node); + cx->free(node); node = next; if (node) break; @@ -2679,7 +2665,7 @@ JS_SetScriptStackQuota(JSContext *cx, size_t quota) JS_PUBLIC_API(void) JS_DestroyIdArray(JSContext *cx, JSIdArray *ida) { - JS_free(cx, ida); + cx->free(ida); } JS_PUBLIC_API(JSBool) @@ -4644,7 +4630,7 @@ JS_CompileScript(JSContext *cx, JSObject *obj, if (!chars) return NULL; script = JS_CompileUCScript(cx, obj, chars, length, filename, lineno); - JS_free(cx, chars); + cx->free(chars); return script; } @@ -4663,7 +4649,7 @@ JS_CompileScriptForPrincipals(JSContext *cx, JSObject *obj, return NULL; script = JS_CompileUCScriptForPrincipals(cx, obj, principals, chars, length, filename, lineno); - JS_free(cx, chars); + cx->free(chars); return script; } @@ -4748,7 +4734,7 @@ JS_BufferIsCompilableUnit(JSContext *cx, JSObject *obj, JS_SetErrorReporter(cx, older); } } - JS_free(cx, chars); + cx->free(chars); JS_RestoreExceptionState(cx, exnState); return result; } @@ -4857,7 +4843,7 @@ JS_CompileFunction(JSContext *cx, JSObject *obj, const char *name, return NULL; fun = JS_CompileUCFunction(cx, obj, name, nargs, argnames, chars, length, filename, lineno); - JS_free(cx, chars); + cx->free(chars); return fun; } @@ -4878,7 +4864,7 @@ JS_CompileFunctionForPrincipals(JSContext *cx, JSObject *obj, fun = JS_CompileUCFunctionForPrincipals(cx, obj, principals, name, nargs, argnames, chars, length, filename, lineno); - JS_free(cx, chars); + cx->free(chars); return fun; } @@ -5088,7 +5074,7 @@ JS_EvaluateScript(JSContext *cx, JSObject *obj, if (!chars) return JS_FALSE; ok = JS_EvaluateUCScript(cx, obj, chars, length, filename, lineno, rval); - JS_free(cx, chars); + cx->free(chars); return ok; } @@ -5110,7 +5096,7 @@ JS_EvaluateScriptForPrincipals(JSContext *cx, JSObject *obj, return 
JS_FALSE; ok = JS_EvaluateUCScriptForPrincipals(cx, obj, principals, chars, length, filename, lineno, rval); - JS_free(cx, chars); + cx->free(chars); return ok; } @@ -5319,13 +5305,13 @@ JS_NewString(JSContext *cx, char *bytes, size_t nbytes) /* Free chars (but not bytes, which caller frees on error) if we fail. */ str = js_NewString(cx, chars, length); if (!str) { - JS_free(cx, chars); + cx->free(chars); return NULL; } /* Hand off bytes to the deflated string cache, if possible. */ if (!js_SetStringBytes(cx, str, bytes, nbytes)) - JS_free(cx, bytes); + cx->free(bytes); return str; } @@ -5341,7 +5327,7 @@ JS_NewStringCopyN(JSContext *cx, const char *s, size_t n) return NULL; str = js_NewString(cx, js, n); if (!str) - JS_free(cx, js); + cx->free(js); return str; } @@ -5361,7 +5347,7 @@ JS_NewStringCopyZ(JSContext *cx, const char *s) return NULL; str = js_NewString(cx, js, n); if (!str) - JS_free(cx, js); + cx->free(js); return str; } @@ -5449,7 +5435,7 @@ JS_GetStringChars(JSString *str) if (str->isDependent()) { n = str->dependentLength(); size = (n + 1) * sizeof(jschar); - s = (jschar *) malloc(size); + s = (jschar *) js_malloc(size); if (s) { memcpy(s, str->dependentChars(), n * sizeof *s); s[n] = 0; @@ -5727,7 +5713,7 @@ JS_NewRegExpObject(JSContext *cx, char *bytes, size_t length, uintN flags) if (!chars) return NULL; obj = js_NewRegExpObject(cx, NULL, chars, length, flags); - JS_free(cx, chars); + cx->free(chars); return obj; } @@ -5857,7 +5843,7 @@ JS_SaveExceptionState(JSContext *cx) JSExceptionState *state; CHECK_REQUEST(cx); - state = (JSExceptionState *) JS_malloc(cx, sizeof(JSExceptionState)); + state = (JSExceptionState *) cx->malloc(sizeof(JSExceptionState)); if (state) { state->throwing = JS_GetPendingException(cx, &state->exception); if (state->throwing && JSVAL_IS_GCTHING(state->exception)) @@ -5886,7 +5872,7 @@ JS_DropExceptionState(JSContext *cx, JSExceptionState *state) if (state) { if (state->throwing && JSVAL_IS_GCTHING(state->exception)) JS_RemoveRoot(cx, &state->exception); - JS_free(cx, state); + cx->free(state); } } diff --git a/js/src/jsarena.cpp b/js/src/jsarena.cpp index 254d6801e1d4..68407af0a632 100644 --- a/js/src/jsarena.cpp +++ b/js/src/jsarena.cpp @@ -160,12 +160,12 @@ JS_ArenaAllocate(JSArenaPool *pool, size_t nb) if (pool->quotap) { if (gross > *pool->quotap) return NULL; - b = (JSArena *) malloc(gross); + b = (JSArena *) js_malloc(gross); if (!b) return NULL; *pool->quotap -= gross; } else { - b = (JSArena *) malloc(gross); + b = (JSArena *) js_malloc(gross); if (!b) return NULL; } @@ -227,12 +227,12 @@ JS_ArenaRealloc(JSArenaPool *pool, void *p, size_t size, size_t incr) growth = gross - (a->limit - (jsuword) a); if (growth > *pool->quotap) return NULL; - a = (JSArena *) realloc(a, gross); + a = (JSArena *) js_realloc(a, gross); if (!a) return NULL; *pool->quotap -= growth; } else { - a = (JSArena *) realloc(a, gross); + a = (JSArena *) js_realloc(a, gross); if (!a) return NULL; } @@ -315,7 +315,7 @@ FreeArenaList(JSArenaPool *pool, JSArena *head) *pool->quotap += a->limit - (jsuword) a; JS_CLEAR_ARENA(a); JS_COUNT_ARENA(pool,--); - free(a); + js_free(a); } while ((a = *ap) != NULL); pool->current = head; @@ -354,7 +354,7 @@ JS_FinishArenaPool(JSArenaPool *pool) JSArenaStats *stats, **statsp; if (pool->stats.name) { - free(pool->stats.name); + js_free(pool->stats.name); pool->stats.name = NULL; } for (statsp = &arena_stats_list; (stats = *statsp) != 0; diff --git a/js/src/jsarray.cpp b/js/src/jsarray.cpp index d3bff3916b4d..f4b1039efab6 100644 --- 
a/js/src/jsarray.cpp +++ b/js/src/jsarray.cpp @@ -314,7 +314,7 @@ ResizeSlots(JSContext *cx, JSObject *obj, uint32 oldsize, uint32 size) if (size == 0) { if (obj->dslots) { - JS_free(cx, obj->dslots - 1); + cx->free(obj->dslots - 1); obj->dslots = NULL; } return JS_TRUE; @@ -330,7 +330,7 @@ ResizeSlots(JSContext *cx, JSObject *obj, uint32 oldsize, uint32 size) } slots = obj->dslots ? obj->dslots - 1 : NULL; - newslots = (jsval *) JS_realloc(cx, slots, (size + 1) * sizeof(jsval)); + newslots = (jsval *) cx->realloc(slots, (size + 1) * sizeof(jsval)); if (!newslots) return JS_FALSE; @@ -1099,7 +1099,7 @@ array_enumerate(JSContext *cx, JSObject *obj, JSIterateOp enum_op, if (obj->dslots[i] == JSVAL_HOLE) { if (!ii) { ii = (JSIndexIterState *) - JS_malloc(cx, offsetof(JSIndexIterState, holes) + + cx->malloc(offsetof(JSIndexIterState, holes) + JS_BITMAP_SIZE(capacity)); if (!ii) return JS_FALSE; @@ -1116,7 +1116,7 @@ array_enumerate(JSContext *cx, JSObject *obj, JSIterateOp enum_op, break; } ii = (JSIndexIterState *) - JS_malloc(cx, offsetof(JSIndexIterState, holes)); + cx->malloc(offsetof(JSIndexIterState, holes)); if (!ii) return JS_FALSE; ii->hasHoles = JS_FALSE; @@ -1157,7 +1157,7 @@ array_enumerate(JSContext *cx, JSObject *obj, JSIterateOp enum_op, if (JSVAL_TAG(*statep) != JSVAL_BOOLEAN) { JS_ASSERT((*statep & INDEX_ITER_TAG) == INDEX_ITER_TAG); ii = (JSIndexIterState *) (*statep & ~INDEX_ITER_TAG); - JS_free(cx, ii); + cx->free(ii); } *statep = JSVAL_NULL; break; @@ -1188,7 +1188,7 @@ static void array_finalize(JSContext *cx, JSObject *obj) { if (obj->dslots) - JS_free(cx, obj->dslots - 1); + cx->free(obj->dslots - 1); obj->dslots = NULL; } @@ -1336,7 +1336,7 @@ BufferToString(JSContext *cx, JSTempVector &buf, jsval *rval) jschar *chars = buf.extractRawBuffer(); JSString *str = js_NewString(cx, chars, length); if (!str) { - JS_free(cx, chars); + cx->free(chars); return JS_FALSE; } *rval = STRING_TO_JSVAL(str); @@ -1392,7 +1392,7 @@ array_toSource(JSContext *cx, uintN argc, jsval *vp) if (!(ok = buf.pushBack(arr, arr + 3))) goto done; if (sharpchars) - JS_free(cx, sharpchars); + cx->free(sharpchars); goto make_string; } #endif @@ -2151,7 +2151,7 @@ array_sort(JSContext *cx, uintN argc, jsval *vp) return JS_FALSE; } #endif - vec = (jsval *) JS_malloc(cx, 2 * (size_t) len * sizeof(jsval)); + vec = (jsval *) cx->malloc(2 * (size_t) len * sizeof(jsval)); if (!vec) return JS_FALSE; @@ -2280,8 +2280,8 @@ array_sort(JSContext *cx, uintN argc, jsval *vp) } while (i != 0); JS_ASSERT(tvr.u.array == vec); - vec = (jsval *) JS_realloc(cx, vec, - 4 * (size_t) newlen * sizeof(jsval)); + vec = (jsval *) cx->realloc(vec, + 4 * (size_t) newlen * sizeof(jsval)); if (!vec) { vec = tvr.u.array; ok = JS_FALSE; @@ -2342,7 +2342,7 @@ array_sort(JSContext *cx, uintN argc, jsval *vp) out: JS_POP_TEMP_ROOT(cx, &tvr); - JS_free(cx, vec); + cx->free(vec); if (!ok) return JS_FALSE; @@ -3507,7 +3507,7 @@ js_ArrayInfo(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval) if (JSVAL_IS_PRIMITIVE(argv[i]) || !OBJ_IS_ARRAY(cx, (array = JSVAL_TO_OBJECT(argv[i])))) { fprintf(stderr, "%s: not array\n", bytes); - JS_free(cx, bytes); + cx->free(bytes); continue; } fprintf(stderr, "%s: %s (len %lu", bytes, @@ -3519,7 +3519,7 @@ js_ArrayInfo(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval) js_DenseArrayCapacity(array)); } fputs(")\n", stderr); - JS_free(cx, bytes); + cx->free(bytes); } return JS_TRUE; } diff --git a/js/src/jsatom.cpp b/js/src/jsatom.cpp index 6a921d9b0b92..8943f22f224d 100644 --- 
a/js/src/jsatom.cpp +++ b/js/src/jsatom.cpp @@ -783,7 +783,7 @@ js_Atomize(JSContext *cx, const char *bytes, size_t length, uintN flags) str.initFlat(chars, inflatedLength); atom = js_AtomizeString(cx, &str, ATOM_TMPSTR | flags); if (chars != inflated && str.flatChars()) - JS_free(cx, chars); + cx->free(chars); return atom; } diff --git a/js/src/jscntxt.cpp b/js/src/jscntxt.cpp index 6bcd70f97f3f..42b579b363bf 100644 --- a/js/src/jscntxt.cpp +++ b/js/src/jscntxt.cpp @@ -142,7 +142,7 @@ static JSThread * NewThread(jsword id) { JS_ASSERT(js_CurrentThreadId() == id); - JSThread *thread = (JSThread *) calloc(1, sizeof(JSThread)); + JSThread *thread = (JSThread *) js_calloc(sizeof(JSThread)); if (!thread) return NULL; JS_INIT_CLIST(&thread->contextList); @@ -158,7 +158,7 @@ DestroyThread(JSThread *thread) JS_ASSERT(JS_CLIST_IS_EMPTY(&thread->contextList)); JS_ASSERT(!thread->titleToShare); FinishThreadData(&thread->data); - free(thread); + js_free(thread); } JSBool @@ -370,7 +370,7 @@ js_NewContext(JSRuntime *rt, size_t stackChunkSize) * runtime list. After that it can be accessed from another thread via * js_ContextIterator. */ - cx = (JSContext *) calloc(1, sizeof *cx); + cx = (JSContext *) js_calloc(sizeof *cx); if (!cx) return NULL; @@ -743,14 +743,14 @@ FreeContext(JSContext *cx) JS_FinishArenaPool(&cx->tempPool); if (cx->lastMessage) - free(cx->lastMessage); + js_free(cx->lastMessage); /* Remove any argument formatters. */ map = cx->argumentFormatMap; while (map) { JSArgumentFormatMap *temp = map; map = map->next; - JS_free(cx, temp); + cx->free(temp); } /* Destroy the busy array table. */ @@ -769,13 +769,13 @@ FreeContext(JSContext *cx) if (lrs) { while ((lrc = lrs->topChunk) != &lrs->firstChunk) { lrs->topChunk = lrc->down; - JS_free(cx, lrc); + cx->free(lrc); } - JS_free(cx, lrs); + cx->free(lrs); } /* Finally, free cx itself. */ - free(cx); + js_free(cx); } JSBool @@ -1013,7 +1013,7 @@ js_EnterLocalRootScope(JSContext *cx) lrs = cx->localRootStack; if (!lrs) { - lrs = (JSLocalRootStack *) JS_malloc(cx, sizeof *lrs); + lrs = (JSLocalRootStack *) cx->malloc(sizeof *lrs); if (!lrs) return JS_FALSE; lrs->scopeMark = JSLRS_NULL_MARK; @@ -1056,7 +1056,7 @@ js_LeaveLocalRootScopeWithResult(JSContext *cx, jsval rval) lrc = lrs->topChunk; JS_ASSERT(lrc != &lrs->firstChunk); lrs->topChunk = lrc->down; - JS_free(cx, lrc); + cx->free(lrc); --n; } @@ -1096,10 +1096,10 @@ js_LeaveLocalRootScopeWithResult(JSContext *cx, jsval rval) */ if (mark == 0) { cx->localRootStack = NULL; - JS_free(cx, lrs); + cx->free(lrs); } else if (m == 0) { lrs->topChunk = lrc->down; - JS_free(cx, lrc); + cx->free(lrc); } } @@ -1158,7 +1158,7 @@ js_ForgetLocalRoot(JSContext *cx, jsval v) JS_ASSERT(n != 0); JS_ASSERT(lrc != &lrs->firstChunk); lrs->topChunk = lrc->down; - JS_free(cx, lrc); + cx->free(lrc); } } @@ -1187,7 +1187,7 @@ js_PushLocalRoot(JSContext *cx, JSLocalRootStack *lrs, jsval v) * After lrs->firstChunk, trying to index at a power-of-two chunk * boundary: need a new chunk. */ - lrc = (JSLocalRootChunk *) JS_malloc(cx, sizeof *lrc); + lrc = (JSLocalRootChunk *) cx->malloc(sizeof *lrc); if (!lrc) return -1; lrc->down = lrs->topChunk; @@ -1380,8 +1380,8 @@ js_ReportErrorVA(JSContext *cx, uintN flags, const char *format, va_list ap) } ReportError(cx, message, &report); - free(message); - JS_free(cx, ucmessage); + js_free(message); + cx->free(ucmessage); return warning; } @@ -1432,7 +1432,7 @@ js_ExpandErrorArguments(JSContext *cx, JSErrorCallback callback, * pointers later. 
*/ reportp->messageArgs = (const jschar **) - JS_malloc(cx, sizeof(jschar *) * (argCount + 1)); + cx->malloc(sizeof(jschar *) * (argCount + 1)); if (!reportp->messageArgs) return JS_FALSE; reportp->messageArgs[argCount] = NULL; @@ -1476,9 +1476,9 @@ js_ExpandErrorArguments(JSContext *cx, JSErrorCallback callback, * is used once and only once in the expansion !!! */ reportp->ucmessage = out = (jschar *) - JS_malloc(cx, (expandedLength + 1) * sizeof(jschar)); + cx->malloc((expandedLength + 1) * sizeof(jschar)); if (!out) { - JS_free (cx, buffer); + cx->free(buffer); goto error; } while (*fmt) { @@ -1498,7 +1498,7 @@ js_ExpandErrorArguments(JSContext *cx, JSErrorCallback callback, } JS_ASSERT(expandedArgs == argCount); *out = 0; - JS_free (cx, buffer); + cx->free(buffer); *messagep = js_DeflateString(cx, reportp->ucmessage, (size_t)(out - reportp->ucmessage)); @@ -1527,7 +1527,7 @@ js_ExpandErrorArguments(JSContext *cx, JSErrorCallback callback, const char *defaultErrorMessage = "No error message available for error number %d"; size_t nbytes = strlen(defaultErrorMessage) + 16; - *messagep = (char *)JS_malloc(cx, nbytes); + *messagep = (char *)cx->malloc(nbytes); if (!*messagep) goto error; JS_snprintf(*messagep, nbytes, defaultErrorMessage, errorNumber); @@ -1540,17 +1540,17 @@ error: if (charArgs) { i = 0; while (reportp->messageArgs[i]) - JS_free(cx, (void *)reportp->messageArgs[i++]); + cx->free((void *)reportp->messageArgs[i++]); } - JS_free(cx, (void *)reportp->messageArgs); + cx->free((void *)reportp->messageArgs); reportp->messageArgs = NULL; } if (reportp->ucmessage) { - JS_free(cx, (void *)reportp->ucmessage); + cx->free((void *)reportp->ucmessage); reportp->ucmessage = NULL; } if (*messagep) { - JS_free(cx, (void *)*messagep); + cx->free((void *)*messagep); *messagep = NULL; } return JS_FALSE; @@ -1581,7 +1581,7 @@ js_ReportErrorNumberVA(JSContext *cx, uintN flags, JSErrorCallback callback, ReportError(cx, message, &report); if (message) - JS_free(cx, message); + cx->free(message); if (report.messageArgs) { /* * js_ExpandErrorArguments owns its messageArgs only if it had to @@ -1590,12 +1590,12 @@ js_ReportErrorNumberVA(JSContext *cx, uintN flags, JSErrorCallback callback, if (charArgs) { int i = 0; while (report.messageArgs[i]) - JS_free(cx, (void *)report.messageArgs[i++]); + cx->free((void *)report.messageArgs[i++]); } - JS_free(cx, (void *)report.messageArgs); + cx->free((void *)report.messageArgs); } if (report.ucmessage) - JS_free(cx, (void *)report.ucmessage); + cx->free((void *)report.ucmessage); return warning; } @@ -1609,7 +1609,7 @@ js_ReportErrorAgain(JSContext *cx, const char *message, JSErrorReport *reportp) return; if (cx->lastMessage) - free(cx->lastMessage); + js_free(cx->lastMessage); cx->lastMessage = JS_strdup(cx, message); if (!cx->lastMessage) return; @@ -1667,7 +1667,7 @@ js_ReportIsNullOrUndefined(JSContext *cx, intN spindex, jsval v, js_null_str, NULL); } - JS_free(cx, bytes); + cx->free(bytes); return ok; } @@ -1690,7 +1690,7 @@ js_ReportMissingArg(JSContext *cx, jsval *vp, uintN arg) JS_ReportErrorNumber(cx, js_GetErrorMessage, NULL, JSMSG_MISSING_FUN_ARG, argbuf, bytes ? 
bytes : ""); - JS_free(cx, bytes); } JSBool @@ -1709,7 +1709,7 @@ js_ReportValueErrorFlags(JSContext *cx, uintN flags, const uintN errorNumber, ok = JS_ReportErrorFlagsAndNumber(cx, flags, js_GetErrorMessage, NULL, errorNumber, bytes, arg1, arg2); - JS_free(cx, bytes); + cx->free(bytes); return ok; } diff --git a/js/src/jscntxt.h b/js/src/jscntxt.h index b079cb014d19..c3d2465472ec 100644 --- a/js/src/jscntxt.h +++ b/js/src/jscntxt.h @@ -57,6 +57,7 @@ #include "jsregexp.h" #include "jsutil.h" #include "jsarray.h" +#include "jstask.h" JS_BEGIN_EXTERN_C @@ -255,6 +256,13 @@ struct JSThreadData { * locks on each JS_malloc. */ size_t gcMallocBytes; + +#ifdef JS_THREADSAFE + /* + * Deallocator task for this thread. + */ + JSFreePointerListTask *deallocatorTask; +#endif }; #ifdef JS_THREADSAFE @@ -699,6 +707,26 @@ struct JSRuntime { void setGCTriggerFactor(uint32 factor); void setGCLastBytes(size_t lastBytes); + + inline void* malloc(size_t bytes) { + return ::js_malloc(bytes); + } + + inline void* calloc(size_t bytes) { + return ::js_calloc(bytes); + } + + inline void* realloc(void* p, size_t bytes) { + return ::js_realloc(p, bytes); + } + + inline void free(void* p) { + ::js_free(p); + } + +#ifdef JS_THREADSAFE + JSBackgroundThread *deallocatorThread; +#endif }; /* Common macros to access thread-local caches in JSThread or JSRuntime. */ @@ -1050,16 +1078,86 @@ struct JSContext { jsval *nativeVp; #endif +#ifdef JS_THREADSAFE + inline void createDeallocatorTask() { + JSThreadData* tls = JS_THREAD_DATA(this); + JS_ASSERT(!tls->deallocatorTask); + if (runtime->deallocatorThread && !runtime->deallocatorThread->busy()) + tls->deallocatorTask = new JSFreePointerListTask(); + } + + inline void submitDeallocatorTask() { + JSThreadData* tls = JS_THREAD_DATA(this); + if (tls->deallocatorTask) { + runtime->deallocatorThread->schedule(tls->deallocatorTask); + tls->deallocatorTask = NULL; + } + } +#endif + /* Call this after successful malloc of memory for GC-related things. */ - inline void - updateMallocCounter(size_t nbytes) - { + inline void updateMallocCounter(size_t nbytes) { size_t *pbytes, bytes; pbytes = &JS_THREAD_DATA(this)->gcMallocBytes; bytes = *pbytes; *pbytes = (size_t(-1) - bytes <= nbytes) ?
size_t(-1) : bytes + nbytes; } + + inline void* malloc(size_t bytes) { + JS_ASSERT(bytes != 0); + void *p = runtime->malloc(bytes); + if (!p) { + JS_ReportOutOfMemory(this); + return NULL; + } + updateMallocCounter(bytes); + return p; + } + + inline void* calloc(size_t bytes) { + JS_ASSERT(bytes != 0); + void *p = runtime->calloc(bytes); + if (!p) { + JS_ReportOutOfMemory(this); + return NULL; + } + updateMallocCounter(bytes); + return p; + } + + inline void* realloc(void* p, size_t bytes) { + void *orig = p; + p = runtime->realloc(p, bytes); + if (!p) { + JS_ReportOutOfMemory(this); + return NULL; + } + if (!orig) + updateMallocCounter(bytes); + return p; + } + +#ifdef JS_THREADSAFE + inline void free(void* p) { + if (!p) + return; + if (thread) { + JSFreePointerListTask* task = JS_THREAD_DATA(this)->deallocatorTask; + if (task) { + task->add(p); + return; + } + } + runtime->free(p); + } +#else + inline void free(void* p) { + if (!p) + return; + runtime->free(p); + } +#endif }; #ifdef JS_THREADSAFE diff --git a/js/src/jsdate.cpp b/js/src/jsdate.cpp index c6a5f870a57c..9eaa85e60fc6 100644 --- a/js/src/jsdate.cpp +++ b/js/src/jsdate.cpp @@ -1961,7 +1961,7 @@ date_toSource(JSContext *cx, uintN argc, jsval *vp) str = JS_NewString(cx, bytes, strlen(bytes)); if (!str) { - free(bytes); + js_free(bytes); return JS_FALSE; } *vp = STRING_TO_JSVAL(str); diff --git a/js/src/jsdbgapi.cpp b/js/src/jsdbgapi.cpp index 92f35ed6c407..d9e9ed62a2fb 100644 --- a/js/src/jsdbgapi.cpp +++ b/js/src/jsdbgapi.cpp @@ -123,7 +123,7 @@ js_UntrapScriptCode(JSContext *cx, JSScript *script) continue; nbytes += (sn - notes + 1) * sizeof *sn; - code = (jsbytecode *) JS_malloc(cx, nbytes); + code = (jsbytecode *) cx->malloc(nbytes); if (!code) break; memcpy(code, script->code, nbytes); @@ -155,12 +155,12 @@ JS_SetTrap(JSContext *cx, JSScript *script, jsbytecode *pc, } else { sample = rt->debuggerMutations; DBG_UNLOCK(rt); - trap = (JSTrap *) JS_malloc(cx, sizeof *trap); + trap = (JSTrap *) cx->malloc(sizeof *trap); if (!trap) return JS_FALSE; trap->closure = NULL; if(!js_AddRoot(cx, &trap->closure, "trap->closure")) { - JS_free(cx, trap); + cx->free(trap); return JS_FALSE; } DBG_LOCK(rt); @@ -184,7 +184,7 @@ JS_SetTrap(JSContext *cx, JSScript *script, jsbytecode *pc, DBG_UNLOCK(rt); if (junk) { js_RemoveRoot(rt, &junk->closure); - JS_free(cx, junk); + cx->free(junk); } return JS_TRUE; } @@ -213,7 +213,7 @@ DestroyTrapAndUnlock(JSContext *cx, JSTrap *trap) DBG_UNLOCK(cx->runtime); js_RemoveRoot(cx->runtime, &trap->closure); - JS_free(cx, trap); + cx->free(trap); } JS_PUBLIC_API(void) @@ -413,7 +413,7 @@ DropWatchPointAndUnlock(JSContext *cx, JSWatchPoint *wp, uintN flag) } } - JS_free(cx, wp); + cx->free(wp); return ok; } @@ -619,7 +619,7 @@ js_watch_set(JSContext *cx, JSObject *obj, jsval id, jsval *vp) if (nslots <= JS_ARRAY_LENGTH(smallv)) { argv = smallv; } else { - argv = (jsval *) JS_malloc(cx, nslots * sizeof(jsval)); + argv = (jsval *) cx->malloc(nslots * sizeof(jsval)); if (!argv) { DBG_LOCK(rt); DropWatchPointAndUnlock(cx, wp, JSWP_HELD); @@ -651,7 +651,7 @@ js_watch_set(JSContext *cx, JSObject *obj, jsval id, jsval *vp) JSFUN_HEAVYWEIGHT_TEST(fun->flags) && !js_GetCallObject(cx, &frame)) { if (argv != smallv) - JS_free(cx, argv); + cx->free(argv); DBG_LOCK(rt); DropWatchPointAndUnlock(cx, wp, JSWP_HELD); return JS_FALSE; @@ -679,7 +679,7 @@ js_watch_set(JSContext *cx, JSObject *obj, jsval id, jsval *vp) cx->fp = frame.down; if (argv != smallv) - JS_free(cx, argv); + cx->free(argv); } } DBG_LOCK(rt); @@ -825,7 
+825,7 @@ JS_SetWatchPoint(JSContext *cx, JSObject *obj, jsval idval, goto out; } - wp = (JSWatchPoint *) JS_malloc(cx, sizeof *wp); + wp = (JSWatchPoint *) cx->malloc(sizeof *wp); if (!wp) { ok = JS_FALSE; goto out; @@ -1343,7 +1343,7 @@ JS_EvaluateInStackFrame(JSContext *cx, JSStackFrame *fp, length = (uintN) len; ok = JS_EvaluateUCInStackFrame(cx, fp, chars, length, filename, lineno, rval); - JS_free(cx, chars); + cx->free(chars); return ok; } @@ -1469,7 +1469,7 @@ JS_GetPropertyDescArray(JSContext *cx, JSObject *obj, JSPropertyDescArray *pda) } n = scope->entryCount; - pd = (JSPropertyDesc *) JS_malloc(cx, (size_t)n * sizeof(JSPropertyDesc)); + pd = (JSPropertyDesc *) cx->malloc((size_t)n * sizeof(JSPropertyDesc)); if (!pd) return JS_FALSE; i = 0; @@ -1511,7 +1511,7 @@ JS_PutPropertyDescArray(JSContext *cx, JSPropertyDescArray *pda) if (pd[i].flags & JSPD_ALIAS) js_RemoveRoot(cx->runtime, &pd[i].alias); } - JS_free(cx, pd); + cx->free(pd); } /************************************************************************/ @@ -1884,7 +1884,7 @@ js_DumpCallgrind(JSContext *cx, JSObject *obj, cstr = js_DeflateString(cx, str->chars(), str->length()); if (cstr) { CALLGRIND_DUMP_STATS_AT(cstr); - JS_free(cx, cstr); + cx->free(cstr); return JS_TRUE; } } @@ -1962,7 +1962,7 @@ js_StartVtune(JSContext *cx, JSObject *obj, status = VTStartSampling(¶ms); if (params.tb5Filename != default_filename) - JS_free(cx, params.tb5Filename); + cx->free(params.tb5Filename); if (status != 0) { if (status == VTAPI_MULTIPLE_RUNS) diff --git a/js/src/jsdhash.cpp b/js/src/jsdhash.cpp index 8252b4b651b4..c8b9db09dc0d 100644 --- a/js/src/jsdhash.cpp +++ b/js/src/jsdhash.cpp @@ -111,13 +111,13 @@ JS_PUBLIC_API(void *) JS_DHashAllocTable(JSDHashTable *table, uint32 nbytes) { - return malloc(nbytes); + return js_malloc(nbytes); } JS_PUBLIC_API(void) JS_DHashFreeTable(JSDHashTable *table, void *ptr) { - free(ptr); + js_free(ptr); } JS_PUBLIC_API(JSDHashNumber) @@ -180,7 +180,7 @@ JS_DHashFreeStringKey(JSDHashTable *table, JSDHashEntryHdr *entry) { const JSDHashEntryStub *stub = (const JSDHashEntryStub *)entry; - free((void *) stub->key); + js_free((void *) stub->key); memset(entry, 0, table->entrySize); } @@ -212,11 +212,11 @@ JS_NewDHashTable(const JSDHashTableOps *ops, void *data, uint32 entrySize, { JSDHashTable *table; - table = (JSDHashTable *) malloc(sizeof *table); + table = (JSDHashTable *) js_malloc(sizeof *table); if (!table) return NULL; if (!JS_DHashTableInit(table, ops, data, entrySize, capacity)) { - free(table); + js_free(table); return NULL; } return table; @@ -226,7 +226,7 @@ JS_PUBLIC_API(void) JS_DHashTableDestroy(JSDHashTable *table) { JS_DHashTableFinish(table); - free(table); + js_free(table); } JS_PUBLIC_API(JSBool) diff --git a/js/src/jsdtoa.cpp b/js/src/jsdtoa.cpp index b1197ac043d3..111cb5dec9d3 100644 --- a/js/src/jsdtoa.cpp +++ b/js/src/jsdtoa.cpp @@ -368,7 +368,7 @@ JS_dtobasestr(int base, double dinput) JS_ASSERT(base >= 2 && base <= 36); dval(d) = dinput; - buffer = (char*) malloc(DTOBASESTR_BUFFER_SIZE); + buffer = (char*) js_malloc(DTOBASESTR_BUFFER_SIZE); if (buffer) { p = buffer; if (dval(d) < 0.0 @@ -412,7 +412,7 @@ JS_dtobasestr(int base, double dinput) nomem1: Bfree(b); UNLOCK_DTOA(); - free(buffer); + js_free(buffer); return NULL; } do { @@ -449,7 +449,7 @@ JS_dtobasestr(int base, double dinput) Bfree(mlo); Bfree(mhi); UNLOCK_DTOA(); - free(buffer); + js_free(buffer); return NULL; } JS_ASSERT(e < 0); diff --git a/js/src/jsemit.cpp b/js/src/jsemit.cpp index f591e844b6f0..5105d8bba498 
100644 --- a/js/src/jsemit.cpp +++ b/js/src/jsemit.cpp @@ -112,10 +112,10 @@ JSCodeGenerator::~JSCodeGenerator() /* NB: non-null only after OOM. */ if (spanDeps) - JS_free(compiler->context, spanDeps); + compiler->context->free(spanDeps); if (upvarMap.vector) - JS_free(compiler->context, upvarMap.vector); + compiler->context->free(upvarMap.vector); } static ptrdiff_t @@ -549,7 +549,7 @@ AddSpanDep(JSContext *cx, JSCodeGenerator *cg, jsbytecode *pc, jsbytecode *pc2, if ((index & (index - 1)) == 0 && (!(sdbase = cg->spanDeps) || index >= SPANDEPS_MIN)) { size = sdbase ? SPANDEPS_SIZE(index) : SPANDEPS_SIZE_MIN / 2; - sdbase = (JSSpanDep *) JS_realloc(cx, sdbase, size + size); + sdbase = (JSSpanDep *) cx->realloc(sdbase, size + size); if (!sdbase) return JS_FALSE; cg->spanDeps = sdbase; @@ -1165,7 +1165,7 @@ OptimizeSpanDeps(JSContext *cx, JSCodeGenerator *cg) * can span top-level statements, because JS lacks goto. */ size = SPANDEPS_SIZE(JS_BIT(JS_CeilingLog2(cg->numSpanDeps))); - JS_free(cx, cg->spanDeps); + cx->free(cg->spanDeps); cg->spanDeps = NULL; FreeJumpTargets(cg, cg->jumpTargets); cg->jumpTargets = NULL; @@ -1899,7 +1899,7 @@ MakeUpvarForEval(JSParseNode *pn, JSCodeGenerator *cg) JS_ASSERT(ALE_INDEX(ale) <= length); if (ALE_INDEX(ale) == length) { length = 2 * JS_MAX(2, length); - vector = (uint32 *) JS_realloc(cx, vector, length * sizeof *vector); + vector = (uint32 *) cx->realloc(vector, length * sizeof *vector); if (!vector) return false; cg->upvarMap.vector = vector; @@ -2197,7 +2197,7 @@ BindNameToSlot(JSContext *cx, JSCodeGenerator *cg, JSParseNode *pn) if (!vector) { uint32 length = cg->lexdeps.count; - vector = (uint32 *) calloc(length, sizeof *vector); + vector = (uint32 *) js_calloc(length * sizeof *vector); if (!vector) { JS_ReportOutOfMemory(cx); return JS_FALSE; @@ -3144,9 +3144,8 @@ EmitSwitch(JSContext *cx, JSCodeGenerator *cg, JSParseNode *pn, /* Just grab 8K for the worst-case bitmap. 
*/ intmap_bitlen = JS_BIT(16); intmap = (jsbitmap *) - JS_malloc(cx, - (JS_BIT(16) >> JS_BITS_PER_WORD_LOG2) - * sizeof(jsbitmap)); + cx->malloc((JS_BIT(16) >> JS_BITS_PER_WORD_LOG2) + * sizeof(jsbitmap)); if (!intmap) { JS_ReportOutOfMemory(cx); return JS_FALSE; @@ -3163,7 +3162,7 @@ EmitSwitch(JSContext *cx, JSCodeGenerator *cg, JSParseNode *pn, release: if (intmap && intmap != intmap_space) - JS_free(cx, intmap); + cx->free(intmap); if (!ok) return JS_FALSE; @@ -3307,7 +3306,7 @@ EmitSwitch(JSContext *cx, JSCodeGenerator *cg, JSParseNode *pn, */ if (tableLength != 0) { tableSize = (size_t)tableLength * sizeof *table; - table = (JSParseNode **) JS_malloc(cx, tableSize); + table = (JSParseNode **) cx->malloc(tableSize); if (!table) return JS_FALSE; memset(table, 0, tableSize); @@ -3475,7 +3474,7 @@ EmitSwitch(JSContext *cx, JSCodeGenerator *cg, JSParseNode *pn, out: if (table) - JS_free(cx, table); + cx->free(table); if (ok) { ok = js_PopStatementCG(cx, cg); diff --git a/js/src/jsexn.cpp b/js/src/jsexn.cpp index 34d9312cce8e..a83d658a4e61 100644 --- a/js/src/jsexn.cpp +++ b/js/src/jsexn.cpp @@ -164,7 +164,7 @@ CopyErrorReport(JSContext *cx, JSErrorReport *report) */ mallocSize = sizeof(JSErrorReport) + argsArraySize + argsCopySize + ucmessageSize + uclinebufSize + linebufSize + filenameSize; - cursor = (uint8 *)JS_malloc(cx, mallocSize); + cursor = (uint8 *)cx->malloc(mallocSize); if (!cursor) return NULL; @@ -301,7 +301,7 @@ InitExnPrivate(JSContext *cx, JSObject *exnObject, JSString *message, js_ReportAllocationOverflow(cx); return JS_FALSE; } - priv = (JSExnPrivate *)JS_malloc(cx, size); + priv = (JSExnPrivate *)cx->malloc(size); if (!priv) return JS_FALSE; @@ -417,8 +417,8 @@ exn_finalize(JSContext *cx, JSObject *obj) priv = GetExnPrivate(cx, obj); if (priv) { if (priv->errorReport) - JS_free(cx, priv->errorReport); - JS_free(cx, priv); + cx->free(priv->errorReport); + cx->free(priv); } } @@ -586,7 +586,7 @@ StackTraceToString(JSContext *cx, JSExnPrivate *priv) if (stackmax >= STACK_LENGTH_LIMIT) \ goto done; \ stackmax = stackmax ? 2 * stackmax : 64; \ - ptr_ = JS_realloc(cx, stackbuf, (stackmax+1) * sizeof(jschar)); \ + ptr_ = cx->realloc(stackbuf, (stackmax+1) * sizeof(jschar)); \ if (!ptr_) \ goto bad; \ stackbuf = (jschar *) ptr_; \ @@ -608,7 +608,7 @@ StackTraceToString(JSContext *cx, JSExnPrivate *priv) goto done; \ } \ stackmax = JS_BIT(JS_CeilingLog2(stacklen + length_)); \ - ptr_ = JS_realloc(cx, stackbuf, (stackmax+1) * sizeof(jschar)); \ + ptr_ = cx->realloc(stackbuf, (stackmax+1) * sizeof(jschar)); \ if (!ptr_) \ goto bad; \ stackbuf = (jschar *) ptr_; \ @@ -659,7 +659,7 @@ StackTraceToString(JSContext *cx, JSExnPrivate *priv) * don't use JS_realloc here; simply let the oversized allocation * be owned by the string in that rare case. */ - void *shrunk = JS_realloc(cx, stackbuf, (stacklen+1) * sizeof(jschar)); + void *shrunk = cx->realloc(stackbuf, (stacklen+1) * sizeof(jschar)); if (shrunk) stackbuf = (jschar *) shrunk; } @@ -671,7 +671,7 @@ StackTraceToString(JSContext *cx, JSExnPrivate *priv) bad: if (stackbuf) - JS_free(cx, stackbuf); + cx->free(stackbuf); return NULL; } @@ -800,7 +800,7 @@ exn_toString(JSContext *cx, uintN argc, jsval *vp) name_length = name->length(); message_length = message->length(); length = (name_length ? 
name_length + 2 : 0) + message_length; - cp = chars = (jschar *) JS_malloc(cx, (length + 1) * sizeof(jschar)); + cp = chars = (jschar *) cx->malloc((length + 1) * sizeof(jschar)); if (!chars) return JS_FALSE; @@ -815,7 +815,7 @@ exn_toString(JSContext *cx, uintN argc, jsval *vp) result = js_NewString(cx, chars, length); if (!result) { - JS_free(cx, chars); + cx->free(chars); return JS_FALSE; } } else { @@ -915,7 +915,7 @@ exn_toSource(JSContext *cx, uintN argc, jsval *vp) } } - cp = chars = (jschar *) JS_malloc(cx, (length + 1) * sizeof(jschar)); + cp = chars = (jschar *) cx->malloc((length + 1) * sizeof(jschar)); if (!chars) { ok = JS_FALSE; goto out; @@ -955,7 +955,7 @@ exn_toSource(JSContext *cx, uintN argc, jsval *vp) result = js_NewString(cx, chars, length); if (!result) { - JS_free(cx, chars); + cx->free(chars); ok = JS_FALSE; goto out; } diff --git a/js/src/jsfile.cpp b/js/src/jsfile.cpp index b3d618275ddb..8228c44add60 100644 --- a/js/src/jsfile.cpp +++ b/js/src/jsfile.cpp @@ -296,7 +296,7 @@ static char* js_combinePath(JSContext *cx, const char *base, const char *name) { int len = strlen(base); - char* result = JS_malloc(cx, len + strlen(name) + 2); + char* result = cx->malloc(len + strlen(name) + 2); if (!result) return NULL; @@ -335,7 +335,7 @@ js_fileBaseName(JSContext *cx, const char *pathname) } /* Allocate and copy. */ - result = JS_malloc(cx, aux - index + 1); + result = cx->malloc(aux - index + 1); if (!result) return NULL; strncpy(result, pathname + index + 1, aux - index); @@ -366,7 +366,7 @@ js_fileDirectoryName(JSContext *cx, const char *pathname) if (cp < pathname && end != pathname) { /* There were just /s, return the root. */ - result = JS_malloc(cx, 1 + 1); /* The separator + trailing NUL. */ + result = cx->malloc(1 + 1); /* The separator + trailing NUL. */ result[0] = FILESEPARATOR; result[1] = '\0'; return result; @@ -388,7 +388,7 @@ js_fileDirectoryName(JSContext *cx, const char *pathname) } pathsize = end - pathname + 1; - result = JS_malloc(cx, pathsize + 1); + result = cx->malloc(pathsize + 1); if (!result) return NULL; @@ -401,7 +401,7 @@ /* Return everything up to and including the separator. */ pathsize = cp - pathname + 1; - result = JS_malloc(cx, pathsize + 1); + result = cx->malloc(pathsize + 1); if (!result) return NULL; @@ -462,7 +462,7 @@ js_canonicalPath(JSContext *cx, char *oldpath) while (j >= 0 && path[j] == ' ') j--; - tmp = JS_malloc(cx, j-i+2); + tmp = cx->malloc(j-i+2); if (!tmp) return NULL; @@ -478,7 +478,7 @@ /* file:// support.
*/ if (!strncmp(path, URL_PREFIX, strlen(URL_PREFIX))) { tmp = js_canonicalPath(cx, path + strlen(URL_PREFIX)); - JS_free(cx, path); + cx->free(path); return tmp; } @@ -486,7 +486,7 @@ tmp = js_absolutePath(cx, path); if (!tmp) return NULL; - JS_free(cx, path); + cx->free(path); path = tmp; } @@ -505,7 +505,7 @@ back--; } else { tmp = result; - result = JS_malloc(cx, strlen(base) + 1 + strlen(tmp) + 1); + result = cx->malloc(strlen(base) + 1 + strlen(tmp) + 1); if (!result) goto out; @@ -516,18 +516,18 @@ result[c + 1] = '\0'; strcat(result, tmp); } - JS_free(cx, tmp); + cx->free(tmp); } } - JS_free(cx, current); - JS_free(cx, base); + cx->free(current); + cx->free(base); current = dir; base = js_fileBaseName(cx, current); dir = js_fileDirectoryName(cx, current); } tmp = result; - result = JS_malloc(cx, strlen(dir)+1+strlen(tmp)+1); + result = cx->malloc(strlen(dir) + 1 + strlen(tmp) + 1); if (!result) goto out; @@ -543,13 +543,13 @@ out: if (tmp) - JS_free(cx, tmp); + cx->free(tmp); if (dir) - JS_free(cx, dir); + cx->free(dir); if (base) - JS_free(cx, base); + cx->free(base); if (current) - JS_free(cx, current); + cx->free(current); return result; } @@ -753,7 +753,7 @@ js_FileHasOption(JSContext *cx, const char *oldoptions, const char *name) break; current = comma + 1; } - JS_free(cx, options); + cx->free(options); return found; } @@ -838,20 +838,20 @@ js_FileRead(JSContext *cx, JSFile *file, jschar *buf, int32 len, int32 mode) switch (mode) { case ASCII: - aux = (unsigned char*)JS_malloc(cx, len); + aux = (unsigned char*)cx->malloc(len); if (!aux) return 0; count = js_BufferedRead(file, aux, len); if (count == -1) { - JS_free(cx, aux); + cx->free(aux); return 0; } for (i = 0; i < len; i++) buf[i] = (jschar)aux[i]; - JS_free(cx, aux); + cx->free(aux); break; case UTF8: @@ -977,7 +977,7 @@ js_FileWrite(JSContext *cx, JSFile *file, jschar *buf, int32 len, int32 mode) switch (mode) { case ASCII: - aux = (unsigned char*)JS_malloc(cx, len); + aux = (unsigned char*)cx->malloc(len); if (!aux) return 0; @@ -989,21 +989,21 @@ js_FileWrite(JSContext *cx, JSFile *file, jschar *buf, int32 len, int32 mode) : fwrite(aux, 1, len, file->nativehandle); if (count==-1) { - JS_free(cx, aux); + cx->free(aux); return 0; } - JS_free(cx, aux); + cx->free(aux); break; case UTF8: - utfbuf = (unsigned char*)JS_malloc(cx, len*3); + utfbuf = (unsigned char*)cx->malloc(len*3); if (!utfbuf) return 0; i = 0; for (count = 0;count<len;count++) { j = js_OneUcs4ToUtf8Char(utfbuf+i, buf[count]); if (j==-1) { - JS_free(cx, utfbuf); + cx->free(utfbuf); return 0; } i+=j; @@ -1013,10 +1013,10 @@ : fwrite(utfbuf, 1, i, file->nativehandle); if (j<i) { - JS_free(cx, utfbuf); + cx->free(utfbuf); return 0; } - JS_free(cx, utfbuf); + cx->free(utfbuf); break; case UCS2: @@ -1179,13 +1179,13 @@ js_parent(JSContext *cx, JSFile *file, jsval *resultp) } else { JSObject *obj = js_NewFileObject(cx, str); if (!obj) { - JS_free(cx, str); + cx->free(str); return JS_FALSE; } *resultp = OBJECT_TO_JSVAL(obj); } - JS_free(cx, str); + cx->free(str); return JS_TRUE; } @@ -1206,7 +1206,7 @@ js_name(JSContext *cx, JSFile *file, jsval *vp) str = JS_NewString(cx, name, strlen(name)); if (!str) { - JS_free(cx, name); + cx->free(name); return JS_FALSE; } @@ -1353,7 +1353,7 @@ file_open(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval) pipemode[i++] = '\0'; file->nativehandle = POPEN(&file->path[1], pipemode); } else
if(file->path[len-1] == PIPE_SYMBOL) { - char *command = JS_malloc(cx, len); + char *command = cx->malloc(len); strncpy(command, file->path, len-1); command[len-1] = '\0'; @@ -1364,7 +1364,7 @@ file_open(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval) #endif pipemode[i++] = '\0'; file->nativehandle = POPEN(command, pipemode); - JS_free(cx, command); + cx->free(command); } /* set the flags */ file->isNative = JS_TRUE; @@ -1377,7 +1377,7 @@ file_open(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval) } js_ResetBuffers(file); - JS_free(cx, mode); + cx->free(mode); mode = NULL; /* Set the open flag and return result */ @@ -1396,7 +1396,7 @@ good: out: if(mode) - JS_free(cx, mode); + cx->free(mode); return JS_FALSE; } @@ -1511,13 +1511,13 @@ file_copyTo(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval) goto out; } - buffer = JS_malloc(cx, size); + buffer = cx->malloc(size); count = INT_TO_JSVAL(PR_Read(file->handle, buffer, size)); /* reading panic */ if (count!=size) { - JS_free(cx, buffer); + cx->free(buffer); JS_ReportErrorNumber(cx, JSFile_GetErrorMessage, NULL, JSFILEMSG_COPY_READ_ERROR, file->path); goto out; @@ -1527,13 +1527,13 @@ file_copyTo(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval) /* writing panic */ if (count!=size) { - JS_free(cx, buffer); + cx->free(buffer); JS_ReportErrorNumber(cx, JSFile_GetErrorMessage, NULL, JSFILEMSG_COPY_WRITE_ERROR, file->path); goto out; } - JS_free(cx, buffer); + cx->free(buffer); if(!fileInitiallyOpen){ if(!file_close(cx, obj, 0, NULL, rval)) goto out; @@ -1577,7 +1577,7 @@ file_renameTo(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval if (PR_Rename(file->path, dest)==PR_SUCCESS){ /* copy the new filename */ - JS_free(cx, file->path); + cx->free(file->path); file->path = dest; *rval = JSVAL_TRUE; return JS_TRUE; @@ -1729,17 +1729,17 @@ file_read(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval) /* want = (want>262144)?262144:want; * arbitrary size limitation */ - buf = JS_malloc(cx, want*sizeof buf[0]); + buf = cx->malloc(want*sizeof buf[0]); if (!buf) goto out; count = js_FileRead(cx, file, buf, want, file->type); if (count>0) { str = JS_NewUCStringCopyN(cx, buf, count); *rval = STRING_TO_JSVAL(str); - JS_free(cx, buf); + cx->free(buf); return JS_TRUE; } else { - JS_free(cx, buf); + cx->free(buf); goto out; } out: @@ -1760,7 +1760,7 @@ file_readln(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval) SECURITY_CHECK(cx, NULL, "readln", file); JSFILE_CHECK_READ; - buf = JS_malloc(cx, MAX_LINE_LENGTH * sizeof data); + buf = cx->malloc(MAX_LINE_LENGTH * sizeof data); if (!buf) return JS_FALSE; @@ -1792,8 +1792,7 @@ file_readln(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval) default: if (--room < 0) { - tmp = JS_realloc(cx, buf, - (offset + MAX_LINE_LENGTH) * sizeof data); + tmp = cx->realloc(buf, (offset + MAX_LINE_LENGTH) * sizeof data); if (!tmp) goto out; @@ -1814,7 +1813,7 @@ eof: done: buf[offset] = 0; - tmp = JS_realloc(cx, buf, (offset + 1) * sizeof data); + tmp = cx->realloc(buf, (offset + 1) * sizeof data); if (!tmp) goto out; @@ -1827,7 +1826,7 @@ done: out: if (buf) - JS_free(cx, buf); + cx->free(buf); return JS_FALSE; } @@ -1980,7 +1979,7 @@ file_list(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval) filePath = js_combinePath(cx, file->path, (char*)entry->name); eachFile = js_NewFileObject(cx, filePath); - JS_free(cx, filePath); + cx->free(filePath); if (!eachFile){ 
JS_ReportWarning(cx, "File %s cannot be retrieved", filePath); continue; @@ -2017,7 +2016,7 @@ file_mkdir(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval) char *dir = js_fileDirectoryName(cx, file->path); JSObject *dirObj = js_NewFileObject(cx, dir); - JS_free(cx, dir); + cx->free(dir); /* call file_mkdir with the right set of parameters if needed */ if (file_mkdir(cx, dirObj, argc, argv, rval)) @@ -2031,12 +2030,12 @@ file_mkdir(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval) fullName = js_combinePath(cx, file->path, dirName); if (PR_MkDir(fullName, 0755)==PR_SUCCESS){ *rval = JSVAL_TRUE; - JS_free(cx, fullName); + cx->free(fullName); return JS_TRUE; }else{ JS_ReportErrorNumber(cx, JSFile_GetErrorMessage, NULL, JSFILEMSG_OP_FAILED, "mkdir", fullName); - JS_free(cx, fullName); + cx->free(fullName); goto out; } } @@ -2077,7 +2076,7 @@ file_toURL(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval) return JS_FALSE; str = js_NewString(cx, urlChars, len); if (!str) { - JS_free(cx, urlChars); + cx->free(urlChars); return JS_FALSE; } *rval = STRING_TO_JSVAL(str); @@ -2104,9 +2103,9 @@ file_finalize(JSContext *cx, JSObject *obj) } if (file->path) - JS_free(cx, file->path); + cx->free(file->path); - JS_free(cx, file); + cx->free(file); } } @@ -2118,7 +2117,7 @@ file_init(JSContext *cx, JSObject *obj, char *bytes) { JSFile *file; - file = JS_malloc(cx, sizeof *file); + file = cx->malloc(sizeof *file); if (!file) return NULL; memset(file, 0 , sizeof *file); @@ -2130,7 +2129,7 @@ file_init(JSContext *cx, JSObject *obj, char *bytes) if (!JS_SetPrivate(cx, obj, file)) { JS_ReportErrorNumber(cx, JSFile_GetErrorMessage, NULL, JSFILEMSG_CANNOT_SET_PRIVATE_FILE, file->path); - JS_free(cx, file); + cx->free(file); return NULL; } @@ -2176,7 +2175,7 @@ js_NewFileObjectFromFILE(JSContext *cx, FILE *nativehandle, char *filename, /* free result of RESOLVE_PATH from file_init. */ JS_ASSERT(file->path != NULL); - JS_free(cx, file->path); + cx->free(file->path); file->path = strdup(filename); file->isOpen = open; @@ -2399,7 +2398,7 @@ file_getProperty(JSContext *cx, JSObject *obj, jsval id, jsval *vp) case FILE_MODE: SECURITY_CHECK(cx, NULL, "mode", file); JSFILE_CHECK_OPEN("mode"); - bytes = JS_malloc(cx, MODE_SIZE); + bytes = cx->malloc(MODE_SIZE); bytes[0] = '\0'; flag = JS_FALSE; @@ -2439,7 +2438,7 @@ file_getProperty(JSContext *cx, JSObject *obj, jsval id, jsval *vp) flag = JS_TRUE; } *vp = STRING_TO_JSVAL(JS_NewStringCopyZ(cx, bytes)); - JS_free(cx, bytes); + cx->free(bytes); break; case FILE_CREATED: SECURITY_CHECK(cx, NULL, "creationTime", file); @@ -2575,7 +2574,7 @@ file_getProperty(JSContext *cx, JSObject *obj, jsval id, jsval *vp) bytes = js_combinePath(cx, file->path, prop_name); *vp = OBJECT_TO_JSVAL(js_NewFileObject(cx, bytes)); PR_CloseDir(dir); - JS_free(cx, bytes); + cx->free(bytes); return !JSVAL_IS_NULL(*vp); } } @@ -2717,10 +2716,10 @@ js_InitFileClass(JSContext *cx, JSObject* obj) /* Define CURRENTDIR property. 
We are doing this to get a slash at the end of the current dir */ afile = js_NewFileObject(cx, CURRENT_DIR); - currentdir = JS_malloc(cx, MAX_PATH_LENGTH); - currentdir = getcwd(currentdir, MAX_PATH_LENGTH); + currentdir = cx->malloc(MAX_PATH_LENGTH); + currentdir = getcwd(currentdir, MAX_PATH_LENGTH); afile = js_NewFileObject(cx, currentdir); - JS_free(cx, currentdir); + cx->free(currentdir); vp = OBJECT_TO_JSVAL(afile); JS_DefinePropertyWithTinyId(cx, ctor, CURRENTDIR_PROPERTY, 0, vp, JS_PropertyStub, file_currentDirSetter, diff --git a/js/src/jsfun.cpp b/js/src/jsfun.cpp index 90fc581a95c0..d4b02a1cb220 100644 --- a/js/src/jsfun.cpp +++ b/js/src/jsfun.cpp @@ -136,7 +136,7 @@ MarkArgDeleted(JSContext *cx, JSStackFrame *fp, uintN slot) bitmap = (jsbitmap *) &bmapint; } else { nbytes = JS_HOWMANY(nbits, JS_BITS_PER_WORD) * sizeof(jsbitmap); - bitmap = (jsbitmap *) JS_malloc(cx, nbytes); + bitmap = (jsbitmap *) cx->malloc(nbytes); if (!bitmap) return JS_FALSE; memset(bitmap, 0, nbytes); @@ -311,7 +311,7 @@ js_PutArgsObject(JSContext *cx, JSStackFrame *fp) if (!JSVAL_IS_VOID(bmapval)) { JS_SetReservedSlot(cx, argsobj, 0, JSVAL_VOID); if (fp->argc > JSVAL_INT_BITS) - JS_free(cx, JSVAL_TO_PRIVATE(bmapval)); + cx->free(JSVAL_TO_PRIVATE(bmapval)); } /* @@ -2758,10 +2758,10 @@ FreeLocalNameHash(JSContext *cx, JSLocalNameMap *map) for (dup = map->lastdup; dup; dup = next) { next = dup->link; - JS_free(cx, dup); + cx->free(dup); } JS_DHashTableFinish(&map->names); - JS_free(cx, map); + cx->free(map); } static JSBool @@ -2789,7 +2789,7 @@ HashLocalName(JSContext *cx, JSLocalNameMap *map, JSAtom *name, if (entry->name) { JS_ASSERT(entry->name == name); JS_ASSERT(entry->localKind == JSLOCAL_ARG); - dup = (JSNameIndexPair *) JS_malloc(cx, sizeof *dup); + dup = (JSNameIndexPair *) cx->malloc(sizeof *dup); if (!dup) return JS_FALSE; dup->name = entry->name; @@ -2835,7 +2835,7 @@ js_AddLocal(JSContext *cx, JSFunction *fun, JSAtom *atom, JSLocalKind kind) if (n > 1) { array = fun->u.i.names.array; } else { - array = (jsuword *) JS_malloc(cx, MAX_ARRAY_LOCALS * sizeof *array); + array = (jsuword *) cx->malloc(MAX_ARRAY_LOCALS * sizeof *array); if (!array) return JS_FALSE; array[0] = fun->u.i.names.taggedAtom; @@ -2860,7 +2860,7 @@ js_AddLocal(JSContext *cx, JSFunction *fun, JSAtom *atom, JSLocalKind kind) } } else if (n == MAX_ARRAY_LOCALS) { array = fun->u.i.names.array; - map = (JSLocalNameMap *) JS_malloc(cx, sizeof *map); + map = (JSLocalNameMap *) cx->malloc(sizeof *map); if (!map) return JS_FALSE; if (!JS_DHashTableInit(&map->names, JS_DHashGetStubOps(), @@ -2868,7 +2868,7 @@ js_AddLocal(JSContext *cx, JSFunction *fun, JSAtom *atom, JSLocalKind kind) JS_DHASH_DEFAULT_CAPACITY(MAX_ARRAY_LOCALS * 2))) { JS_ReportOutOfMemory(cx); - JS_free(cx, map); + cx->free(map); return JS_FALSE; } @@ -2901,7 +2901,7 @@ js_AddLocal(JSContext *cx, JSFunction *fun, JSAtom *atom, JSLocalKind kind) * to replace fun->u.i.names with the built map. 
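*
* The locals storage escalates through three shapes as a function
* gains locals; from the surrounding code the union is roughly (a
* sketch, not the verbatim declaration):
*
*   union {
*       jsuword         taggedAtom;  // exactly one local
*       jsuword        *array;       // 2..MAX_ARRAY_LOCALS locals
*       JSLocalNameMap *map;         // beyond MAX_ARRAY_LOCALS
*   } names;
*
* Once the map is populated the interim array is dead weight, hence
* the cx->free just below.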
*/ fun->u.i.names.map = map; - JS_free(cx, array); + cx->free(array); } else { if (*indexp == JS_BITMASK(16)) { JS_ReportErrorNumber(cx, js_GetErrorMessage, NULL, @@ -3123,7 +3123,7 @@ DestroyLocalNames(JSContext *cx, JSFunction *fun) if (n <= 1) return; if (n <= MAX_ARRAY_LOCALS) - JS_free(cx, fun->u.i.names.array); + cx->free(fun->u.i.names.array); else FreeLocalNameHash(cx, fun->u.i.names.map); } @@ -3139,8 +3139,8 @@ js_FreezeLocalNames(JSContext *cx, JSFunction *fun) n = fun->nargs + fun->u.i.nvars + fun->u.i.nupvars; if (2 <= n && n < MAX_ARRAY_LOCALS) { /* Shrink over-allocated array ignoring realloc failures. */ - array = (jsuword *) JS_realloc(cx, fun->u.i.names.array, - n * sizeof *array); + array = (jsuword *) cx->realloc(fun->u.i.names.array, + n * sizeof *array); if (array) fun->u.i.names.array = array; } diff --git a/js/src/jsgc.cpp b/js/src/jsgc.cpp index f6a76120c4d4..386a65c83e83 100644 --- a/js/src/jsgc.cpp +++ b/js/src/jsgc.cpp @@ -76,6 +76,7 @@ #include "jsscript.h" #include "jsstaticcheck.h" #include "jsstr.h" +#include "jstask.h" #include "jstracer.h" #if JS_HAS_XML_SUPPORT @@ -722,7 +723,7 @@ FreePtrTable(JSPtrTable *table, const JSPtrTableInfo *info) { if (table->array) { JS_ASSERT(table->count > 0); - free(table->array); + js_free(table->array); table->array = NULL; table->count = 0; } @@ -756,8 +757,8 @@ AddToPtrTable(JSContext *cx, JSPtrTable *table, const JSPtrTableInfo *info, if (capacity > (size_t)-1 / sizeof table->array[0]) goto bad; } - array = (void **) realloc(table->array, - capacity * sizeof table->array[0]); + array = (void **) js_realloc(table->array, + capacity * sizeof table->array[0]); if (!array) goto bad; #ifdef DEBUG @@ -796,11 +797,11 @@ ShrinkPtrTable(JSPtrTable *table, const JSPtrTableInfo *info, array = table->array; JS_ASSERT(array); if (capacity == 0) { - free(array); + js_free(array); table->array = NULL; return; } - array = (void **) realloc(array, capacity * sizeof array[0]); + array = (void **) js_realloc(array, capacity * sizeof array[0]); if (array) table->array = array; } @@ -881,7 +882,7 @@ NewGCChunk(void) * * bytes to ensure that we always have room to store the gap. */ - p = malloc((js_gcArenasPerChunk + 1) << GC_ARENA_SHIFT); + p = js_malloc((js_gcArenasPerChunk + 1) << GC_ARENA_SHIFT); if (!p) return 0; @@ -913,11 +914,11 @@ DestroyGCChunk(jsuword chunk) #endif #if HAS_POSIX_MEMALIGN - free((void *) chunk); + js_free((void *) chunk); #else /* See comments in NewGCChunk. */ JS_ASSERT(*GetMallocedChunkGapPtr(chunk) < GC_ARENA_SIZE); - free((void *) (chunk - *GetMallocedChunkGapPtr(chunk))); + js_free((void *) (chunk - *GetMallocedChunkGapPtr(chunk))); #endif } @@ -3270,7 +3271,10 @@ js_FinalizeStringRT(JSRuntime *rt, JSString *str, intN type, JSContext *cx) JS_ASSERT(type < 0); rt->unitStrings[*chars] = NULL; } else if (type < 0) { - free(chars); + if (cx) + cx->free(chars); + else + rt->free(chars); } else { JS_ASSERT((uintN) type < JS_ARRAY_LENGTH(str_finalizers)); finalizer = str_finalizers[type]; @@ -3556,6 +3560,10 @@ js_GC(JSContext *cx, JSGCInvocationKind gckind) rt->gcMarkingTracer = NULL; +#ifdef JS_THREADSAFE + cx->createDeallocatorTask(); +#endif + /* * Sweep phase. 
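*
* With JS_THREADSAFE, the createDeallocatorTask() call added above
* pairs with the submitDeallocatorTask() call added after
* DestroyGCArenas below: pointers freed while sweeping are batched
* into a JSFreePointerListTask (see the jsgc.h hunk in this patch)
* and the whole batch is handed to the background thread once the
* arenas are gone. Roughly, with names inferred from the call sites:
*
*   task = new JSFreePointerListTask();   // before the sweep
*   ... task->add(ptr) for each block to free off-thread ...
*   thread->schedule(task);               // after DestroyGCArenas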
* @@ -3734,6 +3742,10 @@ js_GC(JSContext *cx, JSGCInvocationKind gckind) */ DestroyGCArenas(rt, emptyArenas); +#ifdef JS_THREADSAFE + cx->submitDeallocatorTask(); +#endif + if (rt->gcCallback) (void) rt->gcCallback(cx, JSGC_FINALIZE_END); #ifdef DEBUG_srcnotesize diff --git a/js/src/jsgc.h b/js/src/jsgc.h index 417c6405274d..52ec7982fd68 100644 --- a/js/src/jsgc.h +++ b/js/src/jsgc.h @@ -47,6 +47,7 @@ #include "jsdhash.h" #include "jsbit.h" #include "jsutil.h" +#include "jstask.h" JS_BEGIN_EXTERN_C @@ -341,6 +342,28 @@ js_AddAsGCBytes(JSContext *cx, size_t sz); extern void js_RemoveAsGCBytes(JSRuntime* rt, size_t sz); +#ifdef JS_THREADSAFE +class JSFreePointerListTask : public JSBackgroundTask { + void *head; + public: + JSFreePointerListTask() : head(NULL) {} + + void add(void* ptr) { + *(void**)ptr = head; + head = ptr; + } + + void run() { + void *ptr = head; + while (ptr) { + void *next = *(void **)ptr; + js_free(ptr); + ptr = next; + } + } +}; +#endif + /* * Free the chars held by str when it is finalized by the GC. When type is * less then zero, it denotes an internal string. Otherwise it denotes the diff --git a/js/src/jshash.cpp b/js/src/jshash.cpp index 9e9466e4b6ed..e347744e7319 100644 --- a/js/src/jshash.cpp +++ b/js/src/jshash.cpp @@ -73,7 +73,7 @@ DefaultAllocTable(void *pool, size_t size) static void DefaultFreeTable(void *pool, void *item, size_t size) { - free(item); + js_free(item); } static JSHashEntry * @@ -86,7 +86,7 @@ static void DefaultFreeEntry(void *pool, JSHashEntry *he, uintN flag) { if (flag == HT_FREE_ENTRY) - free(he); + js_free(he); } static JSHashAllocOps defaultHashAllocOps = { diff --git a/js/src/jsinterp.cpp b/js/src/jsinterp.cpp index d77602e4782f..0d9a50ba5ad1 100644 --- a/js/src/jsinterp.cpp +++ b/js/src/jsinterp.cpp @@ -2145,7 +2145,7 @@ js_TraceOpcode(JSContext *cx) fprintf(tracefp, "%s %s", (n == -ndefs) ? " output:" : ",", bytes); - JS_free(cx, bytes); + cx->free(bytes); } } fprintf(tracefp, " @ %u\n", (uintN) (regs->sp - StackBase(fp))); @@ -2177,7 +2177,7 @@ js_TraceOpcode(JSContext *cx) fprintf(tracefp, "%s %s", (n == -nuses) ? " inputs:" : ",", bytes); - JS_free(cx, bytes); + cx->free(bytes); } } fprintf(tracefp, " @ %u\n", (uintN) (regs->sp - StackBase(fp))); @@ -2264,7 +2264,7 @@ js_DumpOpMeters() # define SIGNIFICANT(count,total) (200. * (count) >= (total)) - graph = (Edge *) calloc(nedges, sizeof graph[0]); + graph = (Edge *) js_calloc(nedges * sizeof graph[0]); for (i = nedges = 0; i < JSOP_LIMIT; i++) { from = js_CodeName[i]; for (j = 0; j < JSOP_LIMIT; j++) { @@ -2293,7 +2293,7 @@ js_DumpOpMeters() graph[i].from, graph[i].to, (unsigned long)graph[i].count, style); } - free(graph); + js_free(graph); fputs("}\n", fp); fclose(fp); diff --git a/js/src/jsiter.cpp b/js/src/jsiter.cpp index f941a6e3bd7e..a9c3473f9775 100644 --- a/js/src/jsiter.cpp +++ b/js/src/jsiter.cpp @@ -649,7 +649,7 @@ generator_finalize(JSContext *cx, JSObject *obj) */ JS_ASSERT(gen->state == JSGEN_NEWBORN || gen->state == JSGEN_CLOSED || gen->state == JSGEN_OPEN); - JS_free(cx, gen); + cx->free(gen); } } @@ -716,7 +716,7 @@ js_NewGenerator(JSContext *cx, JSStackFrame *fp) /* Allocate obj's private data struct. 
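*
* The generator and its slot array share one allocation: JSGenerator
* evidently ends in a one-element jsval array, so the request below,
* sizeof(JSGenerator) + (nslots - 1) * sizeof(jsval), covers slot 0
* plus the remaining nslots - 1 slots, and a single cx->free(gen)
* later releases both the struct and its slots.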
*/ gen = (JSGenerator *) - JS_malloc(cx, sizeof(JSGenerator) + (nslots - 1) * sizeof(jsval)); + cx->malloc(sizeof(JSGenerator) + (nslots - 1) * sizeof(jsval)); if (!gen) goto bad; @@ -783,7 +783,7 @@ js_NewGenerator(JSContext *cx, JSStackFrame *fp) gen->state = JSGEN_NEWBORN; if (!JS_SetPrivate(cx, obj, gen)) { - JS_free(cx, gen); + cx->free(gen); goto bad; } return obj; diff --git a/js/src/jslock.cpp b/js/src/jslock.cpp index e31c15b9ab00..e1064f26a390 100644 --- a/js/src/jslock.cpp +++ b/js/src/jslock.cpp @@ -896,7 +896,7 @@ DestroyFatlock(JSFatLock *fl) { PR_DestroyLock(fl->slock); PR_DestroyCondVar(fl->svar); - free(fl); + js_free(fl); } static JSFatLock * @@ -990,7 +990,7 @@ js_SetupLocks(int listc, int globc) global_locks_log2 = JS_CeilingLog2(globc); global_locks_mask = JS_BITMASK(global_locks_log2); global_lock_count = JS_BIT(global_locks_log2); - global_locks = (PRLock **) malloc(global_lock_count * sizeof(PRLock*)); + global_locks = (PRLock **) js_malloc(global_lock_count * sizeof(PRLock*)); if (!global_locks) return JS_FALSE; for (i = 0; i < global_lock_count; i++) { @@ -1001,7 +1001,7 @@ js_SetupLocks(int listc, int globc) return JS_FALSE; } } - fl_list_table = (JSFatLockTable *) malloc(i * sizeof(JSFatLockTable)); + fl_list_table = (JSFatLockTable *) js_malloc(i * sizeof(JSFatLockTable)); if (!fl_list_table) { js_CleanupLocks(); return JS_FALSE; @@ -1023,7 +1023,7 @@ js_CleanupLocks() if (global_locks) { for (i = 0; i < global_lock_count; i++) PR_DestroyLock(global_locks[i]); - free(global_locks); + js_free(global_locks); global_locks = NULL; global_lock_count = 1; global_locks_log2 = 0; @@ -1036,7 +1036,7 @@ js_CleanupLocks() DeleteListOfFatlocks(fl_list_table[i].taken); fl_list_table[i].taken = NULL; } - free(fl_list_table); + js_free(fl_list_table); fl_list_table = NULL; fl_list_table_len = 0; } diff --git a/js/src/jsnum.cpp b/js/src/jsnum.cpp index 893f74aaaa2c..5c8993aa080e 100644 --- a/js/src/jsnum.cpp +++ b/js/src/jsnum.cpp @@ -384,7 +384,7 @@ num_toString(JSContext *cx, uintN argc, jsval *vp) return JS_FALSE; } str = JS_NewStringCopyZ(cx, dStr); - free(dStr); + js_free(dStr); } if (!str) return JS_FALSE; @@ -460,7 +460,7 @@ num_toLocaleString(JSContext *cx, uintN argc, jsval *vp) } tmpGroup--; - buf = (char *)JS_malloc(cx, size + 1); + buf = (char *)cx->malloc(size + 1); if (!buf) return JS_FALSE; @@ -492,7 +492,7 @@ num_toLocaleString(JSContext *cx, uintN argc, jsval *vp) str = JS_NewString(cx, buf, size); if (!str) { - JS_free(cx, buf); + cx->free(buf); return JS_FALSE; } @@ -739,9 +739,9 @@ js_FinishRuntimeNumberState(JSContext *cx) rt->jsNegativeInfinity = NULL; rt->jsPositiveInfinity = NULL; - JS_free(cx, (void *)rt->thousandsSeparator); - JS_free(cx, (void *)rt->decimalSeparator); - JS_free(cx, (void *)rt->numGrouping); + cx->free((void *)rt->thousandsSeparator); + cx->free((void *)rt->decimalSeparator); + cx->free((void *)rt->numGrouping); rt->thousandsSeparator = rt->decimalSeparator = rt->numGrouping = NULL; } @@ -852,7 +852,7 @@ NumberToStringWithBase(JSContext *cx, jsdouble d, jsint base) return NULL; s = JS_NewStringCopyZ(cx, numStr); if (!(numStr >= buf && numStr < buf + sizeof buf)) - free(numStr); + js_free(numStr); return s; } @@ -1251,7 +1251,7 @@ js_strtod(JSContext *cx, const jschar *s, const jschar *send, /* Use cbuf to avoid malloc */ if (length >= sizeof cbuf) { - cstr = (char *) JS_malloc(cx, length + 1); + cstr = (char *) cx->malloc(length + 1); if (!cstr) return JS_FALSE; } else { @@ -1292,7 +1292,7 @@ js_strtod(JSContext *cx, const jschar *s, 
const jschar *send, i = estr - cstr; if (cstr != cbuf) - JS_free(cx, cstr); + cx->free(cstr); *ep = i ? s1 + i : s; *dp = d; return JS_TRUE; @@ -1405,7 +1405,7 @@ js_strtointeger(JSContext *cx, const jschar *s, const jschar *send, */ size_t i; size_t length = s1 - start; - char *cstr = (char *) JS_malloc(cx, length + 1); + char *cstr = (char *) cx->malloc(length + 1); char *estr; int err=0; @@ -1418,12 +1418,12 @@ js_strtointeger(JSContext *cx, const jschar *s, const jschar *send, value = JS_strtod(cstr, &estr, &err); if (err == JS_DTOA_ENOMEM) { JS_ReportOutOfMemory(cx); - JS_free(cx, cstr); + cx->free(cstr); return JS_FALSE; } if (err == JS_DTOA_ERANGE && value == HUGE_VAL) value = *cx->runtime->jsPositiveInfinity; - JS_free(cx, cstr); + cx->free(cstr); } else if ((base & (base - 1)) == 0) { /* * The number may also be inaccurate for power-of-two bases. This diff --git a/js/src/jsobj.cpp b/js/src/jsobj.cpp index 9ca39003a927..ed62904eecdc 100644 --- a/js/src/jsobj.cpp +++ b/js/src/jsobj.cpp @@ -560,7 +560,7 @@ out: ida = JS_Enumerate(cx, obj); if (!ida) { if (*sp) { - JS_free(cx, *sp); + cx->free(*sp); *sp = NULL; } goto bad; @@ -704,7 +704,7 @@ obj_toSource(JSContext *cx, uintN argc, jsval *vp) if (!chars) { /* If outermost, allocate 4 + 1 for "({})" and the terminator. */ - chars = (jschar *) malloc(((outermost ? 4 : 2) + 1) * sizeof(jschar)); + chars = (jschar *) js_malloc(((outermost ? 4 : 2) + 1) * sizeof(jschar)); nchars = 0; if (!chars) goto error; @@ -715,9 +715,9 @@ obj_toSource(JSContext *cx, uintN argc, jsval *vp) MAKE_SHARP(he); nchars = js_strlen(chars); chars = (jschar *) - realloc((ochars = chars), (nchars + 2 + 1) * sizeof(jschar)); + js_realloc((ochars = chars), (nchars + 2 + 1) * sizeof(jschar)); if (!chars) { - free(ochars); + js_free(ochars); goto error; } if (outermost) { @@ -958,11 +958,11 @@ obj_toSource(JSContext *cx, uintN argc, jsval *vp) /* Allocate 1 + 1 at end for closing brace and terminating 0. */ chars = (jschar *) - realloc((ochars = chars), curlen * sizeof(jschar)); + js_realloc((ochars = chars), curlen * sizeof(jschar)); if (!chars) { /* Save code space on error: let JS_free ignore null vsharp. 
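* The replacement cx->free below therefore has to keep JS_free's
* tolerance of a null pointer for this error path to stay correct.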
*/ - JS_free(cx, vsharp); - free(ochars); + cx->free(vsharp); + js_free(ochars); goto error; } @@ -1005,7 +1005,7 @@ obj_toSource(JSContext *cx, uintN argc, jsval *vp) nchars += vlength; if (vsharp) - JS_free(cx, vsharp); + cx->free(vsharp); } } @@ -1019,7 +1019,7 @@ obj_toSource(JSContext *cx, uintN argc, jsval *vp) if (!ok) { if (chars) - free(chars); + js_free(chars); goto out; } @@ -1031,7 +1031,7 @@ obj_toSource(JSContext *cx, uintN argc, jsval *vp) make_string: str = js_NewString(cx, chars, nchars); if (!str) { - free(chars); + js_free(chars); ok = JS_FALSE; goto out; } @@ -1042,8 +1042,8 @@ obj_toSource(JSContext *cx, uintN argc, jsval *vp) return ok; overflow: - JS_free(cx, vsharp); - free(chars); + cx->free(vsharp); + js_free(chars); chars = NULL; goto error; } @@ -1064,7 +1064,7 @@ obj_toString(JSContext *cx, uintN argc, jsval *vp) obj = js_GetWrappedObject(cx, obj); clazz = OBJ_GET_CLASS(cx, obj)->name; nchars = 9 + strlen(clazz); /* 9 for "[object ]" */ - chars = (jschar *) JS_malloc(cx, (nchars + 1) * sizeof(jschar)); + chars = (jschar *) cx->malloc((nchars + 1) * sizeof(jschar)); if (!chars) return JS_FALSE; @@ -1079,7 +1079,7 @@ obj_toString(JSContext *cx, uintN argc, jsval *vp) str = js_NewString(cx, chars, nchars); if (!str) { - JS_free(cx, chars); + cx->free(chars); return JS_FALSE; } *vp = STRING_TO_JSVAL(str); @@ -2986,7 +2986,7 @@ AllocSlots(JSContext *cx, JSObject *obj, size_t nslots) JS_ASSERT(nslots > JS_INITIAL_NSLOTS); jsval* slots; - slots = (jsval*) JS_malloc(cx, SLOTS_TO_DYNAMIC_WORDS(nslots) * sizeof(jsval)); + slots = (jsval*) cx->malloc(SLOTS_TO_DYNAMIC_WORDS(nslots) * sizeof(jsval)); if (!slots) return true; @@ -3044,7 +3044,7 @@ js_GrowSlots(JSContext *cx, JSObject *obj, size_t nslots) size_t oslots = size_t(slots[-1]); - slots = (jsval*) JS_realloc(cx, slots - 1, nwords * sizeof(jsval)); + slots = (jsval*) cx->realloc(slots - 1, nwords * sizeof(jsval)); *slots++ = nslots; obj->dslots = slots; @@ -3069,11 +3069,11 @@ js_ShrinkSlots(JSContext *cx, JSObject *obj, size_t nslots) JS_ASSERT(nslots <= size_t(slots[-1])); if (nslots <= JS_INITIAL_NSLOTS) { - JS_free(cx, slots - 1); + cx->free(slots - 1); obj->dslots = NULL; } else { size_t nwords = SLOTS_TO_DYNAMIC_WORDS(nslots); - slots = (jsval*) JS_realloc(cx, slots - 1, nwords * sizeof(jsval)); + slots = (jsval*) cx->realloc(slots - 1, nwords * sizeof(jsval)); *slots++ = nslots; obj->dslots = slots; } @@ -4965,7 +4965,7 @@ js_Enumerate(JSContext *cx, JSObject *obj, JSIterateOp enum_op, } allocated = NativeEnumeratorSize(length); - ne = (JSNativeEnumerator *) JS_malloc(cx, allocated); + ne = (JSNativeEnumerator *) cx->malloc(allocated); if (!ne) { JS_UNLOCK_SCOPE(cx, scope); return JS_FALSE; @@ -4997,7 +4997,7 @@ js_Enumerate(JSContext *cx, JSObject *obj, JSIterateOp enum_op, JS_LOCK_GC(cx->runtime); if (!js_AddAsGCBytes(cx, allocated)) { /* js_AddAsGCBytes releases the GC lock on failures. 
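* That asymmetric contract shapes the failure path below: on failure
* the GC lock is already dropped, so the code just frees ne and
* returns; on success the lock is still held and is released later.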
*/ - JS_free(cx, ne); + cx->free(ne); return JS_FALSE; } ne->next = cx->runtime->nativeEnumerators; @@ -5090,7 +5090,7 @@ js_TraceNativeEnumerators(JSTracer *trc) } else if (doGC) { js_RemoveAsGCBytes(rt, NativeEnumeratorSize(ne->length)); *nep = ne->next; - JS_free(trc->context, ne); + trc->context->free(ne); continue; } nep = &ne->next; diff --git a/js/src/json.cpp b/js/src/json.cpp index 43bf3ab03259..6f790f0e9843 100644 --- a/js/src/json.cpp +++ b/js/src/json.cpp @@ -38,7 +38,7 @@ * * ***** END LICENSE BLOCK ***** */ -#include <string.h> /* memset */ +#include <string.h> #include "jsapi.h" #include "jsarena.h" #include "jsarray.h" @@ -740,10 +740,9 @@ js_BeginJSONParse(JSContext *cx, jsval *rootVal) if (!arr) return NULL; - JSONParser *jp = (JSONParser*) JS_malloc(cx, sizeof(JSONParser)); + JSONParser *jp = (JSONParser*) cx->calloc(sizeof(JSONParser)); if (!jp) return NULL; - memset(jp, 0, sizeof *jp); jp->objectStack = arr; if (!js_AddRoot(cx, &jp->objectStack, "JSON parse stack")) @@ -798,7 +797,7 @@ js_FinishJSONParse(JSContext *cx, JSONParser *jp, jsval reviver) JSBool ok = *jp->statep == JSON_PARSE_STATE_FINISHED; jsval *vp = jp->rootVal; - JS_free(cx, jp); + cx->free(jp); if (!early_ok) return JS_FALSE; diff --git a/js/src/jsopcode.cpp b/js/src/jsopcode.cpp index 98e0970ceddb..28376ee0076b 100644 --- a/js/src/jsopcode.cpp +++ b/js/src/jsopcode.cpp @@ -604,7 +604,7 @@ Sprint(Sprinter *sp, const char *format, ...) return -1; } offset = SprintCString(sp, bp); - free(bp); + js_free(bp); return offset; } @@ -737,7 +737,7 @@ JS_NEW_PRINTER(JSContext *cx, const char *name, JSFunction *fun, { JSPrinter *jp; - jp = (JSPrinter *) JS_malloc(cx, sizeof(JSPrinter)); + jp = (JSPrinter *) cx->malloc(sizeof(JSPrinter)); if (!jp) return NULL; INIT_SPRINTER(cx, &jp->sprinter, &jp->pool, 0); @@ -764,7 +764,7 @@ void js_DestroyPrinter(JSPrinter *jp) { JS_FinishArenaPool(&jp->pool); - JS_free(jp->sprinter.context, jp); + jp->sprinter.context->free(jp); } JSString * @@ -832,7 +832,7 @@ js_printf(JSPrinter *jp, const char *format, ...) /* Allocate temp space, convert format, and put. */ bp = JS_vsmprintf(format, ap); /* XXX vsaprintf */ if (fp) { - JS_free(jp->sprinter.context, fp); + jp->sprinter.context->free(fp); format = NULL; } if (!bp) { @@ -843,7 +843,7 @@ js_printf(JSPrinter *jp, const char *format, ...) cc = strlen(bp); if (SprintPut(&jp->sprinter, bp, (size_t)cc) < 0) cc = -1; - free(bp); + js_free(bp); va_end(ap); return cc; @@ -929,7 +929,7 @@ GetOff(SprintStack *ss, uintN i) if (off < 0) off = 0; ss->offsets[i] = off; - JS_free(ss->sprinter.context, bytes); + ss->sprinter.context->free(bytes); return off; } if (!ss->sprinter.base && SprintPut(&ss->sprinter, "", 0) >= 0) { @@ -2508,14 +2508,14 @@ Decompile(SprintStack *ss, jsbytecode *pc, intN nb, JSOp nextop) len = 0; if (!Decompile(ss, done, pc - done, JSOP_POP)) { - JS_free(cx, (char *)lval); + cx->free((char *)lval); return NULL; } /* Pop Decompile result and print comma expression. */ rval = POP_STR(); todo = Sprint(&ss->sprinter, "%s, %s", lval, rval); - JS_free(cx, (char *)lval); + cx->free((char *)lval); break; case SRC_HIDDEN: @@ -2547,7 +2547,7 @@ Decompile(SprintStack *ss, jsbytecode *pc, intN nb, JSOp nextop) /* Set saveop to reflect what we will push. */ saveop = JSOP_LEAVEBLOCKEXPR; if (!Decompile(ss, pc, len, saveop)) { - JS_free(cx, (char *)lval); + cx->free((char *)lval); return NULL; } rval = PopStr(ss, JSOP_SETNAME); @@ -2556,7 +2556,7 @@ Decompile(SprintStack *ss, jsbytecode *pc, intN nb, JSOp nextop) ?
"let (%s) (%s)" : "let (%s) %s", lval, rval); - JS_free(cx, (char *)lval); + cx->free((char *)lval); } break; @@ -2620,7 +2620,7 @@ Decompile(SprintStack *ss, jsbytecode *pc, intN nb, JSOp nextop) if ((size_t)argc <= JS_ARRAY_LENGTH(smallv)) { atomv = smallv; } else { - atomv = (JSAtom **) JS_malloc(cx, argc * sizeof(JSAtom *)); + atomv = (JSAtom **) cx->malloc(argc * sizeof(JSAtom *)); if (!atomv) return NULL; } @@ -2755,7 +2755,7 @@ Decompile(SprintStack *ss, jsbytecode *pc, intN nb, JSOp nextop) #undef LOCAL_ASSERT_OUT enterblock_out: if (atomv != smallv) - JS_free(cx, atomv); + cx->free(atomv); if (!ok) return NULL; } @@ -3280,7 +3280,7 @@ Decompile(SprintStack *ss, jsbytecode *pc, intN nb, JSOp nextop) DECOMPILE_CODE(pc + oplen, len - oplen); lval = JS_strdup(cx, POP_STR()); if (!lval) { - JS_free(cx, (void *)xval); + cx->free((void *)xval); return NULL; } pc += len; @@ -3291,8 +3291,8 @@ Decompile(SprintStack *ss, jsbytecode *pc, intN nb, JSOp nextop) rval = POP_STR(); todo = Sprint(&ss->sprinter, "%s ? %s : %s", xval, lval, rval); - JS_free(cx, (void *)xval); - JS_free(cx, (void *)lval); + cx->free((void *)xval); + cx->free((void *)lval); break; default: @@ -3319,7 +3319,7 @@ Decompile(SprintStack *ss, jsbytecode *pc, intN nb, JSOp nextop) pc += len; len = done - pc; if (!Decompile(ss, pc, len, op)) { - JS_free(cx, (char *)lval); + cx->free((char *)lval); return NULL; } rval = POP_STR(); @@ -3332,14 +3332,14 @@ Decompile(SprintStack *ss, jsbytecode *pc, intN nb, JSOp nextop) todo = Sprint(&ss->sprinter, "%s %s\n", lval, xval); tail = Sprint(&ss->sprinter, "%*s%s", jp->indent + 4, "", rval); - JS_free(cx, (char *)rval); + cx->free((char *)rval); } if (tail < 0) todo = -1; } else { todo = Sprint(&ss->sprinter, "%s %s %s", lval, xval, rval); } - JS_free(cx, (char *)lval); + cx->free((char *)lval); break; case JSOP_AND: @@ -3532,7 +3532,7 @@ Decompile(SprintStack *ss, jsbytecode *pc, intN nb, JSOp nextop) #endif argc = GET_ARGC(pc); argv = (char **) - JS_malloc(cx, (size_t)(argc + 1) * sizeof *argv); + cx->malloc((size_t)(argc + 1) * sizeof *argv); if (!argv) return NULL; @@ -3590,8 +3590,8 @@ Decompile(SprintStack *ss, jsbytecode *pc, intN nb, JSOp nextop) ok = JS_FALSE; for (i = 0; i <= argc; i++) - JS_free(cx, argv[i]); - JS_free(cx, argv); + cx->free(argv[i]); + cx->free(argv); if (!ok) return NULL; #if JS_HAS_LVALUE_RETURN @@ -4095,7 +4095,7 @@ Decompile(SprintStack *ss, jsbytecode *pc, intN nb, JSOp nextop) if (!rval) return NULL; todo = SprintCString(&ss->sprinter, rval); - JS_free(cx, (void *)rval); + cx->free((void *)rval); break; } #endif /* JS_HAS_GENERATOR_EXPRS */ @@ -4166,7 +4166,7 @@ Decompile(SprintStack *ss, jsbytecode *pc, intN nb, JSOp nextop) ok = JS_TRUE; } else { table = (TableEntry *) - JS_malloc(cx, (size_t)n * sizeof *table); + cx->malloc((size_t)n * sizeof *table); if (!table) return NULL; for (i = j = 0; i < n; i++) { @@ -4186,12 +4186,12 @@ Decompile(SprintStack *ss, jsbytecode *pc, intN nb, JSOp nextop) pc2 += jmplen; } tmp = (TableEntry *) - JS_malloc(cx, (size_t)j * sizeof *table); + cx->malloc((size_t)j * sizeof *table); if (tmp) { VOUCH_DOES_NOT_REQUIRE_STACK(); ok = js_MergeSort(table, (size_t)j, sizeof(TableEntry), CompareOffsets, NULL, tmp); - JS_free(cx, tmp); + cx->free(tmp); } else { ok = JS_FALSE; } @@ -4201,7 +4201,7 @@ Decompile(SprintStack *ss, jsbytecode *pc, intN nb, JSOp nextop) ok = DecompileSwitch(ss, table, (uintN)j, pc, len, off, JS_FALSE); } - JS_free(cx, table); + cx->free(table); if (!ok) return NULL; todo = -2; @@ -4227,7 +4227,7 @@ 
Decompile(SprintStack *ss, jsbytecode *pc, intN nb, JSOp nextop) pc2 += UINT16_LEN; table = (TableEntry *) - JS_malloc(cx, (size_t)npairs * sizeof *table); + cx->malloc((size_t)npairs * sizeof *table); if (!table) return NULL; for (k = 0; k < npairs; k++) { @@ -4248,7 +4248,7 @@ Decompile(SprintStack *ss, jsbytecode *pc, intN nb, JSOp nextop) ok = DecompileSwitch(ss, table, (uintN)npairs, pc, len, off, JS_FALSE); - JS_free(cx, table); + cx->free(table); if (!ok) return NULL; todo = -2; @@ -4292,7 +4292,7 @@ Decompile(SprintStack *ss, jsbytecode *pc, intN nb, JSOp nextop) * and the distance to its statements in table[i].offset. */ table = (TableEntry *) - JS_malloc(cx, (size_t)ncases * sizeof *table); + cx->malloc((size_t)ncases * sizeof *table); if (!table) return NULL; pc2 = pc; @@ -4322,7 +4322,7 @@ Decompile(SprintStack *ss, jsbytecode *pc, intN nb, JSOp nextop) ok = DecompileSwitch(ss, table, (uintN)ncases, pc, len, off, JS_TRUE); - JS_free(cx, table); + cx->free(table); if (!ok) return NULL; todo = -2; @@ -4370,7 +4370,7 @@ Decompile(SprintStack *ss, jsbytecode *pc, intN nb, JSOp nextop) break; } - argv = (char **) JS_malloc(cx, size_t(argc) * sizeof *argv); + argv = (char **) cx->malloc(size_t(argc) * sizeof *argv); if (!argv) return NULL; @@ -4394,8 +4394,8 @@ Decompile(SprintStack *ss, jsbytecode *pc, intN nb, JSOp nextop) } for (i = 0; i < argc; i++) - JS_free(cx, argv[i]); - JS_free(cx, argv); + cx->free(argv[i]); + cx->free(argv); if (!ok) return NULL; @@ -4728,7 +4728,7 @@ Decompile(SprintStack *ss, jsbytecode *pc, intN nb, JSOp nextop) (*rval == '\0' || (SprintPut(&ss->sprinter, " ", 1) >= 0 && SprintCString(&ss->sprinter, rval))); - JS_free(cx, (char *)rval); + cx->free((char *)rval); if (!ok) return NULL; SprintPut(&ss->sprinter, "?>", 2); @@ -4836,7 +4836,7 @@ DecompileCode(JSPrinter *jp, JSScript *script, jsbytecode *pc, uintN len, ok = Decompile(&ss, pc, len, JSOP_NOP) != NULL; if (code != oldcode) { - JS_free(cx, jp->script->code); + cx->free(jp->script->code); jp->script->code = oldcode; jp->script->main = oldmain; } @@ -5055,7 +5055,7 @@ js_DecompileValueGenerator(JSContext *cx, intN spindex, jsval v, * populated interpreter's stack with its current content. 
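*
* The simulation needs one jsbytecode* per operand slot, so the
* buffer below is sized StackDepth(script) * sizeof *pcstack and is
* always released through the release_pcstack label, whichever way
* the reconstruction goes.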
*/ pcstack = (jsbytecode **) - JS_malloc(cx, StackDepth(script) * sizeof *pcstack); + cx->malloc(StackDepth(script) * sizeof *pcstack); if (!pcstack) return NULL; pcdepth = ReconstructPCStack(cx, script, pc, pcstack); @@ -5096,7 +5096,7 @@ js_DecompileValueGenerator(JSContext *cx, intN spindex, jsval v, } release_pcstack: - JS_free(cx, pcstack); + cx->free(pcstack); if (pcdepth < 0) goto do_fallback; } @@ -5232,7 +5232,7 @@ DecompileExpression(JSContext *cx, JSScript *script, JSFunction *fun, } pcstack = (jsbytecode **) - JS_malloc(cx, StackDepth(script) * sizeof *pcstack); + cx->malloc(StackDepth(script) * sizeof *pcstack); if (!pcstack) { name = NULL; goto out; @@ -5259,12 +5259,12 @@ DecompileExpression(JSContext *cx, JSScript *script, JSFunction *fun, out: if (code != oldcode) { - JS_free(cx, script->code); + cx->free(script->code); script->code = oldcode; script->main = oldmain; } - JS_free(cx, pcstack); + cx->free(pcstack); return name; } @@ -5339,7 +5339,7 @@ SimulateImacroCFG(JSContext *cx, JSScript *script, jsbytecode **pcstack) { size_t nbytes = StackDepth(script) * sizeof *pcstack; - jsbytecode** tmp_pcstack = (jsbytecode **) JS_malloc(cx, nbytes); + jsbytecode** tmp_pcstack = (jsbytecode **) cx->malloc(nbytes); if (!tmp_pcstack) return -1; memcpy(tmp_pcstack, pcstack, nbytes); @@ -5379,11 +5379,11 @@ SimulateImacroCFG(JSContext *cx, JSScript *script, success: memcpy(pcstack, tmp_pcstack, nbytes); - JS_free(cx, tmp_pcstack); + cx->free(tmp_pcstack); return pcdepth; failure: - JS_free(cx, tmp_pcstack); + cx->free(tmp_pcstack); return -1; } diff --git a/js/src/jsparse.cpp b/js/src/jsparse.cpp index 161dfbc94ae2..35cd45dd9d63 100644 --- a/js/src/jsparse.cpp +++ b/js/src/jsparse.cpp @@ -9036,12 +9036,12 @@ js_FoldConstants(JSContext *cx, JSParseNode *pn, JSTreeContext *tc, bool inCond) } /* Allocate a new buffer and string descriptor for the result. */ - chars = (jschar *) JS_malloc(cx, (length + 1) * sizeof(jschar)); + chars = (jschar *) cx->malloc((length + 1) * sizeof(jschar)); if (!chars) return JS_FALSE; str = js_NewString(cx, chars, length); if (!str) { - JS_free(cx, chars); + cx->free(chars); return JS_FALSE; } diff --git a/js/src/jsprf.cpp b/js/src/jsprf.cpp index 5686c1eabcda..6e02bc2d1857 100644 --- a/js/src/jsprf.cpp +++ b/js/src/jsprf.cpp @@ -412,7 +412,7 @@ static int cvt_ws(SprintfState *ss, const jschar *ws, int width, int prec, if (!s) return -1; /* JSStuffFunc error indicator. */ result = cvt_s(ss, s, width, prec, flags); - free(s); + js_free(s); } else { result = cvt_s(ss, NULL, width, prec, flags); } @@ -630,7 +630,7 @@ static struct NumArgState* BuildArgArray( const char *fmt, va_list ap, int* rv, if( *rv < 0 ){ if( nas != nasArray ) - free( nas ); + js_free( nas ); return NULL; } @@ -667,7 +667,7 @@ static struct NumArgState* BuildArgArray( const char *fmt, va_list ap, int* rv, default: if( nas != nasArray ) - free( nas ); + js_free( nas ); *rv = -1; return NULL; } @@ -756,7 +756,7 @@ static int dosprintf(SprintfState *ss, const char *fmt, va_list ap) if( nas[i-1].type == TYPE_UNKNOWN ){ if( nas && ( nas != nasArray ) ) - free( nas ); + js_free( nas ); return -1; } @@ -1037,7 +1037,7 @@ static int dosprintf(SprintfState *ss, const char *fmt, va_list ap) rv = (*ss->stuff)(ss, "\0", 1); if( nas && ( nas != nasArray ) ){ - free( nas ); + js_free( nas ); } return rv; @@ -1098,9 +1098,9 @@ static int GrowStuff(SprintfState *ss, const char *sp, JSUint32 len) /* Grow the buffer */ newlen = ss->maxlen + ((len > 32) ? 
len : 32); if (ss->base) { - newbase = (char*) realloc(ss->base, newlen); + newbase = (char*) js_realloc(ss->base, newlen); } else { - newbase = (char*) malloc(newlen); + newbase = (char*) js_malloc(newlen); } if (!newbase) { /* Ran out of memory */ @@ -1139,7 +1139,7 @@ JS_PUBLIC_API(char *) JS_smprintf(const char *fmt, ...) */ JS_PUBLIC_API(void) JS_smprintf_free(char *mem) { - free(mem); + js_free(mem); } JS_PUBLIC_API(char *) JS_vsmprintf(const char *fmt, va_list ap) @@ -1154,7 +1154,7 @@ JS_PUBLIC_API(char *) JS_vsmprintf(const char *fmt, va_list ap) rv = dosprintf(&ss, fmt, ap); if (rv < 0) { if (ss.base) { - free(ss.base); + js_free(ss.base); } return 0; } @@ -1253,7 +1253,7 @@ JS_PUBLIC_API(char *) JS_vsprintf_append(char *last, const char *fmt, va_list ap rv = dosprintf(&ss, fmt, ap); if (rv < 0) { if (ss.base) { - free(ss.base); + js_free(ss.base); } return 0; } diff --git a/js/src/jspubtd.h b/js/src/jspubtd.h index c2068d65d07c..7efcbfbcc64a 100644 --- a/js/src/jspubtd.h +++ b/js/src/jspubtd.h @@ -145,7 +145,6 @@ typedef struct JSObject JSObject; typedef struct JSObjectMap JSObjectMap; typedef struct JSObjectOps JSObjectOps; typedef struct JSRuntime JSRuntime; -typedef struct JSRuntime JSTaskState; /* XXX deprecated name */ typedef struct JSScript JSScript; typedef struct JSStackFrame JSStackFrame; typedef struct JSString JSString; diff --git a/js/src/jsregexp.cpp b/js/src/jsregexp.cpp index 05762c296b49..8a098ee42659 100644 --- a/js/src/jsregexp.cpp +++ b/js/src/jsregexp.cpp @@ -585,12 +585,12 @@ ParseRegExp(CompilerState *state) } operatorStack = (REOpData *) - JS_malloc(state->context, sizeof(REOpData) * operatorStackSize); + state->context->malloc(sizeof(REOpData) * operatorStackSize); if (!operatorStack) return JS_FALSE; operandStack = (RENode **) - JS_malloc(state->context, sizeof(RENode *) * operandStackSize); + state->context->malloc(sizeof(RENode *) * operandStackSize); if (!operandStack) goto out; @@ -682,8 +682,8 @@ pushOperand: RENode **tmp; operandStackSize += operandStackSize; tmp = (RENode **) - JS_realloc(state->context, operandStack, - sizeof(RENode *) * operandStackSize); + state->context->realloc(operandStack, + sizeof(RENode *) * operandStackSize); if (!tmp) goto out; operandStack = tmp; @@ -817,8 +817,8 @@ pushOperator: REOpData *tmp; operatorStackSize += operatorStackSize; tmp = (REOpData *) - JS_realloc(state->context, operatorStack, - sizeof(REOpData) * operatorStackSize); + state->context->realloc(operatorStack, + sizeof(REOpData) * operatorStackSize); if (!tmp) goto out; operatorStack = tmp; @@ -831,9 +831,9 @@ pushOperator: } out: if (operatorStack) - JS_free(state->context, operatorStack); + state->context->free(operatorStack); if (operandStack) - JS_free(state->context, operandStack); + state->context->free(operandStack); return result; } @@ -1647,9 +1647,8 @@ EmitREBytecode(CompilerState *state, JSRegExp *re, size_t treeDepth, emitStateStack = NULL; } else { emitStateStack = - (EmitStateStackEntry *)JS_malloc(state->context, - sizeof(EmitStateStackEntry) * - treeDepth); + (EmitStateStackEntry *) + state->context->malloc(sizeof(EmitStateStackEntry) * treeDepth); if (!emitStateStack) return NULL; } @@ -1951,7 +1950,7 @@ EmitREBytecode(CompilerState *state, JSRegExp *re, size_t treeDepth, cleanup: if (emitStateStack) - JS_free(state->context, emitStateStack); + state->context->free(emitStateStack); return pc; jump_too_big: @@ -3228,7 +3227,7 @@ js_NewRegExp(JSContext *cx, JSTokenStream *ts, goto out; resize = offsetof(JSRegExp, program) + 
state.progLength + 1; - re = (JSRegExp *) JS_malloc(cx, resize); + re = (JSRegExp *) cx->malloc(resize); if (!re) goto out; @@ -3237,7 +3236,7 @@ js_NewRegExp(JSContext *cx, JSTokenStream *ts, re->classCount = state.classCount; if (re->classCount) { re->classList = (RECharSet *) - JS_malloc(cx, re->classCount * sizeof(RECharSet)); + cx->malloc(re->classCount * sizeof(RECharSet)); if (!re->classList) { js_DestroyRegExp(cx, re); re = NULL; @@ -3266,7 +3265,7 @@ js_NewRegExp(JSContext *cx, JSTokenStream *ts, JSRegExp *tmp; JS_ASSERT((size_t)(endPC - re->program) < state.progLength + 1); resize = offsetof(JSRegExp, program) + (endPC - re->program); - tmp = (JSRegExp *) JS_realloc(cx, re, resize); + tmp = (JSRegExp *) cx->realloc(re, resize); if (tmp) re = tmp; } @@ -3606,7 +3605,7 @@ ProcessCharSet(JSContext *cx, JSRegExp *re, RECharSet *charSet) JS_ASSERT(end[0] == ']'); byteLength = (charSet->length >> 3) + 1; - charSet->u.bits = (uint8 *)JS_malloc(cx, byteLength); + charSet->u.bits = (uint8 *)cx->malloc(byteLength); if (!charSet->u.bits) { JS_ReportOutOfMemory(cx); return JS_FALSE; @@ -3800,12 +3799,12 @@ js_DestroyRegExp(JSContext *cx, JSRegExp *re) uintN i; for (i = 0; i < re->classCount; i++) { if (re->classList[i].converted) - JS_free(cx, re->classList[i].u.bits); + cx->free(re->classList[i].u.bits); re->classList[i].u.bits = NULL; } - JS_free(cx, re->classList); + cx->free(re->classList); } - JS_free(cx, re); + cx->free(re); } } @@ -4870,12 +4869,12 @@ js_ExecuteRegExp(JSContext *cx, JSRegExp *re, JSString *str, size_t *indexp, if (!morepar) { res->moreLength = 10; morepar = (JSSubString*) - JS_malloc(cx, 10 * sizeof(JSSubString)); + cx->malloc(10 * sizeof(JSSubString)); } else if (morenum >= res->moreLength) { res->moreLength += 10; morepar = (JSSubString*) - JS_realloc(cx, morepar, - res->moreLength * sizeof(JSSubString)); + cx->realloc(morepar, + res->moreLength * sizeof(JSSubString)); } if (!morepar) { cx->weakRoots.newborn[GCX_OBJECT] = NULL; @@ -5114,7 +5113,7 @@ js_FreeRegExpStatics(JSContext *cx) JSRegExpStatics *res = &cx->regExpStatics; if (res->moreParens) { - JS_free(cx, res->moreParens); + cx->free(res->moreParens); res->moreParens = NULL; } JS_FinishArenaPool(&cx->regexpPool); @@ -5365,7 +5364,7 @@ js_regexp_toString(JSContext *cx, JSObject *obj, jsval *vp) nflags = 0; for (flags = re->flags; flags != 0; flags &= flags - 1) nflags++; - chars = (jschar*) JS_malloc(cx, (length + nflags + 1) * sizeof(jschar)); + chars = (jschar*) cx->malloc((length + nflags + 1) * sizeof(jschar)); if (!chars) { JS_UNLOCK_OBJ(cx, obj); return JS_FALSE; @@ -5389,7 +5388,7 @@ js_regexp_toString(JSContext *cx, JSObject *obj, jsval *vp) str = js_NewString(cx, chars, length); if (!str) { - JS_free(cx, chars); + cx->free(chars); return JS_FALSE; } *vp = STRING_TO_JSVAL(str); @@ -5472,15 +5471,15 @@ regexp_compile_sub(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, if (*cp == '/' && (cp == start || cp[-1] != '\\')) { nbytes = (++length + 1) * sizeof(jschar); if (!nstart) { - nstart = (jschar *) JS_malloc(cx, nbytes); + nstart = (jschar *) cx->malloc(nbytes); if (!nstart) return JS_FALSE; ncp = nstart + (cp - start); js_strncpy(nstart, start, cp - start); } else { - tmp = (jschar *) JS_realloc(cx, nstart, nbytes); + tmp = (jschar *) cx->realloc(nstart, nbytes); if (!tmp) { - JS_free(cx, nstart); + cx->free(nstart); return JS_FALSE; } ncp = tmp + (ncp - nstart); @@ -5498,7 +5497,7 @@ regexp_compile_sub(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, *ncp = 0; str = js_NewString(cx, 
nstart, length); if (!str) { - JS_free(cx, nstart); + cx->free(nstart); return JS_FALSE; } argv[0] = STRING_TO_JSVAL(str); diff --git a/js/src/jsscan.cpp b/js/src/jsscan.cpp index 37e50a80d85a..90f3547671d4 100644 --- a/js/src/jsscan.cpp +++ b/js/src/jsscan.cpp @@ -258,7 +258,7 @@ void js_CloseTokenStream(JSContext *cx, JSTokenStream *ts) { if (ts->flags & TSF_OWNFILENAME) - JS_free(cx, (void *) ts->filename); + cx->free((void *) ts->filename); } JS_FRIEND_API(int) @@ -562,7 +562,7 @@ js_ReportCompileErrorNumber(JSContext *cx, JSTokenStream *ts, JSParseNode *pn, } report.lineno = ts->lineno; linelength = ts->linebuf.limit - ts->linebuf.base; - linechars = (jschar *)JS_malloc(cx, (linelength + 1) * sizeof(jschar)); + linechars = (jschar *)cx->malloc((linelength + 1) * sizeof(jschar)); if (!linechars) { warning = JS_FALSE; goto out; @@ -651,21 +651,21 @@ js_ReportCompileErrorNumber(JSContext *cx, JSTokenStream *ts, JSParseNode *pn, out: if (linebytes) - JS_free(cx, linebytes); + cx->free(linebytes); if (linechars) - JS_free(cx, linechars); + cx->free(linechars); if (message) - JS_free(cx, message); + cx->free(message); if (report.ucmessage) - JS_free(cx, (void *)report.ucmessage); + cx->free((void *)report.ucmessage); if (report.messageArgs) { if (!(flags & JSREPORT_UC)) { i = 0; while (report.messageArgs[i]) - JS_free(cx, (void *)report.messageArgs[i++]); + cx->free((void *)report.messageArgs[i++]); } - JS_free(cx, (void *)report.messageArgs); + cx->free((void *)report.messageArgs); } if (!JSREPORT_IS_WARNING(flags)) { @@ -698,7 +698,7 @@ GrowStringBuffer(JSStringBuffer *sb, size_t amount) /* Now do the full overflow check. */ if (size_t(offset) < newlength && newlength < ~size_t(0) / sizeof(jschar)) { - jschar *bp = (jschar *) realloc(sb->base, newlength * sizeof(jschar)); + jschar *bp = (jschar *) js_realloc(sb->base, newlength * sizeof(jschar)); if (bp) { sb->base = bp; sb->ptr = bp + offset; @@ -709,7 +709,7 @@ GrowStringBuffer(JSStringBuffer *sb, size_t amount) } /* Either newlength overflow or realloc failure: poison the well. 
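* Poisoning frees the old storage and parks sb->base on the
* STRING_BUFFER_ERROR_BASE sentinel, so later STRING_BUFFER_OK checks
* fail deterministically instead of touching freed memory.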
*/ - free(sb->base); + js_free(sb->base); sb->base = STRING_BUFFER_ERROR_BASE; return false; } @@ -719,7 +719,7 @@ FreeStringBuffer(JSStringBuffer *sb) { JS_ASSERT(STRING_BUFFER_OK(sb)); if (sb->base) - free(sb->base); + js_free(sb->base); } void @@ -924,7 +924,7 @@ bad: if (bytes) { js_ReportCompileErrorNumber(cx, ts, NULL, JSREPORT_ERROR, msg, bytes); - JS_free(cx, bytes); + cx->free(bytes); } return JS_FALSE; } @@ -1788,7 +1788,7 @@ retry: if (c == '\n') { if (i > 0) { if (ts->flags & TSF_OWNFILENAME) - JS_free(cx, (void *) ts->filename); + cx->free((void *) ts->filename); ts->filename = JS_strdup(cx, filename); if (!ts->filename) goto error; diff --git a/js/src/jsscope.cpp b/js/src/jsscope.cpp index 929f01f72715..40c5add19fe3 100644 --- a/js/src/jsscope.cpp +++ b/js/src/jsscope.cpp @@ -166,7 +166,7 @@ JSScope::createTable(JSContext *cx, bool report) sizeLog2 = MIN_SCOPE_SIZE_LOG2; } - table = (JSScopeProperty **) calloc(JS_BIT(sizeLog2), sizeof(JSScopeProperty *)); + table = (JSScopeProperty **) js_calloc(JS_BIT(sizeLog2) * sizeof(JSScopeProperty *)); if (!table) { if (report) JS_ReportOutOfMemory(cx); @@ -188,7 +188,7 @@ JSScope::create(JSContext *cx, JSObjectOps *ops, JSClass *clasp, JSObject *obj) JS_ASSERT(OPS_IS_NATIVE(ops)); JS_ASSERT(obj); - JSScope *scope = (JSScope *) JS_malloc(cx, sizeof(JSScope)); + JSScope *scope = (JSScope *) cx->malloc(sizeof(JSScope)); if (!scope) return NULL; @@ -213,7 +213,7 @@ JSScope::createEmptyScope(JSContext *cx, JSClass *clasp) { JS_ASSERT(!emptyScope); - JSScope *scope = (JSScope *) JS_malloc(cx, sizeof(JSScope)); + JSScope *scope = (JSScope *) cx->malloc(sizeof(JSScope)); if (!scope) return NULL; @@ -252,13 +252,13 @@ JSScope::destroy(JSContext *cx, JSScope *scope) js_FinishTitle(cx, &scope->title); #endif if (scope->table) - JS_free(cx, scope->table); + cx->free(scope->table); if (scope->emptyScope) scope->emptyScope->drop(cx, NULL); LIVE_SCOPE_METER(cx, cx->runtime->liveScopeProps -= scope->entryCount); JS_RUNTIME_UNMETER(cx->runtime, liveScopes); - JS_free(cx, scope); + cx->free(scope); } #ifdef JS_DUMP_PROPTREE_STATS @@ -401,11 +401,9 @@ JSScope::changeTable(JSContext *cx, int change) oldsize = JS_BIT(oldlog2); newsize = JS_BIT(newlog2); nbytes = SCOPE_TABLE_NBYTES(newsize); - newtable = (JSScopeProperty **) calloc(nbytes, 1); - if (!newtable) { - JS_ReportOutOfMemory(cx); + newtable = (JSScopeProperty **) cx->calloc(nbytes); + if (!newtable) return false; - } /* Now that we have newtable allocated, update members. */ hashShift = JS_DHASH_BITS - newlog2; @@ -428,7 +426,7 @@ JSScope::changeTable(JSContext *cx, int change) } /* Finally, free the old table storage. */ - JS_free(cx, oldtable); + cx->free(oldtable); return true; } @@ -578,7 +576,7 @@ NewPropTreeKidsChunk(JSRuntime *rt) { PropTreeKidsChunk *chunk; - chunk = (PropTreeKidsChunk *) calloc(1, sizeof *chunk); + chunk = (PropTreeKidsChunk *) js_calloc(sizeof *chunk); if (!chunk) return NULL; JS_ASSERT(((jsuword)chunk & CHUNKY_KIDS_TAG) == 0); @@ -592,7 +590,7 @@ DestroyPropTreeKidsChunk(JSRuntime *rt, PropTreeKidsChunk *chunk) JS_RUNTIME_UNMETER(rt, propTreeKidsChunks); if (chunk->table) JS_DHashTableDestroy(chunk->table); - free(chunk); + js_free(chunk); } /* NB: Called with rt->gcLock held. 
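* Property-tree chunks are per-runtime state managed under this lock
* with no JSContext at hand, which is presumably why this code keeps
* the bare js_calloc/js_free wrappers rather than the cx->malloc
* family used elsewhere in the patch.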
*/ @@ -1215,7 +1213,7 @@ JSScope::add(JSContext *cx, jsid id, splen = entryCount; JS_ASSERT(splen != 0); spvec = (JSScopeProperty **) - JS_malloc(cx, SCOPE_TABLE_NBYTES(splen)); + cx->malloc(SCOPE_TABLE_NBYTES(splen)); if (!spvec) goto fail_overwrite; i = splen; @@ -1248,7 +1246,7 @@ JSScope::add(JSContext *cx, jsid id, } else { sprop = GetPropertyTreeChild(cx, sprop, spvec[i]); if (!sprop) { - JS_free(cx, spvec); + cx->free(spvec); goto fail_overwrite; } @@ -1257,7 +1255,7 @@ JSScope::add(JSContext *cx, jsid id, SPROP_STORE_PRESERVING_COLLISION(spp2, sprop); } } while (++i < splen); - JS_free(cx, spvec); + cx->free(spvec); /* * Now sprop points to the last property in this scope, where @@ -1558,7 +1556,7 @@ JSScope::clear(JSContext *cx) LIVE_SCOPE_METER(cx, cx->runtime->liveScopeProps -= entryCount); if (table) - free(table); + js_free(table); clearMiddleDelete(); js_LeaveTraceIfGlobalObject(cx, object); initMinimal(cx); diff --git a/js/src/jsscript.cpp b/js/src/jsscript.cpp index 35d203d5e598..54e87a75697e 100644 --- a/js/src/jsscript.cpp +++ b/js/src/jsscript.cpp @@ -140,7 +140,7 @@ script_toSource(JSContext *cx, uintN argc, jsval *vp) } /* Allocate the source string and copy into it. */ - t = (jschar *) JS_malloc(cx, (n + 1) * sizeof(jschar)); + t = (jschar *) cx->malloc((n + 1) * sizeof(jschar)); if (!t) return JS_FALSE; for (i = 0; i < j; i++) @@ -154,7 +154,7 @@ script_toSource(JSContext *cx, uintN argc, jsval *vp) /* Create and return a JS string for t. */ str = JS_NewUCString(cx, t, n); if (!str) { - JS_free(cx, t); + cx->free(t); return JS_FALSE; } *vp = STRING_TO_JSVAL(str); @@ -533,7 +533,7 @@ js_XDRScript(JSXDRState *xdr, JSScript **scriptp, JSBool *hasMagic) ok = JS_XDRBytes(xdr, (char *) code, length * sizeof(jsbytecode)); if (code != script->code) - JS_free(cx, code); + cx->free(code); if (!ok) goto error; @@ -576,7 +576,7 @@ js_XDRScript(JSXDRState *xdr, JSScript **scriptp, JSBool *hasMagic) filename = js_SaveScriptFilename(cx, filename); if (!filename) goto error; - JS_free(cx, (void *) script->filename); + cx->free((void *) script->filename); script->filename = filename; filenameWasSaved = JS_TRUE; } @@ -665,7 +665,7 @@ js_XDRScript(JSXDRState *xdr, JSScript **scriptp, JSBool *hasMagic) if (xdr->mode == JSXDR_DECODE) { JS_POP_TEMP_ROOT(cx, &tvr); if (script->filename && !filenameWasSaved) { - JS_free(cx, (void *) script->filename); + cx->free((void *) script->filename); script->filename = NULL; } js_DestroyScript(cx, script); @@ -783,7 +783,7 @@ script_thaw(JSContext *cx, uintN argc, jsval *vp) /* Swap bytes in Unichars to keep frozen strings machine-independent. 
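* The frozen format is evidently little-endian: the swap below (and
* the matching cx->free(buf) in the out: block further down) is
* compiled only when IS_BIG_ENDIAN. Per jschar the swap amounts to
* to[i] = (from[i] >> 8) | (from[i] << 8) (a sketch; the actual loop
* lies outside this hunk).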
*/ from = (jschar *)buf; - to = (jschar *) JS_malloc(cx, len * sizeof(jschar)); + to = (jschar *) cx->malloc(len * sizeof(jschar)); if (!to) { JS_XDRDestroy(xdr); return JS_FALSE; @@ -839,7 +839,7 @@ out: JS_XDRMemSetData(xdr, NULL, 0); JS_XDRDestroy(xdr); #if IS_BIG_ENDIAN - JS_free(cx, buf); + cx->free(buf); #endif *vp = JSVAL_TRUE; return ok; @@ -995,13 +995,13 @@ typedef struct ScriptFilenameEntry { static void * js_alloc_table_space(void *priv, size_t size) { - return malloc(size); + return js_malloc(size); } static void js_free_table_space(void *priv, void *item, size_t size) { - free(item); + js_free(item); } static JSHashEntry * @@ -1010,7 +1010,7 @@ js_alloc_sftbl_entry(void *priv, const void *key) size_t nbytes = offsetof(ScriptFilenameEntry, filename) + strlen((const char *) key) + 1; - return (JSHashEntry *) malloc(JS_MAX(nbytes, sizeof(JSHashEntry))); + return (JSHashEntry *) js_malloc(JS_MAX(nbytes, sizeof(JSHashEntry))); } static void @@ -1018,7 +1018,7 @@ js_free_sftbl_entry(void *priv, JSHashEntry *he, uintN flag) { if (flag != HT_FREE_ENTRY) return; - free(he); + js_free(he); } static JSHashAllocOps sftbl_alloc_ops = { @@ -1080,7 +1080,7 @@ js_FreeRuntimeScriptState(JSRuntime *rt) while (!JS_CLIST_IS_EMPTY(&rt->scriptFilenamePrefixes)) { sfp = (ScriptFilenamePrefix *) rt->scriptFilenamePrefixes.next; JS_REMOVE_LINK(&sfp->links); - free(sfp); + js_free(sfp); } js_FinishRuntimeScriptState(rt); } @@ -1143,7 +1143,7 @@ SaveScriptFilename(JSRuntime *rt, const char *filename, uint32 flags) if (!sfp) { /* No such prefix: add one now. */ - sfp = (ScriptFilenamePrefix *) malloc(sizeof(ScriptFilenamePrefix)); + sfp = (ScriptFilenamePrefix *) js_malloc(sizeof(ScriptFilenamePrefix)); if (!sfp) return NULL; JS_INSERT_AFTER(&sfp->links, link); @@ -1384,7 +1384,7 @@ js_NewScript(JSContext *cx, uint32 length, uint32 nsrcnotes, uint32 natoms, if (ntrynotes != 0) size += sizeof(JSTryNoteArray) + ntrynotes * sizeof(JSTryNote); - script = (JSScript *) JS_malloc(cx, size); + script = (JSScript *) cx->malloc(size); if (!script) return NULL; memset(script, 0, sizeof(JSScript)); @@ -1536,7 +1536,7 @@ js_NewScriptFromCG(JSContext *cx, JSCodeGenerator *cg) memcpy(JS_SCRIPT_UPVARS(script)->vector, cg->upvarMap.vector, cg->upvarList.count * sizeof(uint32)); cg->upvarList.clear(); - JS_free(cx, cg->upvarMap.vector); + cx->free(cg->upvarMap.vector); cg->upvarMap.vector = NULL; } @@ -1648,7 +1648,7 @@ js_DestroyScript(JSContext *cx, JSScript *script) } } - JS_free(cx, script); + cx->free(script); } void diff --git a/js/src/jsstr.cpp b/js/src/jsstr.cpp index e9440dd82c22..c59efa86d02a 100644 --- a/js/src/jsstr.cpp +++ b/js/src/jsstr.cpp @@ -145,7 +145,7 @@ js_ConcatStrings(JSContext *cx, JSString *left, JSString *right) if (!left->isMutable()) { /* We must copy if left does not own a buffer to realloc. */ - s = (jschar *) JS_malloc(cx, (ln + rn + 1) * sizeof(jschar)); + s = (jschar *) cx->malloc((ln + rn + 1) * sizeof(jschar)); if (!s) return NULL; js_strncpy(s, ls, ln); @@ -153,7 +153,7 @@ js_ConcatStrings(JSContext *cx, JSString *left, JSString *right) } else { /* We can realloc left's space and make it depend on our result. */ JS_ASSERT(left->isFlat()); - s = (jschar *) JS_realloc(cx, ls, (ln + rn + 1) * sizeof(jschar)); + s = (jschar *) cx->realloc(ls, (ln + rn + 1) * sizeof(jschar)); if (!s) return NULL; @@ -173,9 +173,9 @@ js_ConcatStrings(JSContext *cx, JSString *left, JSString *right) if (!str) { /* Out of memory: clean up any space we (re-)allocated. 
*/ if (!ldep) { - JS_free(cx, s); + cx->free(s); } else { - s = (jschar *) JS_realloc(cx, ls, (ln + 1) * sizeof(jschar)); + s = (jschar *) cx->realloc(ls, (ln + 1) * sizeof(jschar)); if (s) left->mChars = s; } @@ -210,7 +210,7 @@ js_UndependString(JSContext *cx, JSString *str) if (str->isDependent()) { n = str->dependentLength(); size = (n + 1) * sizeof(jschar); - s = (jschar *) JS_malloc(cx, size); + s = (jschar *) cx->malloc(size); if (!s) return NULL; @@ -402,7 +402,7 @@ js_str_escape(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval return JS_FALSE; } - newchars = (jschar *) JS_malloc(cx, (newlength + 1) * sizeof(jschar)); + newchars = (jschar *) cx->malloc((newlength + 1) * sizeof(jschar)); if (!newchars) return JS_FALSE; for (i = 0, ni = 0; i < length; i++) { @@ -430,7 +430,7 @@ js_str_escape(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval str = js_NewString(cx, newchars, newlength); if (!str) { - JS_free(cx, newchars); + cx->free(newchars); return JS_FALSE; } *rval = STRING_TO_JSVAL(str); @@ -464,7 +464,7 @@ str_unescape(JSContext *cx, uintN argc, jsval *vp) str->getCharsAndLength(chars, length); /* Don't bother allocating less space for the new string. */ - newchars = (jschar *) JS_malloc(cx, (length + 1) * sizeof(jschar)); + newchars = (jschar *) cx->malloc((length + 1) * sizeof(jschar)); if (!newchars) return JS_FALSE; ni = i = 0; @@ -493,7 +493,7 @@ str_unescape(JSContext *cx, uintN argc, jsval *vp) str = js_NewString(cx, newchars, ni); if (!str) { - JS_free(cx, newchars); + cx->free(newchars); return JS_FALSE; } *vp = STRING_TO_JSVAL(str); @@ -695,7 +695,7 @@ str_toSource(JSContext *cx, uintN argc, jsval *vp) j = JS_snprintf(buf, sizeof buf, "(new %s(", js_StringClass.name); str->getCharsAndLength(s, k); n = j + k + 2; - t = (jschar *) JS_malloc(cx, (n + 1) * sizeof(jschar)); + t = (jschar *) cx->malloc((n + 1) * sizeof(jschar)); if (!t) return JS_FALSE; for (i = 0; i < j; i++) @@ -707,7 +707,7 @@ str_toSource(JSContext *cx, uintN argc, jsval *vp) t[i] = 0; str = js_NewString(cx, t, n); if (!str) { - JS_free(cx, t); + cx->free(t); return JS_FALSE; } *vp = STRING_TO_JSVAL(str); @@ -799,7 +799,7 @@ js_toLowerCase(JSContext *cx, JSString *str) jschar *news; str->getCharsAndLength(s, n); - news = (jschar *) JS_malloc(cx, (n + 1) * sizeof(jschar)); + news = (jschar *) cx->malloc((n + 1) * sizeof(jschar)); if (!news) return NULL; for (i = 0; i < n; i++) @@ -807,7 +807,7 @@ js_toLowerCase(JSContext *cx, JSString *str) news[n] = 0; str = js_NewString(cx, news, n); if (!str) { - JS_free(cx, news); + cx->free(news); return NULL; } return str; @@ -850,7 +850,7 @@ js_toUpperCase(JSContext *cx, JSString *str) jschar *news; str->getCharsAndLength(s, n); - news = (jschar *) JS_malloc(cx, (n + 1) * sizeof(jschar)); + news = (jschar *) cx->malloc((n + 1) * sizeof(jschar)); if (!news) return NULL; for (i = 0; i < n; i++) @@ -858,7 +858,7 @@ js_toUpperCase(JSContext *cx, JSString *str) news[n] = 0; str = js_NewString(cx, news, n); if (!str) { - JS_free(cx, news); + cx->free(news); return NULL; } return str; @@ -1659,7 +1659,7 @@ find_replen(JSContext *cx, ReplaceData *rdata, size_t *sizep) lambda_out: js_FreeStack(cx, mark); if (freeMoreParens) - JS_free(cx, cx->regExpStatics.moreParens); + cx->free(cx->regExpStatics.moreParens); cx->regExpStatics = save; return ok; } @@ -1716,7 +1716,7 @@ replace_destroy(JSContext *cx, GlobData *data) ReplaceData *rdata; rdata = (ReplaceData *)data; - JS_free(cx, rdata->chars); + cx->free(rdata->chars); rdata->chars = NULL; } 
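/*
 * Throughout this series the conversion is uniform: JS_malloc(cx, n),
 * JS_realloc(cx, p, n) and JS_free(cx, p) become cx->malloc(n),
 * cx->realloc(p, n) and cx->free(p), while bare malloc/calloc/
 * realloc/free become the js_malloc/js_calloc/js_realloc/js_free
 * wrappers. A minimal sketch of how the context members could be
 * shaped (assumed shapes; the real definitions live outside these
 * hunks, and deallocatorTask is a hypothetical member name):
 *
 *   void *JSContext::malloc(size_t nbytes) {
 *       void *p = js_malloc(nbytes);
 *       if (!p)
 *           JS_ReportOutOfMemory(this);  // keep JS_malloc's OOM report
 *       return p;
 *   }
 *
 *   void JSContext::free(void *p) {
 *       if (!p)
 *           return;                      // JS_free tolerated null too
 *   #ifdef JS_THREADSAFE
 *       if (deallocatorTask) {           // batch for the background thread
 *           deallocatorTask->add(p);
 *           return;
 *       }
 *   #endif
 *       js_free(p);
 *   }
 */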
@@ -1741,9 +1741,9 @@ replace_glob(JSContext *cx, jsint count, GlobData *data) growth = leftlen + replen; chars = (jschar *) (rdata->chars - ? JS_realloc(cx, rdata->chars, (rdata->length + growth + 1) + ? cx->realloc(rdata->chars, (rdata->length + growth + 1) * sizeof(jschar)) - : JS_malloc(cx, (growth + 1) * sizeof(jschar))); + : cx->malloc((growth + 1) * sizeof(jschar))); if (!chars) return JS_FALSE; rdata->chars = chars; @@ -1826,7 +1826,7 @@ js_StringReplaceHelper(JSContext *cx, uintN argc, JSObject *lambda, if (!ok) goto out; length += leftlen; - chars = (jschar *) JS_malloc(cx, (length + 1) * sizeof(jschar)); + chars = (jschar *) cx->malloc((length + 1) * sizeof(jschar)); if (!chars) { ok = JS_FALSE; goto out; @@ -1840,9 +1840,9 @@ js_StringReplaceHelper(JSContext *cx, uintN argc, JSObject *lambda, rightlen = cx->regExpStatics.rightContext.length; length = rdata.length + rightlen; chars = (jschar *) - JS_realloc(cx, rdata.chars, (length + 1) * sizeof(jschar)); + cx->realloc(rdata.chars, (length + 1) * sizeof(jschar)); if (!chars) { - JS_free(cx, rdata.chars); + cx->free(rdata.chars); ok = JS_FALSE; goto out; } @@ -1852,7 +1852,7 @@ js_StringReplaceHelper(JSContext *cx, uintN argc, JSObject *lambda, str = js_NewString(cx, chars, length); if (!str) { - JS_free(cx, chars); + cx->free(chars); ok = JS_FALSE; goto out; } @@ -2266,7 +2266,7 @@ tagify(JSContext *cx, const char *begin, JSString *param, const char *end, return JS_FALSE; } - tagbuf = (jschar *) JS_malloc(cx, (taglen + 1) * sizeof(jschar)); + tagbuf = (jschar *) cx->malloc((taglen + 1) * sizeof(jschar)); if (!tagbuf) return JS_FALSE; @@ -2294,7 +2294,7 @@ tagify(JSContext *cx, const char *begin, JSString *param, const char *end, str = js_NewString(cx, tagbuf, taglen); if (!str) { - free((char *)tagbuf); + js_free((char *)tagbuf); return JS_FALSE; } *vp = STRING_TO_JSVAL(str); @@ -2531,13 +2531,13 @@ str_fromCharCode(JSContext *cx, uintN argc, jsval *vp) *vp = STRING_TO_JSVAL(str); return JS_TRUE; } - chars = (jschar *) JS_malloc(cx, (argc + 1) * sizeof(jschar)); + chars = (jschar *) cx->malloc((argc + 1) * sizeof(jschar)); if (!chars) return JS_FALSE; for (i = 0; i < argc; i++) { code = js_ValueToUint16(cx, &argv[i]); if (JSVAL_IS_NULL(argv[i])) { - JS_free(cx, chars); + cx->free(chars); return JS_FALSE; } chars[i] = (jschar)code; @@ -2545,7 +2545,7 @@ str_fromCharCode(JSContext *cx, uintN argc, jsval *vp) chars[i] = 0; str = js_NewString(cx, chars, argc); if (!str) { - JS_free(cx, chars); + cx->free(chars); return JS_FALSE; } *vp = STRING_TO_JSVAL(str); @@ -2621,9 +2621,8 @@ js_GetUnitStringForChar(JSContext *cx, jschar c) JS_ASSERT(c < UNIT_STRING_LIMIT); rt = cx->runtime; if (!rt->unitStrings) { - sp = (JSString **) calloc(UNIT_STRING_LIMIT * sizeof(JSString *) + - UNIT_STRING_LIMIT * 2 * sizeof(jschar), - 1); + sp = (JSString **) js_calloc(UNIT_STRING_LIMIT * sizeof(JSString *) + + UNIT_STRING_LIMIT * 2 * sizeof(jschar)); if (!sp) { JS_ReportOutOfMemory(cx); return NULL; @@ -2639,7 +2638,7 @@ js_GetUnitStringForChar(JSContext *cx, jschar c) JS_UNLOCK_GC(rt); } else { JS_UNLOCK_GC(rt); - free(sp); + js_free(sp); } } if (!rt->unitStrings[c]) { @@ -2676,7 +2675,7 @@ js_GetUnitString(JSContext *cx, JSString *str, size_t index) void js_FinishUnitStrings(JSRuntime *rt) { - free(rt->unitStrings); + js_free(rt->unitStrings); rt->unitStrings = NULL; } @@ -2832,14 +2831,14 @@ js_NewStringCopyN(JSContext *cx, const jschar *s, size_t n) jschar *news; JSString *str; - news = (jschar *) JS_malloc(cx, (n + 1) * sizeof(jschar)); + news = 
(jschar *) cx->malloc((n + 1) * sizeof(jschar)); if (!news) return NULL; js_strncpy(news, s, n); news[n] = 0; str = js_NewString(cx, news, n); if (!str) - JS_free(cx, news); + cx->free(news); return str; } @@ -2852,13 +2851,13 @@ js_NewStringCopyZ(JSContext *cx, const jschar *s) n = js_strlen(s); m = (n + 1) * sizeof(jschar); - news = (jschar *) JS_malloc(cx, m); + news = (jschar *) cx->malloc(m); if (!news) return NULL; memcpy(news, s, m); str = js_NewString(cx, news, n); if (!str) - JS_free(cx, news); + cx->free(news); return str; } @@ -2876,7 +2875,7 @@ js_PurgeDeflatedStringCache(JSRuntime *rt, JSString *str) #ifdef DEBUG rt->deflatedStringCacheBytes -= str->length(); #endif - free(he->value); + js_free(he->value); JS_HashTableRawRemove(rt->deflatedStringCache, hep, he); } JS_RELEASE_LOCK(rt->deflatedStringCacheLock); @@ -3121,7 +3120,7 @@ js_InflateString(JSContext *cx, const char *bytes, size_t *lengthp) if (js_CStringsAreUTF8) { if (!js_InflateStringToBuffer(cx, bytes, nbytes, NULL, &nchars)) goto bad; - chars = (jschar *) JS_malloc(cx, (nchars + 1) * sizeof (jschar)); + chars = (jschar *) cx->malloc((nchars + 1) * sizeof (jschar)); if (!chars) goto bad; #ifdef DEBUG @@ -3131,7 +3130,7 @@ js_InflateString(JSContext *cx, const char *bytes, size_t *lengthp) JS_ASSERT(ok); } else { nchars = nbytes; - chars = (jschar *) JS_malloc(cx, (nchars + 1) * sizeof(jschar)); + chars = (jschar *) cx->malloc((nchars + 1) * sizeof(jschar)); if (!chars) goto bad; for (i = 0; i < nchars; i++) @@ -3166,7 +3165,7 @@ js_DeflateString(JSContext *cx, const jschar *chars, size_t nchars) nbytes = js_GetDeflatedStringLength(cx, chars, nchars); if (nbytes == (size_t) -1) return NULL; - bytes = (char *) (cx ? JS_malloc(cx, nbytes + 1) : malloc(nbytes + 1)); + bytes = (char *) (cx ? cx->malloc(nbytes + 1) : js_malloc(nbytes + 1)); if (!bytes) return NULL; #ifdef DEBUG @@ -3176,7 +3175,7 @@ js_DeflateString(JSContext *cx, const jschar *chars, size_t nchars) JS_ASSERT(ok); } else { nbytes = nchars; - bytes = (char *) (cx ? JS_malloc(cx, nbytes + 1) : malloc(nbytes + 1)); + bytes = (char *) (cx ? cx->malloc(nbytes + 1) : js_malloc(nbytes + 1)); if (!bytes) return NULL; for (i = 0; i < nbytes; i++) @@ -3491,9 +3490,9 @@ js_GetStringBytes(JSContext *cx, JSString *str) str->setDeflated(); } else { if (cx) - JS_free(cx, bytes); + cx->free(bytes); else - free(bytes); + js_free(bytes); bytes = NULL; } } @@ -4836,8 +4835,8 @@ AddCharsToURI(JSContext *cx, JSCharBuffer *buf, if (!buf->chars || JS_HOWMANY(total, URI_CHUNK) > JS_HOWMANY(buf->length + 1, URI_CHUNK)) { total = JS_ROUNDUP(total, URI_CHUNK); - newchars = (jschar *) JS_realloc(cx, buf->chars, - total * sizeof(jschar)); + newchars = (jschar *) cx->realloc(buf->chars, + total * sizeof(jschar)); if (!newchars) return JS_FALSE; buf->chars = newchars; @@ -4860,7 +4859,7 @@ TransferBufferToString(JSContext *cx, JSCharBuffer *cb, jsval *rval) * don't worry about that case here. 
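*
* The realloc below only ever shrinks the buffer, so its failure is
* harmless: chars simply falls back to the original, larger cb->chars
* allocation and the string is built over that.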
*/ n = cb->length; - chars = (jschar *) JS_realloc(cx, cb->chars, (n + 1) * sizeof(jschar)); + chars = (jschar *) cx->realloc(cb->chars, (n + 1) * sizeof(jschar)); if (!chars) chars = cb->chars; str = js_NewString(cx, chars, n); @@ -4953,7 +4952,7 @@ Encode(JSContext *cx, JSString *str, const jschar *unescapedSet, return JS_TRUE; bad: - JS_free(cx, cb.chars); + cx->free(cb.chars); return JS_FALSE; } @@ -5048,7 +5047,7 @@ Decode(JSContext *cx, JSString *str, const jschar *reservedSet, jsval *rval) /* FALL THROUGH */ bad: - JS_free(cx, cb.chars); + cx->free(cb.chars); return JS_FALSE; } diff --git a/js/src/jstask.cpp b/js/src/jstask.cpp new file mode 100644 index 000000000000..2814b953292d --- /dev/null +++ b/js/src/jstask.cpp @@ -0,0 +1,126 @@ +/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- + * vim: set ts=4 sw=4 et tw=99 ft=cpp: + * + * ***** BEGIN LICENSE BLOCK ***** + * Version: MPL 1.1/GPL 2.0/LGPL 2.1 + * + * The contents of this file are subject to the Mozilla Public License Version + * 1.1 (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * http://www.mozilla.org/MPL/ + * + * Software distributed under the License is distributed on an "AS IS" basis, + * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License + * for the specific language governing rights and limitations under the + * License. + * + * The Original Code is Mozilla SpiderMonkey JavaScript 1.9.1 code, released + * June 30, 2009. + * + * The Initial Developer of the Original Code is + * Andreas Gal + * + * Contributor(s): + * + * Alternatively, the contents of this file may be used under the terms of + * either of the GNU General Public License Version 2 or later (the "GPL"), + * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"), + * in which case the provisions of the GPL or the LGPL are applicable instead + * of those above. If you wish to allow use of your version of this file only + * under the terms of either the GPL or the LGPL, and not to allow others to + * use your version of this file under the terms of the MPL, indicate your + * decision by deleting the provisions above and replace them with the notice + * and other provisions required by the GPL or the LGPL. If you do not delete + * the provisions above, a recipient may use your version of this file under + * the terms of any one of the MPL, the GPL or the LGPL. + * + * ***** END LICENSE BLOCK ***** */ + +#include "jstask.h" + +#ifdef JS_THREADSAFE +static void start(void* arg) { + ((JSBackgroundThread*)arg)->work(); +} + +JSBackgroundThread::JSBackgroundThread() + : thread(NULL), stack(NULL), lock(NULL), wakeup(NULL), shutdown(false) +{ +} + +JSBackgroundThread::~JSBackgroundThread() +{ + if (wakeup) + PR_DestroyCondVar(wakeup); + if (lock) + PR_DestroyLock(lock); + /* PR_DestroyThread is not necessary. 
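+     * cancel() joins the thread via PR_JoinThread, and NSPR reclaims a
+     * joinable thread's resources at join time, so nothing further is
+     * required here.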
*/ +} + +bool +JSBackgroundThread::init() +{ + if (!(lock = PR_NewLock())) + return false; + if (!(wakeup = PR_NewCondVar(lock))) + return false; + thread = PR_CreateThread(PR_USER_THREAD, start, this, PR_PRIORITY_LOW, + PR_LOCAL_THREAD, PR_JOINABLE_THREAD, 0); + return !!thread; +} + +void +JSBackgroundThread::cancel() +{ + PR_Lock(lock); + if (shutdown) { + PR_Unlock(lock); + return; + } + shutdown = true; + PR_NotifyCondVar(wakeup); + PR_Unlock(lock); + PR_JoinThread(thread); +} + +void +JSBackgroundThread::work() +{ + PR_Lock(lock); + do { + PR_WaitCondVar(wakeup, PR_INTERVAL_NO_TIMEOUT); + JSBackgroundTask* t; + while ((t = stack) != NULL) { + stack = t->next; + PR_Unlock(lock); + t->run(); + delete t; + PR_Lock(lock); + } + } while (!shutdown); + PR_Unlock(lock); +} + +bool +JSBackgroundThread::busy() +{ + return !!stack; // we tolerate some racing here +} + +void +JSBackgroundThread::schedule(JSBackgroundTask* task) +{ + PR_Lock(lock); + if (shutdown) { + PR_Unlock(lock); + task->run(); + delete task; + return; + } + task->next = stack; + stack = task; + PR_NotifyCondVar(wakeup); + PR_Unlock(lock); +} + +#endif diff --git a/js/src/jstask.h b/js/src/jstask.h new file mode 100644 index 000000000000..30bc009a2c2a --- /dev/null +++ b/js/src/jstask.h @@ -0,0 +1,84 @@ +/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- + * vim: set ts=4 sw=4 et tw=99 ft=cpp: + * + * ***** BEGIN LICENSE BLOCK ***** + * Version: MPL 1.1/GPL 2.0/LGPL 2.1 + * + * The contents of this file are subject to the Mozilla Public License Version + * 1.1 (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * http://www.mozilla.org/MPL/ + * + * Software distributed under the License is distributed on an "AS IS" basis, + * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License + * for the specific language governing rights and limitations under the + * License. + * + * The Original Code is Mozilla SpiderMonkey JavaScript 1.9 code, released + * June 30, 2009. + * + * The Initial Developer of the Original Code is + * Andreas Gal + * + * Contributor(s): + * + * Alternatively, the contents of this file may be used under the terms of + * either of the GNU General Public License Version 2 or later (the "GPL"), + * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"), + * in which case the provisions of the GPL or the LGPL are applicable instead + * of those above. If you wish to allow use of your version of this file only + * under the terms of either the GPL or the LGPL, and not to allow others to + * use your version of this file under the terms of the MPL, indicate your + * decision by deleting the provisions above and replace them with the notice + * and other provisions required by the GPL or the LGPL. If you do not delete + * the provisions above, a recipient may use your version of this file under + * the terms of any one of the MPL, the GPL or the LGPL. 
+ * + * ***** END LICENSE BLOCK ***** */ + +#ifndef jstask_h___ +#define jstask_h___ + +class JSBackgroundTask { + friend class JSBackgroundThread; + JSBackgroundTask* next; + public: + virtual void run() = 0; +}; + +#ifdef JS_THREADSAFE + +#include "prthread.h" +#include "prlock.h" +#include "prcvar.h" + +class JSBackgroundThread { + PRThread* thread; + JSBackgroundTask* stack; + PRLock* lock; + PRCondVar* wakeup; + bool shutdown; + + public: + JSBackgroundThread(); + ~JSBackgroundThread(); + + bool init(); + void cancel(); + void work(); + bool busy(); + void schedule(JSBackgroundTask* task); +}; + +#else + +class JSBackgroundThread { + public: + void schedule(JSBackgroundTask* task) { + task->run(); + } +}; + +#endif + +#endif /* jstask_h___ */ diff --git a/js/src/jstracer.cpp b/js/src/jstracer.cpp index a2c45c0b627d..90c0fd2bd082 100644 --- a/js/src/jstracer.cpp +++ b/js/src/jstracer.cpp @@ -3616,11 +3616,11 @@ TraceRecorder::compile(JSTraceMonitor* tm) /* :TODO: windows support */ #if defined DEBUG && !defined WIN32 const char* filename = cx->fp->script->filename; - char* label = (char*)malloc((filename ? strlen(filename) : 7) + 16); + char* label = (char*)js_malloc((filename ? strlen(filename) : 7) + 16); sprintf(label, "%s:%u", filename ? filename : "", js_FramePCToLineNumber(cx, cx->fp)); fragmento->labels->add(fragment, sizeof(Fragment), 0, label); - free(label); + js_free(label); #endif AUDIT(traceCompleted); } @@ -12445,7 +12445,7 @@ js_StartTraceVis(JSContext *cx, JSObject *obj, if (!filename) goto error; ok = JS_StartTraceVis(filename); - JS_free(cx, filename); + cx->free(filename); } else { ok = JS_StartTraceVis(); } diff --git a/js/src/jsutil.cpp b/js/src/jsutil.cpp index e3a62878e3d8..43d4eb37ff44 100644 --- a/js/src/jsutil.cpp +++ b/js/src/jsutil.cpp @@ -297,7 +297,7 @@ CallTree(void **bp) return NULL; /* Create a new callsite record. 
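     * (js_malloc and friends are the thin wrappers defined in jsutil.h;
     * unlike cx->malloc they need no JSContext, which suits this
     * allocation. The wrappers also pad requests below sizeof(void*),
     * presumably so that a block handed off for asynchronous freeing can
     * hold a free-list link in place; see the "for asyncFree" comments.)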
*/ - site = (JSCallsite *) malloc(sizeof(JSCallsite)); + site = (JSCallsite *) js_malloc(sizeof(JSCallsite)); if (!site) return NULL; diff --git a/js/src/jsutil.h b/js/src/jsutil.h index 18264ccca459..6e0895b2fb9d 100644 --- a/js/src/jsutil.h +++ b/js/src/jsutil.h @@ -44,6 +44,8 @@ #ifndef jsutil_h___ #define jsutil_h___ +#include + JS_BEGIN_EXTERN_C /* @@ -176,9 +178,30 @@ JS_Backtrace(int skip); extern JS_FRIEND_API(void) JS_DumpBacktrace(JSCallsite *trace); - #endif +static JS_INLINE void* js_malloc(size_t bytes) { + if (bytes < sizeof(void*)) /* for asyncFree */ + bytes = sizeof(void*); + return malloc(bytes); +} + +static JS_INLINE void* js_calloc(size_t bytes) { + if (bytes < sizeof(void*)) /* for asyncFree */ + bytes = sizeof(void*); + return calloc(bytes, 1); +} + +static JS_INLINE void* js_realloc(void* p, size_t bytes) { + if (bytes < sizeof(void*)) /* for asyncFree */ + bytes = sizeof(void*); + return realloc(p, bytes); +} + +static JS_INLINE void js_free(void* p) { + free(p); +} + JS_END_EXTERN_C #endif /* jsutil_h___ */ diff --git a/js/src/jsxdrapi.cpp b/js/src/jsxdrapi.cpp index b2d3efc1bf83..2eb9419d74e4 100644 --- a/js/src/jsxdrapi.cpp +++ b/js/src/jsxdrapi.cpp @@ -90,7 +90,7 @@ typedef struct JSXDRMemState { if (MEM_LIMIT(xdr) && \ MEM_COUNT(xdr) + bytes > MEM_LIMIT(xdr)) { \ uint32 limit_ = JS_ROUNDUP(MEM_COUNT(xdr) + bytes, MEM_BLOCK);\ - void *data_ = JS_realloc((xdr)->cx, MEM_BASE(xdr), limit_); \ + void *data_ = (xdr)->cx->realloc(MEM_BASE(xdr), limit_); \ if (!data_) \ return 0; \ MEM_BASE(xdr) = (char *) data_; \ @@ -216,7 +216,7 @@ mem_tell(JSXDRState *xdr) static void mem_finalize(JSXDRState *xdr) { - JS_free(xdr->cx, MEM_BASE(xdr)); + xdr->cx->free(MEM_BASE(xdr)); } static JSXDROps xdrmem_ops = { @@ -239,13 +239,13 @@ JS_XDRInitBase(JSXDRState *xdr, JSXDRMode mode, JSContext *cx) JS_PUBLIC_API(JSXDRState *) JS_XDRNewMem(JSContext *cx, JSXDRMode mode) { - JSXDRState *xdr = (JSXDRState *) JS_malloc(cx, sizeof(JSXDRMemState)); + JSXDRState *xdr = (JSXDRState *) cx->malloc(sizeof(JSXDRMemState)); if (!xdr) return NULL; JS_XDRInitBase(xdr, mode, cx); if (mode == JSXDR_ENCODE) { - if (!(MEM_BASE(xdr) = (char *) JS_malloc(cx, MEM_BLOCK))) { - JS_free(cx, xdr); + if (!(MEM_BASE(xdr) = (char *) cx->malloc(MEM_BLOCK))) { + cx->free(xdr); return NULL; } } else { @@ -299,11 +299,11 @@ JS_XDRDestroy(JSXDRState *xdr) JSContext *cx = xdr->cx; xdr->ops->finalize(xdr); if (xdr->registry) { - JS_free(cx, xdr->registry); + cx->free(xdr->registry); if (xdr->reghash) JS_DHashTableDestroy((JSDHashTable *) xdr->reghash); } - JS_free(cx, xdr); + cx->free(xdr); } JS_PUBLIC_API(JSBool) @@ -381,18 +381,18 @@ JS_XDRCString(JSXDRState *xdr, char **sp) len = strlen(*sp); JS_XDRUint32(xdr, &len); if (xdr->mode == JSXDR_DECODE) { - if (!(*sp = (char *) JS_malloc(xdr->cx, len + 1))) + if (!(*sp = (char *) xdr->cx->malloc(len + 1))) return JS_FALSE; } if (!JS_XDRBytes(xdr, *sp, len)) { if (xdr->mode == JSXDR_DECODE) - JS_free(xdr->cx, *sp); + xdr->cx->free(*sp); return JS_FALSE; } if (xdr->mode == JSXDR_DECODE) { (*sp)[len] = '\0'; } else if (xdr->mode == JSXDR_FREE) { - JS_free(xdr->cx, *sp); + xdr->cx->free(*sp); *sp = NULL; } return JS_TRUE; @@ -452,7 +452,7 @@ JS_XDRString(JSXDRState *xdr, JSString **strp) return JS_FALSE; if (xdr->mode == JSXDR_DECODE) { - chars = (jschar *) JS_malloc(xdr->cx, (nchars + 1) * sizeof(jschar)); + chars = (jschar *) xdr->cx->malloc((nchars + 1) * sizeof(jschar)); if (!chars) return JS_FALSE; } else { @@ -471,7 +471,7 @@ JS_XDRString(JSXDRState *xdr, JSString 
**strp) bad: if (xdr->mode == JSXDR_DECODE) - JS_free(xdr->cx, chars); + xdr->cx->free(chars); return JS_FALSE; } @@ -662,7 +662,7 @@ js_XDRStringAtom(JSXDRState *xdr, JSAtom **atomp) * This is very uncommon. Don't use the tempPool arena for this as * most allocations here will be bigger than tempPool's arenasize. */ - chars = (jschar *) JS_malloc(cx, nchars * sizeof(jschar)); + chars = (jschar *) cx->malloc(nchars * sizeof(jschar)); if (!chars) return JS_FALSE; } @@ -670,7 +670,7 @@ js_XDRStringAtom(JSXDRState *xdr, JSAtom **atomp) if (XDRChars(xdr, chars, nchars)) atom = js_AtomizeChars(cx, chars, nchars, 0); if (chars != stackChars) - JS_free(cx, chars); + cx->free(chars); if (!atom) return JS_FALSE; @@ -709,7 +709,7 @@ JS_XDRRegisterClass(JSXDRState *xdr, JSClass *clasp, uint32 *idp) if (numclasses == maxclasses) { maxclasses = (maxclasses == 0) ? CLASS_REGISTRY_MIN : maxclasses << 1; registry = (JSClass **) - JS_realloc(xdr->cx, xdr->registry, maxclasses * sizeof(JSClass *)); + xdr->cx->realloc(xdr->registry, maxclasses * sizeof(JSClass *)); if (!registry) return JS_FALSE; xdr->registry = registry; diff --git a/js/src/jsxml.cpp b/js/src/jsxml.cpp index 46cdb5e66b6a..1e8d73f9a892 100644 --- a/js/src/jsxml.cpp +++ b/js/src/jsxml.cpp @@ -460,7 +460,7 @@ qname_toString(JSContext *cx, uintN argc, jsval *vp) if (str && clasp == &js_AttributeNameClass) { length = str->length(); - chars = (jschar *) JS_malloc(cx, (length + 2) * sizeof(jschar)); + chars = (jschar *) cx->malloc((length + 2) * sizeof(jschar)); if (!chars) return JS_FALSE; *chars = '@'; @@ -468,7 +468,7 @@ qname_toString(JSContext *cx, uintN argc, jsval *vp) chars[++length] = 0; str = js_NewString(cx, chars, length); if (!str) { - JS_free(cx, chars); + cx->free(chars); return JS_FALSE; } } @@ -933,7 +933,7 @@ XMLArraySetCapacity(JSContext *cx, JSXMLArray *array, uint32 capacity) if (capacity == 0) { /* We could let realloc(p, 0) free this, but purify gets confused. 
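     * Note that this function may be called without a context (the growth
     * path below checks "if (cx)" before reporting OOM), which is why the
     * realloc there stays on the bare js_realloc wrapper.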
*/ if (array->vector) - free(array->vector); + cx->free(array->vector); vector = NULL; } else { if ( @@ -941,7 +941,7 @@ XMLArraySetCapacity(JSContext *cx, JSXMLArray *array, uint32 capacity) (size_t)capacity > ~(size_t)0 / sizeof(void *) || #endif !(vector = (void **) - realloc(array->vector, capacity * sizeof(void *)))) { + js_realloc(array->vector, capacity * sizeof(void *)))) { if (cx) JS_ReportOutOfMemory(cx); return JS_FALSE; @@ -975,7 +975,7 @@ XMLArrayFinish(JSContext *cx, JSXMLArray *array) { JSXMLArrayCursor *cursor; - JS_free(cx, array->vector); + cx->free(array->vector); while ((cursor = array->cursors) != NULL) XMLArrayCursorFinish(cursor); @@ -1039,7 +1039,7 @@ XMLArrayAddMember(JSContext *cx, JSXMLArray *array, uint32 index, void *elt) (size_t)capacity > ~(size_t)0 / sizeof(void *) || #endif !(vector = (void **) - realloc(array->vector, capacity * sizeof(void *)))) { + js_realloc(array->vector, capacity * sizeof(void *)))) { JS_ReportOutOfMemory(cx); return JS_FALSE; } @@ -1120,10 +1120,10 @@ XMLArrayTruncate(JSContext *cx, JSXMLArray *array, uint32 length) if (length == 0) { if (array->vector) - free(array->vector); + cx->free(array->vector); vector = NULL; } else { - vector = (void **) realloc(array->vector, length * sizeof(void *)); + vector = (void **) js_realloc(array->vector, length * sizeof(void *)); if (!vector) return; } @@ -1854,7 +1854,7 @@ ParseXMLSource(JSContext *cx, JSString *src) length = constrlen(prefix) + urilen + constrlen(middle) + srclen + constrlen(suffix); - chars = (jschar *) JS_malloc(cx, (length + 1) * sizeof(jschar)); + chars = (jschar *) cx->malloc((length + 1) * sizeof(jschar)); if (!chars) return NULL; @@ -1905,7 +1905,7 @@ ParseXMLSource(JSContext *cx, JSString *src) } } - JS_free(cx, chars); + cx->free(chars); return xml; #undef constrlen @@ -2138,7 +2138,7 @@ MakeXMLSpecialString(JSContext *cx, JSStringBuffer *sb, prefixlength + length + ((length2 != 0) ? 
1 + length2 : 0) + suffixlength; bp = base = (jschar *) - JS_realloc(cx, sb->base, (newlength + 1) * sizeof(jschar)); + cx->realloc(sb->base, (newlength + 1) * sizeof(jschar)); if (!bp) { js_FinishStringBuffer(sb); return NULL; @@ -2159,7 +2159,7 @@ MakeXMLSpecialString(JSContext *cx, JSStringBuffer *sb, str = js_NewString(cx, base, newlength); if (!str) - free(base); + cx->free(base); return str; } @@ -2210,7 +2210,7 @@ AppendAttributeValue(JSContext *cx, JSStringBuffer *sb, JSString *valstr) valstr = js_EscapeAttributeValue(cx, valstr, JS_TRUE); if (!valstr) { if (STRING_BUFFER_OK(sb)) { - free(sb->base); + cx->free(sb->base); sb->base = STRING_BUFFER_ERROR_BASE; } return; @@ -2482,7 +2482,7 @@ GeneratePrefix(JSContext *cx, JSString *uri, JSXMLArray *decls) if (STARTS_WITH_XML(cp, length) || !IsXMLName(cp, length)) { newlength = length + 2 + (size_t) log10((double) decls->length); bp = (jschar *) - JS_malloc(cx, (newlength + 1) * sizeof(jschar)); + cx->malloc((newlength + 1) * sizeof(jschar)); if (!bp) return NULL; @@ -2507,7 +2507,7 @@ GeneratePrefix(JSContext *cx, JSString *uri, JSXMLArray *decls) if (bp == cp) { newlength = length + 2 + (size_t) log10((double) n); bp = (jschar *) - JS_malloc(cx, (newlength + 1) * sizeof(jschar)); + cx->malloc((newlength + 1) * sizeof(jschar)); if (!bp) return NULL; js_strncpy(bp, cp, length); @@ -2534,7 +2534,7 @@ GeneratePrefix(JSContext *cx, JSString *uri, JSXMLArray *decls) } else { prefix = js_NewString(cx, bp, newlength); if (!prefix) - JS_free(cx, bp); + cx->free(bp); } return prefix; } @@ -5132,7 +5132,7 @@ xml_enumerate(JSContext *cx, JSObject *obj, JSIterateOp enum_op, if (length == 0) { cursor = NULL; } else { - cursor = (JSXMLArrayCursor *) JS_malloc(cx, sizeof *cursor); + cursor = (JSXMLArrayCursor *) cx->malloc(sizeof *cursor); if (!cursor) return JS_FALSE; XMLArrayCursorInit(cursor, &xml->xml_kids); @@ -5155,7 +5155,7 @@ xml_enumerate(JSContext *cx, JSObject *obj, JSIterateOp enum_op, cursor = (JSXMLArrayCursor *) JSVAL_TO_PRIVATE(*statep); if (cursor) { XMLArrayCursorFinish(cursor); - JS_free(cx, cursor); + cx->free(cursor); } *statep = JSVAL_NULL; break; @@ -5266,7 +5266,7 @@ js_EnumerateXMLValues(JSContext *cx, JSObject *obj, JSIterateOp enum_op, if (length == 0) { cursor = NULL; } else { - cursor = (JSXMLArrayCursor *) JS_malloc(cx, sizeof *cursor); + cursor = (JSXMLArrayCursor *) cx->malloc(sizeof *cursor); if (!cursor) return JS_FALSE; XMLArrayCursorInit(cursor, &xml->xml_kids); @@ -5301,7 +5301,7 @@ js_EnumerateXMLValues(JSContext *cx, JSObject *obj, JSIterateOp enum_op, if (cursor) { destroy: XMLArrayCursorFinish(cursor); - JS_free(cx, cursor); + cx->free(cursor); } *statep = JSVAL_NULL; break; @@ -7801,7 +7801,7 @@ js_AddAttributePart(JSContext *cx, JSBool isName, JSString *str, JSString *str2) str2->getCharsAndLength(chars2, len2); newlen = (isName) ? 
len + 1 + len2 : len + 2 + len2 + 1; - chars = (jschar *) JS_realloc(cx, chars, (newlen+1) * sizeof(jschar)); + chars = (jschar *) cx->realloc(chars, (newlen+1) * sizeof(jschar)); if (!chars) return NULL; @@ -8114,7 +8114,7 @@ xmlfilter_finalize(JSContext *cx, JSObject *obj) return; XMLArrayCursorFinish(&filter->cursor); - JS_free(cx, filter); + cx->free(filter); } JSClass js_XMLFilterClass = { @@ -8169,7 +8169,7 @@ js_StepXMLListFilter(JSContext *cx, JSBool initialized) if (!filterobj) return JS_FALSE; - filter = (JSXMLFilter *) JS_malloc(cx, sizeof *filter); + filter = (JSXMLFilter *) cx->malloc(sizeof *filter); if (!filter) return JS_FALSE; From 102d0ced5271f9d05e030a72be7ab48ce6491e12 Mon Sep 17 00:00:00 2001 From: Jason Orendorff Date: Tue, 28 Jul 2009 04:00:35 -0500 Subject: [PATCH 12/19] Bug 506786 - JSScope::trace method. r=brendan. --- js/src/jsapi.cpp | 4 ++-- js/src/jsdbgapi.cpp | 2 +- js/src/jsgc.cpp | 2 +- js/src/jsobj.cpp | 47 ++---------------------------------- js/src/jsscope.cpp | 2 +- js/src/jsscope.h | 58 ++++++++++++++++++++++++++++++++++++++------- 6 files changed, 57 insertions(+), 58 deletions(-) diff --git a/js/src/jsapi.cpp b/js/src/jsapi.cpp index 00e8a6ec37b3..dd3ab2e83557 100644 --- a/js/src/jsapi.cpp +++ b/js/src/jsapi.cpp @@ -4058,13 +4058,13 @@ prop_iter_trace(JSTracer *trc, JSObject *obj) /* Native case: just mark the next property to visit. */ sprop = (JSScopeProperty *) JSVAL_TO_PRIVATE(v); if (sprop) - TRACE_SCOPE_PROPERTY(trc, sprop); + sprop->trace(trc); } else { /* Non-native case: mark each id in the JSIdArray private. */ ida = (JSIdArray *) JSVAL_TO_PRIVATE(v); for (i = 0, n = ida->length; i < n; i++) { id = ida->vector[i]; - TRACE_ID(trc, id); + js_TraceId(trc, id); } } } diff --git a/js/src/jsdbgapi.cpp b/js/src/jsdbgapi.cpp index d9e9ed62a2fb..fd6af3ee7dc3 100644 --- a/js/src/jsdbgapi.cpp +++ b/js/src/jsdbgapi.cpp @@ -434,7 +434,7 @@ js_TraceWatchPoints(JSTracer *trc, JSObject *obj) &wp->links != &rt->watchPointList; wp = (JSWatchPoint *)wp->links.next) { if (wp->object == obj) { - TRACE_SCOPE_PROPERTY(trc, wp->sprop); + wp->sprop->trace(trc); if ((wp->sprop->attrs & JSPROP_SETTER) && wp->setter) { JS_CALL_OBJECT_TRACER(trc, js_CastAsObject(wp->setter), "wp->setter"); diff --git a/js/src/jsgc.cpp b/js/src/jsgc.cpp index 386a65c83e83..d4a31a249df7 100644 --- a/js/src/jsgc.cpp +++ b/js/src/jsgc.cpp @@ -3043,7 +3043,7 @@ js_TraceContext(JSTracer *trc, JSContext *acx) tvr->u.trace(trc, tvr); break; case JSTVU_SPROP: - TRACE_SCOPE_PROPERTY(trc, tvr->u.sprop); + tvr->u.sprop->trace(trc); break; case JSTVU_WEAK_ROOTS: TraceWeakRoots(trc, tvr->u.weakRoots); diff --git a/js/src/jsobj.cpp b/js/src/jsobj.cpp index ed62904eecdc..bf5f72b4e808 100644 --- a/js/src/jsobj.cpp +++ b/js/src/jsobj.cpp @@ -5085,7 +5085,7 @@ js_TraceNativeEnumerators(JSTracer *trc) cursor = ne->ids; end = cursor + ne->length; do { - TRACE_ID(trc, *cursor); + js_TraceId(trc, *cursor); } while (++cursor != end); } else if (doGC) { js_RemoveAsGCBytes(rt, NativeEnumeratorSize(ne->length)); @@ -5748,7 +5748,6 @@ js_TraceObject(JSTracer *trc, JSObject *obj) { JSContext *cx; JSScope *scope; - JSScopeProperty *sprop; JSClass *clasp; size_t nslots, i; jsval v; @@ -5772,49 +5771,7 @@ js_TraceObject(JSTracer *trc, JSObject *obj) MeterEntryCount(scope->entryCount); #endif - sprop = scope->lastProp; - uint8 regenFlag = cx->runtime->gcRegenShapesScopeFlag; - if (IS_GC_MARKING_TRACER(trc) && - cx->runtime->gcRegenShapes && - scope->hasRegenFlag(regenFlag)) { - /* - * Either scope has its own shape, 
which must be regenerated, or it - * must have the same shape as its lastProp. - */ - uint32 shape; - - if (sprop) { - if (!(sprop->flags & SPROP_FLAG_SHAPE_REGEN)) { - sprop->shape = js_RegenerateShapeForGC(cx); - sprop->flags |= SPROP_FLAG_SHAPE_REGEN; - } - shape = sprop->shape; - } - if (!sprop || scope->hasOwnShape()) { - shape = js_RegenerateShapeForGC(cx); - JS_ASSERT_IF(sprop, shape != sprop->shape); - } - scope->shape = shape; - scope->flags ^= JSScope::SHAPE_REGEN; - - /* Also regenerate the shapes of empty scopes, in case they are not shared. */ - for (JSScope *empty = scope->emptyScope; - empty && empty->hasRegenFlag(regenFlag); - empty = empty->emptyScope) { - empty->shape = js_RegenerateShapeForGC(cx); - empty->flags ^= JSScope::SHAPE_REGEN; - } - } - if (sprop) { - JS_ASSERT(scope->has(sprop)); - - /* Trace scope's property tree ancestor line. */ - do { - if (scope->hadMiddleDelete() && !scope->has(sprop)) - continue; - TRACE_SCOPE_PROPERTY(trc, sprop); - } while ((sprop = sprop->parent) != NULL); - } + scope->trace(trc); if (!JS_CLIST_IS_EMPTY(&cx->runtime->watchPointList)) js_TraceWatchPoints(trc, obj); diff --git a/js/src/jsscope.cpp b/js/src/jsscope.cpp index 40c5add19fe3..02791e40cff0 100644 --- a/js/src/jsscope.cpp +++ b/js/src/jsscope.cpp @@ -1649,7 +1649,7 @@ JSScopeProperty::trace(JSTracer *trc) { if (IS_GC_MARKING_TRACER(trc)) flags |= SPROP_MARK; - TRACE_ID(trc, id); + js_TraceId(trc, id); #if JS_HAS_GETTER_SETTER if (attrs & (JSPROP_GETTER | JSPROP_SETTER)) { diff --git a/js/src/jsscope.h b/js/src/jsscope.h index fe8b5798e4f1..211b88c9014b 100644 --- a/js/src/jsscope.h +++ b/js/src/jsscope.h @@ -265,6 +265,8 @@ struct JSScope { void extend(JSContext *cx, JSScopeProperty *sprop); + void trace(JSTracer *trc); + void brandingShapeChange(JSContext *cx, uint32 slot, jsval v); void deletingShapeChange(JSContext *cx, JSScopeProperty *sprop); void methodShapeChange(JSContext *cx, uint32 slot, jsval toval); @@ -501,6 +503,54 @@ JSScope::extend(JSContext *cx, JSScopeProperty *sprop) lastProp = sprop; } +inline void +JSScope::trace(JSTracer *trc) +{ + JSContext *cx = trc->context; + JSScopeProperty *sprop = lastProp; + uint8 regenFlag = cx->runtime->gcRegenShapesScopeFlag; + if (IS_GC_MARKING_TRACER(trc) && cx->runtime->gcRegenShapes && hasRegenFlag(regenFlag)) { + /* + * Either this scope has its own shape, which must be regenerated, or + * it must have the same shape as lastProp. + */ + uint32 newShape; + + if (sprop) { + if (!(sprop->flags & SPROP_FLAG_SHAPE_REGEN)) { + sprop->shape = js_RegenerateShapeForGC(cx); + sprop->flags |= SPROP_FLAG_SHAPE_REGEN; + } + newShape = sprop->shape; + } + if (!sprop || hasOwnShape()) { + newShape = js_RegenerateShapeForGC(cx); + JS_ASSERT_IF(sprop, newShape != sprop->shape); + } + shape = newShape; + flags ^= JSScope::SHAPE_REGEN; + + /* Also regenerate the shapes of empty scopes, in case they are not shared. */ + for (JSScope *empty = emptyScope; + empty && empty->hasRegenFlag(regenFlag); + empty = empty->emptyScope) { + empty->shape = js_RegenerateShapeForGC(cx); + empty->flags ^= JSScope::SHAPE_REGEN; + } + } + if (sprop) { + JS_ASSERT(has(sprop)); + + /* Trace scope's property tree ancestor line. 
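+     * Scope properties live in a tree shared across scopes; following
+     * the parent links from lastProp visits every property this scope
+     * can reach. After a middle delete, some entries on that ancestor
+     * line no longer belong to this scope, hence the has() test below.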
*/ + do { + if (hadMiddleDelete() && !has(sprop)) + continue; + sprop->trace(trc); + } while ((sprop = sprop->parent) != NULL); + } +} + + static JS_INLINE bool js_GetSprop(JSContext* cx, JSScopeProperty* sprop, JSObject* obj, jsval* vp) { @@ -553,14 +603,6 @@ js_SetSprop(JSContext* cx, JSScopeProperty* sprop, JSObject* obj, jsval* vp) extern JSScope * js_GetMutableScope(JSContext *cx, JSObject *obj); -/* - * These macros used to inline short code sequences, but they grew over time. - * We retain them for internal backward compatibility, and in case one or both - * ever shrink to inline-able size. - */ -#define TRACE_ID(trc, id) js_TraceId(trc, id) -#define TRACE_SCOPE_PROPERTY(trc, sprop) sprop->trace(trc) - extern void js_TraceId(JSTracer *trc, jsid id); From aa70cca682c9dcefa88f7010d0757e9ed0656567 Mon Sep 17 00:00:00 2001 From: Jason Orendorff Date: Tue, 28 Jul 2009 04:39:40 -0500 Subject: [PATCH 13/19] Bug 506880 - Deadlock on exit in threadsafe shell. r=gal. --- js/src/jstask.cpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/js/src/jstask.cpp b/js/src/jstask.cpp index 2814b953292d..9071e632efdf 100644 --- a/js/src/jstask.cpp +++ b/js/src/jstask.cpp @@ -87,7 +87,7 @@ void JSBackgroundThread::work() { PR_Lock(lock); - do { + while (!shutdown) { PR_WaitCondVar(wakeup, PR_INTERVAL_NO_TIMEOUT); JSBackgroundTask* t; while ((t = stack) != NULL) { @@ -97,7 +97,7 @@ JSBackgroundThread::work() delete t; PR_Lock(lock); } - } while (!shutdown); + } PR_Unlock(lock); } From 3431af1c47e251d584e4b576ea919ba270b51d96 Mon Sep 17 00:00:00 2001 From: Jason Orendorff Date: Tue, 21 Jul 2009 12:13:21 -0500 Subject: [PATCH 14/19] Bug 505522 - Support building with C++ exceptions. r=bsmedberg. --- config/autoconf.mk.in | 1 + configure.in | 2 ++ js/src/config/autoconf.mk.in | 1 + js/src/configure.in | 2 ++ 4 files changed, 6 insertions(+) diff --git a/config/autoconf.mk.in b/config/autoconf.mk.in index f46c76edcc89..51f26fba0bf8 100644 --- a/config/autoconf.mk.in +++ b/config/autoconf.mk.in @@ -297,6 +297,7 @@ MOZ_OPTIMIZE_LDFLAGS = @MOZ_OPTIMIZE_LDFLAGS@ MOZ_OPTIMIZE_SIZE_TWEAK = @MOZ_OPTIMIZE_SIZE_TWEAK@ MOZ_RTTI_FLAGS_ON = @_MOZ_RTTI_FLAGS_ON@ +MOZ_EXCEPTIONS_FLAGS_ON = @_MOZ_EXCEPTIONS_FLAGS_ON@ MOZ_PROFILE_GUIDED_OPTIMIZE_DISABLE = @MOZ_PROFILE_GUIDED_OPTIMIZE_DISABLE@ PROFILE_GEN_CFLAGS = @PROFILE_GEN_CFLAGS@ diff --git a/configure.in b/configure.in index b903ce5a5490..f70021719e83 100644 --- a/configure.in +++ b/configure.in @@ -7330,6 +7330,8 @@ else _MOZ_EXCEPTIONS_FLAGS=$_MOZ_EXCEPTIONS_FLAGS_OFF fi +AC_SUBST(_MOZ_EXCEPTIONS_FLAGS_ON) + # Irix & OSF native compilers do not like exception declarations # when exceptions are disabled if test -n "$MIPSPRO_CXX" -o -n "$COMPAQ_CXX" -o -n "$VACPP"; then diff --git a/js/src/config/autoconf.mk.in b/js/src/config/autoconf.mk.in index 0ff86b970134..9f5f9cfed8e6 100644 --- a/js/src/config/autoconf.mk.in +++ b/js/src/config/autoconf.mk.in @@ -162,6 +162,7 @@ MOZ_OPTIMIZE_LDFLAGS = @MOZ_OPTIMIZE_LDFLAGS@ MOZ_OPTIMIZE_SIZE_TWEAK = @MOZ_OPTIMIZE_SIZE_TWEAK@ MOZ_RTTI_FLAGS_ON = @_MOZ_RTTI_FLAGS_ON@ +MOZ_EXCEPTIONS_FLAGS_ON = @_MOZ_EXCEPTIONS_FLAGS_ON@ MOZ_PROFILE_GUIDED_OPTIMIZE_DISABLE = @MOZ_PROFILE_GUIDED_OPTIMIZE_DISABLE@ PROFILE_GEN_CFLAGS = @PROFILE_GEN_CFLAGS@ diff --git a/js/src/configure.in b/js/src/configure.in index 4d28a3fbd7cd..3e3abb43b74d 100644 --- a/js/src/configure.in +++ b/js/src/configure.in @@ -4737,6 +4737,8 @@ else _MOZ_EXCEPTIONS_FLAGS=$_MOZ_EXCEPTIONS_FLAGS_OFF fi +AC_SUBST(_MOZ_EXCEPTIONS_FLAGS_ON) + # Irix & OSF 
native compilers do not like exception declarations # when exceptions are disabled if test -n "$MIPSPRO_CXX" -o -n "$COMPAQ_CXX" -o -n "$VACPP"; then From 43dbee74727d2dc24900ca7c3b85c42310a4727c Mon Sep 17 00:00:00 2001 From: Jeff Walden Date: Tue, 28 Jul 2009 12:19:00 -0700 Subject: [PATCH 15/19] Bug 506894 - Crash - e4x/GC/regress-357063-01.js. r=gal --- js/src/jsxml.cpp | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/js/src/jsxml.cpp b/js/src/jsxml.cpp index 1e8d73f9a892..ad662c10029b 100644 --- a/js/src/jsxml.cpp +++ b/js/src/jsxml.cpp @@ -932,8 +932,12 @@ XMLArraySetCapacity(JSContext *cx, JSXMLArray *array, uint32 capacity) if (capacity == 0) { /* We could let realloc(p, 0) free this, but purify gets confused. */ - if (array->vector) - cx->free(array->vector); + if (array->vector) { + if (cx) + cx->free(array->vector); + else + js_free(array->vector); + } vector = NULL; } else { if ( From 19c9082e3373fc0ca5984ecdfbb2172baec3b549 Mon Sep 17 00:00:00 2001 From: Brendan Eich Date: Tue, 28 Jul 2009 13:20:14 -0700 Subject: [PATCH 16/19] Bug 471214 - Join function objects transparently, clone via read barrier to satisfy de-facto standard (r=igor). --- js/src/imacros.c.out | 2 + js/src/jsapi.cpp | 2 +- js/src/jsarray.cpp | 2 +- js/src/jsbuiltins.cpp | 4 +- js/src/jsemit.cpp | 10 +- js/src/jsinterp.cpp | 282 +++++++++++++++++++++++++++--------------- js/src/jsiter.cpp | 2 +- js/src/jsobj.cpp | 121 ++++++++++++------ js/src/jsobj.h | 23 +++- js/src/jsopcode.cpp | 2 + js/src/jsopcode.tbl | 8 +- js/src/jsparse.cpp | 12 ++ js/src/jsscope.cpp | 26 +++- js/src/jsscope.h | 168 ++++++++++++++++++++----- js/src/jstracer.cpp | 74 ++++++----- js/src/jstypes.h | 8 ++ js/src/jsxdrapi.h | 2 +- 17 files changed, 535 insertions(+), 213 deletions(-) diff --git a/js/src/imacros.c.out b/js/src/imacros.c.out index 8871bb8001e4..d7d4d9f73629 100644 --- a/js/src/imacros.c.out +++ b/js/src/imacros.c.out @@ -964,6 +964,8 @@ uint8 js_opcode2extra[JSOP_LIMIT] = { 0, /* JSOP_DEFFUN_DBGFC */ 0, /* JSOP_DEFLOCALFUN_DBGFC */ 0, /* JSOP_LAMBDA_DBGFC */ + 0, /* JSOP_SETMETHOD */ + 0, /* JSOP_INITMETHOD */ }; #define JSOP_IS_IMACOP(x) (0 \ || x == JSOP_BITOR \ diff --git a/js/src/jsapi.cpp b/js/src/jsapi.cpp index dd3ab2e83557..b987d291e389 100644 --- a/js/src/jsapi.cpp +++ b/js/src/jsapi.cpp @@ -3544,7 +3544,7 @@ JS_GetMethodById(JSContext *cx, JSObject *obj, jsid id, JSObject **objp, jsval *vp) { CHECK_REQUEST(cx); - if (!js_GetMethod(cx, obj, id, false, vp)) + if (!js_GetMethod(cx, obj, id, JSGET_METHOD_BARRIER, vp)) return JS_FALSE; if (objp) *objp = obj; diff --git a/js/src/jsarray.cpp b/js/src/jsarray.cpp index f4b1039efab6..debe0d8a2ba7 100644 --- a/js/src/jsarray.cpp +++ b/js/src/jsarray.cpp @@ -792,7 +792,7 @@ array_getProperty(JSContext *cx, JSObject *obj, jsid id, jsval *vp) if (prop) { if (OBJ_IS_NATIVE(obj2)) { sprop = (JSScopeProperty *) prop; - if (!js_NativeGet(cx, obj, obj2, sprop, vp)) + if (!js_NativeGet(cx, obj, obj2, sprop, JSGET_METHOD_BARRIER, vp)) return JS_FALSE; } OBJ_DROP_PROPERTY(cx, obj2, prop); diff --git a/js/src/jsbuiltins.cpp b/js/src/jsbuiltins.cpp index 1f1408bf460c..0f97db750322 100644 --- a/js/src/jsbuiltins.cpp +++ b/js/src/jsbuiltins.cpp @@ -337,9 +337,9 @@ JS_DEFINE_CALLINFO_3(extern, BOOL, js_HasNamedPropertyInt32, CONTEXT, OBJECT, IN jsval FASTCALL js_CallGetter(JSContext* cx, JSObject* obj, JSScopeProperty* sprop) { - JS_ASSERT(!SPROP_HAS_STUB_GETTER(sprop)); + JS_ASSERT(!SPROP_HAS_STUB_GETTER_OR_IS_METHOD(sprop)); jsval v; - if (!js_GetSprop(cx, sprop, 
obj, &v)) + if (!sprop->get(cx, obj, &v)) return JSVAL_ERROR_COOKIE; return v; } diff --git a/js/src/jsemit.cpp b/js/src/jsemit.cpp index 5105d8bba498..2b63635620e8 100644 --- a/js/src/jsemit.cpp +++ b/js/src/jsemit.cpp @@ -6529,7 +6529,15 @@ js_EmitTree(JSContext *cx, JSCodeGenerator *cg, JSParseNode *pn) ale = cg->atomList.add(cg->compiler, pn3->pn_atom); if (!ale) return JS_FALSE; - EMIT_INDEX_OP(JSOP_INITPROP, ALE_INDEX(ale)); + + JSOp initOp = (PN_OP(pn2->pn_right) == JSOP_LAMBDA +#if JS_HAS_GETTER_SETTER + && op != JSOP_GETTER && op != JSOP_SETTER +#endif + ) + ? JSOP_INITMETHOD + : JSOP_INITPROP; + EMIT_INDEX_OP(initOp, ALE_INDEX(ale)); } } diff --git a/js/src/jsinterp.cpp b/js/src/jsinterp.cpp index 0d9a50ba5ad1..13089d7a7d39 100644 --- a/js/src/jsinterp.cpp +++ b/js/src/jsinterp.cpp @@ -1,5 +1,5 @@ /* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- - * vim: set ts=8 sw=4 et tw=79: + * vim: set ts=8 sw=4 et tw=99: * * ***** BEGIN LICENSE BLOCK ***** * Version: MPL 1.1/GPL 2.0/LGPL 2.1 @@ -184,46 +184,61 @@ js_FillPropertyCache(JSContext *cx, JSObject *obj, * is a plain old method? It's a function-valued property with stub * getter, so get of a function is idempotent. */ - if ((cs->format & JOF_CALLOP) && - SPROP_HAS_STUB_GETTER(sprop) && - SPROP_HAS_VALID_SLOT(sprop, scope)) { + if (cs->format & JOF_CALLOP) { jsval v; - v = LOCKED_OBJ_GET_SLOT(pobj, sprop->slot); - if (VALUE_IS_FUNCTION(cx, v)) { + if (sprop->isMethod()) { /* - * Great, we have a function-valued prototype property where - * the getter is JS_PropertyStub. The type id in pobj's scope - * does not evolve with changes to property values, however. - * - * So here, on first cache fill for this method, we brand the - * scope with a new shape and set the SCOPE_BRANDED flag. Once - * this scope flag is set, any write to a function-valued plain - * old property in pobj will result in shape being regenerated. + * A compiler-created function object, AKA a method, already + * memoized in the property tree. */ - if (!scope->branded()) { - PCMETER(cache->brandfills++); -#ifdef DEBUG_notme - fprintf(stderr, - "branding %p (%s) for funobj %p (%s), shape %lu\n", - pobj, LOCKED_OBJ_GET_CLASS(pobj)->name, - JSVAL_TO_OBJECT(v), - JS_GetFunctionName(GET_FUNCTION_PRIVATE(cx, JSVAL_TO_OBJECT(v))), - OBJ_SHAPE(obj)); -#endif - scope->brandingShapeChange(cx, sprop->slot, v); - if (js_IsPropertyCacheDisabled(cx)) /* check for rt->shapeGen overflow */ - return JS_NO_PROP_CACHE_FILL; - scope->setBranded(); - } + JS_ASSERT(scope->hasMethodBarrier()); + v = sprop->methodValue(); + JS_ASSERT(VALUE_IS_FUNCTION(cx, v)); vword = JSVAL_OBJECT_TO_PCVAL(v); break; } + + if (SPROP_HAS_STUB_GETTER_OR_IS_METHOD(sprop) && + SPROP_HAS_VALID_SLOT(sprop, scope)) { + v = LOCKED_OBJ_GET_SLOT(pobj, sprop->slot); + if (VALUE_IS_FUNCTION(cx, v)) { + /* + * Great, we have a function-valued prototype property + * where the getter is JS_PropertyStub. The type id in + * pobj's scope does not evolve with changes to property + * values, however. + * + * So here, on first cache fill for this method, we brand + * the scope with a new shape and set the SCOPE_BRANDED + * flag. Once this scope flag is set, any write that adds + * or deletes a function-valued plain old property in + * scope->object will result in shape being regenerated. 
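+                     * For example, given
+                     *
+                     *   o.m();       // cache fill brands o's scope
+                     *   o.m = f2;    // must regenerate shape, else the
+                     *   o.m();       // cache would still call the old m
+                     *
+                     * the assignment is what branding makes detectable.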
+ */ + if (!scope->branded()) { + PCMETER(cache->brandfills++); +#ifdef DEBUG_notme + fprintf(stderr, + "branding %p (%s) for funobj %p (%s), shape %lu\n", + pobj, LOCKED_OBJ_GET_CLASS(pobj)->name, + JSVAL_TO_OBJECT(v), + JS_GetFunctionName(GET_FUNCTION_PRIVATE(cx, JSVAL_TO_OBJECT(v))), + OBJ_SHAPE(obj)); +#endif + scope->brandingShapeChange(cx, sprop->slot, v); + if (js_IsPropertyCacheDisabled(cx)) /* check for rt->shapeGen overflow */ + return JS_NO_PROP_CACHE_FILL; + scope->setBranded(); + } + vword = JSVAL_OBJECT_TO_PCVAL(v); + break; + } + } } /* If getting a value via a stub getter, we can cache the slot. */ if (!(cs->format & (JOF_SET | JOF_INCDEC | JOF_FOR)) && - SPROP_HAS_STUB_GETTER(sprop) && + SPROP_HAS_STUB_GETTER_OR_IS_METHOD(sprop) && SPROP_HAS_VALID_SLOT(sprop, scope)) { /* Great, let's cache sprop's slot and use it on cache hit. */ vword = SLOT_TO_PCVAL(sprop->slot); @@ -235,30 +250,30 @@ js_FillPropertyCache(JSContext *cx, JSObject *obj, scope->shape == sprop->shape) { /* * Our caller added a new property. We also know that a setter - * that js_NativeSet could have run has not mutated the scope - * so the added property is still the last one added and the + * that js_NativeSet could have run has not mutated the scope, + * so the added property is still the last one added, and the * scope is not branded. * * We want to cache under scope's shape before the property * addition to bias for the case when the mutator opcode - * always adds the same property. It allows to optimize - * periodic execution of object initializers or explicit - * initialization sequences like + * always adds the same property. This allows us to optimize + * periodic execution of object initializers or other explicit + * initialization sequences such as * * obj = {}; obj.x = 1; obj.y = 2; * * We assume that on average the win from this optimization is - * bigger that the cost of an extra mismatch per loop due to + * greater than the cost of an extra mismatch per loop owing to * the bias for the following case: * * obj = {}; ... for (...) { ... obj.x = ... } * - * On the first iteration JSOP_SETPROP fills the cache with - * the shape of newly created object, not the shape after - * obj.x is assigned. That mismatches obj's shape on the - * second iteration. Note that on third and the following - * iterations the cache will be hit since the shape no longer - * mutates. + * On the first iteration of such a for loop, JSOP_SETPROP + * fills the cache with the shape of the newly created object + * obj, not the shape of obj after obj.x has been assigned. + * That mismatches obj's shape on the second iteration. Note + * that on the third and subsequent iterations the cache will + * be hit because the shape is no longer updated. */ JS_ASSERT(scope->owned()); if (sprop->parent) { @@ -989,7 +1004,7 @@ js_OnUnknownMethod(JSContext *cx, jsval *vp) MUST_FLOW_THROUGH("out"); id = ATOM_TO_JSID(cx->runtime->atomState.noSuchMethodAtom); - ok = js_GetMethod(cx, obj, id, false, &tvr.u.value); + ok = js_GetMethod(cx, obj, id, 0, &tvr.u.value); if (!ok) goto out; if (JSVAL_IS_PRIMITIVE(tvr.u.value)) { @@ -2132,9 +2147,9 @@ js_TraceOpcode(JSContext *cx) fp->script, cx->tracePrevPc); /* - * If there aren't that many elements on the stack, then - * we have probably entered a new frame, and printing output - * would just be misleading. + * If there aren't that many elements on the stack, then we have + * probably entered a new frame, and printing output would just be + * misleading. 
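+     * (ndefs counts the values the previous opcode defined; the test
+     * below prints them only when the operand stack is at least that
+     * deep above fp->slots.)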
*/ if (ndefs != 0 && ndefs < regs->sp - fp->slots) { @@ -2600,7 +2615,7 @@ AssertValidPropertyCacheHit(JSContext *cx, JSScript *script, JSFrameRegs& regs, JS_ASSERT(PCVAL_IS_OBJECT(entry->vword)); JS_ASSERT(entry->vword != PCVAL_NULL); JS_ASSERT(OBJ_SCOPE(pobj)->branded()); - JS_ASSERT(SPROP_HAS_STUB_GETTER(sprop)); + JS_ASSERT(SPROP_HAS_STUB_GETTER_OR_IS_METHOD(sprop)); JS_ASSERT(SPROP_HAS_VALID_SLOT(sprop, OBJ_SCOPE(pobj))); v = LOCKED_OBJ_GET_SLOT(pobj, sprop->slot); JS_ASSERT(VALUE_IS_FUNCTION(cx, v)); @@ -2637,9 +2652,11 @@ JS_STATIC_ASSERT(JSOP_DEFFUN_FC_LENGTH == JSOP_DEFFUN_DBGFC_LENGTH); /* * Same for JSOP_SETNAME and JSOP_SETPROP, which differ only slightly but - * remain distinct for the decompiler. + * remain distinct for the decompiler. Likewise for JSOP_INIT{PROP,METHOD}. */ JS_STATIC_ASSERT(JSOP_SETNAME_LENGTH == JSOP_SETPROP_LENGTH); +JS_STATIC_ASSERT(JSOP_SETNAME_LENGTH == JSOP_SETMETHOD_LENGTH); +JS_STATIC_ASSERT(JSOP_INITPROP_LENGTH == JSOP_INITMETHOD_LENGTH); /* See TRY_BRANCH_AFTER_COND. */ JS_STATIC_ASSERT(JSOP_IFNE_LENGTH == JSOP_IFEQ_LENGTH); @@ -2700,14 +2717,6 @@ js_Interpret(JSContext *cx) #endif JSAutoResolveFlags rf(cx, JSRESOLVE_INFER); -#ifdef __GNUC__ -# define JS_EXTENSION __extension__ -# define JS_EXTENSION_(s) __extension__ ({ s; }) -#else -# define JS_EXTENSION -# define JS_EXTENSION_(s) s -#endif - # ifdef DEBUG /* * We call this macro from BEGIN_CASE in threaded interpreters, @@ -3610,7 +3619,7 @@ js_Interpret(JSContext *cx) goto error; \ JS_END_MACRO -#define NATIVE_GET(cx,obj,pobj,sprop,vp) \ +#define NATIVE_GET(cx,obj,pobj,sprop,getHow,vp) \ JS_BEGIN_MACRO \ if (SPROP_HAS_STUB_GETTER(sprop)) { \ /* Fast path for Object instance properties. */ \ @@ -3620,7 +3629,7 @@ js_Interpret(JSContext *cx) ? LOCKED_OBJ_GET_SLOT(pobj, (sprop)->slot) \ : JSVAL_VOID; \ } else { \ - if (!js_NativeGet(cx, obj, pobj, sprop, vp)) \ + if (!js_NativeGet(cx, obj, pobj, sprop, getHow, vp)) \ goto error; \ } \ JS_END_MACRO @@ -4481,7 +4490,7 @@ js_Interpret(JSContext *cx) } else { JS_ASSERT(PCVAL_IS_SPROP(entry->vword)); sprop = PCVAL_TO_SPROP(entry->vword); - NATIVE_GET(cx, obj, obj2, sprop, &rval); + NATIVE_GET(cx, obj, obj2, sprop, JSGET_METHOD_BARRIER, &rval); } JS_UNLOCK_OBJ(cx, obj2); break; @@ -4495,7 +4504,9 @@ js_Interpret(JSContext *cx) } id = ATOM_TO_JSID(atom); if (entry - ? !js_GetPropertyHelper(cx, obj, id, true, &rval) + ? !js_GetPropertyHelper(cx, obj, id, + JSGET_CACHE_RESULT | JSGET_METHOD_BARRIER, + &rval) : !OBJ_GET_PROPERTY(cx, obj, id, &rval)) { goto error; } @@ -4572,7 +4583,7 @@ js_Interpret(JSContext *cx) } else { JS_ASSERT(PCVAL_IS_SPROP(entry->vword)); sprop = PCVAL_TO_SPROP(entry->vword); - NATIVE_GET(cx, obj, obj2, sprop, &rval); + NATIVE_GET(cx, obj, obj2, sprop, 0, &rval); } JS_UNLOCK_OBJ(cx, obj2); STORE_OPND(-1, rval); @@ -4591,13 +4602,13 @@ js_Interpret(JSContext *cx) id = ATOM_TO_JSID(atom); PUSH(JSVAL_NULL); if (!JSVAL_IS_PRIMITIVE(lval)) { - if (!js_GetMethod(cx, obj, id, !!entry, &rval)) + if (!js_GetMethod(cx, obj, id, entry ? 
JSGET_CACHE_RESULT : 0, &rval)) goto error; STORE_OPND(-1, OBJECT_TO_JSVAL(obj)); STORE_OPND(-2, rval); } else { JS_ASSERT(obj->map->ops->getProperty == js_GetProperty); - if (!js_GetPropertyHelper(cx, obj, id, true, &rval)) + if (!js_GetPropertyHelper(cx, obj, id, JSGET_CACHE_RESULT, &rval)) goto error; STORE_OPND(-1, lval); STORE_OPND(-2, rval); @@ -4628,9 +4639,12 @@ js_Interpret(JSContext *cx) BEGIN_CASE(JSOP_SETNAME) BEGIN_CASE(JSOP_SETPROP) + BEGIN_CASE(JSOP_SETMETHOD) + do_setprop: rval = FETCH_OPND(-1); + JS_ASSERT_IF(op == JSOP_SETMETHOD, VALUE_IS_FUNCTION(cx, rval)); lval = FETCH_OPND(-2); - JS_ASSERT(!JSVAL_IS_PRIMITIVE(lval) || op == JSOP_SETPROP); + JS_ASSERT_IF(op == JSOP_SETNAME, !JSVAL_IS_PRIMITIVE(lval)); VALUE_TO_OBJECT(cx, -2, lval, obj); do { @@ -4784,9 +4798,21 @@ js_Interpret(JSContext *cx) sprop = sprop2; } else { scope->extend(cx, sprop); + + jsuint index; + if (js_IdIsIndex(sprop->id, &index)) + scope->setIndexedProperties(); + + if (sprop->isMethod()) + scope->setMethodBarrier(); } - LOCKED_OBJ_WRITE_BARRIER(cx, obj, slot, rval); + /* + * No LOCKED_OBJ_WRITE_BARRIER because here we + * are adding a new property, not updating an + * existing slot's value that might contain a + * method of a branded scope. + */ TRACE_2(SetPropHit, entry, sprop); LOCKED_OBJ_SET_SLOT(obj, slot, rval); JS_UNLOCK_SCOPE(cx, scope); @@ -4830,7 +4856,10 @@ js_Interpret(JSContext *cx) LOAD_ATOM(0); id = ATOM_TO_JSID(atom); if (entry) { - if (!js_SetPropertyHelper(cx, obj, id, true, &rval)) + uintN defineHow = (op == JSOP_SETMETHOD) + ? JSDNP_CACHE_RESULT | JSDNP_SET_METHOD + : JSDNP_CACHE_RESULT; + if (!js_SetPropertyHelper(cx, obj, id, defineHow, &rval)) goto error; } else { if (!OBJ_SET_PROPERTY(cx, obj, id, &rval)) @@ -4887,7 +4916,7 @@ js_Interpret(JSContext *cx) END_CASE(JSOP_GETELEM) BEGIN_CASE(JSOP_CALLELEM) - ELEMENT_OP(-1, js_GetMethod(cx, obj, id, false, &rval)); + ELEMENT_OP(-1, js_GetMethod(cx, obj, id, 0, &rval)); #if JS_HAS_NO_SUCH_METHOD if (JS_UNLIKELY(JSVAL_IS_VOID(rval))) { regs.sp[-2] = regs.sp[-1]; @@ -5341,7 +5370,7 @@ js_Interpret(JSContext *cx) } else { sprop = (JSScopeProperty *)prop; do_native_get: - NATIVE_GET(cx, obj, obj2, sprop, &rval); + NATIVE_GET(cx, obj, obj2, sprop, JSGET_METHOD_BARRIER, &rval); OBJ_DROP_PROPERTY(cx, obj2, (JSProperty *) sprop); } @@ -5922,7 +5951,7 @@ js_Interpret(JSContext *cx) sprop = (JSScopeProperty *) prop; if ((sprop->attrs & JSPROP_PERMANENT) && SPROP_HAS_VALID_SLOT(sprop, OBJ_SCOPE(obj)) && - SPROP_HAS_STUB_GETTER(sprop) && + SPROP_HAS_STUB_GETTER_OR_IS_METHOD(sprop) && SPROP_HAS_STUB_SETTER(sprop)) { /* * Fast globals use frame variables to map the global @@ -6216,24 +6245,65 @@ js_Interpret(JSContext *cx) obj = FUN_OBJECT(fun); if (FUN_NULL_CLOSURE(fun)) { - obj = js_CloneFunctionObject(cx, fun, fp->scopeChain); - if (!obj) - goto error; + parent = fp->scopeChain; + + if (OBJ_GET_PARENT(cx, obj) == parent) { + JSScope *scope; + + lval = FETCH_OPND(-1); + op = JSOp(regs.pc[JSOP_LAMBDA_LENGTH]); + + /* + * Optimize ({method: function () { ... }, ...}) and + * this.method = function () { ... }; bytecode sequences. + * + * Note that we jump to the entry points for JSOP_SETPROP + * and JSOP_INITPROP without calling the trace recorder, + * because the record hooks for those ops are essentially + * no-ops (this can't change given the predictive shape + * guarding the recorder must do). 
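+                 * For example,
+                 *
+                 *   this.m = function () { ... };   // LAMBDA; SETMETHOD; POP
+                 *   ({m: function () { ... }});     // NEWINIT; LAMBDA; INITMETHOD
+                 *
+                 * both reach these shortcuts and defer cloning the
+                 * compiler-created function object to the read barrier.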
+ */ + if (op == JSOP_SETMETHOD) { +#ifdef DEBUG + op2 = JSOp(regs.pc[JSOP_LAMBDA_LENGTH + JSOP_SETMETHOD_LENGTH]); + JS_ASSERT(op2 == JSOP_POP || op2 == JSOP_POPV); +#endif + + if (JSVAL_IS_OBJECT(lval) && + (obj2 = JSVAL_TO_OBJECT(lval)) && + OBJ_GET_CLASS(cx, obj2) == &js_ObjectClass) { + scope = OBJ_SCOPE(obj2); + if (scope->object == obj2) { + PUSH_OPND(OBJECT_TO_JSVAL(obj)); + regs.pc += JSOP_LAMBDA_LENGTH; + goto do_setprop; + } + } + } else if (op == JSOP_INITMETHOD) { + JS_ASSERT(!JSVAL_IS_PRIMITIVE(lval)); + obj2 = JSVAL_TO_OBJECT(lval); + scope = OBJ_SCOPE(obj2); + + /* + * JSOP_NEWINIT gave the new object it created (obj2 + * here) its own scope. + */ + JS_ASSERT(scope->object == obj2); + PUSH_OPND(OBJECT_TO_JSVAL(obj)); + regs.pc += JSOP_LAMBDA_LENGTH; + goto do_initprop; + } + } } else { parent = js_GetScopeChain(cx, fp); if (!parent) goto error; - - /* - * FIXME: bug 471214, Cloning here even when the compiler saw - * the right parent is wasteful but we don't fully support - * joined function objects, yet. - */ - obj = js_CloneFunctionObject(cx, fun, parent); - if (!obj) - goto error; } + obj = js_CloneFunctionObject(cx, fun, parent); + if (!obj) + goto error; + PUSH_OPND(OBJECT_TO_JSVAL(obj)); END_CASE(JSOP_LAMBDA) @@ -6379,11 +6449,26 @@ js_Interpret(JSContext *cx) BEGIN_CASE(JSOP_NEWINIT) i = GET_INT8(regs.pc); JS_ASSERT(i == JSProto_Array || i == JSProto_Object); - obj = (i == JSProto_Array) - ? js_NewArrayObject(cx, 0, NULL) - : js_NewObject(cx, &js_ObjectClass, NULL, NULL); - if (!obj) - goto error; + if (i == JSProto_Array) { + obj = js_NewArrayObject(cx, 0, NULL); + if (!obj) + goto error; + } else { + obj = js_NewObject(cx, &js_ObjectClass, NULL, NULL); + if (!obj) + goto error; + + if (regs.pc[JSOP_NEWINIT_LENGTH] != JSOP_ENDINIT) { + JS_LOCK_OBJ(cx, obj); + JSScope *scope = js_GetMutableScope(cx, obj); + if (!scope) { + JS_UNLOCK_OBJ(cx, obj); + goto error; + } + JS_UNLOCK_SCOPE(cx, scope); + } + } + PUSH_OPND(OBJECT_TO_JSVAL(obj)); fp->sharpDepth++; CHECK_INTERRUPT_HANDLER(); @@ -6401,6 +6486,8 @@ js_Interpret(JSContext *cx) END_CASE(JSOP_ENDINIT) BEGIN_CASE(JSOP_INITPROP) + BEGIN_CASE(JSOP_INITMETHOD) + do_initprop: /* Load the property's initial value into rval. */ JS_ASSERT(regs.sp - StackBase(fp) >= 2); rval = FETCH_OPND(-1); @@ -6421,6 +6508,7 @@ js_Interpret(JSContext *cx) JS_LOCK_OBJ(cx, obj); scope = OBJ_SCOPE(obj); + JS_ASSERT(scope->object == obj); JS_ASSERT(!scope->sealed()); kshape = scope->shape; cache = &JS_PROPERTY_CACHE(cx); @@ -6450,14 +6538,6 @@ js_Interpret(JSContext *cx) if (!SPROP_HAS_STUB_SETTER(sprop)) goto do_initprop_miss; - if (!scope->owned()) { - scope = js_GetMutableScope(cx, obj); - if (!scope) { - JS_UNLOCK_OBJ(cx, obj); - goto error; - } - } - /* * Detect a repeated property name and force a miss to * share the strict warning code and cope with complexity @@ -6508,7 +6588,11 @@ js_Interpret(JSContext *cx) scope->lastProp = sprop; } - LOCKED_OBJ_WRITE_BARRIER(cx, obj, slot, rval); + /* + * No LOCKED_OBJ_WRITE_BARRIER because here we are adding a + * new property, not updating an existing slot's value that + * might contain a method of a branded scope. + */ TRACE_2(SetPropHit, entry, sprop); LOCKED_OBJ_SET_SLOT(obj, slot, rval); JS_UNLOCK_SCOPE(cx, scope); @@ -6529,12 +6613,16 @@ js_Interpret(JSContext *cx) goto error; } + uintN defineHow = (op == JSOP_INITMETHOD) + ? JSDNP_CACHE_RESULT | JSDNP_SET_METHOD + : JSDNP_CACHE_RESULT; if (!(JS_UNLIKELY(atom == cx->runtime->atomState.protoAtom) - ? 
js_SetPropertyHelper(cx, obj, id, true, &rval) + ? js_SetPropertyHelper(cx, obj, id, defineHow, &rval) : js_DefineNativeProperty(cx, obj, id, rval, NULL, NULL, JSPROP_ENUMERATE, 0, 0, NULL, - JSDNP_CACHE_RESULT))) + defineHow))) { goto error; + } } while (0); /* Common tail for property cache hit and miss cases. */ diff --git a/js/src/jsiter.cpp b/js/src/jsiter.cpp index a9c3473f9775..b87662baf415 100644 --- a/js/src/jsiter.cpp +++ b/js/src/jsiter.cpp @@ -376,7 +376,7 @@ js_ValueToIterator(JSContext *cx, uintN flags, jsval *vp) *vp = OBJECT_TO_JSVAL(iterobj); } else { atom = cx->runtime->atomState.iteratorAtom; - if (!js_GetMethod(cx, obj, ATOM_TO_JSID(atom), false, vp)) + if (!js_GetMethod(cx, obj, ATOM_TO_JSID(atom), 0, vp)) goto bad; if (JSVAL_IS_VOID(*vp)) { default_iter: diff --git a/js/src/jsobj.cpp b/js/src/jsobj.cpp index bf5f72b4e808..8e6fc5ee6f33 100644 --- a/js/src/jsobj.cpp +++ b/js/src/jsobj.cpp @@ -418,7 +418,7 @@ MarkSharpObjects(JSContext *cx, JSObject *obj, JSIdArray **idap) JSScopeProperty *sprop = (JSScopeProperty *) prop; val = JSVAL_NULL; if (attrs & JSPROP_GETTER) - val = js_CastAsObjectJSVal(sprop->getter); + val = sprop->getterValue(); if (attrs & JSPROP_SETTER) { if (val != JSVAL_NULL) { /* Mark the getter, then set val to setter. */ @@ -426,7 +426,7 @@ MarkSharpObjects(JSContext *cx, JSObject *obj, JSIdArray **idap) NULL) != NULL); } - val = js_CastAsObjectJSVal(sprop->setter); + val = sprop->setterValue(); } } else { ok = OBJ_GET_PROPERTY(cx, obj, id, &val); @@ -782,7 +782,7 @@ obj_toSource(JSContext *cx, uintN argc, jsval *vp) (attrs & (JSPROP_GETTER | JSPROP_SETTER))) { JSScopeProperty *sprop = (JSScopeProperty *) prop; if (attrs & JSPROP_GETTER) { - val[valcnt] = js_CastAsObjectJSVal(sprop->getter); + val[valcnt] = sprop->getterValue(); gsopold[valcnt] = ATOM_TO_STRING(cx->runtime->atomState.getterAtom); gsop[valcnt] = @@ -791,7 +791,7 @@ obj_toSource(JSContext *cx, uintN argc, jsval *vp) valcnt++; } if (attrs & JSPROP_SETTER) { - val[valcnt] = js_CastAsObjectJSVal(sprop->setter); + val[valcnt] = sprop->setterValue(); gsopold[valcnt] = ATOM_TO_STRING(cx->runtime->atomState.setterAtom); gsop[valcnt] = @@ -1905,7 +1905,7 @@ obj_lookupGetter(JSContext *cx, uintN argc, jsval *vp) if (OBJ_IS_NATIVE(pobj)) { sprop = (JSScopeProperty *) prop; if (sprop->attrs & JSPROP_GETTER) - *vp = js_CastAsObjectJSVal(sprop->getter); + *vp = sprop->getterValue(); } OBJ_DROP_PROPERTY(cx, pobj, prop); } @@ -1930,7 +1930,7 @@ obj_lookupSetter(JSContext *cx, uintN argc, jsval *vp) if (OBJ_IS_NATIVE(pobj)) { sprop = (JSScopeProperty *) prop; if (sprop->attrs & JSPROP_SETTER) - *vp = js_CastAsObjectJSVal(sprop->setter); + *vp = sprop->setterValue(); } OBJ_DROP_PROPERTY(cx, pobj, prop); } @@ -3557,6 +3557,8 @@ js_AddNativeProperty(JSContext *cx, JSObject *obj, jsid id, JSScope *scope; JSScopeProperty *sprop; + JS_ASSERT(!(flags & SPROP_IS_METHOD)); + /* * Purge the property cache of now-shadowed id in obj's scope chain. Do * this optimistically (assuming no failure below) before locking obj, so @@ -3638,7 +3640,7 @@ js_DefineNativeProperty(JSContext *cx, JSObject *obj, jsid id, jsval value, JSScopeProperty *sprop; JSBool added; - JS_ASSERT((defineHow & ~(JSDNP_CACHE_RESULT | JSDNP_DONT_PURGE)) == 0); + JS_ASSERT((defineHow & ~(JSDNP_CACHE_RESULT | JSDNP_DONT_PURGE | JSDNP_SET_METHOD)) == 0); js_LeaveTraceIfGlobalObject(cx, obj); /* Convert string indices to integers if appropriate. 
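     * (js_CheckForStringIndex canonicalizes ids such as "3" to the integer
     * jsid 3, so obj["3"] and obj[3] always name the same property.)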
*/ @@ -3710,10 +3712,12 @@ js_DefineNativeProperty(JSContext *cx, JSObject *obj, jsid id, jsval value, /* Use the object's class getter and setter by default. */ clasp = LOCKED_OBJ_GET_CLASS(obj); - if (!getter) - getter = clasp->getProperty; - if (!setter) - setter = clasp->setProperty; + if (!(defineHow & JSDNP_SET_METHOD)) { + if (!getter) + getter = clasp->getProperty; + if (!setter) + setter = clasp->setProperty; + } /* Get obj's own scope if it has one, or create a new one for obj. */ scope = js_GetMutableScope(cx, obj); @@ -3725,6 +3729,16 @@ js_DefineNativeProperty(JSContext *cx, JSObject *obj, jsid id, jsval value, /* Add a new property, or replace an existing one of the same id. */ if (clasp->flags & JSCLASS_SHARE_ALL_PROPERTIES) attrs |= JSPROP_SHARED; + + if (defineHow & JSDNP_SET_METHOD) { + JS_ASSERT(LOCKED_OBJ_GET_CLASS(obj) == &js_ObjectClass); + JS_ASSERT(VALUE_IS_FUNCTION(cx, value)); + JS_ASSERT(!(attrs & (JSPROP_GETTER | JSPROP_SETTER))); + JS_ASSERT(!getter && !setter); + flags |= SPROP_IS_METHOD; + getter = js_CastAsPropertyOp(JSVAL_TO_OBJECT(value)); + } + sprop = scope->add(cx, id, getter, setter, SPROP_INVALID_SLOT, attrs, flags, shortid); if (!sprop) @@ -3733,8 +3747,12 @@ js_DefineNativeProperty(JSContext *cx, JSObject *obj, jsid id, jsval value, } /* Store value before calling addProperty, in case the latter GC's. */ - if (SPROP_HAS_VALID_SLOT(sprop, scope)) - LOCKED_OBJ_WRITE_SLOT(cx, obj, sprop->slot, value); + if (SPROP_HAS_VALID_SLOT(sprop, scope)) { + if (added) + LOCKED_OBJ_SET_SLOT(obj, sprop->slot, value); + else + LOCKED_OBJ_WRITE_BARRIER(cx, obj, sprop->slot, value); + } /* XXXbe called with lock held */ ADD_PROPERTY_HELPER(cx, clasp, obj, scope, sprop, &value, @@ -4138,7 +4156,7 @@ js_FindIdentifierBase(JSContext *cx, JSObject *scopeChain, jsid id) JSBool js_NativeGet(JSContext *cx, JSObject *obj, JSObject *pobj, - JSScopeProperty *sprop, jsval *vp) + JSScopeProperty *sprop, uintN getHow, jsval *vp) { js_LeaveTraceIfGlobalObject(cx, pobj); @@ -4157,17 +4175,20 @@ js_NativeGet(JSContext *cx, JSObject *obj, JSObject *pobj, ? 
LOCKED_OBJ_GET_SLOT(pobj, slot) : JSVAL_VOID; if (SPROP_HAS_STUB_GETTER(sprop)) - return JS_TRUE; + return true; + + if (JS_UNLIKELY(sprop->isMethod()) && !(getHow & JSGET_METHOD_BARRIER)) + return true; sample = cx->runtime->propertyRemovals; JS_UNLOCK_SCOPE(cx, scope); JS_PUSH_TEMP_ROOT_SPROP(cx, sprop, &tvr); JS_PUSH_TEMP_ROOT_OBJECT(cx, pobj, &tvr2); - ok = js_GetSprop(cx, sprop, obj, vp); + ok = sprop->get(cx, obj, vp); JS_POP_TEMP_ROOT(cx, &tvr2); JS_POP_TEMP_ROOT(cx, &tvr); if (!ok) - return JS_FALSE; + return false; JS_LOCK_SCOPE(cx, scope); if (SLOT_IN_SCOPE(slot, scope) && @@ -4176,7 +4197,7 @@ js_NativeGet(JSContext *cx, JSObject *obj, JSObject *pobj, LOCKED_OBJ_SET_SLOT(pobj, slot, *vp); } - return JS_TRUE; + return true; } JSBool @@ -4220,7 +4241,7 @@ js_NativeSet(JSContext *cx, JSObject *obj, JSScopeProperty *sprop, jsval *vp) sample = cx->runtime->propertyRemovals; JS_UNLOCK_SCOPE(cx, scope); JS_PUSH_TEMP_ROOT_SPROP(cx, sprop, &tvr); - ok = js_SetSprop(cx, sprop, obj, vp); + ok = sprop->set(cx, obj, vp); JS_POP_TEMP_ROOT(cx, &tvr); if (!ok) return JS_FALSE; @@ -4237,7 +4258,7 @@ js_NativeSet(JSContext *cx, JSObject *obj, JSScopeProperty *sprop, jsval *vp) } JSBool -js_GetPropertyHelper(JSContext *cx, JSObject *obj, jsid id, JSBool cacheResult, +js_GetPropertyHelper(JSContext *cx, JSObject *obj, jsid id, uintN getHow, jsval *vp) { JSObject *aobj, *obj2; @@ -4245,7 +4266,8 @@ js_GetPropertyHelper(JSContext *cx, JSObject *obj, jsid id, JSBool cacheResult, JSProperty *prop; JSScopeProperty *sprop; - JS_ASSERT_IF(cacheResult, !JS_ON_TRACE(cx)); + JS_ASSERT_IF(getHow & JSGET_CACHE_RESULT, !JS_ON_TRACE(cx)); + /* Convert string indices to integers if appropriate. */ id = js_CheckForStringIndex(id); @@ -4260,7 +4282,7 @@ js_GetPropertyHelper(JSContext *cx, JSObject *obj, jsid id, JSBool cacheResult, if (!OBJ_GET_CLASS(cx, obj)->getProperty(cx, obj, ID_TO_VALUE(id), vp)) return JS_FALSE; - PCMETER(cacheResult && JS_PROPERTY_CACHE(cx).nofills++); + PCMETER(getHow & JSGET_CACHE_RESULT && JS_PROPERTY_CACHE(cx).nofills++); /* * Give a strict warning if foo.bar is evaluated by a script for an @@ -4321,12 +4343,12 @@ js_GetPropertyHelper(JSContext *cx, JSObject *obj, jsid id, JSBool cacheResult, sprop = (JSScopeProperty *) prop; - if (cacheResult) { + if (getHow & JSGET_CACHE_RESULT) { JS_ASSERT_NOT_ON_TRACE(cx); js_FillPropertyCache(cx, aobj, 0, protoIndex, obj2, sprop, false); } - if (!js_NativeGet(cx, obj, obj2, sprop, vp)) + if (!js_NativeGet(cx, obj, obj2, sprop, getHow, vp)) return JS_FALSE; JS_UNLOCK_OBJ(cx, obj2); @@ -4336,20 +4358,19 @@ js_GetPropertyHelper(JSContext *cx, JSObject *obj, jsid id, JSBool cacheResult, JSBool js_GetProperty(JSContext *cx, JSObject *obj, jsid id, jsval *vp) { - return js_GetPropertyHelper(cx, obj, id, false, vp); + return js_GetPropertyHelper(cx, obj, id, JSGET_METHOD_BARRIER, vp); } JSBool -js_GetMethod(JSContext *cx, JSObject *obj, jsid id, JSBool cacheResult, - jsval *vp) +js_GetMethod(JSContext *cx, JSObject *obj, jsid id, uintN getHow, jsval *vp) { JSAutoResolveFlags rf(cx, JSRESOLVE_QUALIFIED); if (obj->map->ops == &js_ObjectOps || obj->map->ops->getProperty == js_GetProperty) { - return js_GetPropertyHelper(cx, obj, id, cacheResult, vp); + return js_GetPropertyHelper(cx, obj, id, getHow, vp); } - JS_ASSERT_IF(cacheResult, OBJ_IS_DENSE_ARRAY(cx, obj)); + JS_ASSERT_IF(getHow & JSGET_CACHE_RESULT, OBJ_IS_DENSE_ARRAY(cx, obj)); #if JS_HAS_XML_SUPPORT if (OBJECT_IS_XML(cx, obj)) return js_GetXMLMethod(cx, obj, id, vp); @@ -4380,10 +4401,11 @@ 
js_CheckUndeclaredVarAssignment(JSContext *cx) /* * Note: all non-error exits in this function must notify the tracer using - * SetPropHit when called from the interpreter loop (cacheResult is true). + * SetPropHit when called from the interpreter, which is detected by testing + * (defineHow & JSDNP_CACHE_RESULT). */ JSBool -js_SetPropertyHelper(JSContext *cx, JSObject *obj, jsid id, JSBool cacheResult, +js_SetPropertyHelper(JSContext *cx, JSObject *obj, jsid id, uintN defineHow, jsval *vp) { int protoIndex; @@ -4397,7 +4419,8 @@ js_SetPropertyHelper(JSContext *cx, JSObject *obj, jsid id, JSBool cacheResult, JSPropertyOp getter, setter; bool added; - if (cacheResult) + JS_ASSERT((defineHow & ~(JSDNP_CACHE_RESULT | JSDNP_SET_METHOD)) == 0); + if (defineHow & JSDNP_CACHE_RESULT) JS_ASSERT_NOT_ON_TRACE(cx); /* Convert string indices to integers if appropriate. */ @@ -4470,8 +4493,8 @@ js_SetPropertyHelper(JSContext *cx, JSObject *obj, jsid id, JSBool cacheResult, if (attrs & JSPROP_READONLY) { if (!JS_HAS_STRICT_OPTION(cx)) { /* Just return true per ECMA if not in strict mode. */ - PCMETER(cacheResult && JS_PROPERTY_CACHE(cx).rofills++); - if (cacheResult) + PCMETER((defineHow & JSDNP_CACHE_RESULT) && JS_PROPERTY_CACHE(cx).rofills++); + if (defineHow & JSDNP_CACHE_RESULT) TRACE_2(SetPropHit, JS_NO_PROP_CACHE_FILL, sprop); return JS_TRUE; error: // TRACE_2 jumps here in case of error. @@ -4484,7 +4507,10 @@ js_SetPropertyHelper(JSContext *cx, JSObject *obj, jsid id, JSBool cacheResult, goto read_only_error; } - if (pobj != obj) { + if (pobj == obj) { + if (!(defineHow & JSDNP_SET_METHOD) != !sprop->isMethod()) + sprop = NULL; + } else { /* * We found id in a prototype object: prepare to share or shadow. * @@ -4496,7 +4522,7 @@ js_SetPropertyHelper(JSContext *cx, JSObject *obj, jsid id, JSBool cacheResult, /* Don't clone a shared prototype property. */ if (attrs & JSPROP_SHARED) { - if (cacheResult) { + if (defineHow & JSDNP_CACHE_RESULT) { JSPropCacheEntry *entry; entry = js_FillPropertyCache(cx, obj, 0, protoIndex, pobj, sprop, false); TRACE_2(SetPropHit, entry, sprop); @@ -4507,7 +4533,7 @@ js_SetPropertyHelper(JSContext *cx, JSObject *obj, jsid id, JSBool cacheResult, return JS_TRUE; } - return js_SetSprop(cx, sprop, obj, vp); + return sprop->set(cx, obj, vp); } /* Restore attrs to the ECMA default for new properties. */ @@ -4554,8 +4580,23 @@ js_SetPropertyHelper(JSContext *cx, JSObject *obj, jsid id, JSBool cacheResult, JS_UNLOCK_OBJ(cx, obj); return JS_FALSE; } + if (clasp->flags & JSCLASS_SHARE_ALL_PROPERTIES) attrs |= JSPROP_SHARED; + + /* + * Check for Object class here to avoid defining a method on a class + * with magic resolve, addProperty, getProperty, etc. hooks. 
+ */
+    if ((defineHow & JSDNP_SET_METHOD) &&
+        LOCKED_OBJ_GET_CLASS(obj) == &js_ObjectClass) {
+        JS_ASSERT(VALUE_IS_FUNCTION(cx, *vp));
+        JS_ASSERT(!(attrs & (JSPROP_GETTER | JSPROP_SETTER)));
+        flags |= SPROP_IS_METHOD;
+        getter = JS_EXTENSION (JSPropertyOp) JSVAL_TO_OBJECT(*vp);
+        setter = NULL;
+    }
+
     sprop = scope->add(cx, id, getter, setter, SPROP_INVALID_SLOT,
                        attrs, flags, shortid);
     if (!sprop) {
@@ -4579,7 +4620,7 @@ js_SetPropertyHelper(JSContext *cx, JSObject *obj, jsid id, uintN defineHow,
         added = true;
     }
 
-    if (cacheResult) {
+    if (defineHow & JSDNP_CACHE_RESULT) {
         JSPropCacheEntry *entry;
         entry = js_FillPropertyCache(cx, obj, 0, 0, obj, sprop, added);
         TRACE_2(SetPropHit, entry, sprop);
@@ -4758,7 +4799,7 @@ js_DefaultValue(JSContext *cx, JSObject *obj, JSType hint, jsval *vp)
     }
 
     if (sprop &&
-        SPROP_HAS_STUB_GETTER(sprop) &&
+        SPROP_HAS_STUB_GETTER_OR_IS_METHOD(sprop) &&
         SPROP_HAS_VALID_SLOT(sprop, scope)) {
         jsval fval = LOCKED_OBJ_GET_SLOT(pobj, sprop->slot);
 
@@ -5548,7 +5589,7 @@ js_TryMethod(JSContext *cx, JSObject *obj, JSAtom *atom,
     older = JS_SetErrorReporter(cx, NULL);
     id = ATOM_TO_JSID(atom);
     fval = JSVAL_VOID;
-    ok = js_GetMethod(cx, obj, id, false, &fval);
+    ok = js_GetMethod(cx, obj, id, 0, &fval);
     if (!ok)
         JS_ClearPendingException(cx);
     JS_SetErrorReporter(cx, older);
diff --git a/js/src/jsobj.h b/js/src/jsobj.h
index 57058c9f1d6a..c553f3c37501 100644
--- a/js/src/jsobj.h
+++ b/js/src/jsobj.h
@@ -664,6 +664,9 @@ js_DefineProperty(JSContext *cx, JSObject *obj, jsid id, jsval value,
  */
 const uintN JSDNP_CACHE_RESULT = 1; /* an interpreter call from JSOP_INITPROP */
 const uintN JSDNP_DONT_PURGE = 2;   /* suppress js_PurgeScopeChain */
+const uintN JSDNP_SET_METHOD = 4;   /* js_{DefineNativeProperty,SetPropertyHelper}
+                                       must pass the SPROP_IS_METHOD flag on to
+                                       js_AddScopeProperty */
 
 extern JSBool
 js_DefineNativeProperty(JSContext *cx, JSObject *obj, jsid id, jsval value,
@@ -712,6 +715,17 @@ js_FindIdentifierBase(JSContext *cx, JSObject *scopeChain, jsid id);
 extern JSObject *
 js_FindVariableScope(JSContext *cx, JSFunction **funp);
 
+/*
+ * JSGET_CACHE_RESULT is the analogue of JSDNP_CACHE_RESULT for js_GetMethod.
+ *
+ * JSGET_METHOD_BARRIER enables a read barrier that preserves standard function
+ * object semantics (by default we assume our caller won't leak a joined callee
+ * to script, where it would create hazardous mutable object sharing as well as
+ * observable identity according to == and ===).
+ */ +const uintN JSGET_CACHE_RESULT = 1; // call from a caching interpreter opcode +const uintN JSGET_METHOD_BARRIER = 2; // caller may leak shared function object + /* * NB: js_NativeGet and js_NativeSet are called with the scope containing sprop * (pobj's scope for Get, obj's for Set) locked, and on successful return, that @@ -720,21 +734,20 @@ js_FindVariableScope(JSContext *cx, JSFunction **funp); */ extern JSBool js_NativeGet(JSContext *cx, JSObject *obj, JSObject *pobj, - JSScopeProperty *sprop, jsval *vp); + JSScopeProperty *sprop, uintN getHow, jsval *vp); extern JSBool js_NativeSet(JSContext *cx, JSObject *obj, JSScopeProperty *sprop, jsval *vp); extern JSBool -js_GetPropertyHelper(JSContext *cx, JSObject *obj, jsid id, JSBool cacheResult, +js_GetPropertyHelper(JSContext *cx, JSObject *obj, jsid id, uintN getHow, jsval *vp); extern JSBool js_GetProperty(JSContext *cx, JSObject *obj, jsid id, jsval *vp); extern JSBool -js_GetMethod(JSContext *cx, JSObject *obj, jsid id, JSBool cacheResult, - jsval *vp); +js_GetMethod(JSContext *cx, JSObject *obj, jsid id, uintN getHow, jsval *vp); /* * Check whether it is OK to assign an undeclared property of the global @@ -744,7 +757,7 @@ extern JS_FRIEND_API(JSBool) js_CheckUndeclaredVarAssignment(JSContext *cx); extern JSBool -js_SetPropertyHelper(JSContext *cx, JSObject *obj, jsid id, JSBool cacheResult, +js_SetPropertyHelper(JSContext *cx, JSObject *obj, jsid id, uintN defineHow, jsval *vp); extern JSBool diff --git a/js/src/jsopcode.cpp b/js/src/jsopcode.cpp index 28376ee0076b..a2c63c9b0cbb 100644 --- a/js/src/jsopcode.cpp +++ b/js/src/jsopcode.cpp @@ -3823,6 +3823,7 @@ Decompile(SprintStack *ss, jsbytecode *pc, intN nb, JSOp nextop) goto do_getprop; case JSOP_SETPROP: + case JSOP_SETMETHOD: LOAD_ATOM(0); GET_QUOTE_AND_FMT("%s[%s] %s= %s", "%s.%s %s= %s", xval); rval = POP_STR(); @@ -4484,6 +4485,7 @@ Decompile(SprintStack *ss, jsbytecode *pc, intN nb, JSOp nextop) break; case JSOP_INITPROP: + case JSOP_INITMETHOD: LOAD_ATOM(0); xval = QuoteString(&ss->sprinter, ATOM_TO_STRING(atom), (jschar) diff --git a/js/src/jsopcode.tbl b/js/src/jsopcode.tbl index 6981a1edf309..13129d2fb3e5 100644 --- a/js/src/jsopcode.tbl +++ b/js/src/jsopcode.tbl @@ -582,10 +582,16 @@ OPDEF(JSOP_OBJTOP, 227,"objtop", NULL, 3, 0, 0, 0, JOF_UINT16 OPDEF(JSOP_LOOP, 228, "loop", NULL, 1, 0, 0, 0, JOF_BYTE) /* - * Debugger versions of JSOP_{GET,CALL}UPVAR. + * Debugger versions of JSOP_{GET,CALL}UPVAR and the flat closure (_FC) ops. */ OPDEF(JSOP_GETUPVAR_DBG, 229,"getupvar_dbg", NULL, 3, 0, 1, 19, JOF_UINT16|JOF_NAME) OPDEF(JSOP_CALLUPVAR_DBG, 230,"callupvar_dbg", NULL, 3, 0, 2, 19, JOF_UINT16|JOF_NAME|JOF_CALLOP) OPDEF(JSOP_DEFFUN_DBGFC, 231,"deffun_dbgfc", NULL, 3, 0, 0, 0, JOF_OBJECT|JOF_DECLARING) OPDEF(JSOP_DEFLOCALFUN_DBGFC,232,"deflocalfun_dbgfc",NULL, 5, 0, 0, 0, JOF_SLOTOBJECT|JOF_DECLARING) OPDEF(JSOP_LAMBDA_DBGFC, 233,"lambda_dbgfc", NULL, 3, 0, 1, 19, JOF_OBJECT) + +/* + * Joined function object as method optimization support. 
+ */
+OPDEF(JSOP_SETMETHOD, 234,"setmethod", NULL, 3, 2, 1, 3, JOF_ATOM|JOF_PROP|JOF_SET|JOF_DETECTING)
+OPDEF(JSOP_INITMETHOD, 235,"initmethod", NULL, 3, 1, 0, 3, JOF_ATOM|JOF_PROP|JOF_SET|JOF_DETECTING)
diff --git a/js/src/jsparse.cpp b/js/src/jsparse.cpp
index 35cd45dd9d63..cc2d2213c7ab 100644
--- a/js/src/jsparse.cpp
+++ b/js/src/jsparse.cpp
@@ -5508,6 +5508,18 @@ Statement(JSContext *cx, JSTokenStream *ts, JSTreeContext *tc)
         pn->pn_type = TOK_SEMI;
         pn->pn_pos = pn2->pn_pos;
         pn->pn_kid = pn2;
+
+        /*
+         * Specialize JSOP_SETPROP into JSOP_SETMETHOD to defer or avoid null
+         * closure cloning. Do this here rather than in AssignExpr as only now
+         * do we know that the uncloned (unjoined in ES3 terms) function object
+         * result of the assignment expression can't escape.
+         */
+        if (PN_TYPE(pn2) == TOK_ASSIGN && PN_OP(pn2) == JSOP_NOP &&
+            PN_OP(pn2->pn_left) == JSOP_SETPROP &&
+            PN_OP(pn2->pn_right) == JSOP_LAMBDA) {
+            pn2->pn_left->pn_op = JSOP_SETMETHOD;
+        }
         break;
       }
diff --git a/js/src/jsscope.cpp b/js/src/jsscope.cpp
index 02791e40cff0..3d691274b563 100644
--- a/js/src/jsscope.cpp
+++ b/js/src/jsscope.cpp
@@ -444,12 +444,13 @@ js_HashScopeProperty(JSDHashTable *table, const void *key)
 
     /* Accumulate from least to most random so the low bits are most random. */
     hash = 0;
+    JS_ASSERT_IF(sprop->isMethod(), !sprop->setter);
     gsop = sprop->getter;
     if (gsop)
-        hash = JS_ROTATE_LEFT32(hash, 4) ^ (jsword)gsop;
+        hash = JS_ROTATE_LEFT32(hash, 4) ^ jsword(gsop);
     gsop = sprop->setter;
     if (gsop)
-        hash = JS_ROTATE_LEFT32(hash, 4) ^ (jsword)gsop;
+        hash = JS_ROTATE_LEFT32(hash, 4) ^ jsword(gsop);
 
     hash = JS_ROTATE_LEFT32(hash, 4) ^ (sprop->flags & ~SPROP_FLAGS_NOT_MATCHED);
 
@@ -1054,6 +1055,9 @@ JSScope::add(JSContext *cx, jsid id,
     JS_ASSERT(JS_IS_SCOPE_LOCKED(cx, this));
     CHECK_ANCESTOR_LINE(this, true);
 
+    JS_ASSERT_IF(attrs & JSPROP_GETTER, getter);
+    JS_ASSERT_IF(attrs & JSPROP_SETTER, setter);
+
     /*
      * You can't add properties to a sealed scope. But note well that you can
      * change property attributes in a sealed scope, even though that replaces
@@ -1069,10 +1073,17 @@ JSScope::add(JSContext *cx, jsid id,
      * Normalize stub getter and setter values for faster is-stub testing in
      * the SPROP_CALL_[GS]ETTER macros.
      */
-    if (getter == JS_PropertyStub)
-        getter = NULL;
     if (setter == JS_PropertyStub)
         setter = NULL;
+    if (flags & SPROP_IS_METHOD) {
+        /* Here, getter is the method, a function object reference.
*/ + JS_ASSERT(getter); + JS_ASSERT(!setter); + JS_ASSERT(!(attrs & (JSPROP_GETTER | JSPROP_SETTER))); + } else { + if (getter == JS_PropertyStub) + getter = NULL; + } /* * Search for id in order to claim its entry, allocating a property tree @@ -1354,6 +1365,9 @@ JSScope::add(JSContext *cx, jsid id, if (js_IdIsIndex(sprop->id, &index)) setIndexedProperties(); + if (sprop->isMethod()) + setMethodBarrier(); + METER(adds); return sprop; @@ -1655,11 +1669,11 @@ JSScopeProperty::trace(JSTracer *trc) if (attrs & (JSPROP_GETTER | JSPROP_SETTER)) { if (attrs & JSPROP_GETTER) { JS_SET_TRACING_DETAILS(trc, PrintPropertyGetterOrSetter, this, 0); - JS_CallTracer(trc, js_CastAsObject(getter), JSTRACE_OBJECT); + JS_CallTracer(trc, getterObject(), JSTRACE_OBJECT); } if (attrs & JSPROP_SETTER) { JS_SET_TRACING_DETAILS(trc, PrintPropertyGetterOrSetter, this, 1); - JS_CallTracer(trc, js_CastAsObject(setter), JSTRACE_OBJECT); + JS_CallTracer(trc, setterObject(), JSTRACE_OBJECT); } } #endif /* JS_HAS_GETTER_SETTER */ diff --git a/js/src/jsscope.h b/js/src/jsscope.h index 211b88c9014b..346f6e368eff 100644 --- a/js/src/jsscope.h +++ b/js/src/jsscope.h @@ -1,5 +1,5 @@ /* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*- - * vim: set ts=8 sw=4 et tw=78: + * vim: set ts=8 sw=4 et tw=99: * * ***** BEGIN LICENSE BLOCK ***** * Version: MPL 1.1/GPL 2.0/LGPL 2.1 @@ -286,12 +286,13 @@ struct JSScope { BRANDED = 0x0004, INDEXED_PROPERTIES = 0x0008, OWN_SHAPE = 0x0010, + METHOD_BARRIER = 0x0020, /* * This flag toggles with each shape-regenerating GC cycle. * See JSRuntime::gcRegenShapesScopeFlag. */ - SHAPE_REGEN = 0x0020 + SHAPE_REGEN = 0x0040 }; bool hadMiddleDelete() { return flags & MIDDLE_DELETE; } @@ -322,6 +323,42 @@ struct JSScope { bool hasRegenFlag(uint8 regenFlag) { return (flags & SHAPE_REGEN) == regenFlag; } + /* + * A scope has a method barrier when some compiler-created "null closure" + * function objects (functions that do not use lexical bindings above their + * scope, only free variable names) that have a correct JSSLOT_PARENT value + * thanks to the COMPILE_N_GO optimization are stored as newly added direct + * property values. + * + * The de-facto standard JS language requires each evaluation of such a + * closure to result in a unique (according to === and observable effects) + * function object. ES3 tried to allow implementations to "join" such + * objects to a single compiler-created object, but this makes an overt + * mutation hazard, also an "identity hazard" against interoperation among + * implementations that join and do not join. + * + * To stay compatible with the de-facto standard, we store the compiler- + * created function object as the method value, set the METHOD_BARRIER + * flag, and brand the scope with a predictable shape that reflects its + * method values, which are cached and traced without being loaded, based + * on shape-qualified cache hit logic and equivalent trace guards. See + * BRANDED above. + * + * This means scope->hasMethodBarrier() => scope->branded(), but of course + * not the other way around. + * + * Then when reading from a scope for which scope->hasMethodBarrier() is + * true, we count on the scope's qualified/guarded shape being unique and + * add a read barrier that clones the compiler-created function object on + * demand, reshaping the scope. 
+ * + * This read barrier is bypassed when evaluating the callee sub-expression + * of a call expression (see the JOF_CALLOP opcodes in jsopcode.tbl), since + * such ops do not present an identity or mutation hazard. + */ + bool hasMethodBarrier() { return flags & METHOD_BARRIER; } + void setMethodBarrier() { flags |= METHOD_BARRIER | BRANDED; } + bool owned() { return object != NULL; } }; @@ -363,7 +400,8 @@ js_CastAsPropertyOp(JSObject *object) struct JSScopeProperty { jsid id; /* int-tagged jsval/untagged JSAtom* */ JSPropertyOp getter; /* getter and setter hooks or objects */ - JSPropertyOp setter; + JSPropertyOp setter; /* getter is JSObject* and setter is 0 + if sprop->isMethod() */ uint32 slot; /* abstract index in object slots */ uint8 attrs; /* attributes, see jsapi.h JSPROP_* */ uint8 flags; /* flags, see below for defines */ @@ -373,6 +411,53 @@ struct JSScopeProperty { to many-kids data structure */ uint32 shape; /* property cache shape identifier */ +/* Bits stored in sprop->flags. */ +#define SPROP_MARK 0x01 +#define SPROP_IS_ALIAS 0x02 +#define SPROP_HAS_SHORTID 0x04 +#define SPROP_FLAG_SHAPE_REGEN 0x08 +#define SPROP_IS_METHOD 0x10 + + bool isMethod() const { + return flags & SPROP_IS_METHOD; + } + JSObject *method() const { + JS_ASSERT(isMethod()); + return js_CastAsObject(getter); + } + jsval methodValue() const { + JS_ASSERT(isMethod()); + return js_CastAsObjectJSVal(getter); + } + + bool hasGetter() const { + return attrs & JSPROP_GETTER; + } + JSObject *getterObject() const { + JS_ASSERT(hasGetter()); + return js_CastAsObject(getter); + } + jsval getterValue() const { + JS_ASSERT(hasGetter()); + return js_CastAsObjectJSVal(getter); + } + + bool hasSetter() const { + return attrs & JSPROP_SETTER; + } + JSObject *setterObject() const { + JS_ASSERT(hasSetter()); + return js_CastAsObject(setter); + } + jsval setterValue() const { + JS_ASSERT(hasSetter()); + return js_CastAsObjectJSVal(setter); + } + + bool methodBarrier(JSContext *cx, JSObject *obj, jsval *vp); + bool get(JSContext* cx, JSObject* obj, jsval* vp); + bool set(JSContext* cx, JSObject* obj, jsval* vp); + void trace(JSTracer *trc); }; @@ -408,12 +493,6 @@ JSScope::has(JSScopeProperty *sprop) return lookup(sprop->id) == sprop; } -/* Bits stored in sprop->flags. */ -#define SPROP_MARK 0x01 -#define SPROP_IS_ALIAS 0x02 -#define SPROP_HAS_SHORTID 0x04 -#define SPROP_FLAG_SHAPE_REGEN 0x08 - /* * If SPROP_HAS_SHORTID is set in sprop->flags, we use sprop->shortid rather * than id when calling sprop's getter or setter. @@ -430,6 +509,9 @@ JSScope::has(JSScopeProperty *sprop) #define SPROP_HAS_STUB_GETTER(sprop) (!(sprop)->getter) #define SPROP_HAS_STUB_SETTER(sprop) (!(sprop)->setter) +#define SPROP_HAS_STUB_GETTER_OR_IS_METHOD(sprop) \ + (SPROP_HAS_STUB_GETTER(sprop) || (sprop)->isMethod()) + #ifndef JS_THREADSAFE # define js_GenerateShape(cx, gcLocked) js_GenerateShape (cx) #endif @@ -551,15 +633,47 @@ JSScope::trace(JSTracer *trc) } -static JS_INLINE bool -js_GetSprop(JSContext* cx, JSScopeProperty* sprop, JSObject* obj, jsval* vp) +/* + * Read barrier for deferred cloning of compiler-created function objects + * optimized as typically non-escaping, ad-hoc methods in obj. 
+ */ +JS_ALWAYS_INLINE bool +JSScopeProperty::methodBarrier(JSContext *cx, JSObject *obj, jsval *vp) { - JS_ASSERT(!SPROP_HAS_STUB_GETTER(sprop)); + JSScope *scope = OBJ_SCOPE(obj); +#ifdef JS_THREADSAFE + JS_ASSERT(scope->title.ownercx == cx); +#endif - if (sprop->attrs & JSPROP_GETTER) { - jsval fval = js_CastAsObjectJSVal(sprop->getter); - return js_InternalGetOrSet(cx, obj, sprop->id, fval, JSACC_READ, - 0, 0, vp); + if (scope->hasMethodBarrier()) { + JSObject *funobj = JSVAL_TO_OBJECT(*vp); + JSFunction *fun = GET_FUNCTION_PRIVATE(cx, funobj); + + if (FUN_OBJECT(fun) == funobj && FUN_INTERPRETED(fun)) { + funobj = js_CloneFunctionObject(cx, fun, OBJ_GET_PARENT(cx, funobj)); + if (!funobj) + return false; + *vp = OBJECT_TO_JSVAL(funobj); + return js_SetPropertyHelper(cx, obj, id, 0, vp); + } + } + return true; +} + +JS_ALWAYS_INLINE bool +JSScopeProperty::get(JSContext* cx, JSObject* obj, jsval* vp) +{ + JS_ASSERT(!SPROP_HAS_STUB_GETTER(this)); + + if (attrs & JSPROP_GETTER) { + JS_ASSERT(!isMethod()); + jsval fval = getterValue(); + return js_InternalGetOrSet(cx, obj, id, fval, JSACC_READ, 0, 0, vp); + } + + if (isMethod()) { + *vp = methodValue(); + return methodBarrier(cx, obj, vp); } /* @@ -570,30 +684,28 @@ js_GetSprop(JSContext* cx, JSScopeProperty* sprop, JSObject* obj, jsval* vp) */ if (STOBJ_GET_CLASS(obj) == &js_WithClass) obj = obj->map->ops->thisObject(cx, obj); - return sprop->getter(cx, obj, SPROP_USERID(sprop), vp); + return getter(cx, obj, SPROP_USERID(this), vp); } -static JS_INLINE bool -js_SetSprop(JSContext* cx, JSScopeProperty* sprop, JSObject* obj, jsval* vp) +JS_ALWAYS_INLINE bool +JSScopeProperty::set(JSContext* cx, JSObject* obj, jsval* vp) { - JS_ASSERT(!(SPROP_HAS_STUB_SETTER(sprop) && - !(sprop->attrs & JSPROP_GETTER))); + JS_ASSERT_IF(SPROP_HAS_STUB_SETTER(this), attrs & JSPROP_GETTER); - if (sprop->attrs & JSPROP_SETTER) { - jsval fval = js_CastAsObjectJSVal(sprop->setter); - return js_InternalGetOrSet(cx, obj, (sprop)->id, fval, JSACC_WRITE, - 1, vp, vp); + if (attrs & JSPROP_SETTER) { + jsval fval = setterValue(); + return js_InternalGetOrSet(cx, obj, id, fval, JSACC_WRITE, 1, vp, vp); } - if (sprop->attrs & JSPROP_GETTER) { + if (attrs & JSPROP_GETTER) { js_ReportGetterOnlyAssignment(cx); return JS_FALSE; } - /* See the comment in js_GetSprop as to why we can check for 'with'. */ + /* See the comment in JSScopeProperty::get as to why we can check for With. */ if (STOBJ_GET_CLASS(obj) == &js_WithClass) obj = obj->map->ops->thisObject(cx, obj); - return sprop->setter(cx, obj, SPROP_USERID(sprop), vp); + return setter(cx, obj, SPROP_USERID(this), vp); } /* Macro for common expression to test for shared permanent attributes. */ diff --git a/js/src/jstracer.cpp b/js/src/jstracer.cpp index 90c0fd2bd082..4f142149bfa6 100644 --- a/js/src/jstracer.cpp +++ b/js/src/jstracer.cpp @@ -2636,8 +2636,9 @@ TraceRecorder::isValidSlot(JSScope* scope, JSScopeProperty* sprop) if (sprop->attrs & JSPROP_READONLY) ABORT_TRACE_RV("writing to a read-only property", false); } + /* This check applies even when setflags == 0. 
*/ - if (setflags != JOF_SET && !SPROP_HAS_STUB_GETTER(sprop)) + if (setflags != JOF_SET && !SPROP_HAS_STUB_GETTER_OR_IS_METHOD(sprop)) ABORT_TRACE_RV("non-stub getter", false); if (!SPROP_HAS_VALID_SLOT(sprop, scope)) @@ -4100,7 +4101,7 @@ TraceRecorder::hasMethod(JSObject* obj, jsid id) JSScope* scope = OBJ_SCOPE(pobj); JSScopeProperty* sprop = (JSScopeProperty*) prop; - if (SPROP_HAS_STUB_GETTER(sprop) && + if (SPROP_HAS_STUB_GETTER_OR_IS_METHOD(sprop) && SPROP_HAS_VALID_SLOT(sprop, scope)) { jsval v = LOCKED_OBJ_GET_SLOT(pobj, sprop->slot); if (VALUE_IS_FUNCTION(cx, v)) { @@ -10485,32 +10486,33 @@ TraceRecorder::prop(JSObject* obj, LIns* obj_ins, uint32& slot, LIns*& v_ins) if (setflags && (sprop->attrs & JSPROP_READONLY)) ABORT_TRACE("writing to a readonly property"); if (setflags != JOF_SET && !SPROP_HAS_STUB_GETTER(sprop)) { - // FIXME 450335: generalize this away from regexp built-in getters. - if (setflags == 0 && - sprop->getter == js_RegExpClass.getProperty && - sprop->shortid < 0) { - if (sprop->shortid == REGEXP_LAST_INDEX) - ABORT_TRACE("can't trace RegExp.lastIndex yet"); - LIns* args[] = { INS_CONSTPTR(sprop), obj_ins, cx_ins }; - v_ins = lir->insCall(&js_CallGetter_ci, args); - guard(false, lir->ins2(LIR_eq, v_ins, INS_CONST(JSVAL_ERROR_COOKIE)), OOM_EXIT); - /* - * BIG FAT WARNING: This snapshot cannot be a BRANCH_EXIT, since - * the value to the top of the stack is not the value we unbox. - */ - unbox_jsval((sprop->shortid == REGEXP_SOURCE) ? JSVAL_STRING : JSVAL_BOOLEAN, - v_ins, - snapshot(MISMATCH_EXIT)); - return JSRS_CONTINUE; - } - if (setflags == 0 && - sprop->getter == js_StringClass.getProperty && - sprop->id == ATOM_KEY(cx->runtime->atomState.lengthAtom)) { - if (!guardClass(obj, obj_ins, &js_StringClass, snapshot(MISMATCH_EXIT))) - ABORT_TRACE("can't trace String.length on non-String objects"); - LIns* str_ins = stobj_get_private(obj_ins, JSVAL_TAGMASK); - v_ins = lir->ins1(LIR_i2f, getStringLength(str_ins)); - return JSRS_CONTINUE; + if (setflags == 0) { + // FIXME 450335: generalize this away from regexp built-in getters. + if (sprop->getter == js_RegExpClass.getProperty && + sprop->shortid < 0) { + if (sprop->shortid == REGEXP_LAST_INDEX) + ABORT_TRACE("can't trace RegExp.lastIndex yet"); + LIns* args[] = { INS_CONSTPTR(sprop), obj_ins, cx_ins }; + v_ins = lir->insCall(&js_CallGetter_ci, args); + guard(false, lir->ins2(LIR_eq, v_ins, INS_CONST(JSVAL_ERROR_COOKIE)), OOM_EXIT); + + /* + * BIG FAT WARNING: This snapshot cannot be a BRANCH_EXIT, since + * the value to the top of the stack is not the value we unbox. + */ + unbox_jsval((sprop->shortid == REGEXP_SOURCE) ? 
JSVAL_STRING : JSVAL_BOOLEAN, + v_ins, + snapshot(MISMATCH_EXIT)); + return JSRS_CONTINUE; + } + if (sprop->getter == js_StringClass.getProperty && + sprop->id == ATOM_KEY(cx->runtime->atomState.lengthAtom)) { + if (!guardClass(obj, obj_ins, &js_StringClass, snapshot(MISMATCH_EXIT))) + ABORT_TRACE("can't trace String.length on non-String objects"); + LIns* str_ins = stobj_get_private(obj_ins, JSVAL_TAGMASK); + v_ins = lir->ins1(LIR_i2f, getStringLength(str_ins)); + return JSRS_CONTINUE; + } } ABORT_TRACE("non-stub getter"); } @@ -11862,7 +11864,9 @@ TraceRecorder::record_JSOP_CALLPROP() } else if (JSVAL_TAG(l) == JSVAL_BOOLEAN) { if (l == JSVAL_VOID) ABORT_TRACE("callprop on void"); - guard(false, lir->ins2i(LIR_eq, get(&l), JSVAL_TO_PSEUDO_BOOLEAN(JSVAL_VOID)), MISMATCH_EXIT); + guard(false, + lir->ins2i(LIR_eq, get(&l), JSVAL_TO_PSEUDO_BOOLEAN(JSVAL_VOID)), + MISMATCH_EXIT); i = JSProto_Boolean; debug_only_stmt(protoname = "Boolean.prototype";) } else { @@ -12374,6 +12378,18 @@ DBG_STUB(JSOP_DEFFUN_DBGFC) DBG_STUB(JSOP_DEFLOCALFUN_DBGFC) DBG_STUB(JSOP_LAMBDA_DBGFC) +JS_REQUIRES_STACK JSRecordingStatus +TraceRecorder::record_JSOP_SETMETHOD() +{ + return record_JSOP_SETPROP(); +} + +JS_REQUIRES_STACK JSRecordingStatus +TraceRecorder::record_JSOP_INITMETHOD() +{ + return record_JSOP_INITPROP(); +} + #ifdef JS_JIT_SPEW /* Prints information about entry typemaps and unstable exits for all peers at a PC */ void diff --git a/js/src/jstypes.h b/js/src/jstypes.h index 3a5a958ba8b6..17183e24c5f6 100644 --- a/js/src/jstypes.h +++ b/js/src/jstypes.h @@ -456,6 +456,14 @@ typedef JSUintPtr JSUword; # define JS_DATA_TO_FUNC_PTR(type, ptr) ((type) (void *) (ptr)) #endif +#ifdef __GNUC__ +# define JS_EXTENSION __extension__ +# define JS_EXTENSION_(s) __extension__ ({ s; }) +#else +# define JS_EXTENSION +# define JS_EXTENSION_(s) s +#endif + JS_END_EXTERN_C #endif /* jstypes_h___ */ diff --git a/js/src/jsxdrapi.h b/js/src/jsxdrapi.h index f20bc57b3ee9..930d7aa74144 100644 --- a/js/src/jsxdrapi.h +++ b/js/src/jsxdrapi.h @@ -204,7 +204,7 @@ JS_XDRFindClassById(JSXDRState *xdr, uint32 id); * before deserialization of bytecode. If the saved version does not match * the current version, abort deserialization and invalidate the file. */ -#define JSXDR_BYTECODE_VERSION (0xb973c0de - 49) +#define JSXDR_BYTECODE_VERSION (0xb973c0de - 50) /* * Library-private functions. 
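The method read barrier threaded through the patch above has a simple JS-level
contract: every evaluation of a function expression must appear to yield a
fresh function object, even when the compiler joins those evaluations to a
single object internally. A minimal sketch of the two hazards in SpiderMonkey
shell JavaScript (illustrative only, not part of either patch; print is the
shell's output function and the names are hypothetical):

    function makeObj() {
        // Each evaluation of this initialiser must appear to create a new
        // function object for m, even if the engine joins them internally.
        return { m: function () { return 0; } };
    }

    var a = makeObj();
    var b = makeObj();

    // Identity hazard: == and === must tell the two method values apart.
    print(a.m === b.m);      // false

    // Mutation hazard: an expando set through one object's method must not
    // be observable through the other's.
    a.m.tag = "A";
    print(b.m.tag);          // undefined

A call such as a.m() goes through the JOF_CALLOP paths and bypasses the
barrier, since a callee that never escapes presents neither hazard; only a
read that may leak the method value (JSGET_METHOD_BARRIER) pays for the
js_CloneFunctionObject.
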
From d366588e38f12fefd44a130de01746f08bdc2998 Mon Sep 17 00:00:00 2001 From: Robert Sayre Date: Tue, 28 Jul 2009 18:51:35 -0400 Subject: [PATCH 17/19] Backed out changeset e09130fcb013 --- js/src/imacros.c.out | 2 - js/src/jsapi.cpp | 2 +- js/src/jsarray.cpp | 2 +- js/src/jsbuiltins.cpp | 4 +- js/src/jsemit.cpp | 10 +- js/src/jsinterp.cpp | 282 +++++++++++++++--------------------------- js/src/jsiter.cpp | 2 +- js/src/jsobj.cpp | 121 ++++++------------ js/src/jsobj.h | 23 +--- js/src/jsopcode.cpp | 2 - js/src/jsopcode.tbl | 8 +- js/src/jsparse.cpp | 12 -- js/src/jsscope.cpp | 26 +--- js/src/jsscope.h | 168 +++++-------------------- js/src/jstracer.cpp | 74 +++++------ js/src/jstypes.h | 8 -- js/src/jsxdrapi.h | 2 +- 17 files changed, 213 insertions(+), 535 deletions(-) diff --git a/js/src/imacros.c.out b/js/src/imacros.c.out index d7d4d9f73629..8871bb8001e4 100644 --- a/js/src/imacros.c.out +++ b/js/src/imacros.c.out @@ -964,8 +964,6 @@ uint8 js_opcode2extra[JSOP_LIMIT] = { 0, /* JSOP_DEFFUN_DBGFC */ 0, /* JSOP_DEFLOCALFUN_DBGFC */ 0, /* JSOP_LAMBDA_DBGFC */ - 0, /* JSOP_SETMETHOD */ - 0, /* JSOP_INITMETHOD */ }; #define JSOP_IS_IMACOP(x) (0 \ || x == JSOP_BITOR \ diff --git a/js/src/jsapi.cpp b/js/src/jsapi.cpp index b987d291e389..dd3ab2e83557 100644 --- a/js/src/jsapi.cpp +++ b/js/src/jsapi.cpp @@ -3544,7 +3544,7 @@ JS_GetMethodById(JSContext *cx, JSObject *obj, jsid id, JSObject **objp, jsval *vp) { CHECK_REQUEST(cx); - if (!js_GetMethod(cx, obj, id, JSGET_METHOD_BARRIER, vp)) + if (!js_GetMethod(cx, obj, id, false, vp)) return JS_FALSE; if (objp) *objp = obj; diff --git a/js/src/jsarray.cpp b/js/src/jsarray.cpp index debe0d8a2ba7..f4b1039efab6 100644 --- a/js/src/jsarray.cpp +++ b/js/src/jsarray.cpp @@ -792,7 +792,7 @@ array_getProperty(JSContext *cx, JSObject *obj, jsid id, jsval *vp) if (prop) { if (OBJ_IS_NATIVE(obj2)) { sprop = (JSScopeProperty *) prop; - if (!js_NativeGet(cx, obj, obj2, sprop, JSGET_METHOD_BARRIER, vp)) + if (!js_NativeGet(cx, obj, obj2, sprop, vp)) return JS_FALSE; } OBJ_DROP_PROPERTY(cx, obj2, prop); diff --git a/js/src/jsbuiltins.cpp b/js/src/jsbuiltins.cpp index 0f97db750322..1f1408bf460c 100644 --- a/js/src/jsbuiltins.cpp +++ b/js/src/jsbuiltins.cpp @@ -337,9 +337,9 @@ JS_DEFINE_CALLINFO_3(extern, BOOL, js_HasNamedPropertyInt32, CONTEXT, OBJECT, IN jsval FASTCALL js_CallGetter(JSContext* cx, JSObject* obj, JSScopeProperty* sprop) { - JS_ASSERT(!SPROP_HAS_STUB_GETTER_OR_IS_METHOD(sprop)); + JS_ASSERT(!SPROP_HAS_STUB_GETTER(sprop)); jsval v; - if (!sprop->get(cx, obj, &v)) + if (!js_GetSprop(cx, sprop, obj, &v)) return JSVAL_ERROR_COOKIE; return v; } diff --git a/js/src/jsemit.cpp b/js/src/jsemit.cpp index 2b63635620e8..5105d8bba498 100644 --- a/js/src/jsemit.cpp +++ b/js/src/jsemit.cpp @@ -6529,15 +6529,7 @@ js_EmitTree(JSContext *cx, JSCodeGenerator *cg, JSParseNode *pn) ale = cg->atomList.add(cg->compiler, pn3->pn_atom); if (!ale) return JS_FALSE; - - JSOp initOp = (PN_OP(pn2->pn_right) == JSOP_LAMBDA -#if JS_HAS_GETTER_SETTER - && op != JSOP_GETTER && op != JSOP_SETTER -#endif - ) - ? 
JSOP_INITMETHOD - : JSOP_INITPROP; - EMIT_INDEX_OP(initOp, ALE_INDEX(ale)); + EMIT_INDEX_OP(JSOP_INITPROP, ALE_INDEX(ale)); } } diff --git a/js/src/jsinterp.cpp b/js/src/jsinterp.cpp index 13089d7a7d39..0d9a50ba5ad1 100644 --- a/js/src/jsinterp.cpp +++ b/js/src/jsinterp.cpp @@ -1,5 +1,5 @@ /* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- - * vim: set ts=8 sw=4 et tw=99: + * vim: set ts=8 sw=4 et tw=79: * * ***** BEGIN LICENSE BLOCK ***** * Version: MPL 1.1/GPL 2.0/LGPL 2.1 @@ -184,61 +184,46 @@ js_FillPropertyCache(JSContext *cx, JSObject *obj, * is a plain old method? It's a function-valued property with stub * getter, so get of a function is idempotent. */ - if (cs->format & JOF_CALLOP) { + if ((cs->format & JOF_CALLOP) && + SPROP_HAS_STUB_GETTER(sprop) && + SPROP_HAS_VALID_SLOT(sprop, scope)) { jsval v; - if (sprop->isMethod()) { + v = LOCKED_OBJ_GET_SLOT(pobj, sprop->slot); + if (VALUE_IS_FUNCTION(cx, v)) { /* - * A compiler-created function object, AKA a method, already - * memoized in the property tree. + * Great, we have a function-valued prototype property where + * the getter is JS_PropertyStub. The type id in pobj's scope + * does not evolve with changes to property values, however. + * + * So here, on first cache fill for this method, we brand the + * scope with a new shape and set the SCOPE_BRANDED flag. Once + * this scope flag is set, any write to a function-valued plain + * old property in pobj will result in shape being regenerated. */ - JS_ASSERT(scope->hasMethodBarrier()); - v = sprop->methodValue(); - JS_ASSERT(VALUE_IS_FUNCTION(cx, v)); + if (!scope->branded()) { + PCMETER(cache->brandfills++); +#ifdef DEBUG_notme + fprintf(stderr, + "branding %p (%s) for funobj %p (%s), shape %lu\n", + pobj, LOCKED_OBJ_GET_CLASS(pobj)->name, + JSVAL_TO_OBJECT(v), + JS_GetFunctionName(GET_FUNCTION_PRIVATE(cx, JSVAL_TO_OBJECT(v))), + OBJ_SHAPE(obj)); +#endif + scope->brandingShapeChange(cx, sprop->slot, v); + if (js_IsPropertyCacheDisabled(cx)) /* check for rt->shapeGen overflow */ + return JS_NO_PROP_CACHE_FILL; + scope->setBranded(); + } vword = JSVAL_OBJECT_TO_PCVAL(v); break; } - - if (SPROP_HAS_STUB_GETTER_OR_IS_METHOD(sprop) && - SPROP_HAS_VALID_SLOT(sprop, scope)) { - v = LOCKED_OBJ_GET_SLOT(pobj, sprop->slot); - if (VALUE_IS_FUNCTION(cx, v)) { - /* - * Great, we have a function-valued prototype property - * where the getter is JS_PropertyStub. The type id in - * pobj's scope does not evolve with changes to property - * values, however. - * - * So here, on first cache fill for this method, we brand - * the scope with a new shape and set the SCOPE_BRANDED - * flag. Once this scope flag is set, any write that adds - * or deletes a function-valued plain old property in - * scope->object will result in shape being regenerated. - */ - if (!scope->branded()) { - PCMETER(cache->brandfills++); -#ifdef DEBUG_notme - fprintf(stderr, - "branding %p (%s) for funobj %p (%s), shape %lu\n", - pobj, LOCKED_OBJ_GET_CLASS(pobj)->name, - JSVAL_TO_OBJECT(v), - JS_GetFunctionName(GET_FUNCTION_PRIVATE(cx, JSVAL_TO_OBJECT(v))), - OBJ_SHAPE(obj)); -#endif - scope->brandingShapeChange(cx, sprop->slot, v); - if (js_IsPropertyCacheDisabled(cx)) /* check for rt->shapeGen overflow */ - return JS_NO_PROP_CACHE_FILL; - scope->setBranded(); - } - vword = JSVAL_OBJECT_TO_PCVAL(v); - break; - } - } } /* If getting a value via a stub getter, we can cache the slot. 
*/ if (!(cs->format & (JOF_SET | JOF_INCDEC | JOF_FOR)) && - SPROP_HAS_STUB_GETTER_OR_IS_METHOD(sprop) && + SPROP_HAS_STUB_GETTER(sprop) && SPROP_HAS_VALID_SLOT(sprop, scope)) { /* Great, let's cache sprop's slot and use it on cache hit. */ vword = SLOT_TO_PCVAL(sprop->slot); @@ -250,30 +235,30 @@ js_FillPropertyCache(JSContext *cx, JSObject *obj, scope->shape == sprop->shape) { /* * Our caller added a new property. We also know that a setter - * that js_NativeSet could have run has not mutated the scope, - * so the added property is still the last one added, and the + * that js_NativeSet could have run has not mutated the scope + * so the added property is still the last one added and the * scope is not branded. * * We want to cache under scope's shape before the property * addition to bias for the case when the mutator opcode - * always adds the same property. This allows us to optimize - * periodic execution of object initializers or other explicit - * initialization sequences such as + * always adds the same property. It allows to optimize + * periodic execution of object initializers or explicit + * initialization sequences like * * obj = {}; obj.x = 1; obj.y = 2; * * We assume that on average the win from this optimization is - * greater than the cost of an extra mismatch per loop owing to + * bigger that the cost of an extra mismatch per loop due to * the bias for the following case: * * obj = {}; ... for (...) { ... obj.x = ... } * - * On the first iteration of such a for loop, JSOP_SETPROP - * fills the cache with the shape of the newly created object - * obj, not the shape of obj after obj.x has been assigned. - * That mismatches obj's shape on the second iteration. Note - * that on the third and subsequent iterations the cache will - * be hit because the shape is no longer updated. + * On the first iteration JSOP_SETPROP fills the cache with + * the shape of newly created object, not the shape after + * obj.x is assigned. That mismatches obj's shape on the + * second iteration. Note that on third and the following + * iterations the cache will be hit since the shape no longer + * mutates. */ JS_ASSERT(scope->owned()); if (sprop->parent) { @@ -1004,7 +989,7 @@ js_OnUnknownMethod(JSContext *cx, jsval *vp) MUST_FLOW_THROUGH("out"); id = ATOM_TO_JSID(cx->runtime->atomState.noSuchMethodAtom); - ok = js_GetMethod(cx, obj, id, 0, &tvr.u.value); + ok = js_GetMethod(cx, obj, id, false, &tvr.u.value); if (!ok) goto out; if (JSVAL_IS_PRIMITIVE(tvr.u.value)) { @@ -2147,9 +2132,9 @@ js_TraceOpcode(JSContext *cx) fp->script, cx->tracePrevPc); /* - * If there aren't that many elements on the stack, then we have - * probably entered a new frame, and printing output would just be - * misleading. + * If there aren't that many elements on the stack, then + * we have probably entered a new frame, and printing output + * would just be misleading. 
*/ if (ndefs != 0 && ndefs < regs->sp - fp->slots) { @@ -2615,7 +2600,7 @@ AssertValidPropertyCacheHit(JSContext *cx, JSScript *script, JSFrameRegs& regs, JS_ASSERT(PCVAL_IS_OBJECT(entry->vword)); JS_ASSERT(entry->vword != PCVAL_NULL); JS_ASSERT(OBJ_SCOPE(pobj)->branded()); - JS_ASSERT(SPROP_HAS_STUB_GETTER_OR_IS_METHOD(sprop)); + JS_ASSERT(SPROP_HAS_STUB_GETTER(sprop)); JS_ASSERT(SPROP_HAS_VALID_SLOT(sprop, OBJ_SCOPE(pobj))); v = LOCKED_OBJ_GET_SLOT(pobj, sprop->slot); JS_ASSERT(VALUE_IS_FUNCTION(cx, v)); @@ -2652,11 +2637,9 @@ JS_STATIC_ASSERT(JSOP_DEFFUN_FC_LENGTH == JSOP_DEFFUN_DBGFC_LENGTH); /* * Same for JSOP_SETNAME and JSOP_SETPROP, which differ only slightly but - * remain distinct for the decompiler. Likewise for JSOP_INIT{PROP,METHOD}. + * remain distinct for the decompiler. */ JS_STATIC_ASSERT(JSOP_SETNAME_LENGTH == JSOP_SETPROP_LENGTH); -JS_STATIC_ASSERT(JSOP_SETNAME_LENGTH == JSOP_SETMETHOD_LENGTH); -JS_STATIC_ASSERT(JSOP_INITPROP_LENGTH == JSOP_INITMETHOD_LENGTH); /* See TRY_BRANCH_AFTER_COND. */ JS_STATIC_ASSERT(JSOP_IFNE_LENGTH == JSOP_IFEQ_LENGTH); @@ -2717,6 +2700,14 @@ js_Interpret(JSContext *cx) #endif JSAutoResolveFlags rf(cx, JSRESOLVE_INFER); +#ifdef __GNUC__ +# define JS_EXTENSION __extension__ +# define JS_EXTENSION_(s) __extension__ ({ s; }) +#else +# define JS_EXTENSION +# define JS_EXTENSION_(s) s +#endif + # ifdef DEBUG /* * We call this macro from BEGIN_CASE in threaded interpreters, @@ -3619,7 +3610,7 @@ js_Interpret(JSContext *cx) goto error; \ JS_END_MACRO -#define NATIVE_GET(cx,obj,pobj,sprop,getHow,vp) \ +#define NATIVE_GET(cx,obj,pobj,sprop,vp) \ JS_BEGIN_MACRO \ if (SPROP_HAS_STUB_GETTER(sprop)) { \ /* Fast path for Object instance properties. */ \ @@ -3629,7 +3620,7 @@ js_Interpret(JSContext *cx) ? LOCKED_OBJ_GET_SLOT(pobj, (sprop)->slot) \ : JSVAL_VOID; \ } else { \ - if (!js_NativeGet(cx, obj, pobj, sprop, getHow, vp)) \ + if (!js_NativeGet(cx, obj, pobj, sprop, vp)) \ goto error; \ } \ JS_END_MACRO @@ -4490,7 +4481,7 @@ js_Interpret(JSContext *cx) } else { JS_ASSERT(PCVAL_IS_SPROP(entry->vword)); sprop = PCVAL_TO_SPROP(entry->vword); - NATIVE_GET(cx, obj, obj2, sprop, JSGET_METHOD_BARRIER, &rval); + NATIVE_GET(cx, obj, obj2, sprop, &rval); } JS_UNLOCK_OBJ(cx, obj2); break; @@ -4504,9 +4495,7 @@ js_Interpret(JSContext *cx) } id = ATOM_TO_JSID(atom); if (entry - ? !js_GetPropertyHelper(cx, obj, id, - JSGET_CACHE_RESULT | JSGET_METHOD_BARRIER, - &rval) + ? !js_GetPropertyHelper(cx, obj, id, true, &rval) : !OBJ_GET_PROPERTY(cx, obj, id, &rval)) { goto error; } @@ -4583,7 +4572,7 @@ js_Interpret(JSContext *cx) } else { JS_ASSERT(PCVAL_IS_SPROP(entry->vword)); sprop = PCVAL_TO_SPROP(entry->vword); - NATIVE_GET(cx, obj, obj2, sprop, 0, &rval); + NATIVE_GET(cx, obj, obj2, sprop, &rval); } JS_UNLOCK_OBJ(cx, obj2); STORE_OPND(-1, rval); @@ -4602,13 +4591,13 @@ js_Interpret(JSContext *cx) id = ATOM_TO_JSID(atom); PUSH(JSVAL_NULL); if (!JSVAL_IS_PRIMITIVE(lval)) { - if (!js_GetMethod(cx, obj, id, entry ? 
JSGET_CACHE_RESULT : 0, &rval)) + if (!js_GetMethod(cx, obj, id, !!entry, &rval)) goto error; STORE_OPND(-1, OBJECT_TO_JSVAL(obj)); STORE_OPND(-2, rval); } else { JS_ASSERT(obj->map->ops->getProperty == js_GetProperty); - if (!js_GetPropertyHelper(cx, obj, id, JSGET_CACHE_RESULT, &rval)) + if (!js_GetPropertyHelper(cx, obj, id, true, &rval)) goto error; STORE_OPND(-1, lval); STORE_OPND(-2, rval); @@ -4639,12 +4628,9 @@ js_Interpret(JSContext *cx) BEGIN_CASE(JSOP_SETNAME) BEGIN_CASE(JSOP_SETPROP) - BEGIN_CASE(JSOP_SETMETHOD) - do_setprop: rval = FETCH_OPND(-1); - JS_ASSERT_IF(op == JSOP_SETMETHOD, VALUE_IS_FUNCTION(cx, rval)); lval = FETCH_OPND(-2); - JS_ASSERT_IF(op == JSOP_SETNAME, !JSVAL_IS_PRIMITIVE(lval)); + JS_ASSERT(!JSVAL_IS_PRIMITIVE(lval) || op == JSOP_SETPROP); VALUE_TO_OBJECT(cx, -2, lval, obj); do { @@ -4798,21 +4784,9 @@ js_Interpret(JSContext *cx) sprop = sprop2; } else { scope->extend(cx, sprop); - - jsuint index; - if (js_IdIsIndex(sprop->id, &index)) - scope->setIndexedProperties(); - - if (sprop->isMethod()) - scope->setMethodBarrier(); } - /* - * No LOCKED_OBJ_WRITE_BARRIER because here we - * are adding a new property, not updating an - * existing slot's value that might contain a - * method of a branded scope. - */ + LOCKED_OBJ_WRITE_BARRIER(cx, obj, slot, rval); TRACE_2(SetPropHit, entry, sprop); LOCKED_OBJ_SET_SLOT(obj, slot, rval); JS_UNLOCK_SCOPE(cx, scope); @@ -4856,10 +4830,7 @@ js_Interpret(JSContext *cx) LOAD_ATOM(0); id = ATOM_TO_JSID(atom); if (entry) { - uintN defineHow = (op == JSOP_SETMETHOD) - ? JSDNP_CACHE_RESULT | JSDNP_SET_METHOD - : JSDNP_CACHE_RESULT; - if (!js_SetPropertyHelper(cx, obj, id, defineHow, &rval)) + if (!js_SetPropertyHelper(cx, obj, id, true, &rval)) goto error; } else { if (!OBJ_SET_PROPERTY(cx, obj, id, &rval)) @@ -4916,7 +4887,7 @@ js_Interpret(JSContext *cx) END_CASE(JSOP_GETELEM) BEGIN_CASE(JSOP_CALLELEM) - ELEMENT_OP(-1, js_GetMethod(cx, obj, id, 0, &rval)); + ELEMENT_OP(-1, js_GetMethod(cx, obj, id, false, &rval)); #if JS_HAS_NO_SUCH_METHOD if (JS_UNLIKELY(JSVAL_IS_VOID(rval))) { regs.sp[-2] = regs.sp[-1]; @@ -5370,7 +5341,7 @@ js_Interpret(JSContext *cx) } else { sprop = (JSScopeProperty *)prop; do_native_get: - NATIVE_GET(cx, obj, obj2, sprop, JSGET_METHOD_BARRIER, &rval); + NATIVE_GET(cx, obj, obj2, sprop, &rval); OBJ_DROP_PROPERTY(cx, obj2, (JSProperty *) sprop); } @@ -5951,7 +5922,7 @@ js_Interpret(JSContext *cx) sprop = (JSScopeProperty *) prop; if ((sprop->attrs & JSPROP_PERMANENT) && SPROP_HAS_VALID_SLOT(sprop, OBJ_SCOPE(obj)) && - SPROP_HAS_STUB_GETTER_OR_IS_METHOD(sprop) && + SPROP_HAS_STUB_GETTER(sprop) && SPROP_HAS_STUB_SETTER(sprop)) { /* * Fast globals use frame variables to map the global @@ -6245,64 +6216,23 @@ js_Interpret(JSContext *cx) obj = FUN_OBJECT(fun); if (FUN_NULL_CLOSURE(fun)) { - parent = fp->scopeChain; - - if (OBJ_GET_PARENT(cx, obj) == parent) { - JSScope *scope; - - lval = FETCH_OPND(-1); - op = JSOp(regs.pc[JSOP_LAMBDA_LENGTH]); - - /* - * Optimize ({method: function () { ... }, ...}) and - * this.method = function () { ... }; bytecode sequences. - * - * Note that we jump to the entry points for JSOP_SETPROP - * and JSOP_INITPROP without calling the trace recorder, - * because the record hooks for those ops are essentially - * no-ops (this can't change given the predictive shape - * guarding the recorder must do). 
- */ - if (op == JSOP_SETMETHOD) { -#ifdef DEBUG - op2 = JSOp(regs.pc[JSOP_LAMBDA_LENGTH + JSOP_SETMETHOD_LENGTH]); - JS_ASSERT(op2 == JSOP_POP || op2 == JSOP_POPV); -#endif - - if (JSVAL_IS_OBJECT(lval) && - (obj2 = JSVAL_TO_OBJECT(lval)) && - OBJ_GET_CLASS(cx, obj2) == &js_ObjectClass) { - scope = OBJ_SCOPE(obj2); - if (scope->object == obj2) { - PUSH_OPND(OBJECT_TO_JSVAL(obj)); - regs.pc += JSOP_LAMBDA_LENGTH; - goto do_setprop; - } - } - } else if (op == JSOP_INITMETHOD) { - JS_ASSERT(!JSVAL_IS_PRIMITIVE(lval)); - obj2 = JSVAL_TO_OBJECT(lval); - scope = OBJ_SCOPE(obj2); - - /* - * JSOP_NEWINIT gave the new object it created (obj2 - * here) its own scope. - */ - JS_ASSERT(scope->object == obj2); - PUSH_OPND(OBJECT_TO_JSVAL(obj)); - regs.pc += JSOP_LAMBDA_LENGTH; - goto do_initprop; - } - } + obj = js_CloneFunctionObject(cx, fun, fp->scopeChain); + if (!obj) + goto error; } else { parent = js_GetScopeChain(cx, fp); if (!parent) goto error; - } - obj = js_CloneFunctionObject(cx, fun, parent); - if (!obj) - goto error; + /* + * FIXME: bug 471214, Cloning here even when the compiler saw + * the right parent is wasteful but we don't fully support + * joined function objects, yet. + */ + obj = js_CloneFunctionObject(cx, fun, parent); + if (!obj) + goto error; + } PUSH_OPND(OBJECT_TO_JSVAL(obj)); END_CASE(JSOP_LAMBDA) @@ -6449,26 +6379,11 @@ js_Interpret(JSContext *cx) BEGIN_CASE(JSOP_NEWINIT) i = GET_INT8(regs.pc); JS_ASSERT(i == JSProto_Array || i == JSProto_Object); - if (i == JSProto_Array) { - obj = js_NewArrayObject(cx, 0, NULL); - if (!obj) - goto error; - } else { - obj = js_NewObject(cx, &js_ObjectClass, NULL, NULL); - if (!obj) - goto error; - - if (regs.pc[JSOP_NEWINIT_LENGTH] != JSOP_ENDINIT) { - JS_LOCK_OBJ(cx, obj); - JSScope *scope = js_GetMutableScope(cx, obj); - if (!scope) { - JS_UNLOCK_OBJ(cx, obj); - goto error; - } - JS_UNLOCK_SCOPE(cx, scope); - } - } - + obj = (i == JSProto_Array) + ? js_NewArrayObject(cx, 0, NULL) + : js_NewObject(cx, &js_ObjectClass, NULL, NULL); + if (!obj) + goto error; PUSH_OPND(OBJECT_TO_JSVAL(obj)); fp->sharpDepth++; CHECK_INTERRUPT_HANDLER(); @@ -6486,8 +6401,6 @@ js_Interpret(JSContext *cx) END_CASE(JSOP_ENDINIT) BEGIN_CASE(JSOP_INITPROP) - BEGIN_CASE(JSOP_INITMETHOD) - do_initprop: /* Load the property's initial value into rval. */ JS_ASSERT(regs.sp - StackBase(fp) >= 2); rval = FETCH_OPND(-1); @@ -6508,7 +6421,6 @@ js_Interpret(JSContext *cx) JS_LOCK_OBJ(cx, obj); scope = OBJ_SCOPE(obj); - JS_ASSERT(scope->object == obj); JS_ASSERT(!scope->sealed()); kshape = scope->shape; cache = &JS_PROPERTY_CACHE(cx); @@ -6538,6 +6450,14 @@ js_Interpret(JSContext *cx) if (!SPROP_HAS_STUB_SETTER(sprop)) goto do_initprop_miss; + if (!scope->owned()) { + scope = js_GetMutableScope(cx, obj); + if (!scope) { + JS_UNLOCK_OBJ(cx, obj); + goto error; + } + } + /* * Detect a repeated property name and force a miss to * share the strict warning code and cope with complexity @@ -6588,11 +6508,7 @@ js_Interpret(JSContext *cx) scope->lastProp = sprop; } - /* - * No LOCKED_OBJ_WRITE_BARRIER because here we are adding a - * new property, not updating an existing slot's value that - * might contain a method of a branded scope. - */ + LOCKED_OBJ_WRITE_BARRIER(cx, obj, slot, rval); TRACE_2(SetPropHit, entry, sprop); LOCKED_OBJ_SET_SLOT(obj, slot, rval); JS_UNLOCK_SCOPE(cx, scope); @@ -6613,16 +6529,12 @@ js_Interpret(JSContext *cx) goto error; } - uintN defineHow = (op == JSOP_INITMETHOD) - ? 
JSDNP_CACHE_RESULT | JSDNP_SET_METHOD - : JSDNP_CACHE_RESULT; if (!(JS_UNLIKELY(atom == cx->runtime->atomState.protoAtom) - ? js_SetPropertyHelper(cx, obj, id, defineHow, &rval) + ? js_SetPropertyHelper(cx, obj, id, true, &rval) : js_DefineNativeProperty(cx, obj, id, rval, NULL, NULL, JSPROP_ENUMERATE, 0, 0, NULL, - defineHow))) { + JSDNP_CACHE_RESULT))) goto error; - } } while (0); /* Common tail for property cache hit and miss cases. */ diff --git a/js/src/jsiter.cpp b/js/src/jsiter.cpp index b87662baf415..a9c3473f9775 100644 --- a/js/src/jsiter.cpp +++ b/js/src/jsiter.cpp @@ -376,7 +376,7 @@ js_ValueToIterator(JSContext *cx, uintN flags, jsval *vp) *vp = OBJECT_TO_JSVAL(iterobj); } else { atom = cx->runtime->atomState.iteratorAtom; - if (!js_GetMethod(cx, obj, ATOM_TO_JSID(atom), 0, vp)) + if (!js_GetMethod(cx, obj, ATOM_TO_JSID(atom), false, vp)) goto bad; if (JSVAL_IS_VOID(*vp)) { default_iter: diff --git a/js/src/jsobj.cpp b/js/src/jsobj.cpp index 8e6fc5ee6f33..bf5f72b4e808 100644 --- a/js/src/jsobj.cpp +++ b/js/src/jsobj.cpp @@ -418,7 +418,7 @@ MarkSharpObjects(JSContext *cx, JSObject *obj, JSIdArray **idap) JSScopeProperty *sprop = (JSScopeProperty *) prop; val = JSVAL_NULL; if (attrs & JSPROP_GETTER) - val = sprop->getterValue(); + val = js_CastAsObjectJSVal(sprop->getter); if (attrs & JSPROP_SETTER) { if (val != JSVAL_NULL) { /* Mark the getter, then set val to setter. */ @@ -426,7 +426,7 @@ MarkSharpObjects(JSContext *cx, JSObject *obj, JSIdArray **idap) NULL) != NULL); } - val = sprop->setterValue(); + val = js_CastAsObjectJSVal(sprop->setter); } } else { ok = OBJ_GET_PROPERTY(cx, obj, id, &val); @@ -782,7 +782,7 @@ obj_toSource(JSContext *cx, uintN argc, jsval *vp) (attrs & (JSPROP_GETTER | JSPROP_SETTER))) { JSScopeProperty *sprop = (JSScopeProperty *) prop; if (attrs & JSPROP_GETTER) { - val[valcnt] = sprop->getterValue(); + val[valcnt] = js_CastAsObjectJSVal(sprop->getter); gsopold[valcnt] = ATOM_TO_STRING(cx->runtime->atomState.getterAtom); gsop[valcnt] = @@ -791,7 +791,7 @@ obj_toSource(JSContext *cx, uintN argc, jsval *vp) valcnt++; } if (attrs & JSPROP_SETTER) { - val[valcnt] = sprop->setterValue(); + val[valcnt] = js_CastAsObjectJSVal(sprop->setter); gsopold[valcnt] = ATOM_TO_STRING(cx->runtime->atomState.setterAtom); gsop[valcnt] = @@ -1905,7 +1905,7 @@ obj_lookupGetter(JSContext *cx, uintN argc, jsval *vp) if (OBJ_IS_NATIVE(pobj)) { sprop = (JSScopeProperty *) prop; if (sprop->attrs & JSPROP_GETTER) - *vp = sprop->getterValue(); + *vp = js_CastAsObjectJSVal(sprop->getter); } OBJ_DROP_PROPERTY(cx, pobj, prop); } @@ -1930,7 +1930,7 @@ obj_lookupSetter(JSContext *cx, uintN argc, jsval *vp) if (OBJ_IS_NATIVE(pobj)) { sprop = (JSScopeProperty *) prop; if (sprop->attrs & JSPROP_SETTER) - *vp = sprop->setterValue(); + *vp = js_CastAsObjectJSVal(sprop->setter); } OBJ_DROP_PROPERTY(cx, pobj, prop); } @@ -3557,8 +3557,6 @@ js_AddNativeProperty(JSContext *cx, JSObject *obj, jsid id, JSScope *scope; JSScopeProperty *sprop; - JS_ASSERT(!(flags & SPROP_IS_METHOD)); - /* * Purge the property cache of now-shadowed id in obj's scope chain. 
Do * this optimistically (assuming no failure below) before locking obj, so @@ -3640,7 +3638,7 @@ js_DefineNativeProperty(JSContext *cx, JSObject *obj, jsid id, jsval value, JSScopeProperty *sprop; JSBool added; - JS_ASSERT((defineHow & ~(JSDNP_CACHE_RESULT | JSDNP_DONT_PURGE | JSDNP_SET_METHOD)) == 0); + JS_ASSERT((defineHow & ~(JSDNP_CACHE_RESULT | JSDNP_DONT_PURGE)) == 0); js_LeaveTraceIfGlobalObject(cx, obj); /* Convert string indices to integers if appropriate. */ @@ -3712,12 +3710,10 @@ js_DefineNativeProperty(JSContext *cx, JSObject *obj, jsid id, jsval value, /* Use the object's class getter and setter by default. */ clasp = LOCKED_OBJ_GET_CLASS(obj); - if (!(defineHow & JSDNP_SET_METHOD)) { - if (!getter) - getter = clasp->getProperty; - if (!setter) - setter = clasp->setProperty; - } + if (!getter) + getter = clasp->getProperty; + if (!setter) + setter = clasp->setProperty; /* Get obj's own scope if it has one, or create a new one for obj. */ scope = js_GetMutableScope(cx, obj); @@ -3729,16 +3725,6 @@ js_DefineNativeProperty(JSContext *cx, JSObject *obj, jsid id, jsval value, /* Add a new property, or replace an existing one of the same id. */ if (clasp->flags & JSCLASS_SHARE_ALL_PROPERTIES) attrs |= JSPROP_SHARED; - - if (defineHow & JSDNP_SET_METHOD) { - JS_ASSERT(LOCKED_OBJ_GET_CLASS(obj) == &js_ObjectClass); - JS_ASSERT(VALUE_IS_FUNCTION(cx, value)); - JS_ASSERT(!(attrs & (JSPROP_GETTER | JSPROP_SETTER))); - JS_ASSERT(!getter && !setter); - flags |= SPROP_IS_METHOD; - getter = js_CastAsPropertyOp(JSVAL_TO_OBJECT(value)); - } - sprop = scope->add(cx, id, getter, setter, SPROP_INVALID_SLOT, attrs, flags, shortid); if (!sprop) @@ -3747,12 +3733,8 @@ js_DefineNativeProperty(JSContext *cx, JSObject *obj, jsid id, jsval value, } /* Store value before calling addProperty, in case the latter GC's. */ - if (SPROP_HAS_VALID_SLOT(sprop, scope)) { - if (added) - LOCKED_OBJ_SET_SLOT(obj, sprop->slot, value); - else - LOCKED_OBJ_WRITE_BARRIER(cx, obj, sprop->slot, value); - } + if (SPROP_HAS_VALID_SLOT(sprop, scope)) + LOCKED_OBJ_WRITE_SLOT(cx, obj, sprop->slot, value); /* XXXbe called with lock held */ ADD_PROPERTY_HELPER(cx, clasp, obj, scope, sprop, &value, @@ -4156,7 +4138,7 @@ js_FindIdentifierBase(JSContext *cx, JSObject *scopeChain, jsid id) JSBool js_NativeGet(JSContext *cx, JSObject *obj, JSObject *pobj, - JSScopeProperty *sprop, uintN getHow, jsval *vp) + JSScopeProperty *sprop, jsval *vp) { js_LeaveTraceIfGlobalObject(cx, pobj); @@ -4175,20 +4157,17 @@ js_NativeGet(JSContext *cx, JSObject *obj, JSObject *pobj, ? 
LOCKED_OBJ_GET_SLOT(pobj, slot) : JSVAL_VOID; if (SPROP_HAS_STUB_GETTER(sprop)) - return true; - - if (JS_UNLIKELY(sprop->isMethod()) && !(getHow & JSGET_METHOD_BARRIER)) - return true; + return JS_TRUE; sample = cx->runtime->propertyRemovals; JS_UNLOCK_SCOPE(cx, scope); JS_PUSH_TEMP_ROOT_SPROP(cx, sprop, &tvr); JS_PUSH_TEMP_ROOT_OBJECT(cx, pobj, &tvr2); - ok = sprop->get(cx, obj, vp); + ok = js_GetSprop(cx, sprop, obj, vp); JS_POP_TEMP_ROOT(cx, &tvr2); JS_POP_TEMP_ROOT(cx, &tvr); if (!ok) - return false; + return JS_FALSE; JS_LOCK_SCOPE(cx, scope); if (SLOT_IN_SCOPE(slot, scope) && @@ -4197,7 +4176,7 @@ js_NativeGet(JSContext *cx, JSObject *obj, JSObject *pobj, LOCKED_OBJ_SET_SLOT(pobj, slot, *vp); } - return true; + return JS_TRUE; } JSBool @@ -4241,7 +4220,7 @@ js_NativeSet(JSContext *cx, JSObject *obj, JSScopeProperty *sprop, jsval *vp) sample = cx->runtime->propertyRemovals; JS_UNLOCK_SCOPE(cx, scope); JS_PUSH_TEMP_ROOT_SPROP(cx, sprop, &tvr); - ok = sprop->set(cx, obj, vp); + ok = js_SetSprop(cx, sprop, obj, vp); JS_POP_TEMP_ROOT(cx, &tvr); if (!ok) return JS_FALSE; @@ -4258,7 +4237,7 @@ js_NativeSet(JSContext *cx, JSObject *obj, JSScopeProperty *sprop, jsval *vp) } JSBool -js_GetPropertyHelper(JSContext *cx, JSObject *obj, jsid id, uintN getHow, +js_GetPropertyHelper(JSContext *cx, JSObject *obj, jsid id, JSBool cacheResult, jsval *vp) { JSObject *aobj, *obj2; @@ -4266,8 +4245,7 @@ js_GetPropertyHelper(JSContext *cx, JSObject *obj, jsid id, uintN getHow, JSProperty *prop; JSScopeProperty *sprop; - JS_ASSERT_IF(getHow & JSGET_CACHE_RESULT, !JS_ON_TRACE(cx)); - + JS_ASSERT_IF(cacheResult, !JS_ON_TRACE(cx)); /* Convert string indices to integers if appropriate. */ id = js_CheckForStringIndex(id); @@ -4282,7 +4260,7 @@ js_GetPropertyHelper(JSContext *cx, JSObject *obj, jsid id, uintN getHow, if (!OBJ_GET_CLASS(cx, obj)->getProperty(cx, obj, ID_TO_VALUE(id), vp)) return JS_FALSE; - PCMETER(getHow & JSGET_CACHE_RESULT && JS_PROPERTY_CACHE(cx).nofills++); + PCMETER(cacheResult && JS_PROPERTY_CACHE(cx).nofills++); /* * Give a strict warning if foo.bar is evaluated by a script for an @@ -4343,12 +4321,12 @@ js_GetPropertyHelper(JSContext *cx, JSObject *obj, jsid id, uintN getHow, sprop = (JSScopeProperty *) prop; - if (getHow & JSGET_CACHE_RESULT) { + if (cacheResult) { JS_ASSERT_NOT_ON_TRACE(cx); js_FillPropertyCache(cx, aobj, 0, protoIndex, obj2, sprop, false); } - if (!js_NativeGet(cx, obj, obj2, sprop, getHow, vp)) + if (!js_NativeGet(cx, obj, obj2, sprop, vp)) return JS_FALSE; JS_UNLOCK_OBJ(cx, obj2); @@ -4358,19 +4336,20 @@ js_GetPropertyHelper(JSContext *cx, JSObject *obj, jsid id, uintN getHow, JSBool js_GetProperty(JSContext *cx, JSObject *obj, jsid id, jsval *vp) { - return js_GetPropertyHelper(cx, obj, id, JSGET_METHOD_BARRIER, vp); + return js_GetPropertyHelper(cx, obj, id, false, vp); } JSBool -js_GetMethod(JSContext *cx, JSObject *obj, jsid id, uintN getHow, jsval *vp) +js_GetMethod(JSContext *cx, JSObject *obj, jsid id, JSBool cacheResult, + jsval *vp) { JSAutoResolveFlags rf(cx, JSRESOLVE_QUALIFIED); if (obj->map->ops == &js_ObjectOps || obj->map->ops->getProperty == js_GetProperty) { - return js_GetPropertyHelper(cx, obj, id, getHow, vp); + return js_GetPropertyHelper(cx, obj, id, cacheResult, vp); } - JS_ASSERT_IF(getHow & JSGET_CACHE_RESULT, OBJ_IS_DENSE_ARRAY(cx, obj)); + JS_ASSERT_IF(cacheResult, OBJ_IS_DENSE_ARRAY(cx, obj)); #if JS_HAS_XML_SUPPORT if (OBJECT_IS_XML(cx, obj)) return js_GetXMLMethod(cx, obj, id, vp); @@ -4401,11 +4380,10 @@ 
js_CheckUndeclaredVarAssignment(JSContext *cx) /* * Note: all non-error exits in this function must notify the tracer using - * SetPropHit when called from the interpreter, which is detected by testing - * (defineHow & JSDNP_CACHE_RESULT). + * SetPropHit when called from the interpreter loop (cacheResult is true). */ JSBool -js_SetPropertyHelper(JSContext *cx, JSObject *obj, jsid id, uintN defineHow, +js_SetPropertyHelper(JSContext *cx, JSObject *obj, jsid id, JSBool cacheResult, jsval *vp) { int protoIndex; @@ -4419,8 +4397,7 @@ js_SetPropertyHelper(JSContext *cx, JSObject *obj, jsid id, uintN defineHow, JSPropertyOp getter, setter; bool added; - JS_ASSERT((defineHow & ~(JSDNP_CACHE_RESULT | JSDNP_SET_METHOD)) == 0); - if (defineHow & JSDNP_CACHE_RESULT) + if (cacheResult) JS_ASSERT_NOT_ON_TRACE(cx); /* Convert string indices to integers if appropriate. */ @@ -4493,8 +4470,8 @@ js_SetPropertyHelper(JSContext *cx, JSObject *obj, jsid id, uintN defineHow, if (attrs & JSPROP_READONLY) { if (!JS_HAS_STRICT_OPTION(cx)) { /* Just return true per ECMA if not in strict mode. */ - PCMETER((defineHow & JSDNP_CACHE_RESULT) && JS_PROPERTY_CACHE(cx).rofills++); - if (defineHow & JSDNP_CACHE_RESULT) + PCMETER(cacheResult && JS_PROPERTY_CACHE(cx).rofills++); + if (cacheResult) TRACE_2(SetPropHit, JS_NO_PROP_CACHE_FILL, sprop); return JS_TRUE; error: // TRACE_2 jumps here in case of error. @@ -4507,10 +4484,7 @@ js_SetPropertyHelper(JSContext *cx, JSObject *obj, jsid id, uintN defineHow, goto read_only_error; } - if (pobj == obj) { - if (!(defineHow & JSDNP_SET_METHOD) != !sprop->isMethod()) - sprop = NULL; - } else { + if (pobj != obj) { /* * We found id in a prototype object: prepare to share or shadow. * @@ -4522,7 +4496,7 @@ js_SetPropertyHelper(JSContext *cx, JSObject *obj, jsid id, uintN defineHow, /* Don't clone a shared prototype property. */ if (attrs & JSPROP_SHARED) { - if (defineHow & JSDNP_CACHE_RESULT) { + if (cacheResult) { JSPropCacheEntry *entry; entry = js_FillPropertyCache(cx, obj, 0, protoIndex, pobj, sprop, false); TRACE_2(SetPropHit, entry, sprop); @@ -4533,7 +4507,7 @@ js_SetPropertyHelper(JSContext *cx, JSObject *obj, jsid id, uintN defineHow, return JS_TRUE; } - return sprop->set(cx, obj, vp); + return js_SetSprop(cx, sprop, obj, vp); } /* Restore attrs to the ECMA default for new properties. */ @@ -4580,23 +4554,8 @@ js_SetPropertyHelper(JSContext *cx, JSObject *obj, jsid id, uintN defineHow, JS_UNLOCK_OBJ(cx, obj); return JS_FALSE; } - if (clasp->flags & JSCLASS_SHARE_ALL_PROPERTIES) attrs |= JSPROP_SHARED; - - /* - * Check for Object class here to avoid defining a method on a class - * with magic resolve, addProperty, getProperty, etc. hooks. 
- */
-    if ((defineHow & JSDNP_SET_METHOD) &&
-        LOCKED_OBJ_GET_CLASS(obj) == &js_ObjectClass) {
-        JS_ASSERT(VALUE_IS_FUNCTION(cx, *vp));
-        JS_ASSERT(!(attrs & (JSPROP_GETTER | JSPROP_SETTER)));
-        flags |= SPROP_IS_METHOD;
-        getter = JS_EXTENSION (JSPropertyOp) JSVAL_TO_OBJECT(*vp);
-        setter = NULL;
-    }
-
     sprop = scope->add(cx, id, getter, setter, SPROP_INVALID_SLOT,
                        attrs, flags, shortid);
     if (!sprop) {
@@ -4620,7 +4579,7 @@ js_SetPropertyHelper(JSContext *cx, JSObject *obj, jsid id, uintN defineHow,
         added = true;
     }
 
-    if (defineHow & JSDNP_CACHE_RESULT) {
+    if (cacheResult) {
         JSPropCacheEntry *entry;
         entry = js_FillPropertyCache(cx, obj, 0, 0, obj, sprop, added);
         TRACE_2(SetPropHit, entry, sprop);
@@ -4799,7 +4758,7 @@ js_DefaultValue(JSContext *cx, JSObject *obj, JSType hint, jsval *vp)
     }
 
     if (sprop &&
-        SPROP_HAS_STUB_GETTER_OR_IS_METHOD(sprop) &&
+        SPROP_HAS_STUB_GETTER(sprop) &&
         SPROP_HAS_VALID_SLOT(sprop, scope)) {
         jsval fval = LOCKED_OBJ_GET_SLOT(pobj, sprop->slot);
 
@@ -5589,7 +5548,7 @@ js_TryMethod(JSContext *cx, JSObject *obj, JSAtom *atom,
     older = JS_SetErrorReporter(cx, NULL);
     id = ATOM_TO_JSID(atom);
     fval = JSVAL_VOID;
-    ok = js_GetMethod(cx, obj, id, 0, &fval);
+    ok = js_GetMethod(cx, obj, id, false, &fval);
     if (!ok)
         JS_ClearPendingException(cx);
     JS_SetErrorReporter(cx, older);
diff --git a/js/src/jsobj.h b/js/src/jsobj.h
index c553f3c37501..57058c9f1d6a 100644
--- a/js/src/jsobj.h
+++ b/js/src/jsobj.h
@@ -664,9 +664,6 @@ js_DefineProperty(JSContext *cx, JSObject *obj, jsid id, jsval value,
  */
 const uintN JSDNP_CACHE_RESULT = 1; /* an interpreter call from JSOP_INITPROP */
 const uintN JSDNP_DONT_PURGE = 2;   /* suppress js_PurgeScopeChain */
-const uintN JSDNP_SET_METHOD = 4;   /* js_{DefineNativeProperty,SetPropertyHelper}
-                                       must pass the SPROP_IS_METHOD flag on to
-                                       js_AddScopeProperty */
 
 extern JSBool
 js_DefineNativeProperty(JSContext *cx, JSObject *obj, jsid id, jsval value,
@@ -715,17 +712,6 @@ js_FindIdentifierBase(JSContext *cx, JSObject *scopeChain, jsid id);
 extern JSObject *
 js_FindVariableScope(JSContext *cx, JSFunction **funp);
 
-/*
- * JSGET_CACHE_RESULT is the analogue of JSDNP_CACHE_RESULT for js_GetMethod.
- *
- * JSGET_METHOD_BARRIER enables a read barrier that preserves standard function
- * object semantics (by default we assume our caller won't leak a joined callee
- * to script, where it would create hazardous mutable object sharing as well as
- * observable identity according to == and ===).
- */ -const uintN JSGET_CACHE_RESULT = 1; // call from a caching interpreter opcode -const uintN JSGET_METHOD_BARRIER = 2; // caller may leak shared function object - /* * NB: js_NativeGet and js_NativeSet are called with the scope containing sprop * (pobj's scope for Get, obj's for Set) locked, and on successful return, that @@ -734,20 +720,21 @@ const uintN JSGET_METHOD_BARRIER = 2; // caller may leak shared function object */ extern JSBool js_NativeGet(JSContext *cx, JSObject *obj, JSObject *pobj, - JSScopeProperty *sprop, uintN getHow, jsval *vp); + JSScopeProperty *sprop, jsval *vp); extern JSBool js_NativeSet(JSContext *cx, JSObject *obj, JSScopeProperty *sprop, jsval *vp); extern JSBool -js_GetPropertyHelper(JSContext *cx, JSObject *obj, jsid id, uintN getHow, +js_GetPropertyHelper(JSContext *cx, JSObject *obj, jsid id, JSBool cacheResult, jsval *vp); extern JSBool js_GetProperty(JSContext *cx, JSObject *obj, jsid id, jsval *vp); extern JSBool -js_GetMethod(JSContext *cx, JSObject *obj, jsid id, uintN getHow, jsval *vp); +js_GetMethod(JSContext *cx, JSObject *obj, jsid id, JSBool cacheResult, + jsval *vp); /* * Check whether it is OK to assign an undeclared property of the global @@ -757,7 +744,7 @@ extern JS_FRIEND_API(JSBool) js_CheckUndeclaredVarAssignment(JSContext *cx); extern JSBool -js_SetPropertyHelper(JSContext *cx, JSObject *obj, jsid id, uintN defineHow, +js_SetPropertyHelper(JSContext *cx, JSObject *obj, jsid id, JSBool cacheResult, jsval *vp); extern JSBool diff --git a/js/src/jsopcode.cpp b/js/src/jsopcode.cpp index a2c63c9b0cbb..28376ee0076b 100644 --- a/js/src/jsopcode.cpp +++ b/js/src/jsopcode.cpp @@ -3823,7 +3823,6 @@ Decompile(SprintStack *ss, jsbytecode *pc, intN nb, JSOp nextop) goto do_getprop; case JSOP_SETPROP: - case JSOP_SETMETHOD: LOAD_ATOM(0); GET_QUOTE_AND_FMT("%s[%s] %s= %s", "%s.%s %s= %s", xval); rval = POP_STR(); @@ -4485,7 +4484,6 @@ Decompile(SprintStack *ss, jsbytecode *pc, intN nb, JSOp nextop) break; case JSOP_INITPROP: - case JSOP_INITMETHOD: LOAD_ATOM(0); xval = QuoteString(&ss->sprinter, ATOM_TO_STRING(atom), (jschar) diff --git a/js/src/jsopcode.tbl b/js/src/jsopcode.tbl index 13129d2fb3e5..6981a1edf309 100644 --- a/js/src/jsopcode.tbl +++ b/js/src/jsopcode.tbl @@ -582,16 +582,10 @@ OPDEF(JSOP_OBJTOP, 227,"objtop", NULL, 3, 0, 0, 0, JOF_UINT16 OPDEF(JSOP_LOOP, 228, "loop", NULL, 1, 0, 0, 0, JOF_BYTE) /* - * Debugger versions of JSOP_{GET,CALL}UPVAR and the flat closure (_FC) ops. + * Debugger versions of JSOP_{GET,CALL}UPVAR. */ OPDEF(JSOP_GETUPVAR_DBG, 229,"getupvar_dbg", NULL, 3, 0, 1, 19, JOF_UINT16|JOF_NAME) OPDEF(JSOP_CALLUPVAR_DBG, 230,"callupvar_dbg", NULL, 3, 0, 2, 19, JOF_UINT16|JOF_NAME|JOF_CALLOP) OPDEF(JSOP_DEFFUN_DBGFC, 231,"deffun_dbgfc", NULL, 3, 0, 0, 0, JOF_OBJECT|JOF_DECLARING) OPDEF(JSOP_DEFLOCALFUN_DBGFC,232,"deflocalfun_dbgfc",NULL, 5, 0, 0, 0, JOF_SLOTOBJECT|JOF_DECLARING) OPDEF(JSOP_LAMBDA_DBGFC, 233,"lambda_dbgfc", NULL, 3, 0, 1, 19, JOF_OBJECT) - -/* - * Joined function object as method optimization support. 
- */ -OPDEF(JSOP_SETMETHOD, 234,"setmethod", NULL, 3, 2, 1, 3, JOF_ATOM|JOF_PROP|JOF_SET|JOF_DETECTING) -OPDEF(JSOP_INITMETHOD, 235,"initprop", NULL, 3, 1, 0, 3, JOF_ATOM|JOF_PROP|JOF_SET|JOF_DETECTING) diff --git a/js/src/jsparse.cpp b/js/src/jsparse.cpp index cc2d2213c7ab..35cd45dd9d63 100644 --- a/js/src/jsparse.cpp +++ b/js/src/jsparse.cpp @@ -5508,18 +5508,6 @@ Statement(JSContext *cx, JSTokenStream *ts, JSTreeContext *tc) pn->pn_type = TOK_SEMI; pn->pn_pos = pn2->pn_pos; pn->pn_kid = pn2; - - /* - * Specialize JSOP_SETPROP into JSOP_SETMETHOD to defer or avoid null - * closure cloning. Do this here rather than in AssignExpr as only now - * do we know that the uncloned (unjoined in ES3 terms) function object - * result of the assignment expression can't escape. - */ - if (PN_TYPE(pn2) == TOK_ASSIGN && PN_OP(pn2) == JSOP_NOP && - PN_OP(pn2->pn_left) == JSOP_SETPROP && - PN_OP(pn2->pn_right) == JSOP_LAMBDA) { - pn2->pn_left->pn_op = JSOP_SETMETHOD; - } break; } diff --git a/js/src/jsscope.cpp b/js/src/jsscope.cpp index 3d691274b563..02791e40cff0 100644 --- a/js/src/jsscope.cpp +++ b/js/src/jsscope.cpp @@ -444,13 +444,12 @@ js_HashScopeProperty(JSDHashTable *table, const void *key) /* Accumulate from least to most random so the low bits are most random. */ hash = 0; - JS_ASSERT_IF(sprop->isMethod(), !sprop->setter); gsop = sprop->getter; if (gsop) - hash = JS_ROTATE_LEFT32(hash, 4) ^ jsword(gsop); + hash = JS_ROTATE_LEFT32(hash, 4) ^ (jsword)gsop; gsop = sprop->setter; if (gsop) - hash = JS_ROTATE_LEFT32(hash, 4) ^ jsword(gsop); + hash = JS_ROTATE_LEFT32(hash, 4) ^ (jsword)gsop; hash = JS_ROTATE_LEFT32(hash, 4) ^ (sprop->flags & ~SPROP_FLAGS_NOT_MATCHED); @@ -1055,9 +1054,6 @@ JSScope::add(JSContext *cx, jsid id, JS_ASSERT(JS_IS_SCOPE_LOCKED(cx, this)); CHECK_ANCESTOR_LINE(this, true); - JS_ASSERT_IF(attrs & JSPROP_GETTER, getter); - JS_ASSERT_IF(attrs & JSPROP_SETTER, setter); - /* * You can't add properties to a sealed scope. But note well that you can * change property attributes in a sealed scope, even though that replaces @@ -1073,17 +1069,10 @@ JSScope::add(JSContext *cx, jsid id, * Normalize stub getter and setter values for faster is-stub testing in * the SPROP_CALL_[GS]ETTER macros. */ + if (getter == JS_PropertyStub) + getter = NULL; if (setter == JS_PropertyStub) setter = NULL; - if (flags & SPROP_IS_METHOD) { - /* Here, getter is the method, a function object reference. 
*/ - JS_ASSERT(getter); - JS_ASSERT(!setter); - JS_ASSERT(!(attrs & (JSPROP_GETTER | JSPROP_SETTER))); - } else { - if (getter == JS_PropertyStub) - getter = NULL; - } /* * Search for id in order to claim its entry, allocating a property tree @@ -1365,9 +1354,6 @@ JSScope::add(JSContext *cx, jsid id, if (js_IdIsIndex(sprop->id, &index)) setIndexedProperties(); - if (sprop->isMethod()) - setMethodBarrier(); - METER(adds); return sprop; @@ -1669,11 +1655,11 @@ JSScopeProperty::trace(JSTracer *trc) if (attrs & (JSPROP_GETTER | JSPROP_SETTER)) { if (attrs & JSPROP_GETTER) { JS_SET_TRACING_DETAILS(trc, PrintPropertyGetterOrSetter, this, 0); - JS_CallTracer(trc, getterObject(), JSTRACE_OBJECT); + JS_CallTracer(trc, js_CastAsObject(getter), JSTRACE_OBJECT); } if (attrs & JSPROP_SETTER) { JS_SET_TRACING_DETAILS(trc, PrintPropertyGetterOrSetter, this, 1); - JS_CallTracer(trc, setterObject(), JSTRACE_OBJECT); + JS_CallTracer(trc, js_CastAsObject(setter), JSTRACE_OBJECT); } } #endif /* JS_HAS_GETTER_SETTER */ diff --git a/js/src/jsscope.h b/js/src/jsscope.h index 346f6e368eff..211b88c9014b 100644 --- a/js/src/jsscope.h +++ b/js/src/jsscope.h @@ -1,5 +1,5 @@ /* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*- - * vim: set ts=8 sw=4 et tw=99: + * vim: set ts=8 sw=4 et tw=78: * * ***** BEGIN LICENSE BLOCK ***** * Version: MPL 1.1/GPL 2.0/LGPL 2.1 @@ -286,13 +286,12 @@ struct JSScope { BRANDED = 0x0004, INDEXED_PROPERTIES = 0x0008, OWN_SHAPE = 0x0010, - METHOD_BARRIER = 0x0020, /* * This flag toggles with each shape-regenerating GC cycle. * See JSRuntime::gcRegenShapesScopeFlag. */ - SHAPE_REGEN = 0x0040 + SHAPE_REGEN = 0x0020 }; bool hadMiddleDelete() { return flags & MIDDLE_DELETE; } @@ -323,42 +322,6 @@ struct JSScope { bool hasRegenFlag(uint8 regenFlag) { return (flags & SHAPE_REGEN) == regenFlag; } - /* - * A scope has a method barrier when some compiler-created "null closure" - * function objects (functions that do not use lexical bindings above their - * scope, only free variable names) that have a correct JSSLOT_PARENT value - * thanks to the COMPILE_N_GO optimization are stored as newly added direct - * property values. - * - * The de-facto standard JS language requires each evaluation of such a - * closure to result in a unique (according to === and observable effects) - * function object. ES3 tried to allow implementations to "join" such - * objects to a single compiler-created object, but this makes an overt - * mutation hazard, also an "identity hazard" against interoperation among - * implementations that join and do not join. - * - * To stay compatible with the de-facto standard, we store the compiler- - * created function object as the method value, set the METHOD_BARRIER - * flag, and brand the scope with a predictable shape that reflects its - * method values, which are cached and traced without being loaded, based - * on shape-qualified cache hit logic and equivalent trace guards. See - * BRANDED above. - * - * This means scope->hasMethodBarrier() => scope->branded(), but of course - * not the other way around. - * - * Then when reading from a scope for which scope->hasMethodBarrier() is - * true, we count on the scope's qualified/guarded shape being unique and - * add a read barrier that clones the compiler-created function object on - * demand, reshaping the scope. 
- * - * This read barrier is bypassed when evaluating the callee sub-expression - * of a call expression (see the JOF_CALLOP opcodes in jsopcode.tbl), since - * such ops do not present an identity or mutation hazard. - */ - bool hasMethodBarrier() { return flags & METHOD_BARRIER; } - void setMethodBarrier() { flags |= METHOD_BARRIER | BRANDED; } - bool owned() { return object != NULL; } }; @@ -400,8 +363,7 @@ js_CastAsPropertyOp(JSObject *object) struct JSScopeProperty { jsid id; /* int-tagged jsval/untagged JSAtom* */ JSPropertyOp getter; /* getter and setter hooks or objects */ - JSPropertyOp setter; /* getter is JSObject* and setter is 0 - if sprop->isMethod() */ + JSPropertyOp setter; uint32 slot; /* abstract index in object slots */ uint8 attrs; /* attributes, see jsapi.h JSPROP_* */ uint8 flags; /* flags, see below for defines */ @@ -411,53 +373,6 @@ struct JSScopeProperty { to many-kids data structure */ uint32 shape; /* property cache shape identifier */ -/* Bits stored in sprop->flags. */ -#define SPROP_MARK 0x01 -#define SPROP_IS_ALIAS 0x02 -#define SPROP_HAS_SHORTID 0x04 -#define SPROP_FLAG_SHAPE_REGEN 0x08 -#define SPROP_IS_METHOD 0x10 - - bool isMethod() const { - return flags & SPROP_IS_METHOD; - } - JSObject *method() const { - JS_ASSERT(isMethod()); - return js_CastAsObject(getter); - } - jsval methodValue() const { - JS_ASSERT(isMethod()); - return js_CastAsObjectJSVal(getter); - } - - bool hasGetter() const { - return attrs & JSPROP_GETTER; - } - JSObject *getterObject() const { - JS_ASSERT(hasGetter()); - return js_CastAsObject(getter); - } - jsval getterValue() const { - JS_ASSERT(hasGetter()); - return js_CastAsObjectJSVal(getter); - } - - bool hasSetter() const { - return attrs & JSPROP_SETTER; - } - JSObject *setterObject() const { - JS_ASSERT(hasSetter()); - return js_CastAsObject(setter); - } - jsval setterValue() const { - JS_ASSERT(hasSetter()); - return js_CastAsObjectJSVal(setter); - } - - bool methodBarrier(JSContext *cx, JSObject *obj, jsval *vp); - bool get(JSContext* cx, JSObject* obj, jsval* vp); - bool set(JSContext* cx, JSObject* obj, jsval* vp); - void trace(JSTracer *trc); }; @@ -493,6 +408,12 @@ JSScope::has(JSScopeProperty *sprop) return lookup(sprop->id) == sprop; } +/* Bits stored in sprop->flags. */ +#define SPROP_MARK 0x01 +#define SPROP_IS_ALIAS 0x02 +#define SPROP_HAS_SHORTID 0x04 +#define SPROP_FLAG_SHAPE_REGEN 0x08 + /* * If SPROP_HAS_SHORTID is set in sprop->flags, we use sprop->shortid rather * than id when calling sprop's getter or setter. @@ -509,9 +430,6 @@ JSScope::has(JSScopeProperty *sprop) #define SPROP_HAS_STUB_GETTER(sprop) (!(sprop)->getter) #define SPROP_HAS_STUB_SETTER(sprop) (!(sprop)->setter) -#define SPROP_HAS_STUB_GETTER_OR_IS_METHOD(sprop) \ - (SPROP_HAS_STUB_GETTER(sprop) || (sprop)->isMethod()) - #ifndef JS_THREADSAFE # define js_GenerateShape(cx, gcLocked) js_GenerateShape (cx) #endif @@ -633,47 +551,15 @@ JSScope::trace(JSTracer *trc) } -/* - * Read barrier for deferred cloning of compiler-created function objects - * optimized as typically non-escaping, ad-hoc methods in obj. 
- */ -JS_ALWAYS_INLINE bool -JSScopeProperty::methodBarrier(JSContext *cx, JSObject *obj, jsval *vp) +static JS_INLINE bool +js_GetSprop(JSContext* cx, JSScopeProperty* sprop, JSObject* obj, jsval* vp) { - JSScope *scope = OBJ_SCOPE(obj); -#ifdef JS_THREADSAFE - JS_ASSERT(scope->title.ownercx == cx); -#endif + JS_ASSERT(!SPROP_HAS_STUB_GETTER(sprop)); - if (scope->hasMethodBarrier()) { - JSObject *funobj = JSVAL_TO_OBJECT(*vp); - JSFunction *fun = GET_FUNCTION_PRIVATE(cx, funobj); - - if (FUN_OBJECT(fun) == funobj && FUN_INTERPRETED(fun)) { - funobj = js_CloneFunctionObject(cx, fun, OBJ_GET_PARENT(cx, funobj)); - if (!funobj) - return false; - *vp = OBJECT_TO_JSVAL(funobj); - return js_SetPropertyHelper(cx, obj, id, 0, vp); - } - } - return true; -} - -JS_ALWAYS_INLINE bool -JSScopeProperty::get(JSContext* cx, JSObject* obj, jsval* vp) -{ - JS_ASSERT(!SPROP_HAS_STUB_GETTER(this)); - - if (attrs & JSPROP_GETTER) { - JS_ASSERT(!isMethod()); - jsval fval = getterValue(); - return js_InternalGetOrSet(cx, obj, id, fval, JSACC_READ, 0, 0, vp); - } - - if (isMethod()) { - *vp = methodValue(); - return methodBarrier(cx, obj, vp); + if (sprop->attrs & JSPROP_GETTER) { + jsval fval = js_CastAsObjectJSVal(sprop->getter); + return js_InternalGetOrSet(cx, obj, sprop->id, fval, JSACC_READ, + 0, 0, vp); } /* @@ -684,28 +570,30 @@ JSScopeProperty::get(JSContext* cx, JSObject* obj, jsval* vp) */ if (STOBJ_GET_CLASS(obj) == &js_WithClass) obj = obj->map->ops->thisObject(cx, obj); - return getter(cx, obj, SPROP_USERID(this), vp); + return sprop->getter(cx, obj, SPROP_USERID(sprop), vp); } -JS_ALWAYS_INLINE bool -JSScopeProperty::set(JSContext* cx, JSObject* obj, jsval* vp) +static JS_INLINE bool +js_SetSprop(JSContext* cx, JSScopeProperty* sprop, JSObject* obj, jsval* vp) { - JS_ASSERT_IF(SPROP_HAS_STUB_SETTER(this), attrs & JSPROP_GETTER); + JS_ASSERT(!(SPROP_HAS_STUB_SETTER(sprop) && + !(sprop->attrs & JSPROP_GETTER))); - if (attrs & JSPROP_SETTER) { - jsval fval = setterValue(); - return js_InternalGetOrSet(cx, obj, id, fval, JSACC_WRITE, 1, vp, vp); + if (sprop->attrs & JSPROP_SETTER) { + jsval fval = js_CastAsObjectJSVal(sprop->setter); + return js_InternalGetOrSet(cx, obj, (sprop)->id, fval, JSACC_WRITE, + 1, vp, vp); } - if (attrs & JSPROP_GETTER) { + if (sprop->attrs & JSPROP_GETTER) { js_ReportGetterOnlyAssignment(cx); return JS_FALSE; } - /* See the comment in JSScopeProperty::get as to why we can check for With. */ + /* See the comment in js_GetSprop as to why we can check for 'with'. */ if (STOBJ_GET_CLASS(obj) == &js_WithClass) obj = obj->map->ops->thisObject(cx, obj); - return setter(cx, obj, SPROP_USERID(this), vp); + return sprop->setter(cx, obj, SPROP_USERID(sprop), vp); } /* Macro for common expression to test for shared permanent attributes. */ diff --git a/js/src/jstracer.cpp b/js/src/jstracer.cpp index 4f142149bfa6..90c0fd2bd082 100644 --- a/js/src/jstracer.cpp +++ b/js/src/jstracer.cpp @@ -2636,9 +2636,8 @@ TraceRecorder::isValidSlot(JSScope* scope, JSScopeProperty* sprop) if (sprop->attrs & JSPROP_READONLY) ABORT_TRACE_RV("writing to a read-only property", false); } - /* This check applies even when setflags == 0. 
*/ - if (setflags != JOF_SET && !SPROP_HAS_STUB_GETTER_OR_IS_METHOD(sprop)) + if (setflags != JOF_SET && !SPROP_HAS_STUB_GETTER(sprop)) ABORT_TRACE_RV("non-stub getter", false); if (!SPROP_HAS_VALID_SLOT(sprop, scope)) @@ -4101,7 +4100,7 @@ TraceRecorder::hasMethod(JSObject* obj, jsid id) JSScope* scope = OBJ_SCOPE(pobj); JSScopeProperty* sprop = (JSScopeProperty*) prop; - if (SPROP_HAS_STUB_GETTER_OR_IS_METHOD(sprop) && + if (SPROP_HAS_STUB_GETTER(sprop) && SPROP_HAS_VALID_SLOT(sprop, scope)) { jsval v = LOCKED_OBJ_GET_SLOT(pobj, sprop->slot); if (VALUE_IS_FUNCTION(cx, v)) { @@ -10486,33 +10485,32 @@ TraceRecorder::prop(JSObject* obj, LIns* obj_ins, uint32& slot, LIns*& v_ins) if (setflags && (sprop->attrs & JSPROP_READONLY)) ABORT_TRACE("writing to a readonly property"); if (setflags != JOF_SET && !SPROP_HAS_STUB_GETTER(sprop)) { - if (setflags == 0) { - // FIXME 450335: generalize this away from regexp built-in getters. - if (sprop->getter == js_RegExpClass.getProperty && - sprop->shortid < 0) { - if (sprop->shortid == REGEXP_LAST_INDEX) - ABORT_TRACE("can't trace RegExp.lastIndex yet"); - LIns* args[] = { INS_CONSTPTR(sprop), obj_ins, cx_ins }; - v_ins = lir->insCall(&js_CallGetter_ci, args); - guard(false, lir->ins2(LIR_eq, v_ins, INS_CONST(JSVAL_ERROR_COOKIE)), OOM_EXIT); - - /* - * BIG FAT WARNING: This snapshot cannot be a BRANCH_EXIT, since - * the value to the top of the stack is not the value we unbox. - */ - unbox_jsval((sprop->shortid == REGEXP_SOURCE) ? JSVAL_STRING : JSVAL_BOOLEAN, - v_ins, - snapshot(MISMATCH_EXIT)); - return JSRS_CONTINUE; - } - if (sprop->getter == js_StringClass.getProperty && - sprop->id == ATOM_KEY(cx->runtime->atomState.lengthAtom)) { - if (!guardClass(obj, obj_ins, &js_StringClass, snapshot(MISMATCH_EXIT))) - ABORT_TRACE("can't trace String.length on non-String objects"); - LIns* str_ins = stobj_get_private(obj_ins, JSVAL_TAGMASK); - v_ins = lir->ins1(LIR_i2f, getStringLength(str_ins)); - return JSRS_CONTINUE; - } + // FIXME 450335: generalize this away from regexp built-in getters. + if (setflags == 0 && + sprop->getter == js_RegExpClass.getProperty && + sprop->shortid < 0) { + if (sprop->shortid == REGEXP_LAST_INDEX) + ABORT_TRACE("can't trace RegExp.lastIndex yet"); + LIns* args[] = { INS_CONSTPTR(sprop), obj_ins, cx_ins }; + v_ins = lir->insCall(&js_CallGetter_ci, args); + guard(false, lir->ins2(LIR_eq, v_ins, INS_CONST(JSVAL_ERROR_COOKIE)), OOM_EXIT); + /* + * BIG FAT WARNING: This snapshot cannot be a BRANCH_EXIT, since + * the value to the top of the stack is not the value we unbox. + */ + unbox_jsval((sprop->shortid == REGEXP_SOURCE) ? 
JSVAL_STRING : JSVAL_BOOLEAN, + v_ins, + snapshot(MISMATCH_EXIT)); + return JSRS_CONTINUE; + } + if (setflags == 0 && + sprop->getter == js_StringClass.getProperty && + sprop->id == ATOM_KEY(cx->runtime->atomState.lengthAtom)) { + if (!guardClass(obj, obj_ins, &js_StringClass, snapshot(MISMATCH_EXIT))) + ABORT_TRACE("can't trace String.length on non-String objects"); + LIns* str_ins = stobj_get_private(obj_ins, JSVAL_TAGMASK); + v_ins = lir->ins1(LIR_i2f, getStringLength(str_ins)); + return JSRS_CONTINUE; } ABORT_TRACE("non-stub getter"); } @@ -11864,9 +11862,7 @@ TraceRecorder::record_JSOP_CALLPROP() } else if (JSVAL_TAG(l) == JSVAL_BOOLEAN) { if (l == JSVAL_VOID) ABORT_TRACE("callprop on void"); - guard(false, - lir->ins2i(LIR_eq, get(&l), JSVAL_TO_PSEUDO_BOOLEAN(JSVAL_VOID)), - MISMATCH_EXIT); + guard(false, lir->ins2i(LIR_eq, get(&l), JSVAL_TO_PSEUDO_BOOLEAN(JSVAL_VOID)), MISMATCH_EXIT); i = JSProto_Boolean; debug_only_stmt(protoname = "Boolean.prototype";) } else { @@ -12378,18 +12374,6 @@ DBG_STUB(JSOP_DEFFUN_DBGFC) DBG_STUB(JSOP_DEFLOCALFUN_DBGFC) DBG_STUB(JSOP_LAMBDA_DBGFC) -JS_REQUIRES_STACK JSRecordingStatus -TraceRecorder::record_JSOP_SETMETHOD() -{ - return record_JSOP_SETPROP(); -} - -JS_REQUIRES_STACK JSRecordingStatus -TraceRecorder::record_JSOP_INITMETHOD() -{ - return record_JSOP_INITPROP(); -} - #ifdef JS_JIT_SPEW /* Prints information about entry typemaps and unstable exits for all peers at a PC */ void diff --git a/js/src/jstypes.h b/js/src/jstypes.h index 17183e24c5f6..3a5a958ba8b6 100644 --- a/js/src/jstypes.h +++ b/js/src/jstypes.h @@ -456,14 +456,6 @@ typedef JSUintPtr JSUword; # define JS_DATA_TO_FUNC_PTR(type, ptr) ((type) (void *) (ptr)) #endif -#ifdef __GNUC__ -# define JS_EXTENSION __extension__ -# define JS_EXTENSION_(s) __extension__ ({ s; }) -#else -# define JS_EXTENSION -# define JS_EXTENSION_(s) s -#endif - JS_END_EXTERN_C #endif /* jstypes_h___ */ diff --git a/js/src/jsxdrapi.h b/js/src/jsxdrapi.h index 930d7aa74144..f20bc57b3ee9 100644 --- a/js/src/jsxdrapi.h +++ b/js/src/jsxdrapi.h @@ -204,7 +204,7 @@ JS_XDRFindClassById(JSXDRState *xdr, uint32 id); * before deserialization of bytecode. If the saved version does not match * the current version, abort deserialization and invalidate the file. */ -#define JSXDR_BYTECODE_VERSION (0xb973c0de - 50) +#define JSXDR_BYTECODE_VERSION (0xb973c0de - 49) /* * Library-private functions. From cbeb5ce96991b210ce18253bbb15f77473d5a23a Mon Sep 17 00:00:00 2001 From: Jason Orendorff Date: Wed, 29 Jul 2009 07:48:06 -0500 Subject: [PATCH 18/19] Bug 504520 - TM: a >= b misbehaves if a and b are both Infinity at record time. r=Waldo. 
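Why the subtraction-based comparison misbehaves, in brief: the old code
folded "l op r" into evalCmp(op, l - r) and compared the difference
against zero, but when l and r are both Infinity the difference is NaN,
and every ordered comparison involving NaN is false. A minimal
standalone sketch of the failure (illustrative only, not part of the
patch):

    #include <cstdio>
    #include <limits>

    int main()
    {
        double inf = std::numeric_limits<double>::infinity();

        // Direct comparison: Infinity >= Infinity is true.
        printf("direct:     %d\n", inf >= inf);          // prints 1

        // Folded comparison: Infinity - Infinity is NaN, and
        // NaN >= 0 is false, so the folded form answers wrongly.
        printf("subtracted: %d\n", (inf - inf) >= 0.0);  // prints 0
        return 0;
    }

The fix below passes both operands through to evalCmp and compares them
directly, keeping the subtraction form only for the string case, where
the integer result of js_CompareStrings cannot be NaN.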
--HG-- extra : rebase_source : 3debe2d9be81aa923e8d94081b189fd577a21287 --- js/src/jstracer.cpp | 20 +++++++------------- js/src/trace-test.js | 45 ++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 52 insertions(+), 13 deletions(-) diff --git a/js/src/jstracer.cpp b/js/src/jstracer.cpp index 90c0fd2bd082..5eb10f455d2e 100644 --- a/js/src/jstracer.cpp +++ b/js/src/jstracer.cpp @@ -7106,24 +7106,24 @@ TraceRecorder::incElem(jsint incr, bool pre) } static bool -evalCmp(LOpcode op, double result) +evalCmp(LOpcode op, double l, double r) { bool cond; switch (op) { case LIR_feq: - cond = (result == 0); + cond = (l == r); break; case LIR_flt: - cond = result < 0; + cond = l < r; break; case LIR_fgt: - cond = result > 0; + cond = l > r; break; case LIR_fle: - cond = result <= 0; + cond = l <= r; break; case LIR_fge: - cond = result >= 0; + cond = l >= r; break; default: JS_NOT_REACHED("unexpected comparison op"); @@ -7132,18 +7132,12 @@ evalCmp(LOpcode op, double result) return cond; } -static bool -evalCmp(LOpcode op, double l, double r) -{ - return evalCmp(op, l - r); -} - static bool evalCmp(LOpcode op, JSString* l, JSString* r) { if (op == LIR_feq) return js_EqualStrings(l, r); - return evalCmp(op, js_CompareStrings(l, r)); + return evalCmp(op, js_CompareStrings(l, r), 0); } JS_REQUIRES_STACK void diff --git a/js/src/trace-test.js b/js/src/trace-test.js index 5525267faef4..00c2dc121483 100644 --- a/js/src/trace-test.js +++ b/js/src/trace-test.js @@ -3660,6 +3660,51 @@ function testComparisons() testComparisons.expected = "no failures reported!"; test(testComparisons); +function testBug504520() { + // A bug involving comparisons. + var arr = [1/0, 1/0, 1/0, 1/0, 1/0, 0]; + assertEq(arr.length > RUNLOOP, true); + + var s = ''; + for (var i = 0; i < arr.length; i++) + arr[i] >= 1/0 ? null : (s += i); + assertEq(s, '5'); +} +test(testBug504520); + +function testBug504520Harder() { + // test 1024 similar cases + var vals = [1/0, -1/0, 0, 0/0]; + var ops = ["===", "!==", "==", "!=", "<", ">", "<=", ">="]; + for each (var x in vals) { + for each (var y in vals) { + for each (var op in ops) { + for each (var z in vals) { + // Assume eval is correct. This depends on the global + // Infinity property not having been reassigned. + var xz = eval(x + op + z); + var yz = eval(y + op + z); + + var arr = [x, x, x, x, x, y]; + assertEq(arr.length > RUNLOOP, true); + var expected = [xz, xz, xz, xz, xz, yz]; + + // ?: looks superfluous but that's what we're testing here + var fun = eval( + '(function (arr, results) {\n' + + ' for (let i = 0; i < arr.length; i++)\n' + + ' results.push(arr[i]' + op + z + ' ? "true" : "false");\n' + + '});\n'); + var actual = []; + fun(arr, actual); + assertEq("" + actual, "" + expected); + } + } + } + } +} +test(testBug504520Harder); + function testCaseAbort() { var four = "4"; From 6e5e35f0ae00157744e3a842b7bab08199be9ef9 Mon Sep 17 00:00:00 2001 From: Jason Orendorff Date: Wed, 29 Jul 2009 11:58:19 -0500 Subject: [PATCH 19/19] Bug 506982 - Fix up jstracer style. r=Waldo. 
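A brief summary of the conventions applied, inferred from the hunks
below rather than from a separate style guide: file-local helpers drop
the js_ prefix and take CamelCase names (js_Blacklist becomes Blacklist,
hash_accum becomes HashAccum), qualifiers are ordered as
"static JS_REQUIRES_STACK inline", the LInsp typedef is spelled out as
LIns*, and multi-sentence comments are reflowed into block form. A
hypothetical helper, shown before and after, illustrates the renaming
rule (js_ExampleHelper is a made-up name for illustration, not a
function in the tree):

    // Before: a file-static helper carrying the js_ prefix.
    static void
    js_ExampleHelper(JSContext* cx);

    // After: file-static helpers use plain CamelCase names.
    static void
    ExampleHelper(JSContext* cx);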
--- js/src/jsarray.h | 4 + js/src/jsbuiltins.h | 3 + js/src/jsdate.h | 4 + js/src/jsobj.h | 4 + js/src/jstracer.cpp | 1740 ++++++++++++++++++++++++------------------- js/src/jstracer.h | 46 +- 6 files changed, 1023 insertions(+), 778 deletions(-) diff --git a/js/src/jsarray.h b/js/src/jsarray.h index 4244a6017ad4..52a365fe3e91 100644 --- a/js/src/jsarray.h +++ b/js/src/jsarray.h @@ -207,6 +207,10 @@ JSBool js_GetDenseArrayElementValue(JSContext *cx, JSObject *obj, JSProperty *prop, jsval *vp); +/* Array constructor native. Exposed only so the JIT can know its address. */ +JSBool +js_Array(JSContext* cx, JSObject* obj, uintN argc, jsval* argv, jsval* rval); + JS_END_EXTERN_C #endif /* jsarray_h___ */ diff --git a/js/src/jsbuiltins.h b/js/src/jsbuiltins.h index 96a1c904f34f..197cd580d145 100644 --- a/js/src/jsbuiltins.h +++ b/js/src/jsbuiltins.h @@ -415,6 +415,9 @@ js_BooleanOrUndefinedToNumber(JSContext* cx, int32 unboxed); extern JS_FRIEND_API(void) js_SetTraceableNativeFailed(JSContext *cx); +extern jsdouble FASTCALL +js_dmod(jsdouble a, jsdouble b); + #else #define JS_DEFINE_CALLINFO_1(linkage, rt, op, at0, cse, fold) diff --git a/js/src/jsdate.h b/js/src/jsdate.h index 85e49320422e..238994e2e09f 100644 --- a/js/src/jsdate.h +++ b/js/src/jsdate.h @@ -124,6 +124,10 @@ typedef uint32 JSIntervalTime; extern JS_FRIEND_API(JSIntervalTime) js_IntervalNow(); +/* Date constructor native. Exposed only so the JIT can know its address. */ +JSBool +js_Date(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval); + JS_END_EXTERN_C #endif /* jsdate_h___ */ diff --git a/js/src/jsobj.h b/js/src/jsobj.h index 57058c9f1d6a..4bc58c64e90a 100644 --- a/js/src/jsobj.h +++ b/js/src/jsobj.h @@ -914,6 +914,10 @@ JS_FRIEND_API(void) js_DumpStackFrame(JSStackFrame *fp); extern uintN js_InferFlags(JSContext *cx, uintN defaultFlags); +/* Object constructor native. Exposed only so the JIT can know its address. */ +JSBool +js_Object(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval); + JS_END_EXTERN_C #endif /* jsobj_h___ */ diff --git a/js/src/jstracer.cpp b/js/src/jstracer.cpp index 5eb10f455d2e..7a899089cf97 100644 --- a/js/src/jstracer.cpp +++ b/js/src/jstracer.cpp @@ -80,6 +80,9 @@ #include "jsautooplen.h" // generated headers last #include "imacros.c.out" +using namespace avmplus; +using namespace nanojit; + #if JS_HAS_XML_SUPPORT #define ABORT_IF_XML(v) \ JS_BEGIN_MACRO \ @@ -87,12 +90,14 @@ ABORT_TRACE("xml detected"); \ JS_END_MACRO #else -#define ABORT_IF_XML(cx, v) ((void) 0) +#define ABORT_IF_XML(v) ((void) 0) #endif -/* Never use JSVAL_IS_BOOLEAN because it restricts the value (true, false) and - the type. What you want to use is JSVAL_TAG(x) == JSVAL_BOOLEAN and then - handle the undefined case properly (bug 457363). */ +/* + * Never use JSVAL_IS_BOOLEAN because it restricts the value (true, false) and + * the type. What you want to use is JSVAL_TAG(x) == JSVAL_BOOLEAN and then + * handle the undefined case properly (bug 457363). + */ #undef JSVAL_IS_BOOLEAN #define JSVAL_IS_BOOLEAN(x) JS_STATIC_ASSERT(0) @@ -104,8 +109,10 @@ static const char tagChar[] = "OIDISIBI"; /* Blacklist parameters. */ -/* Number of iterations of a loop where we start tracing. That is, we don't - start tracing until the beginning of the HOTLOOP-th iteration. */ +/* + * Number of iterations of a loop where we start tracing. That is, we don't + * start tracing until the beginning of the HOTLOOP-th iteration. + */ #define HOTLOOP 2 /* Attempt recording this many times before blacklisting permanently. 
*/ @@ -147,12 +154,12 @@ static const char tagChar[] = "OIDISIBI"; #define CHECK_STATUS(expr) \ JS_BEGIN_MACRO \ JSRecordingStatus _status = (expr); \ - if (_status != JSRS_CONTINUE) \ + if (_status != JSRS_CONTINUE) \ return _status; \ JS_END_MACRO #ifdef JS_JIT_SPEW -#define ABORT_TRACE_RV(msg, value) \ +#define ABORT_TRACE_RV(msg, value) \ JS_BEGIN_MACRO \ debug_only_printf(LC_TMAbort, "abort: %d: %s\n", __LINE__, (msg)); \ return (value); \ @@ -250,38 +257,42 @@ js_InitJITStatsClass(JSContext *cx, JSObject *glob) #define INS_CONSTWORD(v) addName(lir->insImmPtr((void *) v), #v) #define INS_VOID() INS_CONST(JSVAL_TO_PSEUDO_BOOLEAN(JSVAL_VOID)) -using namespace avmplus; -using namespace nanojit; - static GC gc = GC(); static avmplus::AvmCore s_core = avmplus::AvmCore(); static avmplus::AvmCore* core = &s_core; #ifdef JS_JIT_SPEW -void -js_DumpPeerStability(JSTraceMonitor* tm, const void* ip, JSObject* globalObj, uint32 globalShape, uint32 argc); +static void +DumpPeerStability(JSTraceMonitor* tm, const void* ip, JSObject* globalObj, uint32 globalShape, uint32 argc); #endif -/* We really need a better way to configure the JIT. Shaver, where is - my fancy JIT object? */ -/* NB: this is raced on, if jstracer.cpp should ever be running MT. - I think it's harmless tho. */ +/* + * We really need a better way to configure the JIT. Shaver, where is + * my fancy JIT object? + * + * NB: this is raced on, if jstracer.cpp should ever be running MT. + * I think it's harmless tho. + */ static bool did_we_check_processor_features = false; /* ------ Debug logging control ------ */ -/* All the logging control stuff lives in here. It is shared between - all threads, but I think that's OK. */ +/* + * All the logging control stuff lives in here. It is shared between + * all threads, but I think that's OK. + */ LogControl js_LogController; #ifdef JS_JIT_SPEW -/* NB: this is raced on too, if jstracer.cpp should ever be running MT. - Also harmless. */ +/* + * NB: this is raced on too, if jstracer.cpp should ever be running MT. + * Also harmless. + */ static bool did_we_set_up_debug_logging = false; static void -js_InitJITLogController ( void ) +InitJITLogController() { char *tm, *tmf; uint32_t bits; @@ -298,6 +309,7 @@ js_InitJITLogController ( void ) if (strstr(tmf, "help")) goto help; bits = 0; + /* flags for jstracer.cpp */ if (strstr(tmf, "minimal")) bits |= LC_TMMinimal; if (strstr(tmf, "tracer")) bits |= LC_TMTracer; @@ -306,6 +318,7 @@ js_InitJITLogController ( void ) if (strstr(tmf, "abort")) bits |= LC_TMAbort; if (strstr(tmf, "stats")) bits |= LC_TMStats; if (strstr(tmf, "regexp")) bits |= LC_TMRegexp; + /* flags for nanojit */ if (strstr(tmf, "liveness")) bits |= LC_Liveness; if (strstr(tmf, "readlir")) bits |= LC_ReadLIR; @@ -377,8 +390,10 @@ getExitName(ExitType type) } #endif -/* The entire VM shares one oracle. Collisions and concurrent updates are tolerated and worst - case cause performance regressions. */ +/* + * The entire VM shares one oracle. Collisions and concurrent updates are + * tolerated and worst case cause performance regressions. 
+ */ static Oracle oracle; Tracker::Tracker() @@ -462,17 +477,20 @@ Tracker::set(const void* v, LIns* i) p->map[(jsuword(v) & PAGEMASK) >> 2] = i; } -static inline jsuint argSlots(JSStackFrame* fp) +static inline jsuint +argSlots(JSStackFrame* fp) { return JS_MAX(fp->argc, fp->fun->nargs); } -static inline bool isNumber(jsval v) +static inline bool +isNumber(jsval v) { return JSVAL_IS_INT(v) || JSVAL_IS_DOUBLE(v); } -static inline jsdouble asNumber(jsval v) +static inline jsdouble +asNumber(jsval v) { JS_ASSERT(isNumber(v)); if (JSVAL_IS_DOUBLE(v)) @@ -480,7 +498,8 @@ static inline jsdouble asNumber(jsval v) return (jsdouble)JSVAL_TO_INT(v); } -static inline bool isInt32(jsval v) +static inline bool +isInt32(jsval v) { if (!isNumber(v)) return false; @@ -489,7 +508,8 @@ static inline bool isInt32(jsval v) return JSDOUBLE_IS_INT(d, i); } -static inline jsint asInt32(jsval v) +static inline jsint +asInt32(jsval v) { JS_ASSERT(isNumber(v)); if (JSVAL_IS_INT(v)) @@ -502,7 +522,8 @@ static inline jsint asInt32(jsval v) } /* Return TT_DOUBLE for all numbers (int and double) and the tag otherwise. */ -static inline JSTraceType getPromotedType(jsval v) +static inline JSTraceType +GetPromotedType(jsval v) { if (JSVAL_IS_INT(v)) return TT_DOUBLE; @@ -522,7 +543,8 @@ static inline JSTraceType getPromotedType(jsval v) } /* Return TT_INT32 for all whole numbers that fit into signed 32-bit and the tag otherwise. */ -static inline JSTraceType getCoercedType(jsval v) +static inline JSTraceType +getCoercedType(jsval v) { if (isInt32(v)) return TT_INT32; @@ -541,32 +563,34 @@ static inline JSTraceType getCoercedType(jsval v) return JSTraceType(tag); } -/* - * Constant seed and accumulate step borrowed from the DJB hash. - */ +/* Constant seed and accumulate step borrowed from the DJB hash. 
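The classic DJB step is h = h * 33 + c, usually written h = ((h << 5) + h) + c with seed 5381; HashAccum below is that step with a mask applied, so the accumulated value stays a valid table index.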
*/ -#define ORACLE_MASK (ORACLE_SIZE - 1) -#define FRAGMENT_TABLE_MASK (FRAGMENT_TABLE_SIZE - 1) -#define HASH_SEED 5381 +const uintptr_t ORACLE_MASK = ORACLE_SIZE - 1; +JS_STATIC_ASSERT((ORACLE_MASK & ORACLE_SIZE) == 0); + +const uintptr_t FRAGMENT_TABLE_MASK = FRAGMENT_TABLE_SIZE - 1; +JS_STATIC_ASSERT((FRAGMENT_TABLE_MASK & FRAGMENT_TABLE_SIZE) == 0); + +const uintptr_t HASH_SEED = 5381; static inline void -hash_accum(uintptr_t& h, uintptr_t i, uintptr_t mask) +HashAccum(uintptr_t& h, uintptr_t i, uintptr_t mask) { h = ((h << 5) + h + (mask & i)) & mask; } -JS_REQUIRES_STACK static inline int -stackSlotHash(JSContext* cx, unsigned slot) +static JS_REQUIRES_STACK inline int +StackSlotHash(JSContext* cx, unsigned slot) { uintptr_t h = HASH_SEED; - hash_accum(h, uintptr_t(cx->fp->script), ORACLE_MASK); - hash_accum(h, uintptr_t(cx->fp->regs->pc), ORACLE_MASK); - hash_accum(h, uintptr_t(slot), ORACLE_MASK); + HashAccum(h, uintptr_t(cx->fp->script), ORACLE_MASK); + HashAccum(h, uintptr_t(cx->fp->regs->pc), ORACLE_MASK); + HashAccum(h, uintptr_t(slot), ORACLE_MASK); return int(h); } -JS_REQUIRES_STACK static inline int -globalSlotHash(JSContext* cx, unsigned slot) +static JS_REQUIRES_STACK inline int +GlobalSlotHash(JSContext* cx, unsigned slot) { uintptr_t h = HASH_SEED; JSStackFrame* fp = cx->fp; @@ -574,15 +598,14 @@ globalSlotHash(JSContext* cx, unsigned slot) while (fp->down) fp = fp->down; - hash_accum(h, uintptr_t(fp->script), ORACLE_MASK); - hash_accum(h, uintptr_t(OBJ_SHAPE(JS_GetGlobalForObject(cx, fp->scopeChain))), - ORACLE_MASK); - hash_accum(h, uintptr_t(slot), ORACLE_MASK); + HashAccum(h, uintptr_t(fp->script), ORACLE_MASK); + HashAccum(h, uintptr_t(OBJ_SHAPE(JS_GetGlobalForObject(cx, fp->scopeChain))), ORACLE_MASK); + HashAccum(h, uintptr_t(slot), ORACLE_MASK); return int(h); } static inline int -pcHash(jsbytecode* pc) +PCHash(jsbytecode* pc) { return int(uintptr_t(pc) & ORACLE_MASK); } @@ -599,42 +622,42 @@ Oracle::Oracle() JS_REQUIRES_STACK void Oracle::markGlobalSlotUndemotable(JSContext* cx, unsigned slot) { - _globalDontDemote.set(&gc, globalSlotHash(cx, slot)); + _globalDontDemote.set(&gc, GlobalSlotHash(cx, slot)); } /* Consult with the oracle whether we shouldn't demote a certain global variable. */ JS_REQUIRES_STACK bool Oracle::isGlobalSlotUndemotable(JSContext* cx, unsigned slot) const { - return _globalDontDemote.get(globalSlotHash(cx, slot)); + return _globalDontDemote.get(GlobalSlotHash(cx, slot)); } /* Tell the oracle that a certain slot at a certain stack slot should not be demoted. */ JS_REQUIRES_STACK void Oracle::markStackSlotUndemotable(JSContext* cx, unsigned slot) { - _stackDontDemote.set(&gc, stackSlotHash(cx, slot)); + _stackDontDemote.set(&gc, StackSlotHash(cx, slot)); } /* Consult with the oracle whether we shouldn't demote a certain slot. */ JS_REQUIRES_STACK bool Oracle::isStackSlotUndemotable(JSContext* cx, unsigned slot) const { - return _stackDontDemote.get(stackSlotHash(cx, slot)); + return _stackDontDemote.get(StackSlotHash(cx, slot)); } /* Tell the oracle that a certain slot at a certain bytecode location should not be demoted. */ void Oracle::markInstructionUndemotable(jsbytecode* pc) { - _pcDontDemote.set(&gc, pcHash(pc)); + _pcDontDemote.set(&gc, PCHash(pc)); } /* Consult with the oracle whether we shouldn't demote a certain bytecode location. 
*/ bool Oracle::isInstructionUndemotable(jsbytecode* pc) const { - return _pcDontDemote.get(pcHash(pc)); + return _pcDontDemote.get(PCHash(pc)); } void @@ -645,7 +668,6 @@ Oracle::clearDemotability() _pcDontDemote.reset(); } - struct PCHashEntry : public JSDHashEntryStub { size_t count; }; @@ -653,7 +675,7 @@ struct PCHashEntry : public JSDHashEntryStub { #define PC_HASH_COUNT 1024 static void -js_Blacklist(jsbytecode* pc) +Blacklist(jsbytecode* pc) { AUDIT(blacklisted); JS_ASSERT(*pc == JSOP_LOOP || *pc == JSOP_NOP); @@ -661,7 +683,7 @@ js_Blacklist(jsbytecode* pc) } static void -js_Backoff(JSContext *cx, jsbytecode* pc, Fragment* tree=NULL) +Backoff(JSContext *cx, jsbytecode* pc, Fragment* tree = NULL) { JSDHashTable *table = &JS_TRACE_MONITOR(cx).recordAttempts; @@ -677,7 +699,7 @@ js_Backoff(JSContext *cx, jsbytecode* pc, Fragment* tree=NULL) JS_ASSERT(JS_DHASH_ENTRY_IS_LIVE(&(entry->hdr))); if (entry->count++ > (BL_ATTEMPTS * MAXPEERS)) { entry->count = 0; - js_Blacklist(pc); + Blacklist(pc); return; } } @@ -693,12 +715,12 @@ js_Backoff(JSContext *cx, jsbytecode* pc, Fragment* tree=NULL) * well. */ if (++tree->recordAttempts > BL_ATTEMPTS) - js_Blacklist(pc); + Blacklist(pc); } } static void -js_resetRecordingAttempts(JSContext *cx, jsbytecode* pc) +ResetRecordingAttempts(JSContext *cx, jsbytecode* pc) { JSDHashTable *table = &JS_TRACE_MONITOR(cx).recordAttempts; if (table->ops) { @@ -713,13 +735,13 @@ js_resetRecordingAttempts(JSContext *cx, jsbytecode* pc) } static inline size_t -fragmentHash(const void *ip, JSObject* globalObj, uint32 globalShape, uint32 argc) +FragmentHash(const void *ip, JSObject* globalObj, uint32 globalShape, uint32 argc) { uintptr_t h = HASH_SEED; - hash_accum(h, uintptr_t(ip), FRAGMENT_TABLE_MASK); - hash_accum(h, uintptr_t(globalObj), FRAGMENT_TABLE_MASK); - hash_accum(h, uintptr_t(globalShape), FRAGMENT_TABLE_MASK); - hash_accum(h, uintptr_t(argc), FRAGMENT_TABLE_MASK); + HashAccum(h, uintptr_t(ip), FRAGMENT_TABLE_MASK); + HashAccum(h, uintptr_t(globalObj), FRAGMENT_TABLE_MASK); + HashAccum(h, uintptr_t(globalShape), FRAGMENT_TABLE_MASK); + HashAccum(h, uintptr_t(argc), FRAGMENT_TABLE_MASK); return size_t(h); } @@ -727,10 +749,10 @@ fragmentHash(const void *ip, JSObject* globalObj, uint32 globalShape, uint32 arg * argc is cx->fp->argc at the trace loop header, i.e., the number of arguments * pushed for the innermost JS frame. This is required as part of the fragment * key because the fragment will write those arguments back to the interpreter - * stack when it exits, using its typemap, which implicitly incorporates a given - * value of argc. Without this feature, a fragment could be called as an inner - * tree with two different values of argc, and entry type checking or exit - * frame synthesis could crash. + * stack when it exits, using its typemap, which implicitly incorporates a + * given value of argc. Without this feature, a fragment could be called as an + * inner tree with two different values of argc, and entry type checking or + * exit frame synthesis could crash. */ struct VMFragment : public Fragment { @@ -751,7 +773,7 @@ static VMFragment* getVMFragment(JSTraceMonitor* tm, const void *ip, JSObject* globalObj, uint32 globalShape, uint32 argc) { - size_t h = fragmentHash(ip, globalObj, globalShape, argc); + size_t h = FragmentHash(ip, globalObj, globalShape, argc); VMFragment* vf = tm->vmfragments[h]; while (vf && ! 
(vf->globalObj == globalObj && @@ -764,15 +786,13 @@ getVMFragment(JSTraceMonitor* tm, const void *ip, JSObject* globalObj, uint32 gl } static VMFragment* -getLoop(JSTraceMonitor* tm, const void *ip, JSObject* globalObj, uint32 globalShape, - uint32 argc) +getLoop(JSTraceMonitor* tm, const void *ip, JSObject* globalObj, uint32 globalShape, uint32 argc) { return getVMFragment(tm, ip, globalObj, globalShape, argc); } static Fragment* -getAnchor(JSTraceMonitor* tm, const void *ip, JSObject* globalObj, uint32 globalShape, - uint32 argc) +getAnchor(JSTraceMonitor* tm, const void *ip, JSObject* globalObj, uint32 globalShape, uint32 argc) { VMFragment *f = new (&gc) VMFragment(ip, globalObj, globalShape, argc); JS_ASSERT(f); @@ -789,7 +809,7 @@ getAnchor(JSTraceMonitor* tm, const void *ip, JSObject* globalObj, uint32 global } else { /* this is the first fragment */ f->first = f; - size_t h = fragmentHash(ip, globalObj, globalShape, argc); + size_t h = FragmentHash(ip, globalObj, globalShape, argc); f->next = tm->vmfragments[h]; tm->vmfragments[h] = f; } @@ -801,9 +821,10 @@ getAnchor(JSTraceMonitor* tm, const void *ip, JSObject* globalObj, uint32 global #ifdef DEBUG static void -ensureTreeIsUnique(JSTraceMonitor* tm, VMFragment* f, TreeInfo* ti) +AssertTreeIsUnique(JSTraceMonitor* tm, VMFragment* f, TreeInfo* ti) { JS_ASSERT(f->root == f); + /* * Check for duplicate entry type maps. This is always wrong and hints at * trace explosion since we are trying to stabilize something without @@ -823,21 +844,16 @@ ensureTreeIsUnique(JSTraceMonitor* tm, VMFragment* f, TreeInfo* ti) #endif static void -js_AttemptCompilation(JSContext *cx, JSTraceMonitor* tm, JSObject* globalObj, jsbytecode* pc, - uint32 argc) +AttemptCompilation(JSContext *cx, JSTraceMonitor* tm, JSObject* globalObj, jsbytecode* pc, + uint32 argc) { - /* - * If we already permanently blacklisted the location, undo that. - */ + /* If we already permanently blacklisted the location, undo that. */ JS_ASSERT(*(jsbytecode*)pc == JSOP_NOP || *(jsbytecode*)pc == JSOP_LOOP); *(jsbytecode*)pc = JSOP_LOOP; - js_resetRecordingAttempts(cx, pc); + ResetRecordingAttempts(cx, pc); - /* - * Breath new live into all peer fragments at the designated loop header. - */ - Fragment* f = (VMFragment*)getLoop(tm, pc, globalObj, OBJ_SHAPE(globalObj), - argc); + /* Breathe new life into all peer fragments at the designated loop header. 
*/ + Fragment* f = (VMFragment*)getLoop(tm, pc, globalObj, OBJ_SHAPE(globalObj), argc); if (!f) { /* * If the global object's shape changed, we can't easily find the @@ -861,7 +877,8 @@ js_AttemptCompilation(JSContext *cx, JSTraceMonitor* tm, JSObject* globalObj, js JS_DEFINE_CALLINFO_1(static, DOUBLE, i2f, INT32, 1, 1) JS_DEFINE_CALLINFO_1(static, DOUBLE, u2f, UINT32, 1, 1) -static bool isi2f(LInsp i) +static bool +isi2f(LIns* i) { if (i->isop(LIR_i2f)) return true; @@ -869,8 +886,7 @@ static bool isi2f(LInsp i) if (nanojit::AvmCore::config.soft_float && i->isop(LIR_qjoin) && i->oprnd1()->isop(LIR_call) && - i->oprnd2()->isop(LIR_callh)) - { + i->oprnd2()->isop(LIR_callh)) { if (i->oprnd1()->callInfo() == &i2f_ci) return true; } @@ -878,7 +894,8 @@ static bool isi2f(LInsp i) return false; } -static bool isu2f(LInsp i) +static bool +isu2f(LIns* i) { if (i->isop(LIR_u2f)) return true; @@ -886,8 +903,7 @@ static bool isu2f(LInsp i) if (nanojit::AvmCore::config.soft_float && i->isop(LIR_qjoin) && i->oprnd1()->isop(LIR_call) && - i->oprnd2()->isop(LIR_callh)) - { + i->oprnd2()->isop(LIR_callh)) { if (i->oprnd1()->callInfo() == &u2f_ci) return true; } @@ -895,19 +911,19 @@ static bool isu2f(LInsp i) return false; } -static LInsp iu2fArg(LInsp i) +static LIns* +iu2fArg(LIns* i) { if (nanojit::AvmCore::config.soft_float && - i->isop(LIR_qjoin)) - { + i->isop(LIR_qjoin)) { return i->oprnd1()->arg(0); } return i->oprnd1(); } - -static LIns* demote(LirWriter *out, LInsp i) +static LIns* +demote(LirWriter *out, LIns* i) { if (i->isCall()) return callArgN(i, 0); @@ -921,7 +937,8 @@ static LIns* demote(LirWriter *out, LInsp i) return out->insImm(ci); } -static bool isPromoteInt(LIns* i) +static bool +isPromoteInt(LIns* i) { if (isi2f(i) || i->isconst()) return true; @@ -931,7 +948,8 @@ static bool isPromoteInt(LIns* i) return d == jsdouble(jsint(d)) && !JSDOUBLE_IS_NEGZERO(d); } -static bool isPromoteUint(LIns* i) +static bool +isPromoteUint(LIns* i) { if (isu2f(i) || i->isconst()) return true; @@ -941,12 +959,14 @@ static bool isPromoteUint(LIns* i) return d == jsdouble(jsuint(d)) && !JSDOUBLE_IS_NEGZERO(d); } -static bool isPromote(LIns* i) +static bool +isPromote(LIns* i) { return isPromoteInt(i) || isPromoteUint(i); } -static bool isconst(LIns* i, int32_t c) +static bool +IsConst(LIns* i, int32_t c) { return i->isconst() && i->imm32() == c; } @@ -955,7 +975,8 @@ static bool isconst(LIns* i, int32_t c) * Determine whether this operand is guaranteed to not overflow the specified * integer operation. 
*/ -static bool overflowSafe(LOpcode op, LIns* i) +static bool +IsOverflowSafe(LOpcode op, LIns* i) { LIns* c; switch (op) { @@ -1066,7 +1087,7 @@ public: { } - LInsp quadCall(const CallInfo *ci, LInsp args[]) { + LIns* quadCall(const CallInfo *ci, LIns* args[]) { LInsp qlo, qhi; qlo = out->insCall(ci, args); @@ -1074,7 +1095,7 @@ public: return out->qjoin(qlo, qhi); } - LInsp ins1(LOpcode v, LInsp s0) + LIns* ins1(LOpcode v, LIns* s0) { if (v == LIR_fneg) return quadCall(&fneg_ci, &s0); @@ -1088,10 +1109,10 @@ public: return out->ins1(v, s0); } - LInsp ins2(LOpcode v, LInsp s0, LInsp s1) + LIns* ins2(LOpcode v, LIns* s0, LIns* s1) { - LInsp args[2]; - LInsp bv; + LIns* args[2]; + LIns* bv; // change the numeric value and order of these LIR opcodes and die if (LIR_fadd <= v && v <= LIR_fdiv) { @@ -1116,10 +1137,10 @@ public: return out->ins2(v, s0, s1); } - LInsp insCall(const CallInfo *ci, LInsp args[]) + LIns* insCall(const CallInfo *ci, LIns* args[]) { // if the return type is ARGSIZE_F, we have - // to do a quadCall ( qjoin(call,callh) ) + // to do a quadCall(qjoin(call,callh)) if ((ci->_argtypes & 3) == ARGSIZE_F) return quadCall(ci, args); @@ -1135,7 +1156,7 @@ public: { } - LInsp ins2(LOpcode v, LInsp s0, LInsp s1) + LIns* ins2(LOpcode v, LIns* s0, LIns* s1) { if (s0 == s1 && v == LIR_feq) { if (isPromote(s0)) { @@ -1143,8 +1164,8 @@ public: return insImm(1); } if (s0->isop(LIR_fmul) || s0->isop(LIR_fsub) || s0->isop(LIR_fadd)) { - LInsp lhs = s0->oprnd1(); - LInsp rhs = s0->oprnd2(); + LIns* lhs = s0->oprnd1(); + LIns* rhs = s0->oprnd2(); if (isPromote(lhs) && isPromote(rhs)) { // add/sub/mul promoted ints can't be nan return insImm(1); @@ -1163,8 +1184,8 @@ public: return out->ins2(v, demote(out, s0), demote(out, s1)); } } else if (v == LIR_or && - s0->isop(LIR_lsh) && isconst(s0->oprnd2(), 16) && - s1->isop(LIR_and) && isconst(s1->oprnd2(), 0xffff)) { + s0->isop(LIR_lsh) && IsConst(s0->oprnd2(), 16) && + s1->isop(LIR_and) && IsConst(s1->oprnd2(), 0xffff)) { LIns* msw = s0->oprnd1(); LIns* lsw = s1->oprnd1(); LIns* x; @@ -1172,16 +1193,16 @@ public: if (lsw->isop(LIR_add) && lsw->oprnd1()->isop(LIR_and) && lsw->oprnd2()->isop(LIR_and) && - isconst(lsw->oprnd1()->oprnd2(), 0xffff) && - isconst(lsw->oprnd2()->oprnd2(), 0xffff) && + IsConst(lsw->oprnd1()->oprnd2(), 0xffff) && + IsConst(lsw->oprnd2()->oprnd2(), 0xffff) && msw->isop(LIR_add) && msw->oprnd1()->isop(LIR_add) && msw->oprnd2()->isop(LIR_rsh) && msw->oprnd1()->oprnd1()->isop(LIR_rsh) && msw->oprnd1()->oprnd2()->isop(LIR_rsh) && - isconst(msw->oprnd2()->oprnd2(), 16) && - isconst(msw->oprnd1()->oprnd1()->oprnd2(), 16) && - isconst(msw->oprnd1()->oprnd2()->oprnd2(), 16) && + IsConst(msw->oprnd2()->oprnd2(), 16) && + IsConst(msw->oprnd1()->oprnd1()->oprnd2(), 16) && + IsConst(msw->oprnd1()->oprnd2()->oprnd2(), 16) && (x = lsw->oprnd1()->oprnd1()) == msw->oprnd1()->oprnd1()->oprnd1() && (y = lsw->oprnd2()->oprnd1()) == msw->oprnd1()->oprnd2()->oprnd1() && lsw == msw->oprnd2()->oprnd1()) { @@ -1192,21 +1213,21 @@ public: return out->ins2(v, s0, s1); } - LInsp insCall(const CallInfo *ci, LInsp args[]) + LIns* insCall(const CallInfo *ci, LIns* args[]) { if (ci == &js_DoubleToUint32_ci) { - LInsp s0 = args[0]; + LIns* s0 = args[0]; if (s0->isconstq()) return out->insImm(js_DoubleToECMAUint32(s0->imm64f())); if (isi2f(s0) || isu2f(s0)) return iu2fArg(s0); } else if (ci == &js_DoubleToInt32_ci) { - LInsp s0 = args[0]; + LIns* s0 = args[0]; if (s0->isconstq()) return out->insImm(js_DoubleToECMAInt32(s0->imm64f())); if 
(s0->isop(LIR_fadd) || s0->isop(LIR_fsub)) { - LInsp lhs = s0->oprnd1(); - LInsp rhs = s0->oprnd2(); + LIns* lhs = s0->oprnd1(); + LIns* rhs = s0->oprnd2(); if (isPromote(lhs) && isPromote(rhs)) { LOpcode op = LOpcode(s0->opcode() & ~LIR64); return out->ins2(op, demote(out, lhs), demote(out, rhs)); @@ -1214,6 +1235,7 @@ public: } if (isi2f(s0) || isu2f(s0)) return iu2fArg(s0); + // XXX ARM -- check for qjoin(call(UnboxDouble),call(UnboxDouble)) if (s0->isCall()) { const CallInfo* ci2 = s0->callInfo(); @@ -1240,7 +1262,7 @@ public: } } } else if (ci == &js_BoxDouble_ci) { - LInsp s0 = args[0]; + LIns* s0 = args[0]; JS_ASSERT(s0->isQuad()); if (isPromoteInt(s0)) { LIns* args2[] = { demote(out, s0), args[1] }; @@ -1254,15 +1276,15 @@ public: }; /* - * Visit the values in the given JSStackFrame that the tracer cares about. This visitor - * function is (implicitly) the primary definition of the native stack area layout. There - * are a few other independent pieces of code that must be maintained to assume the same - * layout. They are marked like this: + * Visit the values in the given JSStackFrame that the tracer cares about. This + * visitor function is (implicitly) the primary definition of the native stack + * area layout. There are a few other independent pieces of code that must be + * maintained to assume the same layout. They are marked like this: * * Duplicate native stack layout computation: see VisitFrameSlots header comment. */ template -JS_REQUIRES_STACK static bool +static JS_REQUIRES_STACK bool VisitFrameSlots(Visitor &visitor, unsigned depth, JSStackFrame *fp, JSStackFrame *up) { @@ -1301,14 +1323,14 @@ VisitFrameSlots(Visitor &visitor, unsigned depth, JSStackFrame *fp, } template -JS_REQUIRES_STACK static JS_ALWAYS_INLINE bool +static JS_REQUIRES_STACK JS_ALWAYS_INLINE bool VisitStackSlots(Visitor &visitor, JSContext *cx, unsigned callDepth) { return VisitFrameSlots(visitor, callDepth, cx->fp, NULL); } template -JS_REQUIRES_STACK static JS_ALWAYS_INLINE void +static JS_REQUIRES_STACK JS_ALWAYS_INLINE void VisitGlobalSlots(Visitor &visitor, JSContext *cx, JSObject *globalObj, unsigned ngslots, uint16 *gslots) { @@ -1321,7 +1343,7 @@ VisitGlobalSlots(Visitor &visitor, JSContext *cx, JSObject *globalObj, class AdjustCallerTypeVisitor; template -JS_REQUIRES_STACK static JS_ALWAYS_INLINE void +static JS_REQUIRES_STACK JS_ALWAYS_INLINE void VisitGlobalSlots(Visitor &visitor, JSContext *cx, SlotList &gslots) { VisitGlobalSlots(visitor, cx, JS_GetGlobalForObject(cx, cx->fp->scopeChain), @@ -1330,7 +1352,7 @@ VisitGlobalSlots(Visitor &visitor, JSContext *cx, SlotList &gslots) template -JS_REQUIRES_STACK static JS_ALWAYS_INLINE void +static JS_REQUIRES_STACK JS_ALWAYS_INLINE void VisitSlots(Visitor& visitor, JSContext* cx, JSObject* globalObj, unsigned callDepth, unsigned ngslots, uint16* gslots) { @@ -1339,7 +1361,7 @@ VisitSlots(Visitor& visitor, JSContext* cx, JSObject* globalObj, } template -JS_REQUIRES_STACK static JS_ALWAYS_INLINE void +static JS_REQUIRES_STACK JS_ALWAYS_INLINE void VisitSlots(Visitor& visitor, JSContext* cx, unsigned callDepth, unsigned ngslots, uint16* gslots) { @@ -1348,7 +1370,7 @@ VisitSlots(Visitor& visitor, JSContext* cx, unsigned callDepth, } template -JS_REQUIRES_STACK static JS_ALWAYS_INLINE void +static JS_REQUIRES_STACK JS_ALWAYS_INLINE void VisitSlots(Visitor &visitor, JSContext *cx, JSObject *globalObj, unsigned callDepth, const SlotList& slots) { @@ -1357,7 +1379,7 @@ VisitSlots(Visitor &visitor, JSContext *cx, JSObject *globalObj, } template 
-JS_REQUIRES_STACK static JS_ALWAYS_INLINE void +static JS_REQUIRES_STACK JS_ALWAYS_INLINE void VisitSlots(Visitor &visitor, JSContext *cx, unsigned callDepth, const SlotList& slots) { @@ -1417,16 +1439,21 @@ public: } }; -/* Calculate the total number of native frame slots we need from this frame - all the way back to the entry frame, including the current stack usage. */ +/* + * Calculate the total number of native frame slots we need from this frame all + * the way back to the entry frame, including the current stack usage. + */ JS_REQUIRES_STACK unsigned -js_NativeStackSlots(JSContext *cx, unsigned callDepth) +NativeStackSlots(JSContext *cx, unsigned callDepth) { JSStackFrame* fp = cx->fp; unsigned slots = 0; unsigned depth = callDepth; for (;;) { - /* Duplicate native stack layout computation: see VisitFrameSlots header comment. */ + /* + * Duplicate native stack layout computation: see VisitFrameSlots + * header comment. + */ unsigned operands = fp->regs->sp - StackBase(fp); slots += operands; if (fp->callee) @@ -1447,7 +1474,7 @@ js_NativeStackSlots(JSContext *cx, unsigned callDepth) if (missing > 0) slots += missing; } - JS_NOT_REACHED("js_NativeStackSlots"); + JS_NOT_REACHED("NativeStackSlots"); } class CaptureTypesVisitor : public SlotVisitorBase @@ -1504,7 +1531,7 @@ public: JS_REQUIRES_STACK void TypeMap::captureTypes(JSContext* cx, JSObject* globalObj, SlotList& slots, unsigned callDepth) { - setLength(js_NativeStackSlots(cx, callDepth) + slots.length()); + setLength(NativeStackSlots(cx, callDepth) + slots.length()); CaptureTypesVisitor visitor(cx, data()); VisitSlots(visitor, cx, globalObj, callDepth, slots); JS_ASSERT(visitor.length() == length()); @@ -1530,10 +1557,13 @@ TypeMap::matches(TypeMap& other) const return !memcmp(data(), other.data(), length()); } -/* Use the provided storage area to create a new type map that contains the partial type map - with the rest of it filled up from the complete type map. */ +/* + * Use the provided storage area to create a new type map that contains the + * partial type map with the rest of it filled up from the complete type + * map. + */ static void -mergeTypeMaps(JSTraceType** partial, unsigned* plength, JSTraceType* complete, unsigned clength, JSTraceType* mem) +MergeTypeMaps(JSTraceType** partial, unsigned* plength, JSTraceType* complete, unsigned clength, JSTraceType* mem) { unsigned l = *plength; JS_ASSERT(l < clength); @@ -1545,7 +1575,7 @@ mergeTypeMaps(JSTraceType** partial, unsigned* plength, JSTraceType* complete, u /* Specializes a tree to any missing globals, including any dependent trees. */ static JS_REQUIRES_STACK void -specializeTreesToMissingGlobals(JSContext* cx, JSObject* globalObj, TreeInfo* root) +SpecializeTreesToMissingGlobals(JSContext* cx, JSObject* globalObj, TreeInfo* root) { TreeInfo* ti = root; @@ -1554,19 +1584,38 @@ specializeTreesToMissingGlobals(JSContext* cx, JSObject* globalObj, TreeInfo* ro for (unsigned i = 0; i < root->dependentTrees.length(); i++) { ti = (TreeInfo*)root->dependentTrees[i]->vmprivate; + /* ti can be NULL if we hit the recording tree in emitTreeCall; this is harmless. 
*/ if (ti && ti->nGlobalTypes() < ti->globalSlots->length()) - specializeTreesToMissingGlobals(cx, globalObj, ti); + SpecializeTreesToMissingGlobals(cx, globalObj, ti); } for (unsigned i = 0; i < root->linkedTrees.length(); i++) { ti = (TreeInfo*)root->linkedTrees[i]->vmprivate; if (ti && ti->nGlobalTypes() < ti->globalSlots->length()) - specializeTreesToMissingGlobals(cx, globalObj, ti); + SpecializeTreesToMissingGlobals(cx, globalObj, ti); } } +static inline JSTraceType* +GetStackTypeMap(nanojit::SideExit* exit) +{ + return (JSTraceType*)(((VMSideExit*)exit) + 1); +} + +static inline JSTraceType* +GetGlobalTypeMap(nanojit::SideExit* exit) +{ + return GetStackTypeMap(exit) + ((VMSideExit*)exit)->numStackSlots; +} + +static inline JSTraceType* +GetFullTypeMap(nanojit::SideExit* exit) +{ + return GetStackTypeMap(exit); +} + static void -js_TrashTree(JSContext* cx, Fragment* f); +TrashTree(JSContext* cx, Fragment* f); JS_REQUIRES_STACK TraceRecorder::TraceRecorder(JSContext* cx, VMSideExit* _anchor, Fragment* _fragment, @@ -1636,7 +1685,7 @@ TraceRecorder::TraceRecorder(JSContext* cx, VMSideExit* _anchor, Fragment* _frag /* If we came from exit, we might not have enough global types. */ if (ti->globalSlots->length() > ti->nGlobalTypes()) - specializeTreesToMissingGlobals(cx, globalObj, ti); + SpecializeTreesToMissingGlobals(cx, globalObj, ti); /* read into registers all values on the stack and all globals we know so far */ import(treeInfo, lirbuf->sp, stackSlots, ngslots, callDepth, typeMap); @@ -1650,8 +1699,10 @@ TraceRecorder::TraceRecorder(JSContext* cx, VMSideExit* _anchor, Fragment* _frag guard(true, lir->ins_eq0(x), snapshot(TIMEOUT_EXIT)); } - /* If we are attached to a tree call guard, make sure the guard the inner tree exited from - is what we expect it to be. */ + /* + * If we are attached to a tree call guard, make sure the guard the inner + * tree exited from is what we expect it to be. + */ if (_anchor && _anchor->exitType == NESTED_EXIT) { LIns* nested_ins = addName(lir->insLoad(LIR_ldp, lirbuf->state, offsetof(InterpState, lastTreeExitGuard)), @@ -1690,10 +1741,10 @@ TraceRecorder::~TraceRecorder() } if (trashSelf) - js_TrashTree(cx, fragment->root); + TrashTree(cx, fragment->root); for (unsigned int i = 0; i < whichTreesToTrash.length(); i++) - js_TrashTree(cx, whichTreesToTrash[i]); + TrashTree(cx, whichTreesToTrash[i]); } else if (wasRootFragment) { delete treeInfo; } @@ -1708,12 +1759,14 @@ TraceRecorder::~TraceRecorder() delete generatedTraceableNative; } -void TraceRecorder::removeFragmentoReferences() +void +TraceRecorder::removeFragmentoReferences() { fragment = NULL; } -void TraceRecorder::deepAbort() +void +TraceRecorder::deepAbort() { debug_only_print0(LC_TMTracer|LC_TMAbort, "deep abort"); deepAborted = true; @@ -1724,8 +1777,10 @@ inline LIns* TraceRecorder::addName(LIns* ins, const char* name) { #ifdef JS_JIT_SPEW - /* We'll only ask for verbose Nanojit when .lcbits > 0, so - there's no point in adding names otherwise. */ + /* + * We'll only ask for verbose Nanojit when .lcbits > 0, so there's no point + * in adding names otherwise. + */ if (js_LogController.lcbits > 0) lirbuf->names->addName(ins, name); #endif @@ -1739,7 +1794,7 @@ TraceRecorder::getCallDepth() const return callDepth; } -/* Determine the offset in the native global frame for a jsval we track */ +/* Determine the offset in the native global frame for a jsval we track. 
*/ ptrdiff_t TraceRecorder::nativeGlobalOffset(jsval* p) const { @@ -1749,7 +1804,7 @@ TraceRecorder::nativeGlobalOffset(jsval* p) const return sizeof(InterpState) + ((p - globalObj->dslots) + JS_INITIAL_NSLOTS) * sizeof(double); } -/* Determine whether a value is a global stack slot */ +/* Determine whether a value is a global stack slot. */ bool TraceRecorder::isGlobal(jsval* p) const { @@ -1761,8 +1816,8 @@ TraceRecorder::isGlobal(jsval* p) const * Return the offset in the native stack for the given jsval. More formally, * |p| must be the address of a jsval that is represented in the native stack * area. The return value is the offset, from InterpState::stackBase, in bytes, - * where the native representation of |*p| is stored. To get the offset relative - * to InterpState::sp, subtract TreeInfo::nativeStackBase. + * where the native representation of |*p| is stored. To get the offset + * relative to InterpState::sp, subtract TreeInfo::nativeStackBase. */ JS_REQUIRES_STACK ptrdiff_t TraceRecorder::nativeStackOffset(jsval* p) const @@ -1770,9 +1825,10 @@ TraceRecorder::nativeStackOffset(jsval* p) const CountSlotsVisitor visitor(p); VisitStackSlots(visitor, cx, callDepth); size_t offset = visitor.count() * sizeof(double); + /* - * If it's not in a pending frame, it must be on the stack of the current frame above - * sp but below fp->slots + script->nslots. + * If it's not in a pending frame, it must be on the stack of the current + * frame above sp but below fp->slots + script->nslots. */ if (!visitor.stopped()) { JS_ASSERT(size_t(p - cx->fp->slots) < cx->fp->script->nslots); @@ -1781,8 +1837,7 @@ TraceRecorder::nativeStackOffset(jsval* p) const return offset; } -/* Track the maximum number of native frame slots we need during - execution. */ +/* Track the maximum number of native frame slots we need during execution. */ void TraceRecorder::trackNativeStackUse(unsigned slots) { @@ -1790,9 +1845,11 @@ TraceRecorder::trackNativeStackUse(unsigned slots) treeInfo->maxNativeStackSlots = slots; } -/* Unbox a jsval into a slot. Slots are wide enough to hold double values directly (instead of - storing a pointer to them). We now assert instead of type checking, the caller must ensure the - types are compatible. */ +/* + * Unbox a jsval into a slot. Slots are wide enough to hold double values + * directly (instead of storing a pointer to them). We assert instead of + * type checking. The caller must ensure the types are compatible. + */ static void ValueToNative(JSContext* cx, jsval v, JSTraceType type, double* slot) { @@ -1808,6 +1865,7 @@ ValueToNative(JSContext* cx, jsval v, JSTraceType type, double* slot) ? 
"null" : STOBJ_GET_CLASS(JSVAL_TO_OBJECT(v))->name); return; + case TT_INT32: jsint i; if (JSVAL_IS_INT(v)) @@ -1818,6 +1876,7 @@ ValueToNative(JSContext* cx, jsval v, JSTraceType type, double* slot) JS_ASSERT(JSVAL_IS_INT(v)); debug_only_printf(LC_TMTracer, "int<%d> ", *(jsint*)slot); return; + case TT_DOUBLE: jsdouble d; if (JSVAL_IS_INT(v)) @@ -1828,25 +1887,30 @@ ValueToNative(JSContext* cx, jsval v, JSTraceType type, double* slot) *(jsdouble*)slot = d; debug_only_printf(LC_TMTracer, "double<%g> ", d); return; + case TT_JSVAL: JS_NOT_REACHED("found jsval type in an entry type map"); return; + case TT_STRING: JS_ASSERT(tag == JSVAL_STRING); *(JSString**)slot = JSVAL_TO_STRING(v); debug_only_printf(LC_TMTracer, "string<%p> ", (void*)(*(JSString**)slot)); return; + case TT_NULL: JS_ASSERT(tag == JSVAL_OBJECT); *(JSObject**)slot = NULL; debug_only_print0(LC_TMTracer, "null "); return; + case TT_PSEUDOBOOLEAN: /* Watch out for pseudo-booleans. */ JS_ASSERT(tag == JSVAL_BOOLEAN); *(JSBool*)slot = JSVAL_TO_PSEUDO_BOOLEAN(v); debug_only_printf(LC_TMTracer, "pseudoboolean<%d> ", *(JSBool*)slot); return; + case TT_FUNCTION: { JS_ASSERT(tag == JSVAL_OBJECT); JSObject* obj = JSVAL_TO_OBJECT(v); @@ -1866,8 +1930,10 @@ ValueToNative(JSContext* cx, jsval v, JSTraceType type, double* slot) JS_NOT_REACHED("unexpected type"); } -/* We maintain an emergency pool of doubles so we can recover safely if a trace runs - out of memory (doubles or objects). */ +/* + * We maintain an emergency pool of doubles so we can recover safely if a trace + * runs out of memory (doubles or objects). + */ static jsval AllocateDoubleFromReservedPool(JSContext* cx) { @@ -1877,7 +1943,7 @@ AllocateDoubleFromReservedPool(JSContext* cx) } static bool -js_ReplenishReservedPool(JSContext* cx, JSTraceMonitor* tm) +ReplenishReservedPool(JSContext* cx, JSTraceMonitor* tm) { /* We should not be called with a full pool. */ JS_ASSERT((size_t) (tm->reservedDoublePoolPtr - tm->reservedDoublePool) < @@ -1923,9 +1989,11 @@ oom: return false; } -/* Box a value from the native stack back into the jsval format. Integers - that are too large to fit into a jsval are automatically boxed into - heap-allocated doubles. */ +/* + * Box a value from the native stack back into the jsval format. Integers that + * are too large to fit into a jsval are automatically boxed into + * heap-allocated doubles. + */ static void NativeToValue(JSContext* cx, jsval& v, JSTraceType type, double* slot) { @@ -1941,6 +2009,7 @@ NativeToValue(JSContext* cx, jsval& v, JSTraceType type, double* slot) ? "null" : STOBJ_GET_CLASS(JSVAL_TO_OBJECT(v))->name); break; + case TT_INT32: i = *(jsint*)slot; debug_only_printf(LC_TMTracer, "int<%d> ", i); @@ -1957,8 +2026,10 @@ NativeToValue(JSContext* cx, jsval& v, JSTraceType type, double* slot) if (JSDOUBLE_IS_INT(d, i)) goto store_int; store_double: { - /* Its not safe to trigger the GC here, so use an emergency heap if we are out of - double boxes. */ + /* + * It's not safe to trigger the GC here, so use an emergency heap if we + * are out of double boxes. 
+ */ if (cx->doubleFreeList) { #ifdef DEBUG JSBool ok = @@ -1972,25 +2043,30 @@ NativeToValue(JSContext* cx, jsval& v, JSTraceType type, double* slot) *JSVAL_TO_DOUBLE(v) = d; return; } + case TT_JSVAL: v = *(jsval*)slot; JS_ASSERT(v != JSVAL_ERROR_COOKIE); /* don't leak JSVAL_ERROR_COOKIE */ debug_only_printf(LC_TMTracer, "box<%p> ", (void*)v); break; + case TT_STRING: v = STRING_TO_JSVAL(*(JSString**)slot); debug_only_printf(LC_TMTracer, "string<%p> ", (void*)(*(JSString**)slot)); break; + case TT_NULL: JS_ASSERT(*(JSObject**)slot == NULL); v = JSVAL_NULL; debug_only_printf(LC_TMTracer, "null<%p> ", (void*)(*(JSObject**)slot)); break; + case TT_PSEUDOBOOLEAN: /* Watch out for pseudo-booleans. */ v = PSEUDO_BOOLEAN_TO_JSVAL(*(JSBool*)slot); debug_only_printf(LC_TMTracer, "boolean<%d> ", *(JSBool*)slot); break; + case TT_FUNCTION: { JS_ASSERT(HAS_FUNCTION_CLASS(*(JSObject**)slot)); v = OBJECT_TO_JSVAL(*(JSObject**)slot); @@ -2128,8 +2204,8 @@ FlushNativeGlobalFrame(JSContext *cx, double *global, unsigned ngslots, * callDepth Call depth of current point relative to trace entry */ template -JSTraceType JS_INLINE -js_GetUpvarOnTrace(JSContext* cx, uint32 upvarLevel, int32 slot, uint32 callDepth, double* result) +inline JSTraceType +GetUpvarOnTrace(JSContext* cx, uint32 upvarLevel, int32 slot, uint32 callDepth, double* result) { InterpState* state = cx->interpState; FrameInfo** fip = state->rp + callDepth; @@ -2191,9 +2267,9 @@ struct UpvarArgTraits { }; uint32 JS_FASTCALL -js_GetUpvarArgOnTrace(JSContext* cx, uint32 upvarLevel, int32 slot, uint32 callDepth, double* result) +GetUpvarArgOnTrace(JSContext* cx, uint32 upvarLevel, int32 slot, uint32 callDepth, double* result) { - return js_GetUpvarOnTrace(cx, upvarLevel, slot, callDepth, result); + return GetUpvarOnTrace(cx, upvarLevel, slot, callDepth, result); } // For this traits type, 'slot' is an index into the local slots array. @@ -2208,15 +2284,15 @@ struct UpvarVarTraits { }; uint32 JS_FASTCALL -js_GetUpvarVarOnTrace(JSContext* cx, uint32 upvarLevel, int32 slot, uint32 callDepth, double* result) +GetUpvarVarOnTrace(JSContext* cx, uint32 upvarLevel, int32 slot, uint32 callDepth, double* result) { - return js_GetUpvarOnTrace(cx, upvarLevel, slot, callDepth, result); + return GetUpvarOnTrace(cx, upvarLevel, slot, callDepth, result); } /* - * For this traits type, 'slot' is an index into the stack area (within slots, after nfixed) - * of a frame with no function. (On trace, the top-level frame is the only one that can have - * no function.) + * For this traits type, 'slot' is an index into the stack area (within slots, + * after nfixed) of a frame with no function. (On trace, the top-level frame is + * the only one that can have no function.) */ struct UpvarStackTraits { static jsval interp_get(JSStackFrame* fp, int32 slot) { @@ -2225,8 +2301,8 @@ struct UpvarStackTraits { static uint32 native_slot(uint32 argc, int32 slot) { /* - * Locals are not imported by the tracer when the frame has no function, so - * we do not add fp->script->nfixed. + * Locals are not imported by the tracer when the frame has no + * function, so we do not add fp->script->nfixed. 
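GetUpvarArgOnTrace, GetUpvarVarOnTrace, and GetUpvarStackOnTrace are thin JS_FASTCALL wrappers around one templated worker; the traits type decides where a logical slot lives. A sketch of the dispatch pattern with simplified frame types, all names here being stand-ins:

// Simplified stand-ins: a frame exposes argument and local arrays, and a
// traits type tells the shared worker which one a slot indexes.
struct UpvarFrameSketch {
    double* argv;
    double* vars;
};

struct UpvarArgTraitsSketch {
    static double get(const UpvarFrameSketch& fp, int slot) { return fp.argv[slot]; }
};

struct UpvarVarTraitsSketch {
    static double get(const UpvarFrameSketch& fp, int slot) { return fp.vars[slot]; }
};

// One worker instantiated per traits type, mirroring how the renamed
// wrappers forward to GetUpvarOnTrace<T>.
template <typename Traits>
inline double GetUpvarSketch(const UpvarFrameSketch& fp, int slot)
{
    return Traits::get(fp, slot);
}

inline double GetUpvarArgSketch(const UpvarFrameSketch& fp, int slot)
{
    return GetUpvarSketch<UpvarArgTraitsSketch>(fp, slot);
}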
*/ JS_ASSERT(argc == 0); return slot; @@ -2234,9 +2310,9 @@ struct UpvarStackTraits { }; uint32 JS_FASTCALL -js_GetUpvarStackOnTrace(JSContext* cx, uint32 upvarLevel, int32 slot, uint32 callDepth, double* result) +GetUpvarStackOnTrace(JSContext* cx, uint32 upvarLevel, int32 slot, uint32 callDepth, double* result) { - return js_GetUpvarOnTrace(cx, upvarLevel, slot, callDepth, result); + return GetUpvarOnTrace(cx, upvarLevel, slot, callDepth, result); } /* @@ -2248,9 +2324,9 @@ js_GetUpvarStackOnTrace(JSContext* cx, uint32 upvarLevel, int32 slot, uint32 cal * callDepth callDepth of current point relative to trace entry. */ template -uint32 JS_INLINE -js_GetFromClosure(JSContext* cx, JSObject* callee, uint32 scopeIndex, uint32 slot, uint32 callDepth, - double* result) +inline uint32 +GetFromClosure(JSContext* cx, JSObject* callee, uint32 scopeIndex, uint32 slot, uint32 callDepth, + double* result) { JS_ASSERT(scopeIndex >= 1); JS_ASSERT(OBJ_GET_CLASS(cx, callee) == &js_FunctionClass); @@ -2298,32 +2374,32 @@ js_GetFromClosure(JSContext* cx, JSObject* callee, uint32 scopeIndex, uint32 slo struct ArgClosureTraits { - static JS_INLINE uint32 adj_slot(JSStackFrame* fp, uint32 slot) { return fp->argc + slot; } - static JS_INLINE jsval* slots(JSStackFrame* fp) { return fp->argv; } + static inline uint32 adj_slot(JSStackFrame* fp, uint32 slot) { return fp->argc + slot; } + static inline jsval* slots(JSStackFrame* fp) { return fp->argv; } private: ArgClosureTraits(); }; uint32 JS_FASTCALL -js_GetClosureArg(JSContext* cx, JSObject* callee, uint32 scopeIndex, uint32 slot, uint32 callDepth, - double* result) +GetClosureArg(JSContext* cx, JSObject* callee, uint32 scopeIndex, uint32 slot, uint32 callDepth, + double* result) { - return js_GetFromClosure(cx, callee, scopeIndex, slot, callDepth, result); + return GetFromClosure(cx, callee, scopeIndex, slot, callDepth, result); } struct VarClosureTraits { - static JS_INLINE uint32 adj_slot(JSStackFrame* fp, uint32 slot) { return slot; } - static JS_INLINE jsval* slots(JSStackFrame* fp) { return fp->slots; } + static inline uint32 adj_slot(JSStackFrame* fp, uint32 slot) { return slot; } + static inline jsval* slots(JSStackFrame* fp) { return fp->slots; } private: VarClosureTraits(); }; uint32 JS_FASTCALL -js_GetClosureVar(JSContext* cx, JSObject* callee, uint32 scopeIndex, uint32 slot, uint32 callDepth, - double* result) +GetClosureVar(JSContext* cx, JSObject* callee, uint32 scopeIndex, uint32 slot, uint32 callDepth, + double* result) { - return js_GetFromClosure(cx, callee, scopeIndex, slot, callDepth, result); + return GetFromClosure(cx, callee, scopeIndex, slot, callDepth, result); } /** @@ -2362,6 +2438,7 @@ FlushNativeStackFrame(JSContext* cx, unsigned callDepth, JSTraceType* mp, double JS_ASSERT(n != 0); --n; } + // Skip over stopFrame itself. JS_ASSERT(n != 0); --n; @@ -2397,7 +2474,7 @@ FlushNativeStackFrame(JSContext* cx, unsigned callDepth, JSTraceType* mp, double * involves it calling the debugger hook. * * Allocating the Call object must not fail, so use an object - * previously reserved by js_ExecuteTree if needed. + * previously reserved by ExecuteTree if needed. 
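GetFromClosure's asserts (scopeIndex >= 1, callee is a function) imply the part the hunk elides: the helper hops scopeIndex links up the scope chain before reading the slot. A sketch of just that walk, under the assumption that each scope exposes a parent link and a slot array; whether the real helper then reads a live interpreter frame or the object's own slots is not shown here:

// Stand-in scope object: a parent link plus a slot array.
struct ScopeSketch {
    ScopeSketch* parent;
    double*      slots;
};

inline double ReadClosureSlotSketch(ScopeSketch* callee, unsigned scopeIndex, unsigned slot)
{
    // scopeIndex >= 1: the slot lives in an enclosing scope, never in the
    // callee itself.
    ScopeSketch* obj = callee;
    for (unsigned i = 0; i < scopeIndex; ++i)
        obj = obj->parent;
    return obj->slots[slot];
}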
*/ void* hookData = ((JSInlineFrame*)fp)->hookData; ((JSInlineFrame*)fp)->hookData = NULL; @@ -2430,10 +2507,13 @@ TraceRecorder::import(LIns* base, ptrdiff_t offset, jsval* p, JSTraceType t, LIns* ins; if (t == TT_INT32) { /* demoted */ JS_ASSERT(isInt32(*p)); - /* Ok, we have a valid demotion attempt pending, so insert an integer - read and promote it to double since all arithmetic operations expect - to see doubles on entry. The first op to use this slot will emit a - f2i cast which will cancel out the i2f we insert here. */ + + /* + * Ok, we have a valid demotion attempt pending, so insert an integer + * read and promote it to double since all arithmetic operations expect + * to see doubles on entry. The first op to use this slot will emit a + * f2i cast which will cancel out the i2f we insert here. + */ ins = lir->insLoad(LIR_ld, base, offset); ins = lir->ins1(LIR_i2f, ins); } else { @@ -2582,26 +2662,30 @@ JS_REQUIRES_STACK void TraceRecorder::import(TreeInfo* treeInfo, LIns* sp, unsigned stackSlots, unsigned ngslots, unsigned callDepth, JSTraceType* typeMap) { - /* If we get a partial list that doesn't have all the types (i.e. recording from a side - exit that was recorded but we added more global slots later), merge the missing types - from the entry type map. This is safe because at the loop edge we verify that we - have compatible types for all globals (entry type and loop edge type match). While - a different trace of the tree might have had a guard with a different type map for - these slots we just filled in here (the guard we continue from didn't know about them), - since we didn't take that particular guard the only way we could have ended up here - is if that other trace had at its end a compatible type distribution with the entry - map. Since thats exactly what we used to fill in the types our current side exit - didn't provide, this is always safe to do. */ + /* + * If we get a partial list that doesn't have all the types (i.e. recording + * from a side exit that was recorded but we added more global slots + * later), merge the missing types from the entry type map. This is safe + * because at the loop edge we verify that we have compatible types for all + * globals (entry type and loop edge type match). While a different trace + * of the tree might have had a guard with a different type map for these + * slots we just filled in here (the guard we continue from didn't know + * about them), since we didn't take that particular guard the only way we + * could have ended up here is if that other trace had at its end a + * compatible type distribution with the entry map. Since that's exactly + * what we used to fill in the types our current side exit didn't provide, + * this is always safe to do. + */ JSTraceType* globalTypeMap = typeMap + stackSlots; unsigned length = treeInfo->nGlobalTypes(); /* - * This is potentially the typemap of the side exit and thus shorter than the tree's - * global type map. + * This is potentially the typemap of the side exit and thus shorter than + * the tree's global type map. 
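The demotion comment in import() boils down to a width rule: a TT_INT32 slot keeps an int32 in an 8-byte native-frame slot, and importing it widens back to double, mirroring the LIR_ld plus LIR_i2f pair in the hunk. A plain C++ stand-in for that read path, with ordinary loads in place of LIR:

#include <cstddef>
#include <cstdint>
#include <cstring>

// Native-frame slots are 8 bytes wide. A demoted (TT_INT32) slot keeps an
// int32 in that space; importing widens it back to double.
inline double ImportSlotSketch(const void* slotBase, ptrdiff_t offset, bool demoted)
{
    const char* p = (const char*)slotBase + offset;
    if (demoted) {
        int32_t i;
        memcpy(&i, p, sizeof i);     // integer read (LIR_ld)
        return (double)i;            // promote for arithmetic (LIR_i2f)
    }
    double d;
    memcpy(&d, p, sizeof d);         // full-width double read
    return d;
}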
*/ if (ngslots < length) { - mergeTypeMaps(&globalTypeMap/*out param*/, &ngslots/*out param*/, + MergeTypeMaps(&globalTypeMap /* out param */, &ngslots /* out param */, treeInfo->globalTypeMap(), length, (JSTraceType*)alloca(sizeof(JSTraceType) * length)); } @@ -2609,9 +2693,8 @@ TraceRecorder::import(TreeInfo* treeInfo, LIns* sp, unsigned stackSlots, unsigne ptrdiff_t offset = -treeInfo->nativeStackBase; /* - * Check whether there are any values on the stack we have to unbox and - * do that first before we waste any time fetching the state from the - * stack. + * Check whether there are any values on the stack we have to unbox and do + * that first before we waste any time fetching the state from the stack. */ ImportBoxedStackSlotVisitor boxedStackVisitor(*this, sp, offset, typeMap); VisitStackSlots(boxedStackVisitor, cx, callDepth); @@ -2636,6 +2719,7 @@ TraceRecorder::isValidSlot(JSScope* scope, JSScopeProperty* sprop) if (sprop->attrs & JSPROP_READONLY) ABORT_TRACE_RV("writing to a read-only property", false); } + /* This check applies even when setflags == 0. */ if (setflags != JOF_SET && !SPROP_HAS_STUB_GETTER(sprop)) ABORT_TRACE_RV("non-stub getter", false); @@ -2652,9 +2736,10 @@ TraceRecorder::lazilyImportGlobalSlot(unsigned slot) { if (slot != uint16(slot)) /* we use a table of 16-bit ints, bail out if that's not enough */ return false; + /* - * If the global object grows too large, alloca in js_ExecuteTree might fail, so - * abort tracing on global objects with unreasonably many slots. + * If the global object grows too large, alloca in ExecuteTree might fail, + * so abort tracing on global objects with unreasonably many slots. */ if (STOBJ_NSLOTS(globalObj) > MAX_GLOBAL_SLOTS) return false; @@ -2662,6 +2747,7 @@ TraceRecorder::lazilyImportGlobalSlot(unsigned slot) if (known(vp)) return true; /* we already have it */ unsigned index = treeInfo->globalSlots->length(); + /* Add the slot to the list of interned global slots. */ JS_ASSERT(treeInfo->nGlobalTypes() == treeInfo->globalSlots->length()); treeInfo->globalSlots->add(slot); @@ -2671,7 +2757,7 @@ TraceRecorder::lazilyImportGlobalSlot(unsigned slot) treeInfo->typeMap.add(type); import(lirbuf->state, sizeof(struct InterpState) + slot*sizeof(double), vp, type, "global", index, NULL); - specializeTreesToMissingGlobals(cx, globalObj, treeInfo); + SpecializeTreesToMissingGlobals(cx, globalObj, treeInfo); return true; } @@ -2679,9 +2765,11 @@ TraceRecorder::lazilyImportGlobalSlot(unsigned slot) LIns* TraceRecorder::writeBack(LIns* i, LIns* base, ptrdiff_t offset) { - /* Sink all type casts targeting the stack into the side exit by simply storing the original - (uncasted) value. Each guard generates the side exit map based on the types of the - last stores to every stack location, so its safe to not perform them on-trace. */ + /* + * Sink all type casts targeting the stack into the side exit by simply storing the original + * (uncasted) value. Each guard generates the side exit map based on the types of the + * last stores to every stack location, so it's safe to not perform them on-trace. 
+ */ if (isPromoteInt(i)) i = ::demote(lir, i); return lir->insStorei(i, base, offset); @@ -2695,9 +2783,13 @@ TraceRecorder::set(jsval* p, LIns* i, bool initializing) JS_ASSERT(initializing || known(p)); checkForGlobalObjectReallocation(); tracker.set(p, i); - /* If we are writing to this location for the first time, calculate the offset into the - native frame manually, otherwise just look up the last load or store associated with - the same source address (p) and use the same offset/base. */ + + /* + * If we are writing to this location for the first time, calculate the + * offset into the native frame manually. Otherwise just look up the last + * load or store associated with the same source address (p) and use the + * same offset/base. + */ LIns* x = nativeFrameTracker.get(p); if (!x) { if (isGlobal(p)) @@ -2760,7 +2852,7 @@ TraceRecorder::checkForGlobalObjectReallocation() /* Determine whether the current branch is a loop edge (taken or not taken). */ static JS_REQUIRES_STACK bool -js_IsLoopEdge(jsbytecode* pc, jsbytecode* header) +IsLoopEdge(jsbytecode* pc, jsbytecode* header) { switch (*pc) { case JSOP_IFEQ: @@ -2805,8 +2897,11 @@ public: if (isPromote && *mTypeMap == TT_DOUBLE) { mLir->insStorei(mRecorder.get(vp), mLirbuf->state, mRecorder.nativeGlobalOffset(vp)); - /* Aggressively undo speculation so the inner tree will compile - if this fails. */ + + /* + * Aggressively undo speculation so the inner tree will compile + * if this fails. + */ oracle.markGlobalSlotUndemotable(mCx, slot); } JS_ASSERT(!(!isPromote && *mTypeMap == TT_INT32)); @@ -2847,8 +2942,11 @@ public: mLir->insStorei(mRecorder.get(vp), mLirbuf->sp, -mRecorder.treeInfo->nativeStackBase + mRecorder.nativeStackOffset(vp)); - /* Aggressively undo speculation so the inner tree will - compile if this fails. */ + + /* + * Aggressively undo speculation so the inner tree will compile + * if this fails. + */ oracle.markStackSlotUndemotable(mCx, mSlotnum); } JS_ASSERT(!(!isPromote && *mTypeMap == TT_INT32)); @@ -2861,9 +2959,9 @@ public: }; /* - * Promote slots if necessary to match the called tree's type map. This function is - * infallible and must only be called if we are certain that it is possible to - * reconcile the types for each slot in the inner and outer trees. + * Promote slots if necessary to match the called tree's type map. This + * function is infallible and must only be called if we are certain that it is + * possible to reconcile the types for each slot in the inner and outer trees. */ JS_REQUIRES_STACK void TraceRecorder::adjustCallerTypes(Fragment* f) @@ -2939,13 +3037,16 @@ TraceRecorder::snapshot(ExitType exitType) JSFrameRegs* regs = fp->regs; jsbytecode* pc = regs->pc; - /* Check for a return-value opcode that needs to restart at the next instruction. */ + /* + * Check for a return-value opcode that needs to restart at the next + * instruction. + */ const JSCodeSpec& cs = js_CodeSpec[*pc]; /* * When calling a _FAIL native, make the snapshot's pc point to the next - * instruction after the CALL or APPLY. Even on failure, a _FAIL native must not - * be called again from the interpreter. + * instruction after the CALL or APPLY. Even on failure, a _FAIL native + * must not be called again from the interpreter. */ bool resumeAfter = (pendingTraceableNative && JSTN_ERRTYPE(pendingTraceableNative) == FAIL_STATUS); @@ -2957,11 +3058,16 @@ TraceRecorder::snapshot(ExitType exitType) MUST_FLOW_THROUGH("restore_pc"); } - /* Generate the entry map for the (possibly advanced) pc and stash it in the trace. 
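IsLoopEdge asks one question: does this conditional branch jump back to the loop header? A sketch over a toy bytecode encoding; the opcode values and two-byte offset decoding are invented for illustration and do not match the real JSOP_* encodings, which also include the extended IFEQX/IFNEX forms:

#include <cstdint>

// Toy encoding: [opcode][int16 big-endian offset]. Real JSOP_* decoding
// differs; this only illustrates the "branch lands on the header" test.
enum OpSketch { OP_IFEQ_S, OP_IFNE_S, OP_OTHER_S };

inline bool IsLoopEdgeSketch(const uint8_t* pc, const uint8_t* header)
{
    switch (pc[0]) {
      case OP_IFEQ_S:
      case OP_IFNE_S: {
        int16_t off = (int16_t)((pc[1] << 8) | pc[2]);
        return pc + off == header;   // taken edge jumps back to the header
      }
      default:
        return false;                // not a conditional branch
    }
}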
*/ - unsigned stackSlots = js_NativeStackSlots(cx, callDepth); + /* + * Generate the entry map for the (possibly advanced) pc and stash it in + * the trace. + */ + unsigned stackSlots = NativeStackSlots(cx, callDepth); - /* It's sufficient to track the native stack use here since all stores above the - stack watermark defined by guards are killed. */ + /* + * It's sufficient to track the native stack use here since all stores + * above the stack watermark defined by guards are killed. + */ trackNativeStackUse(stackSlots + 1); /* Capture the type map into a temporary location. */ @@ -2973,9 +3079,9 @@ TraceRecorder::snapshot(ExitType exitType) /* * Determine the type of a store by looking at the current type of the - * actual value the interpreter is using. For numbers we have to check - * what kind of store we used last (integer or double) to figure out - * what the side exit show reflect in its typemap. + * actual value the interpreter is using. For numbers we have to check what + * kind of store we used last (integer or double) to figure out what the + * side exit show reflect in its typemap. */ DetermineTypesVisitor detVisitor(*this, typemap); VisitSlots(detVisitor, cx, callDepth, ngslots, @@ -2984,8 +3090,9 @@ TraceRecorder::snapshot(ExitType exitType) ngslots + stackSlots); /* - * If we are currently executing a traceable native or we are attaching a second trace - * to it, the value on top of the stack is a jsval. Make a note of this in the typemap. + * If we are currently executing a traceable native or we are attaching a + * second trace to it, the value on top of the stack is a jsval. Make a + * note of this in the typemap. */ if (pendingTraceableNative && (pendingTraceableNative->flags & JSTN_UNBOX_AFTER)) typemap[stackSlots - 1] = TT_JSVAL; @@ -2995,9 +3102,11 @@ TraceRecorder::snapshot(ExitType exitType) MUST_FLOW_LABEL(restore_pc); regs->pc = pc - cs.length; } else { - /* If we take a snapshot on a goto, advance to the target address. This avoids inner - trees returning on a break goto, which the outer recorder then would confuse with - a break in the outer tree. */ + /* + * If we take a snapshot on a goto, advance to the target address. This + * avoids inner trees returning on a break goto, which the outer + * recorder then would confuse with a break in the outer tree. + */ if (*pc == JSOP_GOTO) pc += GET_JUMP_OFFSET(pc); else if (*pc == JSOP_GOTOX) @@ -3015,7 +3124,7 @@ TraceRecorder::snapshot(ExitType exitType) VMSideExit* e = exits[n]; if (e->pc == pc && e->imacpc == fp->imacpc && ngslots == e->numGlobalSlots && - !memcmp(getFullTypeMap(exits[n]), typemap, typemap_size)) { + !memcmp(GetFullTypeMap(exits[n]), typemap, typemap_size)) { AUDIT(mergedLoopExits); JS_ARENA_RELEASE(&cx->tempPool, mark); return e; @@ -3059,7 +3168,7 @@ TraceRecorder::snapshot(ExitType exitType) exit->rp_adj = exit->calldepth * sizeof(FrameInfo*); exit->nativeCalleeWord = 0; exit->lookupFlags = js_InferFlags(cx, 0); - memcpy(getFullTypeMap(exit), typemap, typemap_size); + memcpy(GetFullTypeMap(exit), typemap, typemap_size); JS_ARENA_RELEASE(&cx->tempPool, mark); return exit; @@ -3138,16 +3247,21 @@ TraceRecorder::copy(VMSideExit* copy) return exit; } -/* Emit a guard for condition (cond), expecting to evaluate to boolean result (expected) - and generate a side exit with type exitType to jump to if the condition does not hold. 
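The exit-reuse loop in snapshot() compares four things before merging: pc, imacro pc, global-slot count, and the raw bytes of the full type map. A compact model of that dedup test with stand-in types:

#include <cstring>

typedef char TraceType;

struct ExitRecordSketch {
    const void*      pc;
    const void*      imacpc;
    unsigned         numGlobalSlots;
    unsigned         typeMapSize;
    const TraceType* fullTypeMap;
};

// Return an existing exit indistinguishable from the one about to be
// created, or null if a fresh exit is needed.
inline ExitRecordSketch*
FindMatchingExitSketch(ExitRecordSketch** exits, unsigned n,
                       const void* pc, const void* imacpc,
                       unsigned ngslots, const TraceType* typemap, unsigned size)
{
    for (unsigned i = 0; i < n; ++i) {
        ExitRecordSketch* e = exits[i];
        if (e->pc == pc && e->imacpc == imacpc &&
            e->numGlobalSlots == ngslots && e->typeMapSize == size &&
            !memcmp(e->fullTypeMap, typemap, size)) {
            return e;
        }
    }
    return 0;
}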
*/ +/* + * Emit a guard for condition (cond), expecting to evaluate to boolean result + * (expected) and generate a side exit with type exitType to jump to if the + * condition does not hold. + */ JS_REQUIRES_STACK void TraceRecorder::guard(bool expected, LIns* cond, ExitType exitType) { guard(expected, cond, snapshot(exitType)); } -/* Try to match the type of a slot to type t. checkType is used to verify that the type of - * values flowing into the loop edge is compatible with the type we expect in the loop header. +/* + * Try to match the type of a slot to type t. checkType is used to verify that + * the type of each value flowing into the loop edge is compatible with the + * type we expect in the loop header. * * @param v Value. * @param t Typemap entry for value. @@ -3169,11 +3283,14 @@ TraceRecorder::checkType(jsval& v, JSTraceType t, jsval*& stage_val, LIns*& stag if (!isNumber(v)) return false; /* not a number? type mismatch */ LIns* i = get(&v); + /* This is always a type mismatch, we can't close a double to an int. */ if (!isPromoteInt(i)) return false; + /* Looks good, slot is an int32, the last instruction should be promotable. */ JS_ASSERT(isInt32(v) && isPromoteInt(i)); + /* Overwrite the value in this slot with the argument promoted back to an integer. */ stage_val = &v; stage_ins = f2i(i); @@ -3190,8 +3307,12 @@ TraceRecorder::checkType(jsval& v, JSTraceType t, jsval*& stage_val, LIns*& stag if (!isNumber(v)) return false; /* not a number? type mismatch */ LIns* i = get(&v); - /* We sink i2f conversions into the side exit, but at the loop edge we have to make - sure we promote back to double if at loop entry we want a double. */ + + /* + * We sink i2f conversions into the side exit, but at the loop edge we + * have to make sure we promote back to double if at loop entry we want + * a double. + */ if (isPromoteInt(i)) { stage_val = &v; stage_ins = lir->ins1(LIR_i2f, i); @@ -3417,8 +3538,9 @@ public: }; /** - * Make sure that the current values in the given stack frame and all stack frames - * up and including entryFrame are type-compatible with the entry map. + * Make sure that the current values in the given stack frame and all stack + * frames up to and including entryFrame are type-compatible with the entry + * map. * * @param root_peer First fragment in peer list. * @param stable_peer Outparam for first type stable peer. @@ -3428,15 +3550,15 @@ public: JS_REQUIRES_STACK bool TraceRecorder::deduceTypeStability(Fragment* root_peer, Fragment** stable_peer, bool& demote) { - JS_ASSERT(treeInfo->globalSlots->length() == - treeInfo->nGlobalTypes()); + JS_ASSERT(treeInfo->globalSlots->length() == treeInfo->nGlobalTypes()); if (stable_peer) *stable_peer = NULL; /* - * Rather than calculate all of this stuff twice, it gets cached locally. The "stage" buffers - * are for calls to set() that will change the exit types. + * Rather than calculate all of this stuff twice, it gets cached locally. + * The "stage" buffers are for calls to set() that will change the exit + * types. */ bool success; unsigned stage_count; @@ -3465,8 +3587,9 @@ TraceRecorder::deduceTypeStability(Fragment* root_peer, Fragment** stable_peer, demote = false; - /* At this point the tree is about to be incomplete, so let's see if we can connect to any - * peer fragment that is type stable. + /* + * At this point the tree is about to be incomplete, so let's see if we can + * connect to any peer fragment that is type stable. 
*/ Fragment* f; TreeInfo* ti; @@ -3477,6 +3600,7 @@ TraceRecorder::deduceTypeStability(Fragment* root_peer, Fragment** stable_peer, if (!f->code()) continue; ti = (TreeInfo*)f->vmprivate; + /* Don't allow varying stack depths */ if ((ti->nStackTypes != treeInfo->nStackTypes) || (ti->typeMap.length() != treeInfo->typeMap.length()) || @@ -3492,8 +3616,9 @@ TraceRecorder::deduceTypeStability(Fragment* root_peer, Fragment** stable_peer, if (success) { /* - * There was a successful match. We don't care about restoring the saved staging, but - * we do need to clear the original undemote list. + * There was a successful match. We don't care about restoring the + * saved staging, but we do need to clear the original undemote + * list. */ for (unsigned i = 0; i < stage_count; i++) set(stage_vals[i], stage_ins[i]); @@ -3505,18 +3630,16 @@ TraceRecorder::deduceTypeStability(Fragment* root_peer, Fragment** stable_peer, } /* - * If this is a loop trace and it would be stable with demotions, build an undemote list - * and return true. Our caller should sniff this and trash the tree, recording a new one - * that will assumedly stabilize. + * If this is a loop trace and it would be stable with demotions, build an + * undemote list and return true. Our caller should sniff this and trash + * the tree, recording a new one that will assumedly stabilize. */ if (demote && fragment->kind == LoopTrace) { UndemoteVisitor visitor(*this, treeInfo->stackTypeMap()); VisitSlots(visitor, cx, 0, *treeInfo->globalSlots); return true; - } else { - demote = false; } - + demote = false; return false; } @@ -3538,8 +3661,7 @@ FlushJITCache(JSContext* cx) Fragmento* fragmento = tm->fragmento; if (fragmento) { if (tm->prohibitFlush) { - debug_only_print0(LC_TMTracer, - "Deferring fragmento flush due to deep bail.\n"); + debug_only_print0(LC_TMTracer, "Deferring fragmento flush due to deep bail.\n"); tm->needFlush = JS_TRUE; return; } @@ -3582,7 +3704,7 @@ TraceRecorder::compile(JSTraceMonitor* tm) Fragmento* fragmento = tm->fragmento; if (treeInfo->maxNativeStackSlots >= MAX_NATIVE_STACK_SLOTS) { debug_only_print0(LC_TMTracer, "Blacklist: excessive stack use.\n"); - js_Blacklist((jsbytecode*) fragment->root->ip); + Blacklist((jsbytecode*) fragment->root->ip); return; } if (anchor && anchor->exitType != CASE_EXIT) @@ -3596,11 +3718,11 @@ TraceRecorder::compile(JSTraceMonitor* tm) return; if (fragmento->assm()->error() != nanojit::None) { debug_only_print0(LC_TMTracer, "Blacklisted: error during compilation\n"); - js_Blacklist((jsbytecode*) fragment->root->ip); + Blacklist((jsbytecode*) fragment->root->ip); return; } - js_resetRecordingAttempts(cx, (jsbytecode*) fragment->ip); - js_resetRecordingAttempts(cx, (jsbytecode*) fragment->root->ip); + ResetRecordingAttempts(cx, (jsbytecode*) fragment->ip); + ResetRecordingAttempts(cx, (jsbytecode*) fragment->root->ip); if (anchor) { #ifdef NANOJIT_IA32 if (anchor->exitType == CASE_EXIT) @@ -3613,6 +3735,7 @@ TraceRecorder::compile(JSTraceMonitor* tm) JS_ASSERT(!fragment->vmprivate); if (fragment == fragment->root) fragment->vmprivate = treeInfo; + /* :TODO: windows support */ #if defined DEBUG && !defined WIN32 const char* filename = cx->fp->script->filename; @@ -3626,14 +3749,14 @@ TraceRecorder::compile(JSTraceMonitor* tm) } static bool -js_JoinPeersIfCompatible(Fragmento* frago, Fragment* stableFrag, TreeInfo* stableTree, - VMSideExit* exit) +JoinPeersIfCompatible(Fragmento* frago, Fragment* stableFrag, TreeInfo* stableTree, + VMSideExit* exit) { JS_ASSERT(exit->numStackSlots == 
stableTree->nStackTypes); /* Must have a matching type unstable exit. */ if ((exit->numGlobalSlots + exit->numStackSlots != stableTree->typeMap.length()) || - memcmp(getFullTypeMap(exit), stableTree->typeMap.data(), stableTree->typeMap.length())) { + memcmp(GetFullTypeMap(exit), stableTree->typeMap.data(), stableTree->typeMap.length())) { return false; } @@ -3651,9 +3774,9 @@ JS_REQUIRES_STACK void TraceRecorder::closeLoop(JSTraceMonitor* tm, bool& demote) { /* - * We should have arrived back at the loop header, and hence we don't want to be in an imacro - * here and the opcode should be either JSOP_LOOP, or in case this loop was blacklisted in the - * meantime JSOP_NOP. + * We should have arrived back at the loop header, and hence we don't want + * to be in an imacro here and the opcode should be either JSOP_LOOP or, in + * case this loop was blacklisted in the meantime, JSOP_NOP. */ JS_ASSERT((*cx->fp->regs->pc == JSOP_LOOP || *cx->fp->regs->pc == JSOP_NOP) && !cx->fp->imacpc); @@ -3665,7 +3788,7 @@ TraceRecorder::closeLoop(JSTraceMonitor* tm, bool& demote) if (callDepth != 0) { debug_only_print0(LC_TMTracer, "Blacklisted: stack depth mismatch, possible recursion.\n"); - js_Blacklist((jsbytecode*) fragment->root->ip); + Blacklist((jsbytecode*) fragment->root->ip); trashSelf = true; return; } @@ -3739,14 +3862,14 @@ TraceRecorder::closeLoop(JSTraceMonitor* tm, bool& demote) debug_only_print0(LC_TMTracer, "updating specializations on dependent and linked trees\n"); if (fragment->root->vmprivate) - specializeTreesToMissingGlobals(cx, globalObj, (TreeInfo*)fragment->root->vmprivate); + SpecializeTreesToMissingGlobals(cx, globalObj, (TreeInfo*)fragment->root->vmprivate); /* * If this is a newly formed tree, and the outer tree has not been compiled yet, we * should try to compile the outer tree again. */ if (outer) - js_AttemptCompilation(cx, tm, globalObj, outer, outerArgc); + AttemptCompilation(cx, tm, globalObj, outer, outerArgc); #ifdef JS_JIT_SPEW debug_only_printf(LC_TMMinimal, "recording completed at %s:%u@%u via closeLoop\n", @@ -3776,7 +3899,7 @@ TraceRecorder::joinEdgesToEntry(Fragmento* fragmento, VMFragment* peer_root) uexit = ti->unstableExits; unext = &ti->unstableExits; while (uexit != NULL) { - bool remove = js_JoinPeersIfCompatible(fragmento, fragment, treeInfo, uexit->exit); + bool remove = JoinPeersIfCompatible(fragmento, fragment, treeInfo, uexit->exit); JS_ASSERT(!remove || fragment != peer); debug_only_stmt( if (remove) { @@ -3786,14 +3909,17 @@ TraceRecorder::joinEdgesToEntry(Fragmento* fragmento, VMFragment* peer_root) } ) if (!remove) { - /* See if this exit contains mismatch demotions, which imply trashing a tree. - This is actually faster than trashing the original tree as soon as the - instability is detected, since we could have compiled a fairly stable - tree that ran faster with integers. */ + /* + * See if this exit contains mismatch demotions, which + * imply trashing a tree. This is actually faster than + * trashing the original tree as soon as the instability is + * detected, since we could have compiled a fairly stable + * tree that ran faster with integers. 
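JoinPeersIfCompatible reduces to a byte comparison followed by a guard patch: if the unstable exit's full type map is identical to the stable peer's entry map, the exit can be retargeted into that tree. A sketch of the test, with stand-in types and names:

#include <cstring>

typedef char TraceType;

// An unstable exit can bind to a stable peer only if its full type map is
// byte-identical to the peer's entry map; on success the caller retargets
// the guard (exit->target = peer; assm()->patch(exit)) as in the hunk above.
inline bool JoinPeersSketch(const TraceType* exitMap, unsigned exitLen,
                            const TraceType* peerEntryMap, unsigned peerLen)
{
    return exitLen == peerLen && memcmp(exitMap, peerEntryMap, peerLen) == 0;
}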
+ */ unsigned stackCount = 0; unsigned globalCount = 0; t1 = treeInfo->stackTypeMap(); - t2 = getStackTypeMap(uexit->exit); + t2 = GetStackTypeMap(uexit->exit); for (unsigned i = 0; i < uexit->exit->numStackSlots; i++) { if (t2[i] == TT_INT32 && t1[i] == TT_DOUBLE) { stackDemotes[stackCount++] = i; @@ -3803,7 +3929,7 @@ TraceRecorder::joinEdgesToEntry(Fragmento* fragmento, VMFragment* peer_root) } } t1 = treeInfo->globalTypeMap(); - t2 = getGlobalTypeMap(uexit->exit); + t2 = GetGlobalTypeMap(uexit->exit); for (unsigned i = 0; i < uexit->exit->numGlobalSlots; i++) { if (t2[i] == TT_INT32 && t1[i] == TT_DOUBLE) { globalDemotes[globalCount++] = i; @@ -3838,8 +3964,8 @@ TraceRecorder::joinEdgesToEntry(Fragmento* fragmento, VMFragment* peer_root) } } - debug_only_stmt(js_DumpPeerStability(traceMonitor, peer_root->ip, peer_root->globalObj, - peer_root->globalShape, peer_root->argc);) + debug_only_stmt(DumpPeerStability(traceMonitor, peer_root->ip, peer_root->globalObj, + peer_root->globalShape, peer_root->argc);) } /* Emit an always-exit guard and compile the tree (used for break statements. */ @@ -3848,7 +3974,7 @@ TraceRecorder::endLoop(JSTraceMonitor* tm) { if (callDepth != 0) { debug_only_print0(LC_TMTracer, "Blacklisted: stack depth mismatch, possible recursion.\n"); - js_Blacklist((jsbytecode*) fragment->root->ip); + Blacklist((jsbytecode*) fragment->root->ip); trashSelf = true; return; } @@ -3863,19 +3989,21 @@ TraceRecorder::endLoop(JSTraceMonitor* tm) VMFragment* root = (VMFragment*)fragment->root; joinEdgesToEntry(tm->fragmento, getLoop(tm, root->ip, root->globalObj, root->globalShape, root->argc)); - /* Note: this must always be done, in case we added new globals on trace and haven't yet - propagated those to linked and dependent trees. */ + /* + * Note: this must always be done, in case we added new globals on trace + * and haven't yet propagated those to linked and dependent trees. + */ debug_only_print0(LC_TMTracer, "updating specializations on dependent and linked trees\n"); if (fragment->root->vmprivate) - specializeTreesToMissingGlobals(cx, globalObj, (TreeInfo*)fragment->root->vmprivate); + SpecializeTreesToMissingGlobals(cx, globalObj, (TreeInfo*)fragment->root->vmprivate); /* - * If this is a newly formed tree, and the outer tree has not been compiled yet, we - * should try to compile the outer tree again. + * If this is a newly formed tree, and the outer tree has not been compiled + * yet, we should try to compile the outer tree again. */ if (outer) - js_AttemptCompilation(cx, tm, globalObj, outer, outerArgc); + AttemptCompilation(cx, tm, globalObj, outer, outerArgc); #ifdef JS_JIT_SPEW debug_only_printf(LC_TMMinimal, "Recording completed at %s:%u@%u via endLoop\n", @@ -3892,18 +4020,29 @@ TraceRecorder::prepareTreeCall(Fragment* inner) { TreeInfo* ti = (TreeInfo*)inner->vmprivate; inner_sp_ins = lirbuf->sp; - /* The inner tree expects to be called from the current frame. If the outer tree (this - trace) is currently inside a function inlining code (calldepth > 0), we have to advance - the native stack pointer such that we match what the inner trace expects to see. We - move it back when we come out of the inner tree call. */ + + /* + * The inner tree expects to be called from the current frame. If the outer + * tree (this trace) is currently inside a function inlining code + * (calldepth > 0), we have to advance the native stack pointer such that + * we match what the inner trace expects to see. We move it back when we + * come out of the inner tree call. 
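The scan just above collects candidate demotions rather than acting on the first mismatch. A standalone model of that pairwise walk, with stand-in TT_* tags: every slot where the exit produced an int but the tree entry wants a double is recorded, and any other mismatch disqualifies the exit outright.

typedef char TraceType;
const TraceType TT_INT32_S  = 1;   // stand-in tags
const TraceType TT_DOUBLE_S = 2;

inline bool CollectDemotesSketch(const TraceType* treeMap, const TraceType* exitMap,
                                 unsigned n, unsigned* demotes, unsigned* count)
{
    *count = 0;
    for (unsigned i = 0; i < n; ++i) {
        if (exitMap[i] == TT_INT32_S && treeMap[i] == TT_DOUBLE_S)
            demotes[(*count)++] = i;     // fixable by undemoting this slot
        else if (exitMap[i] != treeMap[i])
            return false;                // irreconcilable mismatch
    }
    return true;
}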
+ */ if (callDepth > 0) { - /* Calculate the amount we have to lift the native stack pointer by to compensate for - any outer frames that the inner tree doesn't expect but the outer tree has. */ + /* + * Calculate the amount we have to lift the native stack pointer by to + * compensate for any outer frames that the inner tree doesn't expect + * but the outer tree has. + */ ptrdiff_t sp_adj = nativeStackOffset(&cx->fp->argv[-2]); - /* Calculate the amount we have to lift the call stack by */ + + /* Calculate the amount we have to lift the call stack by. */ ptrdiff_t rp_adj = callDepth * sizeof(FrameInfo*); - /* Guard that we have enough stack space for the tree we are trying to call on top - of the new value for sp. */ + + /* + * Guard that we have enough stack space for the tree we are trying to + * call on top of the new value for sp. + */ debug_only_printf(LC_TMTracer, "sp_adj=%d outer=%d inner=%d\n", sp_adj, treeInfo->nativeStackBase, ti->nativeStackBase); @@ -3912,10 +4051,12 @@ TraceRecorder::prepareTreeCall(Fragment* inner) + sp_adj /* adjust for stack in outer frame inner tree can't see */ + ti->maxNativeStackSlots * sizeof(double)); /* plus the inner tree's stack */ guard(true, lir->ins2(LIR_lt, sp_top, eos_ins), OOM_EXIT); + /* Guard that we have enough call stack space. */ LIns* rp_top = lir->ins2i(LIR_piadd, lirbuf->rp, rp_adj + ti->maxCallDepth * sizeof(FrameInfo*)); guard(true, lir->ins2(LIR_lt, rp_top, eor_ins), OOM_EXIT); + /* We have enough space, so adjust sp and rp to their new level. */ lir->insStorei(inner_sp_ins = lir->ins2i(LIR_piadd, lirbuf->sp, - treeInfo->nativeStackBase /* rebase sp to beginning of outer tree's stack */ @@ -3941,19 +4082,20 @@ TraceRecorder::emitTreeCall(Fragment* inner, VMSideExit* exit) #ifdef DEBUG JSTraceType* map; size_t i; - map = getGlobalTypeMap(exit); + map = GetGlobalTypeMap(exit); for (i = 0; i < exit->numGlobalSlots; i++) JS_ASSERT(map[i] != TT_JSVAL); - map = getStackTypeMap(exit); + map = GetStackTypeMap(exit); for (i = 0; i < exit->numStackSlots; i++) JS_ASSERT(map[i] != TT_JSVAL); #endif - /* bug 502604 - It is illegal to extend from the outer typemap without first extending from the - * inner. Make a new typemap here. + /* + * Bug 502604 - It is illegal to extend from the outer typemap without + * first extending from the inner. Make a new typemap here. */ TypeMap fullMap; - fullMap.add(getStackTypeMap(exit), exit->numStackSlots); - fullMap.add(getGlobalTypeMap(exit), exit->numGlobalSlots); + fullMap.add(GetStackTypeMap(exit), exit->numStackSlots); + fullMap.add(GetGlobalTypeMap(exit), exit->numGlobalSlots); TreeInfo* innerTree = (TreeInfo*)exit->from->root->vmprivate; if (exit->numGlobalSlots < innerTree->nGlobalTypes()) { fullMap.add(innerTree->globalTypeMap() + exit->numGlobalSlots, @@ -3973,6 +4115,7 @@ TraceRecorder::emitTreeCall(Fragment* inner, VMSideExit* exit) * we called the inner tree at recording time. */ guard(true, lir->ins2(LIR_eq, ret, INS_CONSTPTR(exit)), NESTED_EXIT); + /* Register us as a dependent tree of the inner tree. */ ((TreeInfo*)inner->vmprivate)->dependentTrees.addUnique(fragment->root); treeInfo->linkedTrees.addUnique(inner); @@ -3995,18 +4138,21 @@ TraceRecorder::trackCfgMerges(jsbytecode* pc) } } -/* Invert the direction of the guard if this is a loop edge that is not - taken (thin loop). */ +/* + * Invert the direction of the guard if this is a loop edge that is not + * taken (thin loop). 
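The stack guard in prepareTreeCall is plain pointer arithmetic: rebase sp to the outer tree's stack origin, lift it past the outer frames the inner tree cannot see, add the inner tree's worst-case usage, and require the result to stay below the end of the stack. A scalar model of the emitted LIR_lt check, with simplified field names:

#include <cstddef>

// All quantities in bytes. Mirrors the LIR_lt guard against eos: true when
// the inner tree's worst-case stack still fits.
inline bool InnerTreeStackFitsSketch(ptrdiff_t sp,             // current native sp
                                     ptrdiff_t eos,            // end of native stack
                                     ptrdiff_t outerStackBase, // outer tree's nativeStackBase
                                     ptrdiff_t spAdj,          // outer frames the inner tree can't see
                                     size_t    innerMaxSlots)  // inner tree's maxNativeStackSlots
{
    ptrdiff_t spTop = sp - outerStackBase
                    + spAdj
                    + (ptrdiff_t)(innerMaxSlots * sizeof(double));
    return spTop < eos;
}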
+ */ JS_REQUIRES_STACK void TraceRecorder::emitIf(jsbytecode* pc, bool cond, LIns* x) { ExitType exitType; - if (js_IsLoopEdge(pc, (jsbytecode*)fragment->root->ip)) { + if (IsLoopEdge(pc, (jsbytecode*)fragment->root->ip)) { exitType = LOOP_EXIT; /* - * If we are about to walk out of the loop, generate code for the inverse loop - * condition, pretending we recorded the case that stays on trace. + * If we are about to walk out of the loop, generate code for the + * inverse loop condition, pretending we recorded the case that stays + * on trace. */ if ((*pc == JSOP_IFEQ || *pc == JSOP_IFEQX) == cond) { JS_ASSERT(*pc == JSOP_IFNE || *pc == JSOP_IFNEX || *pc == JSOP_IFEQ || *pc == JSOP_IFEQX); @@ -4016,9 +4162,9 @@ TraceRecorder::emitIf(jsbytecode* pc, bool cond, LIns* x) } /* - * Conditional guards do not have to be emitted if the condition is constant. We - * make a note whether the loop condition is true or false here, so we later know - * whether to emit a loop edge or a loop end. + * Conditional guards do not have to be emitted if the condition is + * constant. We make a note whether the loop condition is true or false + * here, so we later know whether to emit a loop edge or a loop end. */ if (x->isconst()) { loop = (x->imm32() == cond); @@ -4046,12 +4192,12 @@ TraceRecorder::fuseIf(jsbytecode* pc, bool cond, LIns* x) JS_REQUIRES_STACK JSRecordingStatus TraceRecorder::checkTraceEnd(jsbytecode *pc) { - if (js_IsLoopEdge(pc, (jsbytecode*)fragment->root->ip)) { + if (IsLoopEdge(pc, (jsbytecode*)fragment->root->ip)) { /* - * If we compile a loop, the trace should have a zero stack balance at the loop - * edge. Currently we are parked on a comparison op or IFNE/IFEQ, so advance - * pc to the loop header and adjust the stack pointer and pretend we have - * reached the loop header. + * If we compile a loop, the trace should have a zero stack balance at + * the loop edge. Currently we are parked on a comparison op or + * IFNE/IFEQ, so advance pc to the loop header and adjust the stack + * pointer and pretend we have reached the loop header. */ if (loop) { JS_ASSERT(!cx->fp->imacpc && (pc == cx->fp->regs->pc || pc == cx->fp->regs->pc + 1)); @@ -4067,14 +4213,14 @@ TraceRecorder::checkTraceEnd(jsbytecode *pc) *cx->fp->regs = orig; /* - * If compiling this loop generated new oracle information which will likely - * lead to a different compilation result, immediately trigger another - * compiler run. This is guaranteed to converge since the oracle only - * accumulates adverse information but never drops it (except when we - * flush it during garbage collection.) + * If compiling this loop generated new oracle information which + * will likely lead to a different compilation result, immediately + * trigger another compiler run. This is guaranteed to converge + * since the oracle only accumulates adverse information but never + * drops it (except when we flush it during garbage collection.) 
*/ if (demote) - js_AttemptCompilation(cx, traceMonitor, globalObj, outer, outerArgc); + AttemptCompilation(cx, traceMonitor, globalObj, outer, outerArgc); } else { endLoop(traceMonitor); } @@ -4126,7 +4272,7 @@ TraceRecorder::hasIteratorMethod(JSObject* obj) } int -nanojit::StackFilter::getTop(LInsp guard) +nanojit::StackFilter::getTop(LIns* guard) { VMSideExit* e = (VMSideExit*)guard->record()->exit; if (sp == lirbuf->sp) @@ -4161,7 +4307,7 @@ nanojit::Fragment::onDestroy() } static JS_REQUIRES_STACK bool -js_DeleteRecorder(JSContext* cx) +DeleteRecorder(JSContext* cx) { JSTraceMonitor* tm = &JS_TRACE_MONITOR(cx); @@ -4169,9 +4315,7 @@ js_DeleteRecorder(JSContext* cx) delete tm->recorder; tm->recorder = NULL; - /* - * If we ran out of memory, flush the code cache. - */ + /* If we ran out of memory, flush the code cache. */ if (JS_TRACE_MONITOR(cx).fragmento->assm()->error() == OutOMem || js_OverfullFragmento(tm, tm->fragmento)) { FlushJITCache(cx); @@ -4181,12 +4325,10 @@ js_DeleteRecorder(JSContext* cx) return true; } -/** - * Checks whether the shape of the global object has changed. - */ +/* Check whether the shape of the global object has changed. */ static JS_REQUIRES_STACK bool CheckGlobalObjectShape(JSContext* cx, JSTraceMonitor* tm, JSObject* globalObj, - uint32 *shape=NULL, SlotList** slots=NULL) + uint32 *shape = NULL, SlotList** slots = NULL) { if (tm->needFlush) { FlushJITCache(cx); @@ -4201,6 +4343,7 @@ CheckGlobalObjectShape(JSContext* cx, JSTraceMonitor* tm, JSObject* globalObj, if (tm->recorder) { VMFragment* root = (VMFragment*)tm->recorder->getFragment()->root; TreeInfo* ti = tm->recorder->getTreeInfo(); + /* Check the global shape matches the recorder's treeinfo's shape. */ if (globalObj != root->globalObj || globalShape != root->globalShape) { AUDIT(globalShapeMismatchAtEntry); @@ -4208,7 +4351,7 @@ CheckGlobalObjectShape(JSContext* cx, JSTraceMonitor* tm, JSObject* globalObj, "Global object/shape mismatch (%p/%u vs. %p/%u), flushing cache.\n", (void*)globalObj, globalShape, (void*)root->globalObj, root->globalShape); - js_Backoff(cx, (jsbytecode*) root->ip); + Backoff(cx, (jsbytecode*) root->ip); FlushJITCache(cx); return false; } @@ -4249,9 +4392,9 @@ CheckGlobalObjectShape(JSContext* cx, JSTraceMonitor* tm, JSObject* globalObj, } static JS_REQUIRES_STACK bool -js_StartRecorder(JSContext* cx, VMSideExit* anchor, Fragment* f, TreeInfo* ti, - unsigned stackSlots, unsigned ngslots, JSTraceType* typeMap, - VMSideExit* expectedInnerExit, jsbytecode* outer, uint32 outerArgc) +StartRecorder(JSContext* cx, VMSideExit* anchor, Fragment* f, TreeInfo* ti, + unsigned stackSlots, unsigned ngslots, JSTraceType* typeMap, + VMSideExit* expectedInnerExit, jsbytecode* outer, uint32 outerArgc) { JSTraceMonitor* tm = &JS_TRACE_MONITOR(cx); if (JS_TRACE_MONITOR(cx).needFlush) { @@ -4261,7 +4404,7 @@ js_StartRecorder(JSContext* cx, VMSideExit* anchor, Fragment* f, TreeInfo* ti, JS_ASSERT(f->root != f || !cx->fp->imacpc); - /* start recording if no exception during construction */ + /* Start recording if no exception during construction. */ tm->recorder = new (&gc) TraceRecorder(cx, anchor, f, ti, stackSlots, ngslots, typeMap, expectedInnerExit, outer, outerArgc); @@ -4270,13 +4413,14 @@ js_StartRecorder(JSContext* cx, VMSideExit* anchor, Fragment* f, TreeInfo* ti, js_AbortRecording(cx, "setting up recorder failed"); return false; } - /* clear any leftover error state */ + + /* Clear any leftover error state. 
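DeleteRecorder pairs teardown with cache hygiene: once the recorder is gone, an assembler OOM or an overfull fragmento forces a flush, and the false return tells callers not to resume recording immediately. A control-flow sketch with boolean stand-ins for the two checks:

// Boolean stand-ins replace the assembler-error and overfull-fragmento
// checks; false means the cache was flushed and recording must not resume
// immediately.
struct MonitorSketch {
    void* recorder;
    bool  outOfMem;    // stand-in for assm()->error() == OutOMem
    bool  overfull;    // stand-in for js_OverfullFragmento(tm, fragmento)
};

inline bool DeleteRecorderSketch(MonitorSketch& tm)
{
    tm.recorder = 0;                 // the real code deletes the TraceRecorder
    if (tm.outOfMem || tm.overfull) {
        // FlushJITCache(cx) in the real code: discard compiled traces.
        return false;
    }
    return true;
}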
*/ tm->fragmento->assm()->setError(None); return true; } static void -js_TrashTree(JSContext* cx, Fragment* f) +TrashTree(JSContext* cx, Fragment* f) { JS_ASSERT((!f->code()) == (!f->vmprivate)); JS_ASSERT(f == f->root); @@ -4291,17 +4435,17 @@ js_TrashTree(JSContext* cx, Fragment* f) Fragment** data = ti->dependentTrees.data(); unsigned length = ti->dependentTrees.length(); for (unsigned n = 0; n < length; ++n) - js_TrashTree(cx, data[n]); + TrashTree(cx, data[n]); data = ti->linkedTrees.data(); length = ti->linkedTrees.length(); for (unsigned n = 0; n < length; ++n) - js_TrashTree(cx, data[n]); + TrashTree(cx, data[n]); delete ti; JS_ASSERT(!f->code() && !f->vmprivate); } static int -js_SynthesizeFrame(JSContext* cx, const FrameInfo& fi) +SynthesizeFrame(JSContext* cx, const FrameInfo& fi) { VOUCH_DOES_NOT_REQUIRE_STACK(); @@ -4313,8 +4457,8 @@ js_SynthesizeFrame(JSContext* cx, const FrameInfo& fi) /* Assert that we have a correct sp distance from cx->fp->slots in fi. */ JSStackFrame* fp = cx->fp; JS_ASSERT_IF(!fi.imacpc, - js_ReconstructStackDepth(cx, fp->script, fi.pc) - == uintN(fi.spdist - fp->script->nfixed)); + js_ReconstructStackDepth(cx, fp->script, fi.pc) == + uintN(fi.spdist - fp->script->nfixed)); uintN nframeslots = JS_HOWMANY(sizeof(JSInlineFrame), sizeof(jsval)); JSScript* script = fun->u.i.script; @@ -4353,7 +4497,10 @@ js_SynthesizeFrame(JSContext* cx, const FrameInfo& fi) a->avail += nbytes; JS_ASSERT(missing == 0); } else { - /* This allocation is infallible: js_ExecuteTree reserved enough stack. */ + /* + * This allocation is infallible: ExecuteTree reserved enough stack. + * (But see bug 491023.) + */ JS_ARENA_ALLOCATE_CAST(newsp, jsval *, &cx->stackPool, nbytes); JS_ASSERT(newsp); @@ -4450,10 +4597,14 @@ js_SynthesizeFrame(JSContext* cx, const FrameInfo& fi) newifp->hookData = NULL; } - /* Duplicate native stack layout computation: see VisitFrameSlots header comment. */ - // FIXME? we must count stack slots from caller's operand stack up to (but not including) - // callee's, including missing arguments. Could we shift everything down to the caller's - // fp->slots (where vars start) and avoid some of the complexity? + /* + * Duplicate native stack layout computation: see VisitFrameSlots header comment. + * + * FIXME - We must count stack slots from caller's operand stack up to (but + * not including) callee's, including missing arguments. Could we shift + * everything down to the caller's fp->slots (where vars start) and avoid + * some of the complexity? + */ return (fi.spdist - fp->down->script->nfixed) + ((fun->nargs > fp->argc) ? fun->nargs - fp->argc : 0) + script->nfixed + 1/*argsobj*/; @@ -4467,7 +4618,7 @@ SynthesizeSlowNativeFrame(JSContext *cx, VMSideExit *exit) void *mark; JSInlineFrame *ifp; - /* This allocation is infallible: js_ExecuteTree reserved enough stack. */ + /* This allocation is infallible: ExecuteTree reserved enough stack. 
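The slot count SynthesizeFrame returns follows directly from its final return expression: caller operands above the caller's fixed slots, any missing (undersupplied) arguments, the callee's fixed slots, and one slot for the arguments object. The same arithmetic as a standalone helper:

// Slot accounting modeled on SynthesizeFrame's return expression; counts
// are in 8-byte native slots.
inline unsigned SynthesizedFrameSlotsSketch(unsigned spdist,       // fi.spdist
                                            unsigned callerNfixed, // fp->down->script->nfixed
                                            unsigned nargs,        // fun->nargs
                                            unsigned argc,         // fp->argc
                                            unsigned calleeNfixed) // script->nfixed
{
    unsigned callerOperands = spdist - callerNfixed;
    unsigned missingArgs    = nargs > argc ? nargs - argc : 0;
    return callerOperands + missingArgs + calleeNfixed + 1 /* argsobj */;
}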
*/ mark = JS_ARENA_MARK(&cx->stackPool); JS_ARENA_ALLOCATE_CAST(ifp, JSInlineFrame *, &cx->stackPool, sizeof(JSInlineFrame)); JS_ASSERT(ifp); @@ -4503,16 +4654,16 @@ SynthesizeSlowNativeFrame(JSContext *cx, VMSideExit *exit) cx->fp = fp; } -JS_REQUIRES_STACK bool -js_RecordTree(JSContext* cx, JSTraceMonitor* tm, Fragment* f, jsbytecode* outer, - uint32 outerArgc, JSObject* globalObj, uint32 globalShape, - SlotList* globalSlots, uint32 argc) +static JS_REQUIRES_STACK bool +RecordTree(JSContext* cx, JSTraceMonitor* tm, Fragment* f, jsbytecode* outer, + uint32 outerArgc, JSObject* globalObj, uint32 globalShape, + SlotList* globalSlots, uint32 argc) { JS_ASSERT(f->root == f); /* Make sure the global type map didn't change on us. */ if (!CheckGlobalObjectShape(cx, tm, globalObj)) { - js_Backoff(cx, (jsbytecode*) f->root->ip); + Backoff(cx, (jsbytecode*) f->root->ip); return false; } @@ -4533,7 +4684,7 @@ js_RecordTree(JSContext* cx, JSTraceMonitor* tm, Fragment* f, jsbytecode* outer, f->lirbuf = tm->lirbuf; if (f->lirbuf->outOMem() || js_OverfullFragmento(tm, tm->fragmento)) { - js_Backoff(cx, (jsbytecode*) f->root->ip); + Backoff(cx, (jsbytecode*) f->root->ip); FlushJITCache(cx); debug_only_print0(LC_TMTracer, "Out of memory recording new tree, flushing cache.\n"); @@ -4542,42 +4693,42 @@ js_RecordTree(JSContext* cx, JSTraceMonitor* tm, Fragment* f, jsbytecode* outer, JS_ASSERT(!f->code() && !f->vmprivate); - /* setup the VM-private treeInfo structure for this fragment */ + /* Set up the VM-private treeInfo structure for this fragment. */ TreeInfo* ti = new (&gc) TreeInfo(f, globalSlots); - /* capture the coerced type of each active slot in the type map */ - ti->typeMap.captureTypes(cx, globalObj, *globalSlots, 0/*callDepth*/); + /* Capture the coerced type of each active slot in the type map. */ + ti->typeMap.captureTypes(cx, globalObj, *globalSlots, 0 /* callDepth */); ti->nStackTypes = ti->typeMap.length() - globalSlots->length(); #ifdef DEBUG - ensureTreeIsUnique(tm, (VMFragment*)f, ti); + AssertTreeIsUnique(tm, (VMFragment*)f, ti); ti->treeFileName = cx->fp->script->filename; ti->treeLineNumber = js_FramePCToLineNumber(cx, cx->fp); ti->treePCOffset = FramePCOffset(cx->fp); #endif - /* determine the native frame layout at the entry point */ + /* Determine the native frame layout at the entry point. */ unsigned entryNativeStackSlots = ti->nStackTypes; - JS_ASSERT(entryNativeStackSlots == js_NativeStackSlots(cx, 0/*callDepth*/)); + JS_ASSERT(entryNativeStackSlots == NativeStackSlots(cx, 0 /* callDepth */)); ti->nativeStackBase = (entryNativeStackSlots - (cx->fp->regs->sp - StackBase(cx->fp))) * sizeof(double); ti->maxNativeStackSlots = entryNativeStackSlots; ti->maxCallDepth = 0; ti->script = cx->fp->script; - /* recording primary trace */ - if (!js_StartRecorder(cx, NULL, f, ti, - ti->nStackTypes, - ti->globalSlots->length(), - ti->typeMap.data(), NULL, outer, outerArgc)) { + /* Recording primary trace. 
*/ + if (!StartRecorder(cx, NULL, f, ti, + ti->nStackTypes, + ti->globalSlots->length(), + ti->typeMap.data(), NULL, outer, outerArgc)) { return false; } return true; } -JS_REQUIRES_STACK static inline void -markSlotUndemotable(JSContext* cx, TreeInfo* ti, unsigned slot) +static JS_REQUIRES_STACK inline void +MarkSlotUndemotable(JSContext* cx, TreeInfo* ti, unsigned slot) { if (slot < ti->nStackTypes) { oracle.markStackSlotUndemotable(cx, slot); @@ -4588,8 +4739,8 @@ markSlotUndemotable(JSContext* cx, TreeInfo* ti, unsigned slot) oracle.markGlobalSlotUndemotable(cx, gslots[slot - ti->nStackTypes]); } -JS_REQUIRES_STACK static inline bool -isSlotUndemotable(JSContext* cx, TreeInfo* ti, unsigned slot) +static JS_REQUIRES_STACK inline bool +IsSlotUndemotable(JSContext* cx, TreeInfo* ti, unsigned slot) { if (slot < ti->nStackTypes) return oracle.isStackSlotUndemotable(cx, slot); @@ -4598,9 +4749,9 @@ isSlotUndemotable(JSContext* cx, TreeInfo* ti, unsigned slot) return oracle.isGlobalSlotUndemotable(cx, gslots[slot - ti->nStackTypes]); } -JS_REQUIRES_STACK static bool -js_AttemptToStabilizeTree(JSContext* cx, JSObject* globalObj, VMSideExit* exit, - jsbytecode* outer, uint32 outerArgc) +static JS_REQUIRES_STACK bool +AttemptToStabilizeTree(JSContext* cx, JSObject* globalObj, VMSideExit* exit, jsbytecode* outer, + uint32 outerArgc) { JSTraceMonitor* tm = &JS_TRACE_MONITOR(cx); if (tm->needFlush) { @@ -4614,16 +4765,16 @@ js_AttemptToStabilizeTree(JSContext* cx, JSObject* globalObj, VMSideExit* exit, JS_ASSERT(exit->from->root->code()); /* - * The loop edge exit might not know about all types since the tree could have - * been further specialized since it was recorded. Fill in the missing types - * from the entry type map. + * The loop edge exit might not know about all types since the tree could + * have been further specialized since it was recorded. Fill in the missing + * types from the entry type map. */ - JSTraceType* m = getFullTypeMap(exit); + JSTraceType* m = GetFullTypeMap(exit); unsigned ngslots = exit->numGlobalSlots; if (ngslots < from_ti->nGlobalTypes()) { uint32 partial = exit->numStackSlots + exit->numGlobalSlots; m = (JSTraceType*)alloca(from_ti->typeMap.length() * sizeof(JSTraceType)); - memcpy(m, getFullTypeMap(exit), partial); + memcpy(m, GetFullTypeMap(exit), partial); memcpy(m + partial, from_ti->globalTypeMap() + exit->numGlobalSlots, from_ti->nGlobalTypes() - exit->numGlobalSlots); ngslots = from_ti->nGlobalTypes(); @@ -4636,7 +4787,7 @@ js_AttemptToStabilizeTree(JSContext* cx, JSObject* globalObj, VMSideExit* exit, */ for (unsigned i = 0; i < from_ti->typeMap.length(); i++) { if (m[i] == TT_DOUBLE) - markSlotUndemotable(cx, from_ti, i); + MarkSlotUndemotable(cx, from_ti, i); } bool bound = false; @@ -4645,9 +4796,11 @@ js_AttemptToStabilizeTree(JSContext* cx, JSObject* globalObj, VMSideExit* exit, continue; TreeInfo* ti = (TreeInfo*)f->vmprivate; JS_ASSERT(exit->numStackSlots == ti->nStackTypes); + /* Check the minimum number of slots that need to be compared. */ unsigned checkSlots = JS_MIN(from_ti->typeMap.length(), ti->typeMap.length()); JSTraceType* m2 = ti->typeMap.data(); + /* Analyze the exit typemap against the peer typemap. * Two conditions are important: * 1) Typemaps are identical: these peers can be attached. @@ -4662,10 +4815,12 @@ js_AttemptToStabilizeTree(JSContext* cx, JSObject* globalObj, VMSideExit* exit, if (m[i] == m2[i]) continue; matched = false; - /* If there's an I->D that cannot be resolved, flag it. 
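The peer scan in AttemptToStabilizeTree sorts each candidate into three outcomes: identical maps (link the trees), maps whose only differences are oracle-approved I->D demotions (trash and re-record), or incompatible maps (try the next peer). A sketch of that classification; the real loop also checks slot counts and feeds results back to the oracle:

typedef char TraceType;
const TraceType TT_INT32_S  = 1;   // stand-in tags
const TraceType TT_DOUBLE_S = 2;

enum PeerVerdictSketch { PEER_MATCH, PEER_UNDEMOTE, PEER_INCOMPATIBLE };

// exitMap: types at the unstable exit; peerMap: the candidate tree's entry
// types; undemotable: oracle query for slot i.
inline PeerVerdictSketch
ClassifyPeerSketch(const TraceType* exitMap, const TraceType* peerMap,
                   unsigned n, bool (*undemotable)(unsigned))
{
    bool undemote = false;
    for (unsigned i = 0; i < n; ++i) {
        if (exitMap[i] == peerMap[i])
            continue;
        if (exitMap[i] == TT_INT32_S && peerMap[i] == TT_DOUBLE_S && undemotable(i)) {
            undemote = true;          // fixable; keep scanning the rest
            continue;
        }
        return PEER_INCOMPATIBLE;     // any other mismatch disqualifies
    }
    return undemote ? PEER_UNDEMOTE : PEER_MATCH;
}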
+ + /* + * If there's an I->D that cannot be resolved, flag it. * Otherwise, break and go to the next peer. */ - if (m[i] == TT_INT32 && m2[i] == TT_DOUBLE && isSlotUndemotable(cx, ti, i)) { + if (m[i] == TT_INT32 && m2[i] == TT_DOUBLE && IsSlotUndemotable(cx, ti, i)) { undemote = true; } else { undemote = false; @@ -4675,15 +4830,17 @@ js_AttemptToStabilizeTree(JSContext* cx, JSObject* globalObj, VMSideExit* exit, if (matched) { JS_ASSERT(from_ti->globalSlots == ti->globalSlots); JS_ASSERT(from_ti->nStackTypes == ti->nStackTypes); + /* Capture missing globals on both trees and link the fragments together. */ if (from != f) { ti->dependentTrees.addUnique(from); from_ti->linkedTrees.addUnique(f); } if (ti->nGlobalTypes() < ti->globalSlots->length()) - specializeTreesToMissingGlobals(cx, globalObj, ti); + SpecializeTreesToMissingGlobals(cx, globalObj, ti); exit->target = f; tm->fragmento->assm()->patch(exit); + /* Now erase this exit from the unstable exit list. */ UnstableExit** tail = &from_ti->unstableExits; for (UnstableExit* uexit = from_ti->unstableExits; uexit != NULL; uexit = uexit->next) { @@ -4696,11 +4853,12 @@ js_AttemptToStabilizeTree(JSContext* cx, JSObject* globalObj, VMSideExit* exit, tail = &uexit->next; } JS_ASSERT(bound); - debug_only_stmt( js_DumpPeerStability(tm, f->ip, from->globalObj, from->globalShape, from->argc); ) + debug_only_stmt( DumpPeerStability(tm, f->ip, from->globalObj, from->globalShape, from->argc); ) break; } else if (undemote) { /* The original tree is unconnectable, so trash it. */ - js_TrashTree(cx, f); + TrashTree(cx, f); + /* We shouldn't attempt to record now, since we'll hit a duplicate. */ return false; } @@ -4709,12 +4867,12 @@ js_AttemptToStabilizeTree(JSContext* cx, JSObject* globalObj, VMSideExit* exit, return false; VMFragment* root = (VMFragment*)from->root; - return js_RecordTree(cx, tm, from->first, outer, outerArgc, root->globalObj, - root->globalShape, from_ti->globalSlots, cx->fp->argc); + return RecordTree(cx, tm, from->first, outer, outerArgc, root->globalObj, + root->globalShape, from_ti->globalSlots, cx->fp->argc); } static JS_REQUIRES_STACK bool -js_AttemptToExtendTree(JSContext* cx, VMSideExit* anchor, VMSideExit* exitedFrom, jsbytecode* outer +AttemptToExtendTree(JSContext* cx, VMSideExit* anchor, VMSideExit* exitedFrom, jsbytecode* outer #ifdef MOZ_TRACEVIS , TraceVisStateObj* tvso = NULL #endif @@ -4733,7 +4891,10 @@ js_AttemptToExtendTree(JSContext* cx, VMSideExit* anchor, VMSideExit* exitedFrom JS_ASSERT(f->vmprivate); TreeInfo* ti = (TreeInfo*)f->vmprivate; - /* Don't grow trees above a certain size to avoid code explosion due to tail duplication. */ + /* + * Don't grow trees above a certain size to avoid code explosion due to + * tail duplication. + */ if (ti->branchCount >= MAX_BRANCHES) { #ifdef MOZ_TRACEVIS if (tvso) tvso->r = R_FAIL_EXTEND_MAX_BRANCHES; @@ -4751,9 +4912,10 @@ js_AttemptToExtendTree(JSContext* cx, VMSideExit* anchor, VMSideExit* exitedFrom } /* - * If we are recycling a fragment, it might have a different ip so reset it here. This - * can happen when attaching a branch to a NESTED_EXIT, which might extend along separate paths - * (i.e. after the loop edge, and after a return statement). + * If we are recycling a fragment, it might have a different ip so reset it + * here. This can happen when attaching a branch to a NESTED_EXIT, which + * might extend along separate paths (i.e. after the loop edge, and after a + * return statement). 
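     */

// The peer-matching loop in AttemptToStabilizeTree above reduces to a
// three-way verdict per candidate peer. A minimal standalone sketch of that
// rule, assuming a simplified two-tag type system; TypeTag, kInt32/kDouble
// and the slotUndemotable callback are illustrative stand-ins, not the
// engine's types.
#include <cstddef>

enum TypeTag { kInt32, kDouble };

enum MatchVerdict {
    PEER_MATCHED,   // typemaps agree: bind the exit to this peer
    PEER_UNDEMOTE,  // only oracle-sanctioned I->D widenings differ:
                    // trash the exiting tree and re-record with doubles
    PEER_MISMATCH   // some other difference: try the next peer
};

static MatchVerdict
CompareTypeMaps(const TypeTag* exitMap, const TypeTag* peerMap, size_t len,
                bool (*slotUndemotable)(size_t))
{
    bool undemote = false;
    for (size_t i = 0; i < len; ++i) {
        if (exitMap[i] == peerMap[i])
            continue;
        // An int32 slot on the exit where the peer expects a double is
        // resolvable only if the oracle already treats the slot as
        // undemotable; the exiting tree can then be rebuilt with a double.
        if (exitMap[i] == kInt32 && peerMap[i] == kDouble && slotUndemotable(i)) {
            undemote = true;
            continue;
        }
        return PEER_MISMATCH;   // hard mismatch: this peer is unusable
    }
    return undemote ? PEER_UNDEMOTE : PEER_MATCHED;
}

/*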
*/ c->ip = cx->fp->regs->pc; @@ -4769,22 +4931,27 @@ js_AttemptToExtendTree(JSContext* cx, VMSideExit* anchor, VMSideExit* exitedFrom JSTraceType* typeMap; TypeMap fullMap; if (exitedFrom == NULL) { - /* If we are coming straight from a simple side exit, just use that exit's type map - as starting point. */ + /* + * If we are coming straight from a simple side exit, just use that + * exit's type map as starting point. + */ ngslots = anchor->numGlobalSlots; stackSlots = anchor->numStackSlots; - typeMap = getFullTypeMap(anchor); + typeMap = GetFullTypeMap(anchor); } else { - /* If we side-exited on a loop exit and continue on a nesting guard, the nesting - guard (anchor) has the type information for everything below the current scope, - and the actual guard we exited from has the types for everything in the current - scope (and whatever it inlined). We have to merge those maps here. */ + /* + * If we side-exited on a loop exit and continue on a nesting + * guard, the nesting guard (anchor) has the type information for + * everything below the current scope, and the actual guard we + * exited from has the types for everything in the current scope + * (and whatever it inlined). We have to merge those maps here. + */ VMSideExit* e1 = anchor; VMSideExit* e2 = exitedFrom; - fullMap.add(getStackTypeMap(e1), e1->numStackSlotsBelowCurrentFrame); - fullMap.add(getStackTypeMap(e2), e2->numStackSlots); + fullMap.add(GetStackTypeMap(e1), e1->numStackSlotsBelowCurrentFrame); + fullMap.add(GetStackTypeMap(e2), e2->numStackSlots); stackSlots = fullMap.length(); - fullMap.add(getGlobalTypeMap(e2), e2->numGlobalSlots); + fullMap.add(GetGlobalTypeMap(e2), e2->numGlobalSlots); if (e2->numGlobalSlots < e1->numGlobalSlots) { /* * Watch out for an extremely rare case (bug 502714). The sequence of events is: @@ -4809,15 +4976,15 @@ js_AttemptToExtendTree(JSContext* cx, VMSideExit* anchor, VMSideExit* exitedFrom slots += addSlots; } if (slots < e1->numGlobalSlots) - fullMap.add(getGlobalTypeMap(e1) + slots, e1->numGlobalSlots - slots); + fullMap.add(GetGlobalTypeMap(e1) + slots, e1->numGlobalSlots - slots); JS_ASSERT(slots == e1->numGlobalSlots); } ngslots = e1->numGlobalSlots; typeMap = fullMap.data(); } JS_ASSERT(ngslots >= anchor->numGlobalSlots); - bool rv = js_StartRecorder(cx, anchor, c, (TreeInfo*)f->vmprivate, stackSlots, - ngslots, typeMap, exitedFrom, outer, cx->fp->argc); + bool rv = StartRecorder(cx, anchor, c, (TreeInfo*)f->vmprivate, stackSlots, + ngslots, typeMap, exitedFrom, outer, cx->fp->argc); #ifdef MOZ_TRACEVIS if (!rv && tvso) tvso->r = R_FAIL_EXTEND_START; @@ -4831,11 +4998,11 @@ js_AttemptToExtendTree(JSContext* cx, VMSideExit* anchor, VMSideExit* exitedFrom } static JS_REQUIRES_STACK VMSideExit* -js_ExecuteTree(JSContext* cx, Fragment* f, uintN& inlineCallCount, - VMSideExit** innermostNestedGuardp); +ExecuteTree(JSContext* cx, Fragment* f, uintN& inlineCallCount, + VMSideExit** innermostNestedGuardp); -JS_REQUIRES_STACK bool -js_RecordLoopEdge(JSContext* cx, TraceRecorder* r, uintN& inlineCallCount) +static JS_REQUIRES_STACK bool +RecordLoopEdge(JSContext* cx, TraceRecorder* r, uintN& inlineCallCount) { #ifdef JS_THREADSAFE if (OBJ_SCOPE(JS_GetGlobalForObject(cx, cx->fp->scopeChain))->title.ownercx != cx) { @@ -4876,13 +5043,15 @@ js_RecordLoopEdge(JSContext* cx, TraceRecorder* r, uintN& inlineCallCount) /* Make sure inner tree call will not run into an out-of-memory condition. 
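 */

// The nesting-guard case in AttemptToExtendTree above assembles the full
// typemap from two exits. A sketch of that assembly, assuming std::vector in
// place of the engine's TypeMap class; ExitMaps and its fields are
// illustrative names, not the real structures.
#include <vector>

typedef unsigned char TraceType;

struct ExitMaps {
    std::vector<TraceType> stackBelowFrame; // anchor (e1): slots below the current frame
    std::vector<TraceType> stack;           // exited guard (e2): current-scope slots
    std::vector<TraceType> globals;         // exited guard (e2): globals it knows about
};

static std::vector<TraceType>
MergeNestedExitMaps(const ExitMaps& m)
{
    std::vector<TraceType> full;
    // Everything below the current scope comes from the nesting guard...
    full.insert(full.end(), m.stackBelowFrame.begin(), m.stackBelowFrame.end());
    // ...the current scope (and whatever it inlined) from the exited guard...
    full.insert(full.end(), m.stack.begin(), m.stack.end());
    // ...then the exited guard's globals. (The engine additionally pads any
    // globals e2 is missing from e1 and linked trees; elided here.)
    full.insert(full.end(), m.globals.begin(), m.globals.end());
    return full;
}

/*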
*/ if (tm->reservedDoublePoolPtr < (tm->reservedDoublePool + MAX_NATIVE_STACK_SLOTS) && - !js_ReplenishReservedPool(cx, tm)) { + !ReplenishReservedPool(cx, tm)) { js_AbortRecording(cx, "Couldn't call inner tree (out of memory)"); return false; } - /* Make sure the shape of the global object still matches (this might flush - the JIT cache). */ + /* + * Make sure the shape of the global object still matches (this might flush + * the JIT cache). + */ JSObject* globalObj = JS_GetGlobalForObject(cx, cx->fp->scopeChain); uint32 globalShape = -1; SlotList* globalSlots = NULL; @@ -4918,13 +5087,13 @@ js_RecordLoopEdge(JSContext* cx, TraceRecorder* r, uintN& inlineCallCount) return false; } } - return js_RecordTree(cx, tm, f, outer, outerArgc, globalObj, globalShape, globalSlots, argc); + return RecordTree(cx, tm, f, outer, outerArgc, globalObj, globalShape, globalSlots, argc); } r->adjustCallerTypes(f); r->prepareTreeCall(f); VMSideExit* innermostNestedGuard = NULL; - VMSideExit* lr = js_ExecuteTree(cx, f, inlineCallCount, &innermostNestedGuard); + VMSideExit* lr = ExecuteTree(cx, f, inlineCallCount, &innermostNestedGuard); if (!lr || r->wasDeepAborted()) { if (!lr) js_AbortRecording(cx, "Couldn't call inner tree"); @@ -4939,23 +5108,27 @@ js_RecordLoopEdge(JSContext* cx, TraceRecorder* r, uintN& inlineCallCount) if (innermostNestedGuard) { js_AbortRecording(cx, "Inner tree took different side exit, abort current " "recording and grow nesting tree"); - return js_AttemptToExtendTree(cx, innermostNestedGuard, lr, outer); + return AttemptToExtendTree(cx, innermostNestedGuard, lr, outer); } - /* emit a call to the inner tree and continue recording the outer tree trace */ + + /* Emit a call to the inner tree and continue recording the outer tree trace. */ r->emitTreeCall(f, lr); return true; + case UNSTABLE_LOOP_EXIT: - /* abort recording so the inner loop can become type stable. */ + /* Abort recording so the inner loop can become type stable. */ js_AbortRecording(cx, "Inner tree is trying to stabilize, abort outer recording"); - return js_AttemptToStabilizeTree(cx, globalObj, lr, outer, outerFragment->argc); + return AttemptToStabilizeTree(cx, globalObj, lr, outer, outerFragment->argc); + case OVERFLOW_EXIT: oracle.markInstructionUndemotable(cx->fp->regs->pc); - /* fall through */ + /* FALL THROUGH */ case BRANCH_EXIT: case CASE_EXIT: - /* abort recording the outer tree, extend the inner tree */ + /* Abort recording the outer tree, extend the inner tree. 
*/ js_AbortRecording(cx, "Inner tree is trying to grow, abort outer recording"); - return js_AttemptToExtendTree(cx, lr, NULL, outer); + return AttemptToExtendTree(cx, lr, NULL, outer); + default: debug_only_printf(LC_TMTracer, "exit_type=%s\n", getExitName(lr->exitType)); js_AbortRecording(cx, "Inner tree not suitable for calling"); @@ -4964,7 +5137,7 @@ js_RecordLoopEdge(JSContext* cx, TraceRecorder* r, uintN& inlineCallCount) } static bool -js_IsEntryTypeCompatible(jsval* vp, JSTraceType* m) +IsEntryTypeCompatible(jsval* vp, JSTraceType* m) { unsigned tag = JSVAL_TAG(*vp); @@ -5040,7 +5213,7 @@ public: JS_REQUIRES_STACK JS_ALWAYS_INLINE void visitGlobalSlot(jsval *vp, unsigned n, unsigned slot) { debug_only_printf(LC_TMTracer, "global%d=", n); - if (!js_IsEntryTypeCompatible(vp, mTypeMap)) { + if (!IsEntryTypeCompatible(vp, mTypeMap)) { mOk = false; } else if (!isPromoteInt(mRecorder.get(vp)) && *mTypeMap == TT_INT32) { oracle.markGlobalSlotUndemotable(mCx, slot); @@ -5055,7 +5228,7 @@ public: visitStackSlots(jsval *vp, size_t count, JSStackFrame* fp) { for (size_t i = 0; i < count; ++i) { debug_only_printf(LC_TMTracer, "%s%u=", stackSlotKind(), unsigned(i)); - if (!js_IsEntryTypeCompatible(vp, mTypeMap)) { + if (!IsEntryTypeCompatible(vp, mTypeMap)) { mOk = false; } else if (!isPromoteInt(mRecorder.get(vp)) && *mTypeMap == TT_INT32) { oracle.markStackSlotUndemotable(mCx, mStackSlotNum); @@ -5093,15 +5266,16 @@ TraceRecorder::findNestedCompatiblePeer(Fragment* f) debug_only_printf(LC_TMTracer, "checking nested types %p: ", (void*)f); if (ngslots > ti->nGlobalTypes()) - specializeTreesToMissingGlobals(cx, globalObj, ti); + SpecializeTreesToMissingGlobals(cx, globalObj, ti); /* - * Determine whether the typemap of the inner tree matches the outer tree's - * current state. If the inner tree expects an integer, but the outer tree - * doesn't guarantee an integer for that slot, we mark the slot undemotable - * and mismatch here. This will force a new tree to be compiled that accepts - * a double for the slot. If the inner tree expects a double, but the outer - * tree has an integer, we can proceed, but we mark the location undemotable. + * Determine whether the typemap of the inner tree matches the outer + * tree's current state. If the inner tree expects an integer, but the + * outer tree doesn't guarantee an integer for that slot, we mark the + * slot undemotable and mismatch here. This will force a new tree to be + * compiled that accepts a double for the slot. If the inner tree + * expects a double, but the outer tree has an integer, we can proceed, + * but we mark the location undemotable. */ TypeCompatibilityVisitor visitor(*this, ti->typeMap.data()); VisitSlots(visitor, cx, 0, *treeInfo->globalSlots); @@ -5127,7 +5301,7 @@ public: JS_ALWAYS_INLINE void checkSlot(jsval *vp, char const *name, int i) { debug_only_printf(LC_TMTracer, "%s%d=", name, i); JS_ASSERT(*(uint8_t*)mTypeMap != 0xCD); - mOk = js_IsEntryTypeCompatible(vp, mTypeMap++); + mOk = IsEntryTypeCompatible(vp, mTypeMap++); } JS_REQUIRES_STACK JS_ALWAYS_INLINE void @@ -5159,16 +5333,16 @@ public: * @return True if compatible (with or without demotions), false otherwise. 
*/ static JS_REQUIRES_STACK bool -js_CheckEntryTypes(JSContext* cx, JSObject* globalObj, TreeInfo* ti) +CheckEntryTypes(JSContext* cx, JSObject* globalObj, TreeInfo* ti) { unsigned int ngslots = ti->globalSlots->length(); - JS_ASSERT(ti->nStackTypes == js_NativeStackSlots(cx, 0)); + JS_ASSERT(ti->nStackTypes == NativeStackSlots(cx, 0)); if (ngslots > ti->nGlobalTypes()) - specializeTreesToMissingGlobals(cx, globalObj, ti); + SpecializeTreesToMissingGlobals(cx, globalObj, ti); - JS_ASSERT(ti->typeMap.length() == js_NativeStackSlots(cx, 0) + ngslots); + JS_ASSERT(ti->typeMap.length() == NativeStackSlots(cx, 0) + ngslots); JS_ASSERT(ti->typeMap.length() == ti->nStackTypes + ngslots); JS_ASSERT(ti->nGlobalTypes() == ngslots); @@ -5189,7 +5363,7 @@ js_CheckEntryTypes(JSContext* cx, JSObject* globalObj, TreeInfo* ti) * @out count Number of fragments consulted. */ static JS_REQUIRES_STACK Fragment* -js_FindVMCompatiblePeer(JSContext* cx, JSObject* globalObj, Fragment* f, uintN& count) +FindVMCompatiblePeer(JSContext* cx, JSObject* globalObj, Fragment* f, uintN& count) { count = 0; for (; f != NULL; f = f->peer) { @@ -5197,7 +5371,7 @@ js_FindVMCompatiblePeer(JSContext* cx, JSObject* globalObj, Fragment* f, uintN& continue; debug_only_printf(LC_TMTracer, "checking vm types %p (ip: %p): ", (void*)f, f->ip); - if (js_CheckEntryTypes(cx, globalObj, (TreeInfo*)f->vmprivate)) + if (CheckEntryTypes(cx, globalObj, (TreeInfo*)f->vmprivate)) return f; ++count; } @@ -5207,12 +5381,9 @@ js_FindVMCompatiblePeer(JSContext* cx, JSObject* globalObj, Fragment* f, uintN& static void LeaveTree(InterpState&, VMSideExit* lr); -/** - * Executes a tree. - */ static JS_REQUIRES_STACK VMSideExit* -js_ExecuteTree(JSContext* cx, Fragment* f, uintN& inlineCallCount, - VMSideExit** innermostNestedGuardp) +ExecuteTree(JSContext* cx, Fragment* f, uintN& inlineCallCount, + VMSideExit** innermostNestedGuardp) { #ifdef MOZ_TRACEVIS TraceVisStateObj tvso(S_EXECUTE); @@ -5228,8 +5399,10 @@ js_ExecuteTree(JSContext* cx, Fragment* f, uintN& inlineCallCount, unsigned globalFrameSize = STOBJ_NSLOTS(globalObj); /* Make sure the global object is sane. */ - JS_ASSERT(!ngslots || (OBJ_SHAPE(JS_GetGlobalForObject(cx, cx->fp->scopeChain)) == - ((VMFragment*)f)->globalShape)); + JS_ASSERT_IF(ngslots != 0, + OBJ_SHAPE(JS_GetGlobalForObject(cx, cx->fp->scopeChain)) == + ((VMFragment*)f)->globalShape); + /* Make sure our caller replenished the double pool. */ JS_ASSERT(tm->reservedDoublePoolPtr >= tm->reservedDoublePool + MAX_NATIVE_STACK_SLOTS); @@ -5241,7 +5414,7 @@ js_ExecuteTree(JSContext* cx, Fragment* f, uintN& inlineCallCount, uintN savedProhibitFlush = JS_TRACE_MONITOR(cx).prohibitFlush; #endif - /* Setup the interpreter state block, which is followed by the native global frame. */ + /* Set up the interpreter state block, which is followed by the native global frame. */ InterpState* state = (InterpState*)alloca(sizeof(InterpState) + (globalFrameSize+1)*sizeof(double)); state->cx = cx; state->inlineCallCountp = &inlineCallCount; @@ -5252,16 +5425,16 @@ js_ExecuteTree(JSContext* cx, Fragment* f, uintN& inlineCallCount, state->rpAtLastTreeCall = NULL; state->builtinStatus = 0; - /* Setup the native global frame. */ + /* Set up the native global frame. */ double* global = (double*)(state+1); - /* Setup the native stack frame. */ + /* Set up the native stack frame. 
*/
    double stack_buffer[MAX_NATIVE_STACK_SLOTS];
    state->stackBase = stack_buffer;
    state->sp = stack_buffer + (ti->nativeStackBase/sizeof(double));
    state->eos = stack_buffer + MAX_NATIVE_STACK_SLOTS;

-    /* Setup the native call stack frame. */
+    /* Set up the native call stack frame. */
     FrameInfo* callstack_buffer[MAX_CALL_STACK_ENTRIES];
     state->callstackBase = callstack_buffer;
     state->rp = callstack_buffer;
@@ -5289,7 +5462,7 @@ js_ExecuteTree(JSContext* cx, Fragment* f, uintN& inlineCallCount,
            f->code());

     JS_ASSERT(ti->nGlobalTypes() == ngslots);
-    BuildNativeFrame(cx, globalObj, 0/*callDepth*/, ngslots, gslots,
+    BuildNativeFrame(cx, globalObj, 0 /* callDepth */, ngslots, gslots,
                      ti->typeMap.data(), global, stack_buffer);

     union { NIns *code; GuardRecord* (FASTCALL *func)(InterpState*, Fragment*); } u;
@@ -5306,6 +5479,7 @@ js_ExecuteTree(JSContext* cx, Fragment* f, uintN& inlineCallCount,
     debug_only_stmt(fflush(NULL));
     GuardRecord* rec;
+
     // Note that the block scoping is crucial here for TraceVis; the
     // TraceVisStateObj constructors and destructors must run at the right times.
     {
@@ -5340,34 +5514,46 @@ LeaveTree(InterpState& state, VMSideExit* lr)
     FrameInfo** callstack = state.callstackBase;
     double* stack = state.stackBase;

-    /* Except if we find that this is a nested bailout, the guard the call returned is the
-       one we have to use to adjust pc and sp. */
+    /*
+     * Except if we find that this is a nested bailout, the guard the call
+     * returned is the one we have to use to adjust pc and sp.
+     */
     VMSideExit* innermost = lr;

-    /* While executing a tree we do not update state.sp and state.rp even if they grow. Instead,
-       guards tell us by how much sp and rp should be incremented in case of a side exit. When
-       calling a nested tree, however, we actively adjust sp and rp. If we have such frames
-       from outer trees on the stack, then rp will have been adjusted. Before we can process
-       the stack of the frames of the tree we directly exited from, we have to first work our
-       way through the outer frames and generate interpreter frames for them. Once the call
-       stack (rp) is empty, we can process the final frames (which again are not directly
-       visible and only the guard we exited on will tells us about). */
+    /*
+     * While executing a tree we do not update state.sp and state.rp even if
+     * they grow. Instead, guards tell us by how much sp and rp should be
+     * incremented in case of a side exit. When calling a nested tree, however,
+     * we actively adjust sp and rp. If we have such frames from outer trees on
+     * the stack, then rp will have been adjusted. Before we can process the
+     * stack of the frames of the tree we directly exited from, we have to
+     * first work our way through the outer frames and generate interpreter
+     * frames for them. Once the call stack (rp) is empty, we can process the
+     * final frames (which again are not directly visible and only the guard we
+     * exited on will tell us about).
+     */
     FrameInfo** rp = (FrameInfo**)state.rp;
     if (lr->exitType == NESTED_EXIT) {
         VMSideExit* nested = state.lastTreeCallGuard;
         if (!nested) {
-            /* If lastTreeCallGuard is not set in state, we only have a single level of
-               nesting in this exit, so lr itself is the innermost and outermost nested
-               guard, and hence we set nested to lr. The calldepth of the innermost guard
-               is not added to state.rp, so we do it here manually. For a nesting depth
-               greater than 1 the CallTree builtin already added the innermost guard's
-               calldepth to state.rpAtLastTreeCall.
*/ + /* + * If lastTreeCallGuard is not set in state, we only have a single + * level of nesting in this exit, so lr itself is the innermost and + * outermost nested guard, and hence we set nested to lr. The + * calldepth of the innermost guard is not added to state.rp, so we + * do it here manually. For a nesting depth greater than 1 the + * CallTree builtin already added the innermost guard's calldepth + * to state.rpAtLastTreeCall. + */ nested = lr; rp += lr->calldepth; } else { - /* During unwinding state.rp gets overwritten at every step and we restore - it here to its state at the innermost nested guard. The builtin already - added the calldepth of that innermost guard to rpAtLastTreeCall. */ + /* + * During unwinding state.rp gets overwritten at every step and we + * restore it here to its state at the innermost nested guard. The + * builtin already added the calldepth of that innermost guard to + * rpAtLastTreeCall. + */ rp = (FrameInfo**)state.rpAtLastTreeCall; } innermost = state.lastTreeExitGuard; @@ -5408,7 +5594,7 @@ LeaveTree(InterpState& state, VMSideExit* lr) * then immediately flunked the guard on state->builtinStatus. * * Now LeaveTree has been called again from the tail of - * js_ExecuteTree. We are about to return to the interpreter. Adjust + * ExecuteTree. We are about to return to the interpreter. Adjust * the top stack frame to resume on the next op. */ jsbytecode *pc = cx->fp->regs->pc; @@ -5426,7 +5612,7 @@ LeaveTree(InterpState& state, VMSideExit* lr) * The return value was not available when we reconstructed the stack, * but we have it now. Box it. */ - JSTraceType* typeMap = getStackTypeMap(innermost); + JSTraceType* typeMap = GetStackTypeMap(innermost); /* * If there's a tree call around the point that we deep exited at, @@ -5448,9 +5634,11 @@ LeaveTree(InterpState& state, VMSideExit* lr) JS_ARENA_RELEASE(&cx->stackPool, state.stackMark); while (callstack < rp) { - /* Synthesize a stack frame and write out the values in it using the type map pointer - on the native call stack. */ - js_SynthesizeFrame(cx, **callstack); + /* + * Synthesize a stack frame and write out the values in it using the + * type map pointer on the native call stack. + */ + SynthesizeFrame(cx, **callstack); int slots = FlushNativeStackFrame(cx, 1 /* callDepth */, (JSTraceType*)(*callstack + 1), stack, cx->fp); #ifdef DEBUG @@ -5462,20 +5650,25 @@ LeaveTree(InterpState& state, VMSideExit* lr) FramePCOffset(fp), slots); #endif - /* Keep track of the additional frames we put on the interpreter stack and the native - stack slots we consumed. */ + /* + * Keep track of the additional frames we put on the interpreter stack + * and the native stack slots we consumed. + */ ++*state.inlineCallCountp; ++callstack; stack += slots; } - /* We already synthesized the frames around the innermost guard. Here we just deal - with additional frames inside the tree we are bailing out from. */ + /* + * We already synthesized the frames around the innermost guard. Here we + * just deal with additional frames inside the tree we are bailing out + * from. 
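 */

// The loop above advances two cursors in lockstep: one over synthesized
// interpreter frames, one over the flattened native slots they consumed. A
// toy model of that bookkeeping with made-up frame sizes; SynthesizeFrameStub
// is a hypothetical stand-in for SynthesizeFrame.
#include <cstdio>

static unsigned SynthesizeFrameStub(unsigned frameSlots) { return frameSlots; }

int main()
{
    const unsigned frames[] = { 4, 7, 3 };   // pending entries between callstack and rp
    unsigned inlineCallCount = 0;            // interpreter frames we put back
    unsigned consumed = 0;                   // native stack slots we advance past
    for (unsigned i = 0; i < sizeof(frames) / sizeof(frames[0]); ++i) {
        consumed += SynthesizeFrameStub(frames[i]);
        ++inlineCallCount;                   // one interpreter frame per entry
    }
    std::printf("synthesized %u frames covering %u native slots\n",
                inlineCallCount, consumed);
    return 0;
}

/*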
+ */ JS_ASSERT(rp == callstack); unsigned calldepth = innermost->calldepth; unsigned calldepth_slots = 0; for (unsigned n = 0; n < calldepth; ++n) { - calldepth_slots += js_SynthesizeFrame(cx, *callstack[n]); + calldepth_slots += SynthesizeFrame(cx, *callstack[n]); ++*state.inlineCallCountp; #ifdef DEBUG JSStackFrame* fp = cx->fp; @@ -5486,16 +5679,21 @@ LeaveTree(InterpState& state, VMSideExit* lr) #endif } - /* Adjust sp and pc relative to the tree we exited from (not the tree we entered into). - These are our final values for sp and pc since js_SynthesizeFrame has already taken - care of all frames in between. But first we recover fp->blockChain, which comes from - the side exit struct. */ + /* + * Adjust sp and pc relative to the tree we exited from (not the tree we + * entered into). These are our final values for sp and pc since + * SynthesizeFrame has already taken care of all frames in between. But + * first we recover fp->blockChain, which comes from the side exit + * struct. + */ JSStackFrame* fp = cx->fp; fp->blockChain = innermost->block; - /* If we are not exiting from an inlined frame the state->sp is spbase, otherwise spbase - is whatever slots frames around us consume. */ + /* + * If we are not exiting from an inlined frame, the state->sp is spbase. + * Otherwise spbase is whatever slots frames around us consume. + */ fp->regs->pc = innermost->pc; fp->imacpc = innermost->imacpc; fp->regs->sp = StackBase(fp) + (innermost->sp_adj / sizeof(double)) - calldepth_slots; @@ -5522,47 +5720,54 @@ LeaveTree(InterpState& state, VMSideExit* lr) calldepth, cycles); - /* If this trace is part of a tree, later branches might have added additional globals for - which we don't have any type information available in the side exit. We merge in this - information from the entry type-map. See also comment in the constructor of TraceRecorder - why this is always safe to do. */ + /* + * If this trace is part of a tree, later branches might have added + * additional globals for which we don't have any type information + * available in the side exit. We merge in this information from the entry + * type-map. See also the comment in the constructor of TraceRecorder + * regarding why this is always safe to do. + */ TreeInfo* outermostTree = state.outermostTree; uint16* gslots = outermostTree->globalSlots->data(); unsigned ngslots = outermostTree->globalSlots->length(); JS_ASSERT(ngslots == outermostTree->nGlobalTypes()); JSTraceType* globalTypeMap; - /* Are there enough globals? This is the ideal fast path. */ + /* Are there enough globals? */ if (innermost->numGlobalSlots == ngslots) { - globalTypeMap = getGlobalTypeMap(innermost); - /* Otherwise, merge the typemap of the innermost entry and exit together. This should always - work because it is invalid for nested trees or linked trees to have incompatible types. - Thus, whenever a new global type is lazily added into a tree, all dependent and linked - trees are immediately specialized (see bug 476653). */ + /* Yes. This is the ideal fast path. */ + globalTypeMap = GetGlobalTypeMap(innermost); } else { + /* + * No. Merge the typemap of the innermost entry and exit together. This + * should always work because it is invalid for nested trees or linked + * trees to have incompatible types. Thus, whenever a new global type + * is lazily added into a tree, all dependent and linked trees are + * immediately specialized (see bug 476653). 
+ */ TreeInfo* ti = (TreeInfo*)innermost->from->root->vmprivate; JS_ASSERT(ti->nGlobalTypes() == ngslots); JS_ASSERT(ti->nGlobalTypes() > innermost->numGlobalSlots); globalTypeMap = (JSTraceType*)alloca(ngslots * sizeof(JSTraceType)); - memcpy(globalTypeMap, getGlobalTypeMap(innermost), innermost->numGlobalSlots); + memcpy(globalTypeMap, GetGlobalTypeMap(innermost), innermost->numGlobalSlots); memcpy(globalTypeMap + innermost->numGlobalSlots, ti->globalTypeMap() + innermost->numGlobalSlots, ti->nGlobalTypes() - innermost->numGlobalSlots); } - /* write back native stack frame */ + /* Write back the topmost native stack frame. */ #ifdef DEBUG int slots = #endif FlushNativeStackFrame(cx, innermost->calldepth, - getStackTypeMap(innermost), + GetStackTypeMap(innermost), stack, NULL); JS_ASSERT(unsigned(slots) == innermost->numStackSlots); if (innermost->nativeCalleeWord) SynthesizeSlowNativeFrame(cx, innermost); - /* write back interned globals */ + /* Write back interned globals. */ double* global = (double*)(&state + 1); FlushNativeGlobalFrame(cx, global, ngslots, gslots, globalTypeMap); @@ -5572,7 +5777,7 @@ LeaveTree(InterpState& state, VMSideExit* lr) cx->nativeVp = NULL; #ifdef DEBUG - // Verify that our state restoration worked. + /* Verify that our state restoration worked. */ for (JSStackFrame* fp = cx->fp; fp; fp = fp->down) { JS_ASSERT_IF(fp->callee, JSVAL_IS_OBJECT(fp->argv[-1])); } @@ -5600,18 +5805,19 @@ js_MonitorLoopEdge(JSContext* cx, uintN& inlineCallCount) if (tm->recorder) { jsbytecode* innerLoopHeaderPC = cx->fp->regs->pc; - if (js_RecordLoopEdge(cx, tm->recorder, inlineCallCount)) + if (RecordLoopEdge(cx, tm->recorder, inlineCallCount)) return true; /* - * js_RecordLoopEdge will invoke an inner tree if we have a matching one. If we - * arrive here, that tree didn't run to completion and instead we mis-matched - * or the inner tree took a side exit other than the loop exit. We are thus - * no longer guaranteed to be parked on the same loop header js_MonitorLoopEdge - * was called for. In fact, this might not even be a loop header at all. Hence - * if the program counter no longer hovers over the inner loop header, return to - * the interpreter and do not attempt to trigger or record a new tree at this - * location. + * RecordLoopEdge will invoke an inner tree if we have a matching + * one. If we arrive here, that tree didn't run to completion and + * instead we mis-matched or the inner tree took a side exit other than + * the loop exit. We are thus no longer guaranteed to be parked on the + * same loop header js_MonitorLoopEdge was called for. In fact, this + * might not even be a loop header at all. Hence if the program counter + * no longer hovers over the inner loop header, return to the + * interpreter and do not attempt to trigger or record a new tree at + * this location. */ if (innerLoopHeaderPC != cx->fp->regs->pc) { #ifdef MOZ_TRACEVIS @@ -5624,20 +5830,23 @@ js_MonitorLoopEdge(JSContext* cx, uintN& inlineCallCount) /* Check the pool of reserved doubles (this might trigger a GC). */ if (tm->reservedDoublePoolPtr < (tm->reservedDoublePool + MAX_NATIVE_STACK_SLOTS) && - !js_ReplenishReservedPool(cx, tm)) { + !ReplenishReservedPool(cx, tm)) { #ifdef MOZ_TRACEVIS tvso.r = R_DOUBLES; #endif return false; /* Out of memory, don't try to record now. */ } - /* Make sure the shape of the global object still matches (this might flush the JIT cache). */ + /* + * Make sure the shape of the global object still matches (this might flush + * the JIT cache). 
+ */ JSObject* globalObj = JS_GetGlobalForObject(cx, cx->fp->scopeChain); uint32 globalShape = -1; SlotList* globalSlots = NULL; if (!CheckGlobalObjectShape(cx, tm, globalObj, &globalShape, &globalSlots)) { - js_Backoff(cx, cx->fp->regs->pc); + Backoff(cx, cx->fp->regs->pc); return false; } @@ -5664,8 +5873,10 @@ js_MonitorLoopEdge(JSContext* cx, uintN& inlineCallCount) return false; } - /* If we have no code in the anchor and no peers, we definitively won't be able to - activate any trees so, start compiling. */ + /* + * If we have no code in the anchor and no peers, we definitively won't be + * able to activate any trees, so start compiling. + */ if (!f->code() && !f->peer) { record: if (++f->hits() < HOTLOOP) { @@ -5674,10 +5885,14 @@ js_MonitorLoopEdge(JSContext* cx, uintN& inlineCallCount) #endif return false; } - /* We can give RecordTree the root peer. If that peer is already taken, it will - walk the peer list and find us a free slot or allocate a new tree if needed. */ - bool rv = js_RecordTree(cx, tm, f->first, NULL, 0, globalObj, globalShape, - globalSlots, argc); + + /* + * We can give RecordTree the root peer. If that peer is already taken, + * it will walk the peer list and find us a free slot or allocate a new + * tree if needed. + */ + bool rv = RecordTree(cx, tm, f->first, NULL, 0, globalObj, globalShape, + globalSlots, argc); #ifdef MOZ_TRACEVIS if (!rv) tvso.r = R_FAIL_RECORD_TREE; @@ -5691,14 +5906,17 @@ js_MonitorLoopEdge(JSContext* cx, uintN& inlineCallCount) FramePCOffset(cx->fp), (void*)f, f->ip); uintN count; - Fragment* match = js_FindVMCompatiblePeer(cx, globalObj, f, count); + Fragment* match = FindVMCompatiblePeer(cx, globalObj, f, count); if (!match) { if (count < MAXPEERS) goto record; - /* If we hit the max peers ceiling, don't try to lookup fragments all the time. Thats - expensive. This must be a rather type-unstable loop. */ + + /* + * If we hit the max peers ceiling, don't try to lookup fragments all + * the time. That's expensive. This must be a rather type-unstable loop. + */ debug_only_print0(LC_TMTracer, "Blacklisted: too many peer trees.\n"); - js_Blacklist((jsbytecode*) f->root->ip); + Blacklist((jsbytecode*) f->root->ip); #ifdef MOZ_TRACEVIS tvso.r = R_MAX_PEERS; #endif @@ -5708,7 +5926,7 @@ js_MonitorLoopEdge(JSContext* cx, uintN& inlineCallCount) VMSideExit* lr = NULL; VMSideExit* innermostNestedGuard = NULL; - lr = js_ExecuteTree(cx, match, inlineCallCount, &innermostNestedGuard); + lr = ExecuteTree(cx, match, inlineCallCount, &innermostNestedGuard); if (!lr) { #ifdef MOZ_TRACEVIS tvso.r = R_FAIL_EXECUTE_TREE; @@ -5716,31 +5934,35 @@ js_MonitorLoopEdge(JSContext* cx, uintN& inlineCallCount) return false; } - /* If we exit on a branch, or on a tree call guard, try to grow the inner tree (in case - of a branch exit), or the tree nested around the tree we exited from (in case of the - tree call guard). */ + /* + * If we exit on a branch, or on a tree call guard, try to grow the inner + * tree (in case of a branch exit), or the tree nested around the tree we + * exited from (in case of the tree call guard). 
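 */

// A condensed sketch of the decision ladder js_MonitorLoopEdge walks above,
// with illustrative constants; LoopState and ShouldRecord are stand-ins for
// exposition, not the engine's data structures.
#include <cstdint>

enum { HOTLOOP_THRESHOLD = 2, MAX_PEERS = 9 };   // assumed values for the sketch

struct LoopState {
    uint32_t hits;        // loop-edge crossings while still cold
    uint32_t peers;       // compiled peer trees at this loop header
    bool blacklisted;     // deemed too type-unstable to keep trying
};

// Returns true if a (new) tree should be recorded at this loop edge.
static bool ShouldRecord(LoopState& s, bool foundCompatiblePeer)
{
    if (s.blacklisted || foundCompatiblePeer)
        return false;                    // nothing to record: execute or stay cold
    if (s.peers >= MAX_PEERS) {
        s.blacklisted = true;            // peer lookup got too expensive: give up
        return false;
    }
    return ++s.hits >= HOTLOOP_THRESHOLD;   // record only once the loop is hot
}

/*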
+ */ bool rv; switch (lr->exitType) { case UNSTABLE_LOOP_EXIT: - rv = js_AttemptToStabilizeTree(cx, globalObj, lr, NULL, NULL); + rv = AttemptToStabilizeTree(cx, globalObj, lr, NULL, NULL); #ifdef MOZ_TRACEVIS if (!rv) tvso.r = R_FAIL_STABILIZE; #endif return rv; + case OVERFLOW_EXIT: oracle.markInstructionUndemotable(cx->fp->regs->pc); - /* fall through */ + /* FALL THROUGH */ case BRANCH_EXIT: case CASE_EXIT: - return js_AttemptToExtendTree(cx, lr, NULL, NULL + return AttemptToExtendTree(cx, lr, NULL, NULL #ifdef MOZ_TRACEVIS , &tvso #endif ); + case LOOP_EXIT: if (innermostNestedGuard) - return js_AttemptToExtendTree(cx, innermostNestedGuard, lr, NULL + return AttemptToExtendTree(cx, innermostNestedGuard, lr, NULL #ifdef MOZ_TRACEVIS , &tvso #endif @@ -5749,6 +5971,7 @@ js_MonitorLoopEdge(JSContext* cx, uintN& inlineCallCount) tvso.r = R_NO_EXTEND_OUTER; #endif return false; + #ifdef MOZ_TRACEVIS case MISMATCH_EXIT: tvso.r = R_MISMATCH_EXIT; return false; case OOM_EXIT: tvso.r = R_OOM_EXIT; return false; @@ -5756,8 +5979,12 @@ js_MonitorLoopEdge(JSContext* cx, uintN& inlineCallCount) case DEEP_BAIL_EXIT: tvso.r = R_DEEP_BAIL_EXIT; return false; case STATUS_EXIT: tvso.r = R_STATUS_EXIT; return false; #endif + default: - /* No, this was an unusual exit (i.e. out of memory/GC), so just resume interpretation. */ + /* + * No, this was an unusual exit (i.e. out of memory/GC), so just resume + * interpretation. + */ #ifdef MOZ_TRACEVIS tvso.r = R_OTHER_EXIT; #endif @@ -5795,9 +6022,12 @@ TraceRecorder::monitorRecording(JSContext* cx, TraceRecorder* tr, JSOp op) } ) - /* If op is not a break or a return from a loop, continue recording and follow the - trace. We check for imacro-calling bytecodes inside each switch case to resolve - the if (JSOP_IS_IMACOP(x)) conditions at compile time. */ + /* + * If op is not a break or a return from a loop, continue recording and + * follow the trace. We check for imacro-calling bytecodes inside each + * switch case to resolve the if (JSOP_IS_IMACOP(x)) conditions at compile + * time. + */ JSRecordingStatus status; #ifdef DEBUG @@ -5845,7 +6075,7 @@ TraceRecorder::monitorRecording(JSContext* cx, TraceRecorder* tr, JSOp op) stop_recording: /* If we recorded the end of the trace, destroy the recorder now. */ if (tr->fragment->lastIns) { - js_DeleteRecorder(cx); + DeleteRecorder(cx); return status; } @@ -5865,13 +6095,13 @@ js_AbortRecording(JSContext* cx, const char* reason) Fragment* f = tm->recorder->getFragment(); /* - * If the recorder already had its fragment disposed, or we actually finished - * recording and this recorder merely is passing through the deep abort state - * to the next recorder on the stack, just destroy the recorder. There is - * nothing to abort. + * If the recorder already had its fragment disposed, or we actually + * finished recording and this recorder merely is passing through the deep + * abort state to the next recorder on the stack, just destroy the + * recorder. There is nothing to abort. */ if (!f || f->lastIns) { - js_DeleteRecorder(cx); + DeleteRecorder(cx); return; } @@ -5889,12 +6119,10 @@ js_AbortRecording(JSContext* cx, const char* reason) reason); #endif - js_Backoff(cx, (jsbytecode*) f->root->ip, f->root); + Backoff(cx, (jsbytecode*) f->root->ip, f->root); - /* - * If js_DeleteRecorder flushed the code cache, we can't rely on f any more. - */ - if (!js_DeleteRecorder(cx)) + /* If DeleteRecorder flushed the code cache, we can't rely on f any more. 
*/ + if (!DeleteRecorder(cx)) return; /* @@ -5902,12 +6130,12 @@ js_AbortRecording(JSContext* cx, const char* reason) * TreeInfo object. */ if (!f->code() && (f->root == f)) - js_TrashTree(cx, f); + TrashTree(cx, f); } #if defined NANOJIT_IA32 static bool -js_CheckForSSE2() +CheckForSSE2() { int features = 0; #if defined _MSC_VER @@ -6147,9 +6375,9 @@ void js_InitJIT(JSTraceMonitor *tm) { #if defined JS_JIT_SPEW - /* Set up debug logging */ + /* Set up debug logging. */ if (!did_we_set_up_debug_logging) { - js_InitJITLogController(); + InitJITLogController(); did_we_set_up_debug_logging = true; } #else @@ -6159,7 +6387,7 @@ js_InitJIT(JSTraceMonitor *tm) if (!did_we_check_processor_features) { #if defined NANOJIT_IA32 avmplus::AvmCore::config.use_cmov = - avmplus::AvmCore::config.sse2 = js_CheckForSSE2(); + avmplus::AvmCore::config.sse2 = CheckForSSE2(); #endif #if defined NANOJIT_ARM bool arm_vfp = js_arm_check_vfp(); @@ -6186,9 +6414,7 @@ js_InitJIT(JSTraceMonitor *tm) did_we_check_processor_features = true; } - /* - * Set the default size for the code cache to 16MB. - */ + /* Set the default size for the code cache to 16MB. */ tm->maxCodeCacheBytes = 16 M; if (!tm->recordAttempts.ops) { @@ -6262,7 +6488,7 @@ js_FinishJIT(JSTraceMonitor *tm) for (size_t i = 0; i < FRAGMENT_TABLE_SIZE; ++i) { VMFragment* f = tm->vmfragments[i]; - while(f) { + while (f) { VMFragment* next = f->next; tm->fragmento->clearFragment(f); f = next; @@ -6314,9 +6540,7 @@ js_PurgeJITOracle() } static JSDHashOperator -js_PurgeScriptRecordingAttempts(JSDHashTable *table, - JSDHashEntryHdr *hdr, - uint32 number, void *arg) +PurgeScriptRecordingAttempts(JSDHashTable *table, JSDHashEntryHdr *hdr, uint32 number, void *arg) { PCHashEntry *e = (PCHashEntry *)hdr; JSScript *script = (JSScript *)arg; @@ -6327,19 +6551,17 @@ js_PurgeScriptRecordingAttempts(JSDHashTable *table, return JS_DHASH_NEXT; } -/* - * Call 'action' for each root fragment created for 'script'. - */ +/* Call 'action' for each root fragment created for 'script'. */ template static void -js_IterateScriptFragments(JSContext* cx, JSScript* script, FragmentAction action) +IterateScriptFragments(JSContext* cx, JSScript* script, FragmentAction action) { JSTraceMonitor* tm = &JS_TRACE_MONITOR(cx); for (size_t i = 0; i < FRAGMENT_TABLE_SIZE; ++i) { for (VMFragment **f = &(tm->vmfragments[i]); *f; ) { VMFragment* frag = *f; - /* Disable future use of any script-associated VMFragment.*/ if (JS_UPTRDIFF(frag->ip, script->code) < script->length) { + /* This fragment is associated with the script. 
*/ JS_ASSERT(frag->root == frag); VMFragment* next = frag->next; if (action(cx, tm, frag)) { @@ -6360,15 +6582,15 @@ js_IterateScriptFragments(JSContext* cx, JSScript* script, FragmentAction action } static bool -trashTreeAction(JSContext* cx, JSTraceMonitor* tm, Fragment* frag) +TrashTreeAction(JSContext* cx, JSTraceMonitor* tm, Fragment* frag) { for (Fragment *p = frag; p; p = p->peer) - js_TrashTree(cx, p); + TrashTree(cx, p); return false; } static bool -clearFragmentAction(JSContext* cx, JSTraceMonitor* tm, Fragment* frag) +ClearFragmentAction(JSContext* cx, JSTraceMonitor* tm, Fragment* frag) { tm->fragmento->clearFragment(frag); return true; @@ -6381,16 +6603,15 @@ js_PurgeScriptFragments(JSContext* cx, JSScript* script) return; debug_only_printf(LC_TMTracer, "Purging fragments for JSScript %p.\n", (void*)script); - /* - * js_TrashTree trashes dependent trees recursively, so we must do all the trashing - * before clearing in order to avoid calling js_TrashTree with a deleted fragment. - */ - js_IterateScriptFragments(cx, script, trashTreeAction); - js_IterateScriptFragments(cx, script, clearFragmentAction); - JSTraceMonitor* tm = &JS_TRACE_MONITOR(cx); - JS_DHashTableEnumerate(&(tm->recordAttempts), - js_PurgeScriptRecordingAttempts, script); + /* + * TrashTree trashes dependent trees recursively, so we must do all the trashing + * before clearing in order to avoid calling TrashTree with a deleted fragment. + */ + IterateScriptFragments(cx, script, TrashTreeAction); + IterateScriptFragments(cx, script, ClearFragmentAction); + JSTraceMonitor* tm = &JS_TRACE_MONITOR(cx); + JS_DHashTableEnumerate(&(tm->recordAttempts), PurgeScriptRecordingAttempts, script); } bool @@ -6502,9 +6723,9 @@ TraceRecorder::scopeChain() const } /* - * Return the frame of a call object if that frame is part of the current trace. |depthp| is an - * optional outparam: if it is non-null, it will be filled in with the depth of the call object's - * frame relevant to cx->fp. + * Return the frame of a call object if that frame is part of the current + * trace. |depthp| is an optional outparam: if it is non-null, it will be + * filled in with the depth of the call object's frame relevant to cx->fp. */ JS_REQUIRES_STACK JSStackFrame* TraceRecorder::frameIfInRange(JSObject* obj, unsigned* depthp) const @@ -6523,22 +6744,22 @@ TraceRecorder::frameIfInRange(JSObject* obj, unsigned* depthp) const return NULL; } -JS_DEFINE_CALLINFO_6(extern, UINT32, js_GetClosureVar, CONTEXT, OBJECT, UINT32, +JS_DEFINE_CALLINFO_6(extern, UINT32, GetClosureVar, CONTEXT, OBJECT, UINT32, UINT32, UINT32, DOUBLEPTR, 0, 0) -JS_DEFINE_CALLINFO_6(extern, UINT32, js_GetClosureArg, CONTEXT, OBJECT, UINT32, +JS_DEFINE_CALLINFO_6(extern, UINT32, GetClosureArg, CONTEXT, OBJECT, UINT32, UINT32, UINT32, DOUBLEPTR, 0, 0) /* - * Search the scope chain for a property lookup operation at the current PC and generate LIR - * to access the given property. Return JSRS_CONTINUE on success, otherwise abort and return - * JSRS_STOP. There are 3 outparams: + * Search the scope chain for a property lookup operation at the current PC and + * generate LIR to access the given property. Return JSRS_CONTINUE on success, + * otherwise abort and return JSRS_STOP. There are 3 outparams: * * vp the address of the current property value * ins LIR instruction representing the property value on trace - * tracked true iff the property value is tracked on this trace. If true, then the - * tracked value can be modified using the tracker set functions. 
If false,
- *                  then the value comes from a call to a builtin to access an upvar, and
- *                  can not be modified directly.
+ * tracked   true iff the property value is tracked on this trace. If true,
+ *           then the tracked value can be modified using the tracker set
+ *           functions. If false, then the value comes from a call to a
+ *           builtin to access an upvar, and can't be modified directly.
  */
JS_REQUIRES_STACK JSRecordingStatus
TraceRecorder::scopeChainProp(JSObject* obj, jsval*& vp, LIns*& ins, bool& tracked)
@@ -6632,9 +6853,9 @@ TraceRecorder::scopeChainProp(JSObject* obj, jsval*& vp, LIns*& ins, bool& track
     };
     const CallInfo* ci;
     if (sprop->getter == js_GetCallArg)
-        ci = &js_GetClosureArg_ci;
+        ci = &GetClosureArg_ci;
     else
-        ci = &js_GetClosureVar_ci;
+        ci = &GetClosureVar_ci;
     LIns* call_ins = lir->insCall(ci, args);
     JSTraceType type = getCoercedType(*vp);
@@ -6689,8 +6910,6 @@ TraceRecorder::stack(int n, LIns* i)
     set(&stackval(n), i, n >= 0);
 }

-extern jsdouble FASTCALL js_dmod(jsdouble a, jsdouble b);
-
 JS_REQUIRES_STACK LIns*
 TraceRecorder::alu(LOpcode v, jsdouble v0, jsdouble v1, LIns* s0, LIns* s1)
 {
@@ -6763,36 +6982,30 @@ TraceRecorder::alu(LOpcode v, jsdouble v0, jsdouble v1, LIns* s0, LIns* s1)

         exit = snapshot(OVERFLOW_EXIT);

-        /*
-         * Make sure we don't trigger division by zero at runtime.
-         */
+        /* Make sure we don't trigger division by zero at runtime. */
         if (!d1->isconst())
             guard(false, lir->ins_eq0(d1), exit);
         result = lir->ins2(v = LIR_div, d0, d1);

-        /*
-         * As long the modulus is zero, the result is an integer.
-         */
+        /* As long as the modulus is zero, the result is an integer. */
         guard(true, lir->ins_eq0(lir->ins1(LIR_mod, result)), exit);
+
-        /* Don't lose a -0 */
+        /* Don't lose a -0. */
         guard(false, lir->ins_eq0(result), exit);
         break;
+
     case LIR_fmod: {
         if (d0->isconst() && d1->isconst())
             return lir->ins1(LIR_i2f, lir->insImm(jsint(r)));

         exit = snapshot(OVERFLOW_EXIT);

-        /*
-         * Make sure we don't trigger division by zero at runtime.
-         */
+        /* Make sure we don't trigger division by zero at runtime. */
         if (!d1->isconst())
             guard(false, lir->ins_eq0(d1), exit);
         result = lir->ins1(v = LIR_mod, lir->ins2(LIR_div, d0, d1));

-        /*
-         * If the result is not 0, it is always within the integer domain.
-         */
+        /* If the result is not 0, it is always within the integer domain. */
         LIns* branch = lir->insBranch(LIR_jf, lir->ins_eq0(result), NULL);

         /*
@@ -6804,6 +7017,7 @@ TraceRecorder::alu(LOpcode v, jsdouble v0, jsdouble v1, LIns* s0, LIns* s1)
         break;
     }
 #endif
+
     default:
         v = (LOpcode)((int)v & ~LIR64);
         result = lir->ins2(v, d0, d1);
@@ -6815,7 +7029,7 @@
      * that will inform the oracle and cause a non-demoted trace to be
      * attached that uses floating-point math for this operation.
      */
-    if (!result->isconst() && (!overflowSafe(v, d0) || !overflowSafe(v, d1))) {
+    if (!result->isconst() && (!IsOverflowSafe(v, d0) || !IsOverflowSafe(v, d1))) {
         exit = snapshot(OVERFLOW_EXIT);
         guard(false, lir->ins1(LIR_ov, result), exit);
         if (v == LIR_mul) // make sure we don't lose a -0
@@ -6934,9 +7148,11 @@ TraceRecorder::ifop()
 }

 #ifdef NANOJIT_IA32
-/* Record LIR for a tableswitch or tableswitchx op. We record LIR only the
-   "first" time we hit the op. Later, when we start traces after exiting that
-   trace, we just patch. */
+/*
+ * Record LIR for a tableswitch or tableswitchx op. We record LIR only the
+ * "first" time we hit the op. Later, when we start traces after exiting that
+ * trace, we just patch.
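 */

// The guards emitted for LIR_div above encode a run-time predicate: when may
// an int32 division stay in the integer domain? A standalone sketch of that
// predicate (illustrative C++, not the emitted LIR); note the zero-quotient
// case exits conservatively, because 0 divided by a negative yields -0,
// which is a double.
#include <cstdio>

static bool IntDivStaysInteger(int a, int b)
{
    if (b == 0)
        return false;     // division by zero: must side-exit
    if (a % b != 0)
        return false;     // non-zero remainder: the result is fractional
    if (a == 0)
        return false;     // quotient would be +/-0; -0 is a double, so exit
    return true;
}

int main()
{
    std::printf("%d %d %d %d\n",
                IntDivStaysInteger(6, 3),    // 1: exact, non-zero quotient
                IntDivStaysInteger(7, 2),    // 0: remainder
                IntDivStaysInteger(0, -5),   // 0: would produce -0
                IntDivStaysInteger(1, 0));   // 0: division by zero
    return 0;
}

/*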
+ */ JS_REQUIRES_STACK LIns* TraceRecorder::tableswitch() { @@ -6944,7 +7160,7 @@ TraceRecorder::tableswitch() if (!isNumber(v)) return NULL; - /* no need to guard if condition is constant */ + /* No need to guard if the condition is constant. */ LIns* v_ins = f2i(get(&v)); if (v_ins->isconst() || v_ins->isconstq()) return NULL; @@ -6971,11 +7187,15 @@ TraceRecorder::tableswitch() high = GET_JUMPX_OFFSET(pc); } - /* Really large tables won't fit in a page. This is a conservative check. - If it matters in practice we need to go off-page. */ + /* + * Really large tables won't fit in a page. This is a conservative check. + * If it matters in practice we need to go off-page. + */ if ((high + 1 - low) * sizeof(intptr_t*) + 128 > (unsigned) LARGEST_UNDERRUN_PROT) { - // This throws away the return value of switchop but it seems - // ok because switchop always returns true. + /* + * This throws away the return value of switchop but it seems ok + * because switchop always returns true. + */ (void) switchop(); return NULL; } @@ -7001,7 +7221,8 @@ TraceRecorder::switchop() { jsval& v = stackval(-1); LIns* v_ins = get(&v); - /* no need to guard if condition is constant */ + + /* No need to guard if the condition is constant. */ if (v_ins->isconst() || v_ins->isconstq()) return JSRS_CONTINUE; if (isNumber(v)) { @@ -7037,8 +7258,8 @@ TraceRecorder::inc(jsval& v, jsint incr, bool pre) } /* - * On exit, v_ins is the incremented unboxed value, and the appropriate - * value (pre- or post-increment as described by pre) is stacked. + * On exit, v_ins is the incremented unboxed value, and the appropriate value + * (pre- or post-increment as described by pre) is stacked. */ JS_REQUIRES_STACK JSRecordingStatus TraceRecorder::inc(jsval& v, LIns*& v_ins, jsint incr, bool pre) @@ -7106,7 +7327,7 @@ TraceRecorder::incElem(jsint incr, bool pre) } static bool -evalCmp(LOpcode op, double l, double r) +EvalCmp(LOpcode op, double l, double r) { bool cond; switch (op) { @@ -7133,11 +7354,11 @@ evalCmp(LOpcode op, double l, double r) } static bool -evalCmp(LOpcode op, JSString* l, JSString* r) +EvalCmp(LOpcode op, JSString* l, JSString* r) { if (op == LIR_feq) return js_EqualStrings(l, r); - return evalCmp(op, js_CompareStrings(l, r), 0); + return EvalCmp(op, js_CompareStrings(l, r), 0); } JS_REQUIRES_STACK void @@ -7150,8 +7371,8 @@ TraceRecorder::strictEquality(bool equal, bool cmpCase) LIns* x; bool cond; - JSTraceType ltag = getPromotedType(l); - if (ltag != getPromotedType(r)) { + JSTraceType ltag = GetPromotedType(l); + if (ltag != GetPromotedType(r)) { cond = !equal; x = lir->insImm(cond); } else if (ltag == TT_STRING) { @@ -7210,7 +7431,7 @@ TraceRecorder::equalityHelper(jsval l, jsval r, LIns* l_ins, LIns* r_ins, * a primitive value (which would terminate recursion). 
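 */

// EvalCmp above folds comparisons of record-time constants; the string
// overload reduces ordering to a three-way compare against zero. A sketch of
// the same shape, assuming C strings and strcmp in place of JSString and
// js_CompareStrings, and a small CmpOp enum in place of LOpcode.
#include <cstring>

enum CmpOp { OP_EQ, OP_LT, OP_GT, OP_LE, OP_GE };

static bool EvalCmpNum(CmpOp op, double l, double r)
{
    switch (op) {
      case OP_EQ: return l == r;
      case OP_LT: return l < r;
      case OP_GT: return l > r;
      case OP_LE: return l <= r;
      case OP_GE: return l >= r;
    }
    return false;   // unreachable; keeps compilers quiet
}

static bool EvalCmpStr(CmpOp op, const char* l, const char* r)
{
    if (op == OP_EQ)
        return std::strcmp(l, r) == 0;
    // Ordering: compare the three-way result against zero, as the recorder
    // does with js_CompareStrings.
    return EvalCmpNum(op, std::strcmp(l, r), 0);
}

/*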
*/

-    if (getPromotedType(l) == getPromotedType(r)) {
+    if (GetPromotedType(l) == GetPromotedType(r)) {
         if (JSVAL_TAG(l) == JSVAL_OBJECT || JSVAL_TAG(l) == JSVAL_BOOLEAN) {
             cond = (l == r);
         } else if (JSVAL_IS_STRING(l)) {
@@ -7355,7 +7576,7 @@ TraceRecorder::relational(LOpcode op, bool tryBranchAfterCond)
         LIns* args[] = { r_ins, l_ins };
         l_ins = lir->insCall(&js_CompareStrings_ci, args);
         r_ins = lir->insImm(0);
-        cond = evalCmp(op, JSVAL_TO_STRING(l), JSVAL_TO_STRING(r));
+        cond = EvalCmp(op, JSVAL_TO_STRING(l), JSVAL_TO_STRING(r));
         goto do_comparison;
     }

@@ -7415,12 +7636,15 @@ TraceRecorder::relational(LOpcode op, bool tryBranchAfterCond)
             tmp = r;
             rnum = js_ValueToNumber(cx, &tmp);
         }
-        cond = evalCmp(op, lnum, rnum);
+        cond = EvalCmp(op, lnum, rnum);
         fp = true;

     /* 11.8.5 steps 6-15. */
   do_comparison:
-    /* If the result is not a number or it's not a quad, we must use an integer compare. */
+    /*
+     * If the result is not a number or it's not a quad, we must use an integer
+     * compare.
+     */
     if (!fp) {
         JS_ASSERT(op >= LIR_feq && op <= LIR_fge);
         op = LOpcode(op + (LIR_eq - LIR_feq));
@@ -7502,7 +7726,7 @@ TraceRecorder::binary(LOpcode op)
     bool rightIsNumber = isNumber(r);
     jsdouble rnum = rightIsNumber ? asNumber(r) : 0;

-    if ((op >= LIR_sub && op <= LIR_ush) ||   // sub, mul, (callh), or, xor, (not,) lsh, rsh, ush
+    if ((op >= LIR_sub && op <= LIR_ush) ||  // sub, mul, (callh), or, xor, (not,) lsh, rsh, ush
         (op >= LIR_fsub && op <= LIR_fmod)) { // fsub, fmul, fdiv, fmod
         LIns* args[2];
         if (JSVAL_IS_STRING(l)) {
@@ -7948,15 +8172,14 @@ TraceRecorder::getThis(LIns*& this_ins)
     if (!thisObj)
         ABORT_TRACE_ERROR("js_ComputeThisForName failed");

-    /*
-     * In global code, bake in the global object as 'this' object.
-     */
+    /* In global code, bake in the global object as 'this' object. */
     if (!cx->fp->callee) {
         JS_ASSERT(callDepth == 0);
         this_ins = INS_CONSTPTR(thisObj);

         /*
-         * We don't have argv[-1] in global code, so we don't update the tracker here.
+         * We don't have argv[-1] in global code, so we don't update the
+         * tracker here.
          */
         return JSRS_CONTINUE;
     }
@@ -7965,11 +8188,12 @@ TraceRecorder::getThis(LIns*& this_ins)
     JS_ASSERT(JSVAL_IS_OBJECT(thisv));

     /*
-     * Traces type-specialize between null and objects, so if we currently see a null
-     * value in argv[-1], this trace will only match if we see null at runtime as well.
-     * Bake in the global object as 'this' object, updating the tracker as well. We
-     * can only detect this condition prior to calling js_ComputeThisForFrame, since it
-     * updates the interpreter's copy of argv[-1].
+     * Traces type-specialize between null and objects, so if we currently see
+     * a null value in argv[-1], this trace will only match if we see null at
+     * runtime as well. Bake in the global object as 'this' object, updating
+     * the tracker as well. We can only detect this condition prior to calling
+     * js_ComputeThisForFrame, since it updates the interpreter's copy of
+     * argv[-1].
      */
     JSClass* clasp = NULL;;
     if (JSVAL_IS_NULL(original) ||
@@ -7987,8 +8211,8 @@ TraceRecorder::getThis(LIns*& this_ins)
     this_ins = get(&thisv);

     /*
-     * The only unwrapped object that needs to be wrapped that we can get here is the
-     * global object obtained throught the scope chain.
+     * The only unwrapped object that needs to be wrapped that we can get here
+     * is the global object obtained through the scope chain.
*/ JSObject* obj = js_GetWrappedObject(cx, JSVAL_TO_OBJECT(thisv)); JSObject* inner = obj; @@ -8000,8 +8224,10 @@ TraceRecorder::getThis(LIns*& this_ins) original == OBJECT_TO_JSVAL(inner) || original == OBJECT_TO_JSVAL(obj)); - // If the returned this object is the unwrapped inner or outer object, - // then we need to use the wrapped outer object. + /* + * If the returned this object is the unwrapped inner or outer object, + * then we need to use the wrapped outer object. + */ LIns* is_inner = lir->ins2(LIR_eq, this_ins, INS_CONSTPTR(inner)); LIns* is_outer = lir->ins2(LIR_eq, this_ins, INS_CONSTPTR(obj)); LIns* wrapper = INS_CONSTPTR(JSVAL_TO_OBJECT(thisv)); @@ -8076,8 +8302,8 @@ JS_REQUIRES_STACK JSRecordingStatus TraceRecorder::guardPrototypeHasNoIndexedProperties(JSObject* obj, LIns* obj_ins, ExitType exitType) { /* - * Guard that no object along the prototype chain has any indexed properties which - * might become visible through holes in the array. + * Guard that no object along the prototype chain has any indexed + * properties which might become visible through holes in the array. */ VMSideExit* exit = snapshot(exitType); @@ -8111,16 +8337,22 @@ TraceRecorder::guardNotGlobalObject(JSObject* obj, LIns* obj_ins) JS_REQUIRES_STACK void TraceRecorder::clearFrameSlotsFromCache() { - /* Clear out all slots of this frame in the nativeFrameTracker. Different locations on the - VM stack might map to different locations on the native stack depending on the - number of arguments (i.e.) of the next call, so we have to make sure we map - those in to the cache with the right offsets. */ + /* + * Clear out all slots of this frame in the nativeFrameTracker. Different + * locations on the VM stack might map to different locations on the native + * stack depending on the number of arguments (i.e.) of the next call, so + * we have to make sure we map those in to the cache with the right + * offsets. + */ JSStackFrame* fp = cx->fp; jsval* vp; jsval* vpstop; - // Duplicate native stack layout computation: see VisitFrameSlots header comment. - // This doesn't do layout arithmetic, but it must clear out all the slots defined as - // imported by VisitFrameSlots. + + /* + * Duplicate native stack layout computation: see VisitFrameSlots header comment. + * This doesn't do layout arithmetic, but it must clear out all the slots defined as + * imported by VisitFrameSlots. + */ if (fp->callee) { vp = &fp->argv[-2]; vpstop = &fp->argv[argSlots(fp)]; @@ -8141,6 +8373,7 @@ TraceRecorder::record_EnterFrame() if (++callDepth >= MAX_CALLDEPTH) ABORT_TRACE("exceeded maximum call depth"); + // FIXME: Allow and attempt to inline a single level of recursion until we compile // recursive calls as independent trees (459301). if (fp->script == fp->down->script && fp->down->down && fp->down->down->script == fp->script) @@ -8240,8 +8473,11 @@ TraceRecorder::record_JSOP_RETURN() return JSRS_STOP; } - // If we have created an |arguments| object for the frame, we must copy the argument - // values into the object as properties in case it is used after this frame returns. + /* + * If we have created an |arguments| object for the frame, we must copy the + * argument values into the object as properties in case it is used after + * this frame returns. 
+     */
     if (cx->fp->argsobj) {
         LIns* argsobj_ins = get(&cx->fp->argsobj);
         LIns* length_ins = INS_CONST(cx->fp->argc);
@@ -8534,9 +8770,10 @@ TraceRecorder::record_JSOP_NEG()
     if (isNumber(v)) {
         LIns* a = get(&v);

-        /* If we're a promoted integer, we have to watch out for 0s since -0 is a double.
-           Only follow this path if we're not an integer that's 0 and we're not a double
-           that's zero.
+        /*
+         * If we're a promoted integer, we have to watch out for 0s since -0 is
+         * a double. Only follow this path if we're not an integer that's 0 and
+         * we're not a double that's zero.
          */
         if (!oracle.isInstructionUndemotable(cx->fp->regs->pc) &&
             isPromoteInt(a) &&
@@ -8618,15 +8855,6 @@ TraceRecorder::record_JSOP_OBJTOP()
     return JSRS_CONTINUE;
 }

-JSBool
-js_Array(JSContext* cx, JSObject* obj, uintN argc, jsval* argv, jsval* rval);
-
-JSBool
-js_Object(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval);
-
-JSBool
-js_Date(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval);
-
 JSRecordingStatus
 TraceRecorder::getClassPrototype(JSObject* ctor, LIns*& proto_ins)
 {
@@ -8859,7 +9087,8 @@ TraceRecorder::emitNativeCall(JSTraceableNative* known, uintN argc, LIns* args[]
 }

 /*
- * Check whether we have a specialized implementation for this native invocation.
+ * Check whether we have a specialized implementation for this native
+ * invocation.
  */
 JS_REQUIRES_STACK JSRecordingStatus
 TraceRecorder::callTraceableNative(JSFunction* fun, uintN argc, bool constructing)
@@ -8898,11 +9127,11 @@ TraceRecorder::callTraceableNative(JSFunction* fun, uintN argc, bool constructin
         argtype = known->prefix[i];
         if (argtype == 'C') {
             *argp = cx_ins;
-        } else if (argtype == 'T') { /* this, as an object */
+        } else if (argtype == 'T') {   /* this, as an object */
             if (JSVAL_IS_PRIMITIVE(tval))
                 goto next_specialization;
             *argp = this_ins;
-        } else if (argtype == 'S') { /* this, as a string */
+        } else if (argtype == 'S') {   /* this, as a string */
             if (!JSVAL_IS_STRING(tval))
                 goto next_specialization;
             *argp = this_ins;
@@ -8919,7 +9148,7 @@ TraceRecorder::callTraceableNative(JSFunction* fun, uintN argc, bool constructin
                 *argp = INS_CONSTPTR(fp->imacpc);
             else
                 *argp = INS_CONSTPTR(pc);
-        } else if (argtype == 'D') { /* this, as a number */
+        } else if (argtype == 'D') {   /* this, as a number */
             if (!isNumber(tval))
                 goto next_specialization;
             *argp = this_ins;
@@ -8993,7 +9222,7 @@ TraceRecorder::callNative(uintN argc, JSOp mode)
     // Allocate the vp vector and emit code to root it.
     uintN vplen = 2 + JS_MAX(argc, FUN_MINARGS(fun)) + fun->u.n.extra;
     if (!(fun->flags & JSFUN_FAST_NATIVE))
-        vplen++; // slow native return value slot
+        vplen++;    // slow native return value slot
     lir->insStorei(INS_CONST(vplen), cx_ins, offsetof(JSContext, nativeVpLen));
     LIns* invokevp_ins = lir->insAlloc(vplen * sizeof(jsval));
     lir->insStorei(invokevp_ins, cx_ins, offsetof(JSContext, nativeVp));
@@ -9024,11 +9253,12 @@ TraceRecorder::callNative(uintN argc, JSOp mode)
         args[2] = cx_ins;
         newobj_ins = lir->insCall(&js_NewInstance_ci, args);
         guard(false, lir->ins_eq0(newobj_ins), OOM_EXIT);
-        this_ins = newobj_ins; // boxing an object is a no-op
+        this_ins = newobj_ins; /* boxing an object is a no-op */
     } else if (JSFUN_BOUND_METHOD_TEST(fun->flags)) {
         this_ins = INS_CONSTWORD(OBJECT_TO_JSVAL(OBJ_GET_PARENT(cx, funobj)));
     } else {
         this_ins = get(&vp[1]);
+
         /*
         * For fast natives, 'null' or primitives are fine as 'this' value.
              * For slow natives we have to ensure the object is substituted for the
@@ -9436,7 +9666,7 @@ TraceRecorder::setProp(jsval &l, JSPropCacheEntry* entry, JSScopeProperty* sprop
         ABORT_TRACE("can't trace JavaScript function setter");
 
     // These two cases are actually errors and can't be cached.
-    JS_ASSERT(!(sprop->attrs & JSPROP_GETTER));  // getter without setter
+    JS_ASSERT(!(sprop->attrs & JSPROP_GETTER)); // getter without setter
     JS_ASSERT(!(sprop->attrs & JSPROP_READONLY));
 
     JS_ASSERT(!JSVAL_IS_PRIMITIVE(l));
@@ -9633,7 +9863,8 @@ TraceRecorder::record_JSOP_GETELEM()
     /* Property access using a string name or something we have to stringify. */
     if (!JSVAL_IS_INT(idx)) {
         if (!JSVAL_IS_PRIMITIVE(idx))
-            ABORT_TRACE("non-primitive index");
+            ABORT_TRACE("object used as index");
+
         // If index is not a string, turn it into a string.
         if (!js_InternNonIntElementId(cx, obj, idx, &id))
             ABORT_TRACE_ERROR("failed to intern non-int element id");
@@ -9677,13 +9908,13 @@ TraceRecorder::record_JSOP_GETELEM()
             // In this case, we are in the same frame where the arguments object was created.
             // The entry type map is not necessarily up-to-date, so we capture a new type map
             // for this point in the code.
-            unsigned stackSlots = js_NativeStackSlots(cx, 0/*callDepth*/);
+            unsigned stackSlots = NativeStackSlots(cx, 0 /* callDepth */);
             if (stackSlots * sizeof(JSTraceType) > NJ_MAX_SKIP_PAYLOAD_SZB)
                 ABORT_TRACE("|arguments| requires saving too much stack");
             JSTraceType* typemap = (JSTraceType*) lir->insSkip(stackSlots * sizeof(JSTraceType))->payload();
             DetermineTypesVisitor detVisitor(*this, typemap);
             VisitStackSlots(detVisitor, cx, 0);
-            typemap_ins = INS_CONSTPTR(typemap + 2 /*callee,this*/);
+            typemap_ins = INS_CONSTPTR(typemap + 2 /* callee, this */);
         } else {
             // In this case, we are in a deeper frame from where the arguments object was
             // created. The type map at the point of the call out from the creation frame
@@ -9815,7 +10046,6 @@ TraceRecorder::record_JSOP_SETELEM()
     jsval& idx = stackval(-2);
     jsval& lval = stackval(-3);
 
-    /* no guards for type checks, trace specialized this already */
     if (JSVAL_IS_PRIMITIVE(lval))
         ABORT_TRACE("left JSOP_SETELEM operand is not an object");
     ABORT_IF_XML(lval);
@@ -9829,6 +10059,7 @@ TraceRecorder::record_JSOP_SETELEM()
     if (!JSVAL_IS_INT(idx)) {
         if (!JSVAL_IS_PRIMITIVE(idx))
             ABORT_TRACE("non-primitive index");
+
         // If index is not a string, turn it into a string.
         if (!js_InternNonIntElementId(cx, obj, idx, &id))
             ABORT_TRACE_ERROR("failed to intern non-int element id");
@@ -9914,17 +10145,17 @@ TraceRecorder::record_JSOP_CALLNAME()
     return JSRS_CONTINUE;
 }
 
-JS_DEFINE_CALLINFO_5(extern, UINT32, js_GetUpvarArgOnTrace, CONTEXT, UINT32, INT32, UINT32,
+JS_DEFINE_CALLINFO_5(extern, UINT32, GetUpvarArgOnTrace, CONTEXT, UINT32, INT32, UINT32,
                      DOUBLEPTR, 0, 0)
-JS_DEFINE_CALLINFO_5(extern, UINT32, js_GetUpvarVarOnTrace, CONTEXT, UINT32, INT32, UINT32,
+JS_DEFINE_CALLINFO_5(extern, UINT32, GetUpvarVarOnTrace, CONTEXT, UINT32, INT32, UINT32,
                      DOUBLEPTR, 0, 0)
-JS_DEFINE_CALLINFO_5(extern, UINT32, js_GetUpvarStackOnTrace, CONTEXT, UINT32, INT32, UINT32,
+JS_DEFINE_CALLINFO_5(extern, UINT32, GetUpvarStackOnTrace, CONTEXT, UINT32, INT32, UINT32,
                      DOUBLEPTR, 0, 0)
 
 /*
- * Record LIR to get the given upvar. Return the LIR instruction for
- * the upvar value. NULL is returned only on a can't-happen condition
- * with an invalid typemap. The value of the upvar is returned as v.
+ * Record LIR to get the given upvar. Return the LIR instruction for the upvar
+ * value. NULL is returned only on a can't-happen condition with an invalid
+ * typemap. The value of the upvar is returned as v.
  */
 JS_REQUIRES_STACK LIns*
 TraceRecorder::upvar(JSScript* script, JSUpvarArray* uva, uintN index, jsval& v)
@@ -9945,8 +10176,8 @@ TraceRecorder::upvar(JSScript* script, JSUpvarArray* uva, uintN index, jsval& v)
     }
 
     /*
-     * The upvar is not in the current trace, so get the upvar value
-     * exactly as the interpreter does and unbox.
+     * The upvar is not in the current trace, so get the upvar value exactly as
+     * the interpreter does and unbox.
      */
     uint32 level = script->staticLevel - UPVAR_FRAME_SKIP(cookie);
     uint32 cookieSlot = UPVAR_FRAME_SLOT(cookie);
@@ -9954,16 +10185,16 @@ TraceRecorder::upvar(JSScript* script, JSUpvarArray* uva, uintN index, jsval& v)
     const CallInfo* ci;
     int32 slot;
     if (!fp->fun) {
-        ci = &js_GetUpvarStackOnTrace_ci;
+        ci = &GetUpvarStackOnTrace_ci;
         slot = cookieSlot;
     } else if (cookieSlot < fp->fun->nargs) {
-        ci = &js_GetUpvarArgOnTrace_ci;
+        ci = &GetUpvarArgOnTrace_ci;
         slot = cookieSlot;
     } else if (cookieSlot == CALLEE_UPVAR_SLOT) {
-        ci = &js_GetUpvarArgOnTrace_ci;
+        ci = &GetUpvarArgOnTrace_ci;
         slot = -2;
     } else {
-        ci = &js_GetUpvarVarOnTrace_ci;
+        ci = &GetUpvarVarOnTrace_ci;
         slot = cookieSlot - fp->fun->nargs;
     }
 
@@ -9985,8 +10216,8 @@
 }
 
 /*
- * Generate LIR to load a value from the native stack. This method ensures that the
- * correct LIR load operator is used.
+ * Generate LIR to load a value from the native stack. This method ensures that
+ * the correct LIR load operator is used.
  */
 LIns*
 TraceRecorder::stackLoad(LIns* base, uint8 type)
 {
@@ -10101,7 +10332,7 @@ TraceRecorder::interpretedFunctionCall(jsval& fval, JSFunction* fun, uintN argc,
     }
 
     // Generate a type map for the outgoing frame and stash it in the LIR
-    unsigned stackSlots = js_NativeStackSlots(cx, 0/*callDepth*/);
+    unsigned stackSlots = NativeStackSlots(cx, 0 /* callDepth */);
     if (sizeof(FrameInfo) + stackSlots * sizeof(JSTraceType) > NJ_MAX_SKIP_PAYLOAD_SZB)
         ABORT_TRACE("interpreted function call requires saving too much stack");
     LIns* data = lir->insSkip(sizeof(FrameInfo) + stackSlots * sizeof(JSTraceType));
@@ -10220,8 +10451,8 @@ TraceRecorder::record_JSOP_APPLY()
         aobj_ins = get(&vp[3]);
 
         /*
-         * We trace dense arrays and arguments objects. The code we generate for apply
-         * uses imacros to handle a specific number of arguments.
+         * We trace dense arrays and arguments objects. The code we generate
+         * for apply uses imacros to handle a specific number of arguments.
          */
         if (OBJ_IS_DENSE_ARRAY(cx, aobj)) {
             guardDenseArray(aobj, aobj_ins);
@@ -10297,7 +10528,7 @@ TraceRecorder::record_NativeCallComplete()
      */
     if (JSTN_ERRTYPE(pendingTraceableNative) == FAIL_STATUS) {
-        // Keep cx->bailExit null when it's invalid.
+        /* Keep cx->bailExit null when it's invalid. */
         lir->insStorei(INS_CONSTPTR(NULL), cx_ins, (int) offsetof(JSContext, bailExit));
 
         LIns* status = lir->insLoad(LIR_ld, lirbuf->state, (int) offsetof(InterpState, builtinStatus));
@@ -10378,15 +10609,11 @@ TraceRecorder::name(jsval*& vp, LIns*& ins, bool& tracked)
      */
     CHECK_STATUS(test_property_cache(obj, obj_ins, obj2, pcval));
 
-    /*
-     * Abort if property doesn't exist (interpreter will report an error.)
-     */
+    /* Abort if property doesn't exist (interpreter will report an error.) */
     if (PCVAL_IS_NULL(pcval))
         ABORT_TRACE("named property not found");
 
-    /*
-     * Insist on obj being the directly addressed object.
-     */
+    /* Insist on obj being the directly addressed object. */
     if (obj2 != obj)
         ABORT_TRACE("name() hit prototype chain");
@@ -10488,6 +10715,7 @@ TraceRecorder::prop(JSObject* obj, LIns* obj_ins, uint32& slot, LIns*& v_ins)
             LIns* args[] = { INS_CONSTPTR(sprop), obj_ins, cx_ins };
             v_ins = lir->insCall(&js_CallGetter_ci, args);
             guard(false, lir->ins2(LIR_eq, v_ins, INS_CONST(JSVAL_ERROR_COOKIE)), OOM_EXIT);
+
             /*
              * BIG FAT WARNING: This snapshot cannot be a BRANCH_EXIT, since
              * the value to the top of the stack is not the value we unbox.
@@ -10634,8 +10862,8 @@ TraceRecorder::denseArrayElement(jsval& oval, jsval& ival, jsval*& vp, LIns*& v_
 
     if (JSVAL_TAG(*vp) == JSVAL_BOOLEAN) {
         /*
-         * If we read a hole from the array, convert it to undefined and guard that there
-         * are no indexed properties along the prototype chain.
+         * If we read a hole from the array, convert it to undefined and guard
+         * that there are no indexed properties along the prototype chain.
          */
         LIns* br = lir->insBranch(LIR_jf,
                                   lir->ins2i(LIR_eq, v_ins, JSVAL_TO_PSEUDO_BOOLEAN(JSVAL_HOLE)),
@@ -10643,9 +10871,7 @@ TraceRecorder::denseArrayElement(jsval& oval, jsval& ival, jsval*& vp, LIns*& v_
                                   NULL);
         CHECK_STATUS(guardPrototypeHasNoIndexedProperties(obj, obj_ins, MISMATCH_EXIT));
         br->setTarget(lir->ins0(LIR_label));
-        /*
-         * Don't let the hole value escape. Turn it into an undefined.
-         */
+        /* Don't let the hole value escape. Turn it into an undefined. */
         v_ins = lir->ins2i(LIR_and, v_ins, ~(JSVAL_HOLE_FLAG >> JSVAL_TAGBITS));
     }
     return JSRS_CONTINUE;
@@ -11093,12 +11319,13 @@ TraceRecorder::record_JSOP_BINDNAME()
     if (obj != globalObj && OBJ_GET_CLASS(cx, obj) != &js_CallClass)
         ABORT_TRACE("Can only trace JSOP_BINDNAME with global or call object");
 
-    // The trace is specialized to this global object. Furthermore,
-    // we know it is the sole 'global' object on the scope chain: we
-    // set globalObj to the scope chain element with no parent, and we
-    // reached it starting from the function closure or the current
-    // scopeChain, so there is nothing inner to it. So this must be
-    // the right base object.
+    /*
+     * The trace is specialized to this global object. Furthermore, we know it
+     * is the sole 'global' object on the scope chain: we set globalObj to the
+     * scope chain element with no parent, and we reached it starting from the
+     * function closure or the current scopeChain, so there is nothing inner to
+     * it. Therefore this must be the right base object.
+     */
     stack(0, INS_CONSTPTR(obj));
     return JSRS_CONTINUE;
 }
@@ -11168,15 +11395,18 @@ TraceRecorder::record_JSOP_IN()
     if (wasDeepAborted())
         ABORT_TRACE("deep abort from property lookup");
 
-    /* The interpreter fuses comparisons and the following branch,
-       so we have to do that here as well. */
+    /*
+     * The interpreter fuses comparisons and the following branch, so we have
+     * to do that here as well.
+     */
     fuseIf(cx->fp->regs->pc + 1, cond, x);
 
-    /* We update the stack after the guard. This is safe since
-       the guard bails out at the comparison and the interpreter
-       will therefore re-execute the comparison. This way the
-       value of the condition doesn't have to be calculated and
-       saved on the stack in most cases. */
+    /*
+     * We update the stack after the guard. This is safe since the guard bails
+     * out at the comparison and the interpreter will therefore re-execute the
+     * comparison. This way the value of the condition doesn't have to be
+     * calculated and saved on the stack in most cases.
+     */
     set(&lval, x);
     return JSRS_CONTINUE;
 }
@@ -11937,9 +12167,11 @@ TraceRecorder::record_JSOP_STOP()
     JSStackFrame *fp = cx->fp;
 
     if (fp->imacpc) {
-        // End of imacro, so return true to the interpreter immediately. The
-        // interpreter's JSOP_STOP case will return from the imacro, back to
-        // the pc after the calling op, still in the same JSStackFrame.
+        /*
+         * End of imacro, so return true to the interpreter immediately. The
+         * interpreter's JSOP_STOP case will return from the imacro, back to
+         * the pc after the calling op, still in the same JSStackFrame.
+         */
        atoms = fp->script->atomMap.vector;
         return JSRS_CONTINUE;
     }
@@ -12008,7 +12240,7 @@ TraceRecorder::record_JSOP_ENTERBLOCK()
 JS_REQUIRES_STACK JSRecordingStatus
 TraceRecorder::record_JSOP_LEAVEBLOCK()
 {
-    /* We mustn't exit the lexical block we began recording in.  */
+    /* We mustn't exit the lexical block we began recording in. */
     if (cx->fp->blockChain != lexicalBlock)
         return JSRS_CONTINUE;
     else
@@ -12068,9 +12300,10 @@ TraceRecorder::record_JSOP_GETTHISPROP()
     LIns* this_ins;
 
     CHECK_STATUS(getThis(this_ins));
+
     /*
-     * It's safe to just use cx->fp->thisp here because getThis() returns JSRS_STOP if thisp
-     * is not available.
+     * It's safe to just use cx->fp->thisp here because getThis() returns
+     * JSRS_STOP if thisp is not available.
      */
     CHECK_STATUS(getProp(cx->fp->thisp, this_ins));
     return JSRS_CONTINUE;
@@ -12230,7 +12463,7 @@ js_GetBuiltinFunction(JSContext *cx, uintN index)
         STOBJ_CLEAR_PARENT(funobj);
 
         JS_LOCK_GC(rt);
-        if (!rt->builtinFunctions[index])  /* retest now that the lock is held */
+        if (!rt->builtinFunctions[index]) /* retest now that the lock is held */
            rt->builtinFunctions[index] = funobj;
         else
            funobj = rt->builtinFunctions[index];
@@ -12369,10 +12602,13 @@ DBG_STUB(JSOP_DEFLOCALFUN_DBGFC)
 DBG_STUB(JSOP_LAMBDA_DBGFC)
 
 #ifdef JS_JIT_SPEW
-/* Prints information about entry typemaps and unstable exits for all peers at a PC */
+/*
+ * Print information about entry typemaps and unstable exits for all peers
+ * at a PC.
+ */
 void
-js_DumpPeerStability(JSTraceMonitor* tm, const void* ip, JSObject* globalObj, uint32 globalShape,
-                     uint32 argc)
+DumpPeerStability(JSTraceMonitor* tm, const void* ip, JSObject* globalObj, uint32 globalShape,
+                  uint32 argc)
 {
     Fragment* f;
     TreeInfo* ti;
@@ -12394,7 +12630,7 @@ js_DumpPeerStability(JSTraceMonitor* tm, const void* ip, JSObject* globalObj, ui
             UnstableExit* uexit = ti->unstableExits;
             while (uexit != NULL) {
                 debug_only_print0(LC_TMStats, "EXIT: ");
-                JSTraceType* m = getFullTypeMap(uexit->exit);
+                JSTraceType* m = GetFullTypeMap(uexit->exit);
                 for (unsigned i = 0; i < uexit->exit->numStackSlots; i++)
                     debug_only_printf(LC_TMStats, "S%d ", m[i]);
                 for (unsigned i = 0; i < uexit->exit->numGlobalSlots; i++)
@@ -12460,7 +12696,7 @@ JS_StopTraceVis()
     if (!traceVisLogFile)
         return false;
 
-    fclose(traceVisLogFile);    // not worth checking the result
+    fclose(traceVisLogFile); // not worth checking the result
     traceVisLogFile = NULL;
 
     return true;
diff --git a/js/src/jstracer.h b/js/src/jstracer.h
index 523ea7047457..671ed8401519 100644
--- a/js/src/jstracer.h
+++ b/js/src/jstracer.h
@@ -166,8 +166,10 @@ public:
 #if defined(JS_JIT_SPEW) || defined(MOZ_NO_VARADIC_MACROS)
 
 enum LC_TMBits {
-    /* Output control bits for all non-Nanojit code. Only use bits 16
-       and above, since Nanojit uses 0 .. 15 itself. */
+    /*
+     * Output control bits for all non-Nanojit code. Only use bits 16 and
+     * above, since Nanojit uses 0 .. 15 itself.
+     */
     LC_TMMinimal  = 1<<16,
     LC_TMTracer   = 1<<17,
     LC_TMRecorder = 1<<18,
@@ -192,14 +194,22 @@ extern nanojit::LogControl js_LogController;
 
 #define debug_only_stmt(stmt) \
     stmt
-#define debug_only_printf(mask, fmt, ...) \
-    do { if ((js_LogController.lcbits & (mask)) > 0) { \
-        js_LogController.printf(fmt, __VA_ARGS__); fflush(stdout); \
-    }} while (0)
-#define debug_only_print0(mask, str) \
-    do { if ((js_LogController.lcbits & (mask)) > 0) { \
-        js_LogController.printf(str); fflush(stdout); \
-    }} while (0)
+
+#define debug_only_printf(mask, fmt, ...) \
+    JS_BEGIN_MACRO \
+        if ((js_LogController.lcbits & (mask)) > 0) { \
+            js_LogController.printf(fmt, __VA_ARGS__); \
+            fflush(stdout); \
+        } \
+    JS_END_MACRO
+
+#define debug_only_print0(mask, str) \
+    JS_BEGIN_MACRO \
+        if ((js_LogController.lcbits & (mask)) > 0) { \
+            js_LogController.printf("%s", str); \
+            fflush(stdout); \
+        } \
+    JS_END_MACRO
 
 #else
 
@@ -333,7 +343,6 @@ public:
     _(DEEP_BAIL) \
     _(STATUS)
 
-
 enum ExitType {
 #define MAKE_EXIT_CODE(x) x##_EXIT,
     JS_TM_EXITCODES(MAKE_EXIT_CODE)
@@ -374,21 +383,6 @@ struct VMSideExit : public nanojit::SideExit
     }
 };
 
-static inline JSTraceType* getStackTypeMap(nanojit::SideExit* exit)
-{
-    return (JSTraceType*)(((VMSideExit*)exit) + 1);
-}
-
-static inline JSTraceType* getGlobalTypeMap(nanojit::SideExit* exit)
-{
-    return getStackTypeMap(exit) + ((VMSideExit*)exit)->numStackSlots;
-}
-
-static inline JSTraceType* getFullTypeMap(nanojit::SideExit* exit)
-{
-    return getStackTypeMap(exit);
-}
-
 struct FrameInfo {
     JSObject*       callee;     // callee function object
     JSObject*       block;      // caller block chain head