A bunch of grunge work to reduce OS-dependent code in ElectricalFire sources.

Here's a detailed list of the changes:

Renamed files and classes that were inappropriately named, i.e. the
names started with 'x86Win32', but the code was for generic x86, not
specific to Win32.

Segregated the 64-bit arithmetic code into an OS-independent
x86Arith64.{cpp,h}
This commit is contained in:
fur%netscape.com 1999-02-26 22:45:11 +00:00
Родитель f9741014c0
Коммит 49eb5852fe
23 изменённых файлов: 3961 добавлений и 1141 удалений

Просмотреть файл

@ -21,14 +21,15 @@
DEPTH = ../../../..
CPPSRCS = x86-win32.nad.burg.cpp \
CPPSRCS = x86.nad.burg.cpp \
x86ArgumentList.cpp \
x86Arith64.cpp \
x86Opcode.cpp \
x86Win32Emitter.cpp \
x86Win32Instruction.cpp \
x86Formatter.cpp \
x86StdCall.cpp \
x86Emitter.cpp \
x86Float.cpp \
x86Formatter.cpp \
x86Instruction.cpp \
x86StdCall.cpp \
$(OS_SUPPORT) \
$(NULL)
@ -39,9 +40,9 @@ LOCAL_MD_EXPORTS_x86 = x86ArgumentList.h \
x86FreeBSD_Support.h \
x86Opcode.h \
x86StdCall.h \
x86Win32Cpu.h \
x86Win32Emitter.h \
x86Win32Instruction.h \
x86Cpu.h \
x86Emitter.h \
x86Instruction.h \
$(NULL)
MODULE_NAME = EF
@ -84,19 +85,19 @@ include $(DEPTH)/config/rules.mk
#######################################################################
#
# Rules to generate x86-win32.nad.burg.[cpp][h]
# Rules to generate x86.nad.burg.[cpp][h]
#
ifneq ($(OS_ARCH),WINNT)
x86-win32.nad.burg.cpp: x86-win32.nad.burg $(BURG)
x86.nad.burg.cpp: x86.nad.burg $(BURG)
$(BURG) -I -o $@ < $<
x86-win32.nad.burg: x86-win32.nad $(DEPTH)/Compiler/PrimitiveGraph/PrimitiveOperations $(DEPTH)/Tools/Nad/nad.pl
x86.nad.burg: x86.nad $(DEPTH)/Compiler/PrimitiveGraph/PrimitiveOperations $(DEPTH)/Tools/Nad/nad.pl
$(PERL) $(DEPTH)/Tools/Nad/nad.pl $< $(DEPTH)/Compiler/PrimitiveGraph/PrimitiveOperations \
$(LOCAL_EXPORT_DIR)/PrimitiveOperations.h \
$(DEPTH)/Compiler/PrimitiveGraph/PrimitiveOperations.cpp \
$<.burg.h > $@
else
x86-win32.nad.burg.cpp: x86-win32.nad $(BURG) $(DEPTH)/Compiler/PrimitiveGraph/PrimitiveOperations $(DEPTH)/Tools/Nad/nad.pl
x86.nad.burg.cpp: x86.nad $(BURG) $(DEPTH)/Compiler/PrimitiveGraph/PrimitiveOperations $(DEPTH)/Tools/Nad/nad.pl
$(DEPTH)/config/genburg.bat $(BURG) $(DEPTH)/Tools/Nad/nad.pl $< $(DEPTH)/Compiler/PrimitiveGraph/PrimitiveOperations
cp $(DEPTH)/Compiler/PrimitiveGraph/PrimitiveOperations.h $(LOCAL_EXPORT_DIR)
endif
@ -111,7 +112,7 @@ $(DEPTH)/Compiler/PrimitiveGraph/PrimitiveOperations:
# Extra cleaning
#
clean clobber realclean clobber_all::
rm -f x86-win32.nad.burg.cpp x86-win32.nad.burg.h x86-win32.nad.burg
rm -f x86.nad.burg.cpp x86.nad.burg.h x86.nad.burg
#
# Special rules for x86Win32Support.cpp on Windows: it currently breaks the

Просмотреть файл

Просмотреть файл

@ -0,0 +1,390 @@
%top
/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*-
*
* The contents of this file are subject to the Netscape Public License
* Version 1.0 (the "NPL"); you may not use this file except in
* compliance with the NPL. You may obtain a copy of the NPL at
* http://www.mozilla.org/NPL/
*
* Software distributed under the NPL is distributed on an "AS IS" basis,
* WITHOUT WARRANTY OF ANY KIND, either express or implied. See the NPL
* for the specific language governing rights and limitations under the
* NPL.
*
* The Initial Developer of this code under the NPL is Netscape
* Communications Corporation. Portions created by Netscape are
* Copyright (C) 1998 Netscape Communications Corporation. All Rights
* Reserved.
*/
#include "Burg.h"
%
%terminals
%
%startsymbols
Control
Result
Exception
Store
Vcond
Vint
Vlong
Vfloat
Vdouble
Vptr
Cint
Clong
Cfloat
Cdouble
Cptr
Tuple
%
%grammar
Vint: coReg_I $1 $
Vlong: coReg_L $1 $
Vfloat: coReg_F $1 $
Vdouble: coReg_D $1 $
Vptr: coReg_A $1 $
Vcond: coReg_C $1 $
Store: coReg_M $1 $
Cint: coReg_I $1 $
Clong: coReg_L $1 $
Cfloat: coReg_F $1 $
Cdouble: coReg_D $1 $
Cptr: coReg_A $1 $
Vint: poConst_I $1 $emConst_I
Vlong: poConst_L $1 $emConst_L
Vptr: poConst_A $1 $emConst_A
Vfloat: poConst_F $1 $emConst_F
Vdouble: poConst_D $1 $emConst_D
// Vcond: poConst_C $1 $emConst_C
//-----------------------------------------------------------------
// Addressing Mode Helpers
Scale: poShl_I(Vint, poConst_I) $0
ScaleIndex: poAdd_A(Vptr, Scale) $0
//ScaleIndex: Vptr $0
DispScaleIndex: poAdd_A(ScaleIndex, poConst_I) $0
MemDSI: poLd_I(DispScaleIndex) $0
Disp: poAdd_A(Vptr, poConst_I) $0
MemDisp: poLd_I(Disp) $0
//-----------------------------------------------------------------
Store: poBreak(Store) $1 $emBreak
Vint: poArg_I $1 $
Vlong: poArg_L $1 $
Vfloat: poArg_F $1 $
Vdouble: poArg_D $1 $
Vptr: poArg_A $1 $
Store: poArg_M $1 $
Result: poResult_I(Vint) $1 $emResult_I
Result: poResult_A(Vptr) $1 $emResult_A
Result: poResult_L(Vlong) $1 $emResult_L
Result: poResult_F(Vfloat) $1 $emResult_F
Result: poResult_D(Vdouble) $1 $emResult_D
// Result: poResult_C(Acond) $1 $emResult_C
Result: poResult_M(Store) $1 $
//-----------------------------------------------------------------
// Conditional Branches
Control: poIfLt(Vcond) $1 $emIfLt
Control: poIfEq(Vcond) $1 $emIfEq
Control: poIfLe(Vcond) $1 $emIfLe
Control: poIfGt(Vcond) $1 $emIfGt
Control: poIfLgt(Vcond) $1 $emIfLgt
Control: poIfGe(Vcond) $1 $emIfGe
Control: poIfULt(Vcond) $1 $emIfULt
Control: poIfUEq(Vcond) $1 $emIfUEq
Control: poIfULe(Vcond) $1 $emIfULe
Control: poIfUGt(Vcond) $1 $emIfUGt
Control: poIfNe(Vcond) $1 $emIfNe
Control: poIfUGe(Vcond) $1 $emIfUGe
//-----------------------------------------------------------------
// Booleans
Vint: poLt_I(Vcond) $1 $emLt_I
Vint: poEq_I(Vcond) $1 $emEq_I
Vint: poLe_I(Vcond) $1 $emLe_I
Vint: poGt_I(Vcond) $1 $emGt_I
Vint: poLgt_I(Vcond) $1 $emLgt_I
Vint: poGe_I(Vcond) $1 $emGe_I
Vint: poULt_I(Vcond) $1 $emULt_I
Vint: poUEq_I(Vcond) $1 $emUEq_I
Vint: poULe_I(Vcond) $1 $emULe_I
Vint: poUGt_I(Vcond) $1 $emUGt_I
Vint: poNe_I(Vcond) $1 $emNe_I
Vint: poUGe_I(Vcond) $1 $emUGe_I
//-----------------------------------------------------------------
// switch
Control: poSwitch(Vint) $1 $emSwitch
//-----------------------------------------------------------------
// And
Vint: poAnd_I(Vint, Vint) $1 $emAnd_I
Vint: poAnd_I(Vint, poConst_I) $1 $emAndI_I
Vlong: poAnd_L(Vlong, Vlong) $1 $emAnd_L
//-----------------------------------------------------------------
// Or
Vint: poOr_I(Vint, Vint) $1 $emOr_I
Vint: poOr_I(Vint, poConst_I) $1 $emOrI_I
Vlong: poOr_L(Vlong, Vlong) $1 $emOr_L
//-----------------------------------------------------------------
// Xor
Vint: poXor_I(Vint, Vint) $1 $emXor_I
Vint: poXor_I(Vint, poConst_I) $1 $emXorI_I
Vlong: poXor_L(Vlong, Vlong) $1 $emXor_L
//-----------------------------------------------------------------
// Add
Vint: poAdd_I(Vint, Vint) $1 $emAdd_I
Vptr: poAdd_A(Vptr, Vint) $1 $emAdd_A
Vint: poAdd_I(Vint, poConst_I) $1 $emAddI_I
Vptr: poAdd_A(Vptr, poConst_I) $1 $emAddI_A
Vlong: poAdd_L(Vlong, Vlong) $1 $emAdd_L
//-----------------------------------------------------------------
// Sub
Vint: poSub_I(poConst_I, Vint) $1 $emSubR_I
Vint: poSub_I(Vint, Vint) $1 $emSub_I
Vptr: poSub_A(Vptr, Vint) $1 $emSub_A
Vlong: poSub_L(Vlong, Vlong) $1 $emSub_L
//-----------------------------------------------------------------
// Mul
Vint: poMul_I(Vint, Vint) $1 $emMul_I
Vlong: poMul_L(Vlong, Vlong) $1 $emMul_L
//-----------------------------------------------------------------
// Div
// Signed, signed-exact, unsigned, and unsigned-exact 32-bit divides,
// each in register/register form and with a MemDSI memory operand,
// plus the 64-bit signed forms.
Vint: poDiv_I(Vint, Vint) $1 $emDiv_I
Vint: poDivE_I(Vint, Vint) $1 $emDivE_I
Vint: poDivU_I(Vint, Vint) $1 $emDivU_I
Vint: poDivUE_I(Vint, Vint) $1 $emDivUE_I
Vint: poDiv_I(Vint, MemDSI) $1 $emDiv_I_MemDSI
Vint: poDivE_I(Vint, MemDSI) $1 $emDivE_I_MemDSI
// The unsigned MemDSI rules must match the unsigned operators; they
// previously duplicated poDiv_I/poDivE_I(Vint, MemDSI) with a
// conflicting emitter, so emDivU_I_MemDSI/emDivUE_I_MemDSI could
// never be selected for unsigned divides.
Vint: poDivU_I(Vint, MemDSI) $1 $emDivU_I_MemDSI
Vint: poDivUE_I(Vint, MemDSI) $1 $emDivUE_I_MemDSI
Vlong: poDiv_L(Vlong, Vlong) $1 $emDiv_L
Vlong: poDivE_L(Vlong, Vlong) $1 $emDivE_L
//-----------------------------------------------------------------
// Mod
// Signed, signed-exact, unsigned, and unsigned-exact 32-bit remainders,
// each in register/register form and with a MemDSI memory operand,
// plus the 64-bit signed forms.
Vint: poMod_I(Vint, Vint) $1 $emMod_I
Vint: poModE_I(Vint, Vint) $1 $emModE_I
Vint: poModU_I(Vint, Vint) $1 $emModU_I
Vint: poModUE_I(Vint, Vint) $1 $emModUE_I
Vint: poMod_I(Vint, MemDSI) $1 $emMod_I_MemDSI
Vint: poModE_I(Vint, MemDSI) $1 $emModE_I_MemDSI
// The unsigned MemDSI rules must match the unsigned operators; they
// previously duplicated poMod_I/poModE_I(Vint, MemDSI) with a
// conflicting emitter, so emModU_I_MemDSI/emModUE_I_MemDSI could
// never be selected for unsigned remainders.
Vint: poModU_I(Vint, MemDSI) $1 $emModU_I_MemDSI
Vint: poModUE_I(Vint, MemDSI) $1 $emModUE_I_MemDSI
Vlong: poMod_L(Vlong, Vlong) $1 $emMod_L
Vlong: poModE_L(Vlong, Vlong) $1 $emModE_L
//-----------------------------------------------------------------
// Shl
Vint: poShl_I(Vint, Vint) $1 $emShl_I
Vint: poShl_I(Vint, poConst_I) $1 $emShlI_I
Vlong: poShl_L(Vlong, Vint) $1 $emShl_L
//-----------------------------------------------------------------
// Shr
Vint: poShr_I(Vint, Vint) $1 $emShr_I
Vint: poShr_I(Vint, poConst_I) $1 $emShrI_I
Vlong: poShr_L(Vlong, Vint) $1 $emShr_L
//-----------------------------------------------------------------
// Shru
Vint: poShrU_I(Vint, Vint) $1 $emShrU_I
Vint: poShrU_I(Vint, poConst_I) $1 $emShrUI_I
Vlong: poShrU_L(Vlong, Vint) $1 $emShrU_L
//-----------------------------------------------------------------
// sign extend
Vint: poExt_I(Vint, poConst_I) $1 $emExt_I
Vlong: poExt_L(Vlong, poConst_I) $1 $emExt_L
//-----------------------------------------------------------------
// Floating Point
Vfloat: poFAdd_F(Vfloat, Vfloat) $1 $emFAdd_F
Vdouble: poFAdd_D(Vdouble, Vdouble) $1 $emFAdd_D
Vfloat: poFSub_F(Vfloat, Vfloat) $1 $emFSub_F
Vdouble: poFSub_D(Vdouble, Vdouble) $1 $emFSub_D
Vfloat: poFMul_F(Vfloat, Vfloat) $1 $emFMul_F
Vdouble: poFMul_D(Vdouble, Vdouble) $1 $emFMul_D
Vfloat: poFDiv_F(Vfloat, Vfloat) $1 $emFDiv_F
Vdouble: poFDiv_D(Vdouble, Vdouble) $1 $emFDiv_D
Vfloat: poFRem_F(Vfloat, Vfloat) $1 $emFRem_F
Vdouble: poFRem_D(Vdouble, Vdouble) $1 $emFRem_D
//-----------------------------------------------------------------
// Convert
Vint: poConvI_L(Vlong) $1 $emConvI_L
Vlong: poConvL_I(Vint) $1 $emConvL_I
Vint: poFConvI_F(Vfloat) $1 $emFConvI_F
Vint: poFConvI_D(Vdouble) $1 $emFConvI_D
Vlong: poFConvL_F(Vfloat) $1 $emFConvL_F
Vlong: poFConvL_D(Vdouble) $1 $emFConvL_D
Vfloat: poFConvF_I(Vint) $1 $emFConvF_I
Vfloat: poFConvF_L(Vlong) $1 $emFConvF_L
Vfloat: poFConvF_D(Vdouble) $1 $emFConvF_D
Vdouble: poFConvD_I(Vint) $1 $emFConvD_I
Vdouble: poFConvD_L(Vlong) $1 $emFConvD_L
Vdouble: poFConvD_F(Vfloat) $1 $emFConvD_F
//-----------------------------------------------------------------
// Compare
Vcond: poCmp_I(Vint, Vint) $1 $emCmp_I
Vcond: poCmpU_I(Vint, Vint) $1 $emCmpU_I
Vcond: poCmpU_A(Vptr, Vptr) $1 $emCmpU_A
Vcond: poCmp_I(Vint, poConst_I) $1 $emCmpI_I
Vcond: poCmpU_I(Vint, poConst_I) $1 $emCmpUI_I
Vint: poCatL_I(poCmp_L(Vlong, Vlong)) $1 $em3wayCmpL_L
Vint: poCatCL_I(poCmp_L(Vlong, Vlong)) $1 $em3wayCmpCL_L
Vint: poCatL_I(poFCmp_F(Vfloat, Vfloat)) $1 $em3wayCmpF_L
Vint: poCatG_I(poFCmp_F(Vfloat, Vfloat)) $1 $em3wayCmpF_G
Vint: poCatL_I(poFCmp_D(Vdouble, Vdouble)) $1 $em3wayCmpD_L
Vint: poCatG_I(poFCmp_D(Vdouble, Vdouble)) $1 $em3wayCmpD_G
Vint: poCatCL_I(poFCmp_F(Vfloat, Vfloat)) $1 $em3wayCmpCF_L
Vint: poCatCG_I(poFCmp_F(Vfloat, Vfloat)) $1 $em3wayCmpCF_G
Vint: poCatCL_I(poFCmp_D(Vdouble, Vdouble)) $1 $em3wayCmpCD_L
Vint: poCatCG_I(poFCmp_D(Vdouble, Vdouble)) $1 $em3wayCmpCD_G
Vcond: poCmp_I(MemDSI, poConst_I) $1 $emCmpI_I_MemDSI
Vcond: poCmp_I(MemDSI, Vint) $1 $emCmp_I_MemDSI
//-----------------------------------------------------------------
// CheckNull
Exception: poChkNull(Vptr) $1 $emChkNull
//-----------------------------------------------------------------
// Limit
Exception: poLimit(Vint, Vint) $1 $emLimit
Exception: poLimit(poConst_I, Vint) $1 $emLimitR
Exception: poLimit(poConst_I, MemDisp) $1 $emLimitR_MemDisp
Exception: poLimit(Vint, MemDisp) $1 $emLimit_MemDisp
//-----------------------------------------------------------------
// Check/Limit Cast
Exception: poChkCast_A(Vptr, poConst_A) $1 $emChkCastI_A
Exception: poChkCast_A(Vptr, Vptr) $1 $emChkCast_A
Exception: poChkCast_I(Vint, Vint) $1 $emChkCast_I
Exception: poLimCast(Vint) $1 $emLimCast
//-----------------------------------------------------------------
// Load
Vint: poLd_I(Vptr) $1 $emLd_I
Vptr: poLd_A(Vptr) $1 $emLd_A
Vlong: poLd_L(Vptr) $1 $emLd_L
Vfloat: poLd_F(Vptr) $1 $emLd_F
Vdouble: poLd_D(Vptr) $1 $emLd_D
Vint: poLdS_B(Vptr) $1 $emLdS_B
Vint: poLdU_B(Vptr) $1 $emLdU_B
Vint: poLdS_H(Vptr) $1 $emLdS_H
Vint: poLdU_H(Vptr) $1 $emLdU_H
Vint: MemDisp $1 $emLd_I_MemDisp
Vint: MemDSI $1 $emLd_I_MemDSI
// FIXME - Add these patterns for performance
// Vfloat: poLd_F(Disp) $1 $emLd_F_MemDisp
// Vfloat: poLd_F(DispScaleIndex) $1 $emLd_F_MemDSI
// Vdouble: poLd_D(Disp) $1 $emLd_D_MemDisp
// Vdouble: poLd_D(DispScaleIndex) $1 $emLd_D_MemDSI
//-----------------------------------------------------------------
// Store
Store: poSt_I(Vptr, Vint) $1 $emSt_I
Store: poSt_I(Vptr, poConst_I) $1 $emStI_I
Store: poSt_A(Vptr, Vptr) $1 $emSt_A
Store: poSt_L(Vptr, Vlong) $1 $emSt_L
Store: poSt_B(Vptr, Vint) $1 $emSt_B
Store: poSt_H(Vptr, Vint) $1 $emSt_H
Store: poSt_I(Disp, poConst_I) $1 $emStI_I_MemDisp
Store: poSt_I(Disp, Vint) $1 $emSt_I_MemDisp
Store: poSt_I(DispScaleIndex, Vint) $1 $emSt_I_MemDSI
Store: poSt_F(Vptr, Vfloat) $1 $emSt_F
Store: poSt_D(Vptr, Vdouble) $1 $emSt_D
// FIXME - Add below patterns for performance
//Store: poSt_F(Disp, poConst_I) $1 $emStI_F_MemDisp
//Store: poSt_F(Disp, Vint) $1 $emSt_F_MemDisp
//Store: poSt_F(DispScaleIndex, Vint) $1 $emSt_F_MemDSI
//Store: poSt_D(Disp, poConst_I) $1 $emStI_D_MemDisp
//Store: poSt_D(Disp, Vint) $1 $emSt_D_MemDisp
//Store: poSt_D(DispScaleIndex, Vint) $1 $emSt_D_MemDSI
//-----------------------------------------------------------------
// Monitor
//Store: poMEnter_A(Vptr) $1 $emMEnter_A
//Store: poMExit_A(Vptr) $1 $emMExit_A
//-----------------------------------------------------------------
// Monitor
Store: poMEnter(Store, Vptr) $1 $emMEnter
Store: poMExit(Store, Vptr) $1 $emMExit
//-----------------------------------------------------------------
// Projections
Store: poProj_M $1 $
Vint: poProj_I $1 $
Vptr: poProj_A $1 $
Vlong: poProj_L $1 $
Vfloat: poProj_F $1 $
Vdouble: poProj_D $1 $
//-----------------------------------------------------------------
// Syscalls
Tuple: poSysCall $1 $emSysCall
Tuple: poSysCallE $1 $emSysCallE
Tuple: poSysCallC $1 $emSysCallC
Tuple: poSysCallEC $1 $emSysCallEC
Tuple: poSysCallV $1 $emSysCallV
Tuple: poSysCallEV $1 $emSysCallEV
//-----------------------------------------------------------------
// Calls
Tuple: poCall(Vptr) $1 $emDynamicCall
Tuple: poCall(poConst_A) $1 $emStaticCall
//-----------------------------------------------------------------
// Catch
Vptr: poCatch $1 $emCatch
%

Просмотреть файл

@ -23,7 +23,7 @@
#include "x86ArgumentList.h"
#include "x86Opcode.h"
#include "x86Win32Instruction.h"
#include "x86Instruction.h"
#include "MemoryAccess.h"
#include "x86Formatter.h"

Просмотреть файл

@ -0,0 +1,174 @@
/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*-
*
* The contents of this file are subject to the Netscape Public License
* Version 1.0 (the "NPL"); you may not use this file except in
* compliance with the NPL. You may obtain a copy of the NPL at
* http://www.mozilla.org/NPL/
*
* Software distributed under the NPL is distributed on an "AS IS" basis,
* WITHOUT WARRANTY OF ANY KIND, either express or implied. See the NPL
* for the specific language governing rights and limitations under the
* NPL.
*
* The Initial Developer of this code under the NPL is Netscape
* Communications Corporation. Portions created by Netscape are
* Copyright (C) 1998 Netscape Communications Corporation. All Rights
* Reserved.
*/
#include "Exports.h"
#include "prtypes.h"
//================================================================================
// 64bit Arithmetic Support Functions
// x86Extract64Bit
//
// Purpose: signed right-aligned field extraction
// In: 64 bit source (on stack)
// 32 bit extraction size (on stack)
// Out: 64 bit result
// Note: Only works in range 1 <= b <= 63, b is extraction amount
NS_NATIVECALL(int64)
x86Extract64Bit(int64 src, int b)
{
    if (b <= 32)
    {
        // Field fits in the low word: truncate to 32 bits, then shift
        // up and arithmetic-shift back down to sign-extend the low
        // b bits (the shift pair is a no-op for b == 32).
        b = 32 - b;
        return (int)src << b >> b;
    }
    else
    {
        // Field is wider than 32 bits: do the sign-extending shift
        // pair in 64-bit arithmetic so the low 32 bits of the field
        // are preserved.  The previous form, (int)(src >> 32) << b >> b,
        // returned only the (sign-extended) high word and discarded
        // the low word entirely.
        b = 64 - b;
        return src << b >> b;
    }
}
// 3WayCompare
//
// Purpose: compare two longs
// In: two longs on the stack
// Out: -1 if a < b
// 0 if a == b
// +1 if a > b
NS_NATIVECALL(int64)
x86ThreeWayCMP_L(int64 a, int64 b)
{
    if (a < b)
        return -1;
    if (b < a)
        return 1;
    return 0;
}
// 3WayCompare (reversed sense)
//
// Purpose: compare two longs
// In: two longs on the stack
// Out: +1 if a < b
// 0 if a == b
// -1 if a > b
NS_NATIVECALL(int64)
x86ThreeWayCMPC_L(int64 a, int64 b)
{
    if (a < b)
        return 1;
    if (b < a)
        return -1;
    return 0;
}
// llmul
//
// Purpose: long multiply (same result for signed/unsigned)
// In: args are passed on the stack:
// 1st pushed: multiplier (QWORD)
// 2nd pushed: multiplicand (QWORD)
// Out: EDX:EAX - product of multiplier and multiplicand
// Note: parameters are removed from the stack
// Uses: ECX
NS_NATIVECALL(int64)
x86Mul64Bit(int64 a, int64 b)
{
    int64 product = a * b;
    return product;
}
// lldiv
//
// Purpose: signed long divide
// In: args are passed on the stack:
// 1st pushed: divisor (QWORD)
// 2nd pushed: dividend (QWORD)
// Out: EDX:EAX contains the quotient (dividend/divisor)
// Note: parameters are removed from the stack
// Uses: ECX
// NOTE(review): divisor == 0 and INT64_MIN / -1 are undefined in C++
// here; presumably the emitted code checks for division by zero
// before calling this helper -- confirm at the call site.
NS_NATIVECALL(int64)
x86Div64Bit(int64 dividend, int64 divisor)
{
return dividend / divisor;
}
// llrem
//
// Purpose: signed long remainder
// In: args are passed on the stack:
// 1st pushed: divisor (QWORD)
// 2nd pushed: dividend (QWORD)
// Out: EDX:EAX contains the remainder (dividend/divisor)
// Note: parameters are removed from the stack
// Uses: ECX
// NOTE(review): divisor == 0 and INT64_MIN % -1 are undefined in C++
// here; presumably the emitted code checks for division by zero
// before calling this helper -- confirm at the call site.
NS_NATIVECALL(int64)
x86Mod64Bit(int64 dividend, int64 divisor)
{
return dividend % divisor;
}
// llshl
//
// Purpose: long shift left
// In: args are passed on the stack: (FIX make fastcall)
// 1st pushed: amount (int)
// 2nd pushed: source (long)
// Out: EDX:EAX contains the result
// Note: parameters are removed from the stack
// Uses: ECX, destroyed
NS_NATIVECALL(int64)
x86Shl64Bit(int64 src, int amount)
{
    int64 shifted = src << amount;
    return shifted;
}
// llshr
//
// Purpose: long shift right (logical: operands are unsigned, so
// vacated high bits are zero-filled)
// In: args are passed on the stack: (FIX make fastcall)
// 1st pushed: amount (int)
// 2nd pushed: source (long)
// Out: EDX:EAX contains the result
// Note: parameters are removed from the stack
// Uses: ECX, destroyed
// NOTE(review): amount >= 64 is undefined in C++; presumably the
// caller has already masked the shift count to 0..63 -- confirm.
NS_NATIVECALL(uint64)
x86Shr64Bit(uint64 src, int amount)
{
return src >> amount;
}
// llsar
//
// Purpose: long shift right signed (arithmetic)
// In: args are passed on the stack: (FIX make fastcall)
// 1st pushed: amount (int)
// 2nd pushed: source (long)
// Out: EDX:EAX contains the result
// Note: parameters are removed from the stack
// Uses: ECX, destroyed
// NOTE(review): right shift of a negative signed value is
// implementation-defined in C++ (pre-C++20); this relies on the
// compiler generating an arithmetic (sign-propagating) shift, which
// all targeted compilers do. amount >= 64 is undefined -- presumably
// the caller masks the count; confirm.
NS_NATIVECALL(int64)
x86Sar64Bit(int64 src, int amount)
{
return src >> amount;
}
//================================================================================

Просмотреть файл

@ -0,0 +1,30 @@
/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*-
*
* The contents of this file are subject to the Netscape Public License
* Version 1.0 (the "NPL"); you may not use this file except in
* compliance with the NPL. You may obtain a copy of the NPL at
* http://www.mozilla.org/NPL/
*
* Software distributed under the NPL is distributed on an "AS IS" basis,
* WITHOUT WARRANTY OF ANY KIND, either express or implied. See the NPL
* for the specific language governing rights and limitations under the
* NPL.
*
* The Initial Developer of this code under the NPL is Netscape
* Communications Corporation. Portions created by Netscape are
* Copyright (C) 1998 Netscape Communications Corporation. All Rights
* Reserved.
*/
//================================================================================
// 64bit Arithmetic Support Functions
//
// Out-of-line helpers (implemented in x86Arith64.cpp) that the x86
// backend calls from generated code, via the NS_NATIVECALL calling
// convention, for 64-bit operations with no direct x86 instruction.
extern NS_NATIVECALL(int64) x86Mul64Bit(int64 a, int64 b); // a * b
extern NS_NATIVECALL(int64) x86Div64Bit(int64 a, int64 b); // signed a / b
extern NS_NATIVECALL(int64) x86Mod64Bit(int64 a, int64 b); // signed a % b
extern NS_NATIVECALL(int64) x86Shl64Bit(int64 a, int b); // a << b
extern NS_NATIVECALL(uint64) x86Shr64Bit(uint64 a, int b); // logical a >> b
extern NS_NATIVECALL(int64) x86Sar64Bit(int64 a, int b); // arithmetic a >> b
extern NS_NATIVECALL(int64) x86ThreeWayCMP_L(int64 a, int64 b); // -1/0/+1
extern NS_NATIVECALL(int64) x86ThreeWayCMPC_L(int64 a, int64 b); // +1/0/-1
extern NS_NATIVECALL(int64) x86Extract64Bit(int64 a, int b); // signed field extract, 1 <= b <= 63

Просмотреть файл

@ -0,0 +1,49 @@
/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*-
*
* The contents of this file are subject to the Netscape Public License
* Version 1.0 (the "NPL"); you may not use this file except in
* compliance with the NPL. You may obtain a copy of the NPL at
* http://www.mozilla.org/NPL/
*
* Software distributed under the NPL is distributed on an "AS IS" basis,
* WITHOUT WARRANTY OF ANY KIND, either express or implied. See the NPL
* for the specific language governing rights and limitations under the
* NPL.
*
* The Initial Developer of this code under the NPL is Netscape
* Communications Corporation. Portions created by Netscape are
* Copyright (C) 1998 Netscape Communications Corporation. All Rights
* Reserved.
*/
#if !defined(_X86_CPU_H_) || defined(INCLUDE_EMITTER)
#define _X86_CPU_H_
// Register-allocator description of the x86 register file.
// Ranges are inclusive; an empty range is expressed as first = 0,
// last = -1.
// NOTE(review): the integer register numbering (which indices are
// EAX..EDX vs. EBX/ESI/EDI) is defined elsewhere in the backend --
// confirm against the register allocator before relying on it.
#define FIRST_CALLEE_SAVED_GR 4
#define LAST_CALLEE_SAVED_GR 6
#define FIRST_CALLER_SAVED_GR 0
#define LAST_CALLER_SAVED_GR 3
// Empty FP ranges: no allocatable floating-point registers
// (presumably the x87 stack is managed explicitly, not allocated).
#define FIRST_CALLEE_SAVED_FPR 0
#define LAST_CALLEE_SAVED_FPR -1
#define FIRST_CALLER_SAVED_FPR 0
#define LAST_CALLER_SAVED_FPR -1
#define FIRST_GREGISTER 0
#define LAST_GREGISTER 5
#define FIRST_FPREGISTER 0
#define LAST_FPREGISTER -1
#define NUMBER_OF_SPECIAL_REGISTERS 0
// Machine-dependent emitter/formatter types for this target.
class x86Emitter;
class x86Formatter;
typedef x86Formatter MdFormatter;
typedef x86Emitter MdEmitter;
#ifdef INCLUDE_EMITTER
#include "x86Emitter.h"
#include "x86Formatter.h"
#endif
#define CPU_IS_SUPPORTED
#endif /* _X86_CPU_H_ */

Разница между файлами не показана из-за своего большого размера Загрузить разницу

Просмотреть файл

@ -0,0 +1,333 @@
/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*-
*
* The contents of this file are subject to the Netscape Public License
* Version 1.0 (the "NPL"); you may not use this file except in
* compliance with the NPL. You may obtain a copy of the NPL at
* http://www.mozilla.org/NPL/
*
* Software distributed under the NPL is distributed on an "AS IS" basis,
* WITHOUT WARRANTY OF ANY KIND, either express or implied. See the NPL
* for the specific language governing rights and limitations under the
* NPL.
*
* The Initial Developer of this code under the NPL is Netscape
* Communications Corporation. Portions created by Netscape are
* Copyright (C) 1998 Netscape Communications Corporation. All Rights
* Reserved.
*/
//
// x86Emitter.h
//
// Peter DeSantis
// Simon Holmes a Court
//
// Include guard renamed from X86_WIN32_EMITTER: this header was
// renamed from x86Win32Emitter.h and is generic x86, not Win32.
#ifndef X86_EMITTER
#define X86_EMITTER
#include "InstructionEmitter.h"
#include "VirtualRegister.h"
#include "ControlNodes.h"
#include "x86Opcode.h"
// Forward declarations (full definitions live in the instruction
// headers; only references/pointers are used below).
class x86Instruction;
class InsnFloatMemory;
class x86ArgListInstruction;
class InsnDoubleOpDir;
//-----------------------------------------------------------------------------------------------------------
// Fetch the inWhich'th input of inPrimitive as a 32-bit unsigned
// constant.  The input must be a PrimConst of category pcConst whose
// value is representable as a Uint32 (both asserted).
inline Uint32 nthInputConstantUint32(/* const */ DataNode& inPrimitive, size_t inWhich)
{
    DataNode& input = inPrimitive.nthInputVariable(inWhich);
    PrimConst* constPrim = static_cast<PrimConst*>(&input);
    assert(constPrim->hasCategory(pcConst));

    Uint32 result;
    bool extracted = extractU32(constPrim->value, result);
    assert(extracted);
    return result;
}
//-----------------------------------------------------------------------------------------------------------
// MemDSIParameters
// common operations performed by all MemDSI modes
//
// Decomposes the primitive tree matched by the grammar's
// DispScaleIndex/MemDSI pattern:
//   poAdd_A(poAdd_A(base, poShl_I(index, scale)), displacement)
// inNode is the load or store primitive; its input 1 is the address
// expression (the outer add-immediate).
class MemDSIParameters
{
public:
DataNode& addImmPrimitive; // outer poAdd_A(..., poConst_I displacement)
DataNode& addPrimitive; // inner poAdd_A(base, scaled index)
DataNode& shiftPrimitive; // poShl_I(index, poConst_I scale)
DataNode& baseProducer; // producer of the base address (Vptr)
DataNode& indexProducer; // producer of the index (Vint)
uint32 displacement; // constant displacement, from addImmPrimitive input 1
uint32 scale; // constant shift amount, from shiftPrimitive input 1
// NOTE: the reference members are initialised in declaration order,
// and each initialiser reads a member initialised before it -- do
// not reorder the declarations above.
MemDSIParameters(DataNode& inNode) : // should be load or store
addImmPrimitive(inNode.nthInputVariable(1)),
addPrimitive(addImmPrimitive.nthInputVariable(0)),
shiftPrimitive(addPrimitive.nthInputVariable(1)),
baseProducer(addPrimitive.nthInputVariable(0)),
indexProducer(shiftPrimitive.nthInputVariable(0))
{
displacement = nthInputConstantUint32(addImmPrimitive, 1);
scale = nthInputConstantUint32(shiftPrimitive, 1);
}
};
//-----------------------------------------------------------------------------------------------------------
// How an instruction's memory operand is addressed: plain register
// operands, displacement-only memory (MemDisp), or
// displacement+scale+index memory (MemDSI) -- matching the grammar
// nonterminals of the same names.
enum x86AddressModeType
{
amNormal,
amMemDisp,
amMemDSI
};
// Machine-independent condition kinds consumed by emit_B/emit_Cond.
// NOTE(review): presumably translated to x86ConditionCode when the
// branch/set instruction is emitted -- confirm in the emitter.
enum RawConditionCode
{
rawLt,
rawEq,
rawLe,
rawGt,
rawLgt,
rawGe
};
//-----------------------------------------------------------------------------------------------------------
// x86Emitter
//
// Instruction selector/emitter for the x86 backend.  emitPrimitive
// dispatches on the rule chosen by the BURG matcher (see x86.nad)
// and calls the corresponding emit_* method; the emit_* names mirror
// the $em* actions in the grammar.  Declarations only -- bodies live
// in the x86Emitter/x86Float sources.
class x86Emitter :
public InstructionEmitter
{
friend class x86Instruction;
friend class x86Formatter;
public:
// Instructions are allocated from inPool; virtual registers come
// from vrMan.
x86Emitter(Pool& inPool, VirtualRegisterManager& vrMan) :
InstructionEmitter(inPool, vrMan)
{ }
// Main entry point: emit code for inPrimitive using matched rule inRule.
void emitPrimitive(Primitive& inPrimitive, NamedRule inRule);
VirtualRegister& emit_CopyOfInput(x86ArgListInstruction& inInsn, DataNode& inPrimitive, Uint8 inWhichInput, VirtualRegisterID inID = vidLow);
void emitArguments(ControlNode::BeginExtra& inBeginNode);
// Register-allocator support: insert copy/spill-load/spill-store
// instructions after `where`.
bool emitCopyAfter(DataNode& inDataNode, InstructionList::iterator where, VirtualRegister& fromVr, VirtualRegister& toVr);
void emitLoadAfter(DataNode& inDataNode, InstructionList::iterator where, VirtualRegister& loadedReg, VirtualRegister& stackReg);
void emitStoreAfter(DataNode& inDataNode, InstructionList::iterator where, VirtualRegister& storedReg, VirtualRegister& stackReg);
virtual Instruction& emitAbsoluteBranch(DataNode& inDataNode, ControlNode& inTarget);
// Floating-point call results and outgoing FP arguments.
void emit_CallReturnF(InsnUseXDefineYFromPool& callInsn, DataNode& callPrimitive, DataNode& returnValProducer);
void emit_CallReturnD(InsnUseXDefineYFromPool& callInsn, DataNode& callPrimitive, DataNode& returnValProducer);
void emit_ArgF(PrimArg& arg, InstructionDefine& order, int curStackOffset);
void emit_ArgD(PrimArg& arg, InstructionDefine& order, int curStackOffset);
private:
void emit_LoadAddress(Primitive& inPrimitive);
// break
void emit_Break(Primitive& inPrimitive);
// result
void emit_Result_I(Primitive& inPrimitive);
void emit_Result_L(Primitive& inPrimitive);
void emit_Result_F(Primitive& inPrimitive);
void emit_Result_D(Primitive& inPrimitive);
void emit_Ld_I_MemDisp(Primitive& inPrimitive);
// array-bounds / exception checks
void emit_LimitR(Primitive& inPrimitive);
void emit_Limit(Primitive& inPrimitive);
void emit_ExceptionCheck(Primitive& inPrimitive, x86ConditionCode condType, Uint32 constant,
void (*throwExceptionFunction)());
void emit_LimCast(Primitive& inPrimitive);
void emit_ChkCast(Primitive& inPrimitive);
void emit_ChkCast_Const(Primitive& inPrimitive);
void emit_ChkNull(Primitive& inPrimitive);
// constants
void emit_LoadConstant_I(Primitive& inPrimitive);
void emit_LoadConstant_L(Primitive& inPrimitive);
void emit_LoadConstant_F(Primitive& inPrimitive);
void emit_LoadConstant_D(Primitive& inPrimitive);
void genLdC_I(Primitive& inPrimitive);
void genLd_I(Primitive& inPrimitive);
void emit_Ld_L(Primitive& inPrimitive);
void emit_St_L(Primitive& inPrimitive);
// condition consumers
void emit_B(Primitive& inPrimitive, RawConditionCode rawCondType);
void emit_Cond(Primitive& inPrimitive, RawConditionCode rawCondType);
// logical operator helpers
void genLogicI_I(Primitive& inPrimitive, x86ImmediateArithType iaType);
void genLogic_I(Primitive& inPrimitive, x86DoubleOpDirCode raType);
void emit_Logic_L(Primitive& inPrimitive, x86DoubleOpDirCode insnType);
// and
void emit_AndI_I(Primitive& inPrimitive);
void emit_And_I(Primitive& inPrimitive);
void emit_And_L(Primitive& inPrimitive);
// or
void emit_OrI_I(Primitive& inPrimitive);
void emit_Or_I(Primitive& inPrimitive);
void emit_Or_L(Primitive& inPrimitive);
// xor
void emit_XorI_I(Primitive& inPrimitive);
void emit_Xor_I(Primitive& inPrimitive);
void emit_Xor_L(Primitive& inPrimitive);
// shift helpers
void genShiftI_I(Primitive& inPrimitive, x86ExtendedType eByImmediate, x86ExtendedType eBy1);
void genShift_I(Primitive& inPrimitive, x86ExtendedType eByCl);
// shl
void emit_ShlI_I(Primitive& inPrimitive);
void emit_Shl_I(Primitive& inPrimitive);
void emit_Shl_L(Primitive& inPrimitive);
// sar
void emit_SarI_I(Primitive& inPrimitive);
void emit_Sar_I(Primitive& inPrimitive);
void emit_Sar_L(Primitive& inPrimitive);
// shr
void emit_ShrI_I(Primitive& inPrimitive);
void emit_Shr_I(Primitive& inPrimitive);
void emit_Shr_L(Primitive& inPrimitive);
// mul
void emit_Mul_I(Primitive& inPrimitive);
// void emit_MulI_I(Primitive& inPrimitive); // not yet implemented
void emit_Mul_L(Primitive& inPrimitive);
// add/sub
void emit_Add_I(Primitive& inPrimitive);
void emit_AddI_I(Primitive& inPrimitive);
void emit_Add_L(Primitive& inPrimitive);
void emit_Arithmetic_L(Primitive& inPrimitive, x86DoubleOpDirCode insnTypeLo,
x86DoubleOpDirCode insnTypeHi);
void emit_Sub_I(Primitive& inPrimitive);
void emit_Sub_L(Primitive& inPrimitive);
void emit_SubR_I(Primitive& inPrimitive);
// compares (3way* produce -1/0/+1 from long/float/double compares)
void emit_Cmp_I(Primitive& inPrimitive);
void emit_CmpI_I(Primitive& inPrimitive);
void emit_3wayCmpL_L(Primitive& inPrimitive);
void emit_3wayCmpCL_L(Primitive& inPrimitive);
void emit_3wayCmpF_L(Primitive& inPrimitive);
void emit_3wayCmpF_G(Primitive& inPrimitive);
void emit_3wayCmpD_L(Primitive& inPrimitive);
void emit_3wayCmpD_G(Primitive& inPrimitive);
void emit_3wayCmpCF_L(Primitive& inPrimitive);
void emit_3wayCmpCF_G(Primitive& inPrimitive);
void emit_3wayCmpCD_L(Primitive& inPrimitive);
void emit_3wayCmpCD_G(Primitive& inPrimitive);
void emit_Limit_MemDisp(Primitive& inPrimitive);
void emit_LimitR_MemDisp(Primitive& inPrimitive);
void emit_CmpI_I_MemDSI(Primitive& inPrimitive);
void emit_Cmp_I_MemDSI(Primitive& inPrimitive);
// div/mod
void emit_Div_L(Primitive& inPrimitive);
void emit_Mod_L(Primitive& inPrimitive);
void emit_Div_I(Primitive& inPrimitive);
void emit_DivU_I(Primitive& inPrimitive);
void emit_Mod_I(Primitive& inPrimitive);
void emit_ModU_I(Primitive& inPrimitive);
void emit_Div_I_MemDSI(Primitive& inPrimitive);
void emit_DivU_I_MemDSI(Primitive& inPrimitive);
void emit_Mod_I_MemDSI(Primitive& inPrimitive);
void emit_ModU_I_MemDSI(Primitive& inPrimitive);
// div/mod helpers
x86Instruction& genDivMod_FrontEnd(Primitive& inPrimitive, x86ExtendedType insnType);
x86Instruction& genDivMod_FrontEnd_MemDSI(Primitive& inPrimitive, x86ExtendedType insnType);
x86Instruction& genDivMod_FrontEnd_CInt(Primitive& inPrimitive, x86ExtendedType insnType);
void genDivBackEnd(x86Instruction& inInsn);
void genModBackEnd(x86Instruction& inInsn);
// extract
void emit_Ext_I(Primitive& inPrimitive);
void emit_Ext_L(Primitive& inPrimitive);
// Floating Point Utilities
void emit_BinaryFloat(Primitive& inPrimitive,
x86FloatMemoryType binary_op, x86FloatMemoryType load_op, x86FloatMemoryType store_op,
VRClass vrClass);
void emit_BinaryFloat32(Primitive& inPrimitive,
x86FloatMemoryType binary_op);
void emit_BinaryFloat64(Primitive& inPrimitive,
x86FloatMemoryType binary_op);
void emit_3wayCmpF(Primitive& inPrimitive, DataNode& first_operand, DataNode& second_operand,
bool negate_result, x86FloatMemoryType load_op, x86FloatMemoryType cmpOp, VRClass vrClass);
InsnDoubleOpDir& copyFromFloatToIntegerRegister(DataNode& inDataNode, InsnUseXDefineYFromPool& defInsn);
InsnDoubleOpDir& copyFromIntegerRegisterToFloat(DataNode& inDataNode, InsnUseXDefineYFromPool& defInsn);
// Floating Point
void emit_FAdd_F(Primitive& inPrimitive);
void emit_FAdd_D(Primitive& inPrimitive);
void emit_FMul_F(Primitive& inPrimitive);
void emit_FMul_D(Primitive& inPrimitive);
void emit_FSub_F(Primitive& inPrimitive);
void emit_FSub_D(Primitive& inPrimitive);
void emit_FDiv_F(Primitive& inPrimitive);
void emit_FDiv_D(Primitive& inPrimitive);
void emit_FRem_F(Primitive& inPrimitive);
void emit_FRem_D(Primitive& inPrimitive);
// convert
void emit_ConvI_L(Primitive& inPrimitive);
void emit_ConvL_I(Primitive& inPrimitive);
void emit_FConv(Primitive& inPrimitive);
// load
void emit_Ld_I_MemDSI(Primitive& inPrimitive);
void emit_LdS_B(Primitive& inPrimitive);
void emit_LdU_B(Primitive& inPrimitive);
void emit_LdS_H(Primitive& inPrimitive);
void emit_LdU_H(Primitive& inPrimitive);
void emit_Ld_F(Primitive& inPrimitive);
void emit_Ld_D(Primitive& inPrimitive);
// store
void emit_St_B(Primitive& inPrimitive);
void emit_St_H(Primitive& inPrimitive);
void emit_StI_I(Primitive& inPrimitive);
void emit_St_I(Primitive& inPrimitive);
void emit_St_I_MemDSI(Primitive& inPrimitive);
void emit_StI_I_MemDisp(Primitive& inPrimitive);
void emit_St_I_MemDisp(Primitive& inPrimitive);
void emit_St_F(Primitive& inPrimitive);
void emit_St_D(Primitive& inPrimitive);
// catch
void emit_Catch(Primitive& inPrimitive);
// switch
void emit_Switch(Primitive& inPrimitive);
// monitors
void emit_MonitorEnter(Primitive& inPrimitive);
void emit_MonitorExit(Primitive& inPrimitive);
};
#endif // X86_WIN32_EMITTER

Просмотреть файл

@ -20,7 +20,7 @@
//
#include "x86Float.h"
#include "x86Win32Emitter.h"
#include "x86Emitter.h"
#include "FloatUtils.h"
// Note: In comments below, TOS = Top of FPU stack
@ -128,7 +128,7 @@ formatToMemory(void* inStart, Uint32 /*inOffset*/, MdFormatter& /*inFormatter*/)
//====================================================================================================
// Instruction generation utilities
InsnDoubleOpDir& x86Win32Emitter::
InsnDoubleOpDir& x86Emitter::
copyFromFloatToIntegerRegister(DataNode& inDataNode, InsnUseXDefineYFromPool& defInsn)
{
VirtualRegister& vr = defineTemporary(defInsn, 0, vrcStackSlot);
@ -137,7 +137,7 @@ copyFromFloatToIntegerRegister(DataNode& inDataNode, InsnUseXDefineYFromPool& de
return copyInsn;
}
InsnDoubleOpDir& x86Win32Emitter::
InsnDoubleOpDir& x86Emitter::
copyFromIntegerRegisterToFloat(DataNode& inDataNode, InsnUseXDefineYFromPool& defInsn)
{
VirtualRegister& vr = defineTemporary(defInsn, 0);
@ -149,7 +149,7 @@ copyFromIntegerRegisterToFloat(DataNode& inDataNode, InsnUseXDefineYFromPool& de
//====================================================================================================
// Floating-point binary operations, i.e. add, subtract, multiply, divide, modulus
void x86Win32Emitter::
void x86Emitter::
emit_BinaryFloat(Primitive& inPrimitive,
x86FloatMemoryType binary_op, x86FloatMemoryType load_op, x86FloatMemoryType store_op,
VRClass vrClass)
@ -172,61 +172,61 @@ emit_BinaryFloat(Primitive& inPrimitive,
}
// Emit 32-bit float binary operation
void x86Win32Emitter::
void x86Emitter::
emit_BinaryFloat32(Primitive& inPrimitive, x86FloatMemoryType binary_op)
{
emit_BinaryFloat(inPrimitive, binary_op, fld32, fstp32, vrcFloat);
}
// Emit 64-bit float binary operation
void x86Win32Emitter::
void x86Emitter::
emit_BinaryFloat64(Primitive& inPrimitive, x86FloatMemoryType binary_op)
{
emit_BinaryFloat(inPrimitive, binary_op, fld64, fstp64, vrcDouble);
}
void x86Win32Emitter::
void x86Emitter::
emit_FAdd_F(Primitive& inPrimitive) {
emit_BinaryFloat32(inPrimitive, fadd32);
}
void x86Win32Emitter::
void x86Emitter::
emit_FAdd_D(Primitive& inPrimitive) {
emit_BinaryFloat64(inPrimitive, fadd64);
}
void x86Win32Emitter::
void x86Emitter::
emit_FMul_F(Primitive& inPrimitive) {
emit_BinaryFloat32(inPrimitive, fmul32);
}
void x86Win32Emitter::
void x86Emitter::
emit_FMul_D(Primitive& inPrimitive) {
emit_BinaryFloat64(inPrimitive, fmul64);
}
void x86Win32Emitter::
void x86Emitter::
emit_FSub_F(Primitive& inPrimitive) {
emit_BinaryFloat32(inPrimitive, fsub32);
}
void x86Win32Emitter::
void x86Emitter::
emit_FSub_D(Primitive& inPrimitive) {
emit_BinaryFloat64(inPrimitive, fsub64);
}
void x86Win32Emitter::
void x86Emitter::
emit_FDiv_F(Primitive& inPrimitive) {
emit_BinaryFloat32(inPrimitive, fdiv32);
}
void x86Win32Emitter::
void x86Emitter::
emit_FDiv_D(Primitive& inPrimitive) {
emit_BinaryFloat64(inPrimitive, fdiv64);
}
// FIXME - Modulus is wrapper around fmod function. Should be changed to inline code.
void x86Win32Emitter::
void x86Emitter::
emit_FRem_D(Primitive& inPrimitive)
{
new(mPool) CallS_C(&inPrimitive, mPool, 2, true, *this, (void (*)(void))&javaFMod);
@ -238,7 +238,7 @@ static Flt32 fmod32(Flt32 a, Flt32 b)
return (Flt32)javaFMod(a, b);
}
void x86Win32Emitter::
void x86Emitter::
emit_FRem_F(Primitive& inPrimitive)
{
new(mPool) CallS_C(&inPrimitive, mPool, 2, true, *this, (void (*)(void))&fmod32);
@ -253,7 +253,7 @@ emit_FRem_F(Primitive& inPrimitive)
// 2) Simultaneously convert and store from top of FPU stack into memory location, with possible
// conversion to integer type.
void x86Win32Emitter::
void x86Emitter::
emit_FConv(Primitive& inPrimitive)
{
InsnFloatMemory *loadInsn;
@ -402,7 +402,7 @@ emit_FConv(Primitive& inPrimitive)
// Floating-point function-call glue
// Obtain the 32-bit float return value of a function call
void x86Win32Emitter::
void x86Emitter::
emit_CallReturnF(InsnUseXDefineYFromPool& callInsn, DataNode& callPrimitive, DataNode& returnValProducer)
{
InstructionDefine& define = defineTemporaryOrder(callInsn, 1);
@ -414,7 +414,7 @@ emit_CallReturnF(InsnUseXDefineYFromPool& callInsn, DataNode& callPrimitive, Dat
}
// Obtain the 64-bit double return value of a function call
void x86Win32Emitter::
void x86Emitter::
emit_CallReturnD(InsnUseXDefineYFromPool& callInsn, DataNode& callPrimitive, DataNode& returnValProducer)
{
InstructionDefine& define = defineTemporaryOrder(callInsn, 1);
@ -426,7 +426,7 @@ emit_CallReturnD(InsnUseXDefineYFromPool& callInsn, DataNode& callPrimitive, Dat
}
// Retrieve a 32-bit float argument from the call stack
void x86Win32Emitter::
void x86Emitter::
emit_ArgF(PrimArg& arg, InstructionDefine& order, int curStackOffset)
{
InsnDoubleOpDir& loadParam = *new(mPool) InsnDoubleOpDir(&arg, mPool, raLoadI, curStackOffset, atStackOffset, atRegDirect, 1, 1);
@ -436,7 +436,7 @@ emit_ArgF(PrimArg& arg, InstructionDefine& order, int curStackOffset)
}
// Retrieve a 64-bit double argument from the call stack
void x86Win32Emitter::
void x86Emitter::
emit_ArgD(PrimArg& arg, InstructionDefine& order, int curStackOffset)
{
InsnFloatMemory& loadInsn = *new InsnFloatMemory(&arg, mPool, fld64, atStackOffset, curStackOffset, 1, 1);
@ -449,7 +449,7 @@ emit_ArgD(PrimArg& arg, InstructionDefine& order, int curStackOffset)
}
// Push float function return value on top of FPU stack
void x86Win32Emitter::
void x86Emitter::
emit_Result_F(Primitive& inPrimitive)
{
InsnFloatMemory &copyInsn = *new InsnFloatMemory(&inPrimitive, mPool, fld32, 1, 1);
@ -463,7 +463,7 @@ emit_Result_F(Primitive& inPrimitive)
}
// Push double function return value on top of FPU stack
void x86Win32Emitter::
void x86Emitter::
emit_Result_D(Primitive& inPrimitive)
{
InsnFloatMemory &copyInsn = *new InsnFloatMemory(&inPrimitive, mPool, fld64, 1, 1);
@ -494,7 +494,7 @@ emit_Result_D(Primitive& inPrimitive)
(Some changes in operand usage will appear depending on the exact pattern of primitives being matched.)
*/
void x86Win32Emitter::
void x86Emitter::
emit_3wayCmpF(Primitive& inPrimitive, DataNode &first_operand, DataNode &second_operand,
bool negate_result, x86FloatMemoryType load_op, x86FloatMemoryType cmpOp, VRClass vrClass)
{
@ -562,7 +562,7 @@ emit_3wayCmpF(Primitive& inPrimitive, DataNode &first_operand, DataNode &second_
defineProducer(inPrimitive, extInsn, 0); // exts(tmpVR1) -> result
}
void x86Win32Emitter::
void x86Emitter::
emit_3wayCmpF_G(Primitive& inPrimitive)
{
Primitive& cmpPrimitive = Primitive::cast(inPrimitive.nthInputVariable(0));
@ -570,7 +570,7 @@ emit_3wayCmpF_G(Primitive& inPrimitive)
false, fld32, fcomp32, vrcFloat);
}
void x86Win32Emitter::
void x86Emitter::
emit_3wayCmpF_L(Primitive& inPrimitive)
{
Primitive& cmpPrimitive = Primitive::cast(inPrimitive.nthInputVariable(0));
@ -578,7 +578,7 @@ emit_3wayCmpF_L(Primitive& inPrimitive)
true, fld32, fcomp32, vrcFloat);
}
void x86Win32Emitter::
void x86Emitter::
emit_3wayCmpD_G(Primitive& inPrimitive)
{
Primitive& cmpPrimitive = Primitive::cast(inPrimitive.nthInputVariable(0));
@ -586,7 +586,7 @@ emit_3wayCmpD_G(Primitive& inPrimitive)
false, fld64, fcomp64, vrcDouble);
}
void x86Win32Emitter::
void x86Emitter::
emit_3wayCmpD_L(Primitive& inPrimitive)
{
Primitive& cmpPrimitive = Primitive::cast(inPrimitive.nthInputVariable(0));
@ -594,7 +594,7 @@ emit_3wayCmpD_L(Primitive& inPrimitive)
true, fld64, fcomp64, vrcDouble);
}
void x86Win32Emitter::
void x86Emitter::
emit_3wayCmpCF_G(Primitive& inPrimitive)
{
Primitive& cmpPrimitive = Primitive::cast(inPrimitive.nthInputVariable(0));
@ -602,7 +602,7 @@ emit_3wayCmpCF_G(Primitive& inPrimitive)
false, fld32, fcomp32, vrcFloat);
}
void x86Win32Emitter::
void x86Emitter::
emit_3wayCmpCF_L(Primitive& inPrimitive)
{
Primitive& cmpPrimitive = Primitive::cast(inPrimitive.nthInputVariable(0));
@ -610,7 +610,7 @@ emit_3wayCmpCF_L(Primitive& inPrimitive)
true, fld32, fcomp32, vrcFloat);
}
void x86Win32Emitter::
void x86Emitter::
emit_3wayCmpCD_G(Primitive& inPrimitive)
{
Primitive& cmpPrimitive = Primitive::cast(inPrimitive.nthInputVariable(0));
@ -618,7 +618,7 @@ emit_3wayCmpCD_G(Primitive& inPrimitive)
false, fld64, fcomp64, vrcDouble);
}
void x86Win32Emitter::
void x86Emitter::
emit_3wayCmpCD_L(Primitive& inPrimitive)
{
Primitive& cmpPrimitive = Primitive::cast(inPrimitive.nthInputVariable(0));
@ -630,7 +630,7 @@ emit_3wayCmpCD_L(Primitive& inPrimitive)
// Constants
// Generate 32-bit float constant
void x86Win32Emitter::
void x86Emitter::
emit_LoadConstant_F(Primitive& inPrimitive)
{
Uint32 constant = (*static_cast<const PrimConst *>(&inPrimitive)).value.i;
@ -647,7 +647,7 @@ emit_LoadConstant_F(Primitive& inPrimitive)
// Generate 64-bit double constant
// FIXME: Need to create an in-memory literal pool for storing double constants, rather than using immediate instructions
void x86Win32Emitter::
void x86Emitter::
emit_LoadConstant_D(Primitive& inPrimitive)
{
Flt64 constant = (*static_cast<const PrimConst *>(&inPrimitive)).value.d;
@ -671,7 +671,7 @@ emit_LoadConstant_D(Primitive& inPrimitive)
//====================================================================================================
// Floating-point memory operations
void x86Win32Emitter::
void x86Emitter::
emit_Ld_F(Primitive& inPrimitive)
{
// Load 32-bit float into an integer register
@ -684,7 +684,7 @@ emit_Ld_F(Primitive& inPrimitive)
defineProducer(inPrimitive, storeInsn, 0, vrcFloat); // result
}
void x86Win32Emitter::
void x86Emitter::
emit_Ld_D(Primitive& inPrimitive)
{
// Fetch from memory and temporarily push 64-bit double on the FPU stack
@ -699,7 +699,7 @@ emit_Ld_D(Primitive& inPrimitive)
defineProducer(inPrimitive, storeInsn, 0, vrcDouble); // result
}
void x86Win32Emitter::
void x86Emitter::
emit_St_F(Primitive& inPrimitive)
{
// Load 32-bit float into an integer register
@ -715,7 +715,7 @@ emit_St_F(Primitive& inPrimitive)
defineProducer(inPrimitive, storeInsn, 0); // memory edge out
}
void x86Win32Emitter::
void x86Emitter::
emit_St_D(Primitive& inPrimitive)
{
// Temporarily push 64-bit double on the FPU stack

Просмотреть файл

@ -22,7 +22,7 @@
#ifndef X86_FLOAT_H
#define X86_FLOAT_H
#include "x86Win32Instruction.h"
#include "x86Instruction.h"
//-----------------------------------------------------------------------------------------------------------
// Floating-point instructions in which one operand is a memory location.

Просмотреть файл

@ -28,7 +28,7 @@
#include "Fundamentals.h"
#include "FormatStructures.h" // for FCI, StackFrameInfo
#include "x86Win32Emitter.h" // temp?
#include "x86Emitter.h" // temp?
const int kStackSlotSize = 8;

Просмотреть файл

@ -0,0 +1,695 @@
/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*-
*
* The contents of this file are subject to the Netscape Public License
* Version 1.0 (the "NPL"); you may not use this file except in
* compliance with the NPL. You may obtain a copy of the NPL at
* http://www.mozilla.org/NPL/
*
* Software distributed under the NPL is distributed on an "AS IS" basis,
* WITHOUT WARRANTY OF ANY KIND, either express or implied. See the NPL
* for the specific language governing rights and limitations under the
* NPL.
*
* The Initial Developer of this code under the NPL is Netscape
* Communications Corporation. Portions created by Netscape are
* Copyright (C) 1998 Netscape Communications Corporation. All Rights
* Reserved.
*/
//
// x86Instruction.cpp
//
// Simon Holmes a Court
// Peter DeSantis
#include "prtypes.h"
#include "x86Instruction.h"
#include "InstructionEmitter.h"
#include "x86Formatter.h"
class x86Emitter;
extern char* x86GPRText[];
UT_DEFINE_LOG_MODULE(x86Spill);
//================================================================================
// Debugging Structures
#ifdef DEBUG_LOG
char* conditionalSuffixes[] =
{
"o", // ccJO
"no", // ccJNO
"b", // ccJB
"nb", // ccJNB
"e", // ccJE
"ne", // ccJNE
"be", // ccJBE
"nbe", // ccJNBE
"s", // ccJS
"ns", // ccJNS
"p", // ccJP
"np", // ccJNP
"l", // ccJL
"nl", // ccJNL
"le", // ccJLE
"nle", // ccJNLE
};
#endif // DEBUG_LOG
//================================================================================
// x86ArgListInstruction Methods
// For now this is a member of x86ArgListInstruction, but it really should be available for all
// instructions on the x86 platform
// Method: x86StandardUseDefine
// Caller: Emitter
// Purpose: Due to x86 behaviour of modifying source register, we must make a copy of the source before
// the operation. Unnecessary copies can be removed by the register allocator.
// Method:  x86StandardUseDefine
// Caller:  Emitter
// Purpose: x86 two-address instructions overwrite their first source operand,
//          so we emit a copy of that operand before the operation and then
//          redefine the copy as this instruction's result.  Unnecessary
//          copies are removed later by the register allocator.
// Note:    exactly one define is supported (asserted below).
void x86ArgListInstruction::
x86StandardUseDefine(x86Emitter& inEmitter)
{
	// Copy the virtual register which will be overwritten (input 0),
	// then record the use of input 0.
	VirtualRegister& vrToBeOverwritten = inEmitter.emit_CopyOfInput(*this, *mSrcPrimitive, 0);
	inEmitter.useProducer(*mSrcPrimitive, *this, 0);

	// Record the remaining uses (inputs 1..n-1) as ordinary uses.
	InstructionUse* curUse;
	Uint8 curIndex;
	for (curUse = getInstructionUseBegin() + 1, curIndex = 1; curUse < getInstructionUseEnd(); curUse++, curIndex++)
		addStandardUse(inEmitter, curIndex);

	// Now redefine the copied register as the (single) output.
	assert(getInstructionDefineBegin() + 1 >= getInstructionDefineEnd());
	inEmitter.redefineTemporary(*this, vrToBeOverwritten, 0);
}
// Method: switchUseToSpill
// Caller: Register Allocator
// Purpose: Folds the spill into the instruction if possible.
// Returns: Returns true if possible, false otherwise
// Method:  switchUseToSpill
// Caller:  Register Allocator
// Purpose: Fold a spilled use directly into the instruction (memory operand
//          holding the colored stack slot) instead of a separate reload.
// Returns: true if the spill was folded, false otherwise.
bool x86ArgListInstruction::
switchUseToSpill(Uint8 inWhichUse, VirtualRegister& inVR)
{
#ifdef DEBUG_LOG
	UT_LOG(x86Spill, PR_LOG_DEBUG, (" spill use %d of (%p)'", inWhichUse, this));
	printOpcode(UT_LOG_MODULE(x86Spill));
#endif
	DEBUG_ONLY(checkIntegrity();)

	// Some opcodes cannot take a memory operand at all.
	if (!opcodeAcceptsSpill())
		goto SpillFail;

	// Ask the argument list if it can switch the current argument
	if (iArgumentList->alSwitchArgumentTypeToSpill(inWhichUse, *this)) // has side effect!
	{
		// Tell the Opcode that the argumentlist has been switched to a spill
		switchOpcodeToSpill();

		// Replace the old virtual register with a new one which contains the colored stack slot.
		InstructionUse& use = getInstructionUseBegin()[inWhichUse];
		InstructionDefine& define = getInstructionDefineBegin()[0];
		assert(use.isVirtualRegister());
		// Two-address case: if use 0 is the same register as define 0, the
		// define aliases the use and must be reinitialized as well.
		if(inWhichUse == 0 && define.isVirtualRegister() && use.getVirtualRegister().getRegisterIndex() == define.getVirtualRegister().getRegisterIndex())
			// this virtual register is also redefined by this instruction so we must reinitialize the define also
			define.getVirtualRegisterPtr().initialize(inVR);
		use.getVirtualRegisterPtr().initialize(inVR);
		DEBUG_ONLY(checkIntegrity();)
		goto SpillSuccess;
	}

	// by default we fail
SpillFail:
	UT_LOG(x86Spill, PR_LOG_DEBUG, ("': false\n"));
	DEBUG_ONLY(checkIntegrity();)
	return false;

SpillSuccess:
	UT_LOG(x86Spill, PR_LOG_DEBUG, ("': true\n"));
	DEBUG_ONLY(checkIntegrity();)
	return true;
}
// Method: switchDefineToSpill
// Caller: Register Allocator
// Purpose: Folds the spill into the instruction if possible.
// Returns: Returns true if possible, false otherwise
// Method:  switchDefineToSpill
// Caller:  Register Allocator
// Purpose: Fold a spilled define into the instruction if possible.  On x86
//          the define often aliases the first use (two-address form), in
//          which case both must be switched and reinitialized together.
// Returns: true if the spill was folded, false otherwise.
bool x86ArgListInstruction::
switchDefineToSpill(Uint8 inWhichDefine, VirtualRegister& inVR)
{
#ifdef DEBUG_LOG
	UT_LOG(x86Spill, PR_LOG_DEBUG, (" spill def %d of (%p)'", inWhichDefine, this));
	printOpcode(UT_LOG_MODULE(x86Spill));
#endif
	DEBUG_ONLY(checkIntegrity();)
	assert(inWhichDefine == 0); // can only switch the first define

	InstructionUse& use = getInstructionUseBegin()[0];
	InstructionUse* useEnd = getInstructionUseEnd();
	InstructionDefine& define = getInstructionDefineBegin()[inWhichDefine];
	assert(define.isVirtualRegister()); // cannot call this routine on anything other than a virtual register

	// some instructions cannot spill
	if (!opcodeAcceptsSpill())
		goto SpillFail;

	// If this register is being redefined then it should correspond to the first use
	// Make sure that there is a first use
	if(&use < useEnd)
	{
		// If it is indeed the same virtual register
		if(use.isVirtualRegister() && use.getVirtualRegister().getRegisterIndex() == define.getVirtualRegister().getRegisterIndex())
		{
			// define == first use, try to switch the first argument to spill type
			// (opcodeAcceptsSpill() was already verified above; the re-check is harmless)
			if(opcodeAcceptsSpill() && iArgumentList->alSwitchArgumentTypeToSpill(0, *this))
			{
				switchOpcodeToSpill(); // Tell the Opcode that the argumentlist has been switched to a spill
				// Replace the old virtual register with a new one which contains the colored stack slot
				// The define is also the same as the use so we need to reinitialize both the use and the define VR.
				use.getVirtualRegisterPtr().initialize(inVR);
				define.getVirtualRegisterPtr().initialize(inVR);
				goto SpillSuccess;
			}
		}
		else
		{
			// There are no other VRs in the uses, define is the second argument
			if(opcodeAcceptsSpill() && iArgumentList->alSwitchArgumentTypeToSpill(1, *this))
			{
				switchOpcodeToSpill(); // Tell the Opcode that the argumentlist has been switched to a spill
				// Replace the old virtual register with a new one which contains the colored stack slot
				define.getVirtualRegisterPtr().initialize(inVR);
				goto SpillSuccess;
			}
		}
	}
	else
	{
		// There are no VRs in the uses so we need to try to switch the first argument
		if(opcodeAcceptsSpill() && iArgumentList->alSwitchArgumentTypeToSpill(0, *this))
		{
			switchOpcodeToSpill(); // Tell the Opcode that the argumentlist has been switched to a spill
			// Replace the old virtual register with a new one which contains the colored stack slot
			define.getVirtualRegisterPtr().initialize(inVR);
			goto SpillSuccess;
		}
	}

	// by default we fail
SpillFail:
	UT_LOG(x86Spill, PR_LOG_DEBUG, ("': false\n"));
	DEBUG_ONLY(checkIntegrity();)
	return false;

SpillSuccess:
	UT_LOG(x86Spill, PR_LOG_DEBUG, ("': true\n"));
	DEBUG_ONLY(checkIntegrity();)
	return true;
}
//================================================================================
// x86Instruction
// Emit this instruction's bytes at inStart: opcode bytes first, then the
// formatted argument list; finally, if the opcode carries a register-field
// extension, OR it into the reg field of the modR/M byte (the first byte
// of the argument list).
void x86Instruction::
formatToMemory(void* inStart, Uint32 inOffset, MdFormatter& inFormatter)
{
	assert(iOpcode != NULL && iArgumentList != NULL);

	// Format the opcode to memory
	iOpcode->opFormatToMemory(inStart, *iArgumentList, *this);

	// Find the location of the argument list and format it to memory
	Uint8* argLocation = (Uint8*)inStart + iOpcode->opSize();
	iArgumentList->alFormatToMemory((void*)argLocation , inOffset, *this, inFormatter);

	// If the opcode has an opcode extension then OR it into the proper place
	// (the reg field of the modR/M byte).
	if(iOpcode->opHasRegFieldExtension( *iArgumentList, *this ))
	{
		Uint8 temp = iOpcode->opGetRegFieldExtension();
		*argLocation = *argLocation | temp;
	}
}
//================================================================================
// InsnSwitch
// Construct a switch (indexed-jump) instruction: one use (the index
// register), no defines.  Caches the owning control node and its
// successor count, which is the number of jump-table cases.
InsnSwitch::
InsnSwitch(DataNode* inPrimitive, Pool& inPool) :
	InsnUseXDefineYFromPool(inPrimitive, inPool, 1, 0 )
{
	ControlNode* container = inPrimitive->getContainer();
	assert(container);
	mControlNode = container;
	mNumCases = container->nSuccessors();
}
// Emit the indexed jump "jmp disp32[reg*4]" (ff 24 SIB disp32, 7 bytes)
// followed immediately by the jump table: one 4-byte absolute target
// address per successor of the control node.
void InsnSwitch::
formatToMemory(void* inStartAddress, Uint32 /*inOffset*/, MdFormatter& inFormatter)
{
	Uint8* start = (Uint8*)inStartAddress;

	// calculate position of jump table (immediately after the 7-byte jmp)
	mTableAddress = start + 7;

	// get the register
	Uint8 reg = useToRegisterNumber(*getInstructionUseBegin());
	assert (reg != ESP); // ESP is an invalid index

	// calculate the SIB: scale=4, index=reg, base=101 (disp32)
	Uint8 SIB = 0x80 | ( reg << 3) | 0x05;

	// write out instruction
	*start++ = 0xff; // opcode for jump
	*start++ = 0x24; // mod/ext/rm for jmp disp32[reg * 4]
	*start++ = SIB;

	// write address of jump table (the disp32 field)
	writeLittleWordUnaligned(start, (int)mTableAddress);

	// write out table.  start still points at the disp32; each iteration
	// advances 4 bytes first, so the first entry lands at offset 7, which
	// is exactly mTableAddress.
	Uint8* methodBegin = inFormatter.getMethodBegin();
	ControlEdge* edgesEnd = mControlNode->getSuccessorsEnd();
	for(ControlEdge* edge = mControlNode->getSwitchSuccessors(); edge != edgesEnd; edge++)
	{
		Uint8* destAddress = methodBegin + edge->getTarget().getNativeOffset();
		start += 4;
		writeLittleWordUnaligned(start, (Uint32)destAddress);
	}
}
// Formatted size of the switch: a 7-byte indexed jmp (ff 24 SIB disp32)
// plus one 4-byte table entry per case.
size_t InsnSwitch::
getFormattedSize(MdFormatter& /*inFormatter*/)
{
	const size_t kJmpBytes = 7;
	const size_t kTableEntryBytes = 4;
	return kJmpBytes + kTableEntryBytes * mNumCases;
}
#ifdef DEBUG_LOG
// Fake-disassembly for the switch: print the indexed jmp, then one line
// per jump-table entry with an HTML link to the target node.
void InsnSwitch::
printPretty(LogModuleObject &f)
{
	Uint8 reg = useToRegisterNumber(*getInstructionUseBegin());
	UT_OBJECTLOG(f, PR_LOG_ALWAYS, ("jmp 0x%x[4 * %s]", Uint32(mTableAddress), x86GPRText[reg]));

	// print table (offsets are relative to method start)
	ControlEdge* edgesEnd = mControlNode->getSuccessorsEnd();
	for(ControlEdge* edge = mControlNode->getSwitchSuccessors(); edge != edgesEnd; edge++)
	{
		Uint32 destAddress = edge->getTarget().getNativeOffset();
		UT_OBJECTLOG(f, PR_LOG_ALWAYS, ("\n Method Start + 0x%x [<A HREF=\"#N%d\">N%d</A>]",
			destAddress, edge->getTarget().dfsNum, edge->getTarget().dfsNum));
	}
}
#endif
//================================================================================
// InsnCondBranch
// Emit a long-form conditional branch: 0f (80|cc) rel32, 6 bytes total,
// targeting the control node's true successor.
void InsnCondBranch::
formatToMemory(void* inStart, Uint32 inOffset, MdFormatter& /*inFormatter*/)
{
	uint8* start = (uint8*) inStart;
	*start++ = 0x0f;
	*start++ = 0x80 | condType;

	// find destination
	ControlNode& cnoTarget = cnoSource.getTrueSuccessor().getTarget();

	// To compute the relative branch we subtract our current address from our target address. Then we subtract the size
	// of our instruction because our current IP is actually there. This is the sum of the size of the opcode and the size of the
	// argumentlist. NOTE: If we use 1 byte jumps in the future then this needs to be fixed from a constant 4.
	const int opcodeSize = 2;
	Int32 jumpOffset = cnoTarget.getNativeOffset() - inOffset - opcodeSize - 4;
	writeLittleWordUnaligned(start, jumpOffset);
}
#ifdef DEBUG_LOG
// Fake-disassembly for a conditional branch: mnemonic suffix, target
// offset from method start, and an HTML link to the target node.
void InsnCondBranch::
printPretty(LogModuleObject &f)
{
	ControlNode& target = cnoSource.getTrueSuccessor().getTarget();
	UT_OBJECTLOG(f, PR_LOG_ALWAYS, ("j%-7s start+%0d [<A HREF=\"#N%d\">N%d</A>]",
		conditionalSuffixes[condType], target.getNativeOffset(), target.dfsNum, target.dfsNum));
}
#endif
//================================================================================
// Set
// Can only be EAX, ECX, EDX or EBX
// Emit SETcc reg8: 0f (90|cc) modRM.  The define must color to EAX, ECX,
// EDX or EBX — the only registers whose low byte is addressable as r8.
void InsnSet::
formatToMemory(void* inStart, Uint32 /*inOffset*/, MdFormatter& /*inFormatter*/ )
{
	uint8* start = (uint8*) inStart;

	// format the opcode to memory
	*start++ = 0x0f;
	*start++ = 0x90 | condType;

	// find the register
	InstructionDefine* defineBegin = getInstructionDefineBegin();
#ifdef DEBUG
	InstructionUse* useBegin = getInstructionUseBegin();
	InstructionUse* useEnd = getInstructionUseEnd();
	assert(useBegin < useEnd); // condition code always used
	InstructionDefine* defineEnd = getInstructionDefineEnd();
	assert((defineBegin < defineEnd) && defineBegin->isVirtualRegister());
#endif
	Uint8 reg = defineToRegisterNumber(*defineBegin);
	assert(/* (reg >= 0) && */ (reg <= 3)); // these are the only legal registers

	// format the register: mod=11 (register direct), rm=reg
	Uint8 modRM = 0xc0 | reg;
	*start = modRM;
}
#ifdef DEBUG_LOG
// Fake-disassembly for SETcc: condition suffix plus destination register.
void InsnSet::
printPretty(LogModuleObject &f)
{
	Uint8 destReg = defineToRegisterNumber(*getInstructionDefineBegin());
	UT_OBJECTLOG(f, PR_LOG_ALWAYS, ("set%-5s %s", conditionalSuffixes[condType], x86GPRText[destReg]));
}
#endif
//================================================================================
// InsnSysCallCondBranch
#ifdef DEBUG_LOG
// Fake-disassembly: a conditional jump over the following call, then the
// call itself with the raw function address.
void InsnSysCallCondBranch::
printPretty(LogModuleObject &f)
{
	const char* suffix = conditionalSuffixes[condType];
	UT_OBJECTLOG(f, PR_LOG_ALWAYS, ("j%-7s (over next)\n call %p", suffix, functionAddress));
}
#endif
//================================================================================
// InsnNoArgs
// Table entry describing a single-byte, no-operand instruction.
struct x86NoArgsInfo
{
	uint8 opcode; // generally the opcode, but can have additional info
	// eventually will be in DEBUG_LOG build only
	char* text; // string for fake disassembly
};

// Indexed by the InsnNoArgs 'code' member; order must match that enum.
x86NoArgsInfo noArgsInfo[] =
{
	{ 0x99, "cdq" }, //opCdq 99
	{ 0xcc, "int 3" }, //opBreak cc
	{ 0x9e, "sahf" }, //opSahf 9e
};
void InsnNoArgs::
formatToMemory(void* inStart, Uint32 /*inOffset*/, MdFormatter& /*inFormatter*/)
{
*((Uint8*)inStart) = (Uint8) noArgsInfo[code].opcode;
}
#ifdef DEBUG_LOG
// Fake-disassembly for a no-operand instruction: mnemonic from the table.
void InsnNoArgs::
printPretty(LogModuleObject &f)
{
	const char* mnemonic = noArgsInfo[code].text;
	UT_OBJECTLOG(f, PR_LOG_ALWAYS, ("%-8s", mnemonic));
}
#endif
//================================================================================
// InsnDoubleOp Methods
// Table entry for two-operand instructions without a direction bit
// (imul and the sign/zero-extending moves).
struct x86DoubleOpInfo
{
	uint8 opcode; // generally the opcode, but can have additional info
	bool hasPrefix; // does the opcode needs to be prefixed with 0x0f
	// eventually will be in DEBUG_LOG build only
	char* text; // string for fake disassembly
};

// Indexed by x86DoubleOpCode (the InsnDoubleOp 'codeType' member);
// order must match that enum.  All current entries use the 0f prefix.
x86DoubleOpInfo doubleOpInfo[] =
{
	{ 0xaf, true, "imul " }, //opIMul 0f af
	{ 0xbe, true, "movsxB "}, //opMovSxB 0f be
	{ 0xbf, true, "movsxH "}, //opMovSxH 0f bf
	{ 0xb6, true, "movzxB "}, //opMovZxB 0f b6
	{ 0xb7, true, "movzxH "} //opMovZxH 0f b7
};
// Construct a two-operand instruction; argument addressing kinds are
// given by inArgType1/inArgType2.
// NOTE(review): the argument list is allocated with global new, not the
// pool — confirm ownership/lifetime against x86DoubleArgumentList.
InsnDoubleOp::
InsnDoubleOp( DataNode* inPrimitive, Pool& inPool, x86DoubleOpCode inCodeType,
	x86ArgumentType inArgType1, x86ArgumentType inArgType2,
	Uint8 uses, Uint8 defines ) :
	x86ArgListInstruction (inPrimitive, inPool, uses, defines ),
	codeType(inCodeType)
{
	iArgumentList = new x86DoubleArgumentList(inArgType1, inArgType2);
	DEBUG_ONLY(debugType = kInsnDoubleOp);
}

// As above, but with an explicit displacement for a memory operand.
InsnDoubleOp::
InsnDoubleOp( DataNode* inPrimitive, Pool& inPool, x86DoubleOpCode inCodeType,
	Uint32 inDisplacement,
	x86ArgumentType inArgType1, x86ArgumentType inArgType2,
	Uint8 uses, Uint8 defines) :
	x86ArgListInstruction (inPrimitive, inPool, uses, defines ),
	codeType(inCodeType)
{
	iArgumentList = new x86DoubleArgumentList(inArgType1, inArgType2, inDisplacement);
	DEBUG_ONLY(debugType = kInsnDoubleOp);
}
// Emit the instruction: optional 0f prefix, opcode byte from the table,
// then the formatted argument list.
void InsnDoubleOp::
formatToMemory(void* inStart, Uint32 inOffset, MdFormatter& inFormatter)
{
	uint8* start = (uint8*) inStart;
	assert(iArgumentList);

	// Format the opcode to memory
	if(doubleOpInfo[codeType].hasPrefix)
		*start++ = kPrefix_For_2_Byte;
	*start = doubleOpInfo[codeType].opcode;

	// Find the location of the argument list and format it to memory
	Uint8* argLocation = (Uint8*)inStart + opcodeSize();
	iArgumentList->alFormatToMemory((void*)argLocation , inOffset, *this, inFormatter);
}
// Opcode length in bytes: two when the 0f prefix is required, else one.
Uint8 InsnDoubleOp::
opcodeSize()
{
	if (doubleOpInfo[codeType].hasPrefix)
		return 2;
	return 1;
}
#ifdef DEBUG_LOG
// Print this instruction's mnemonic (left-padded) from the opcode table.
void InsnDoubleOp::
printOpcode(LogModuleObject &f)
{
	const char* mnemonic = doubleOpInfo[codeType].text;
	UT_OBJECTLOG(f, PR_LOG_ALWAYS, ("%-8s ", mnemonic));
}
#endif
//================================================================================
// InsnDoubleOpDir Methods
// used for debugging only (for now)
// Broad classification of a directional two-operand instruction;
// used for debugging only (for now).
enum raType
{
	kArith,
	kLoad,
	kStore,
	kCopy
};

// Table entry for directional two-operand instructions (the direction
// bit is OR'd into the opcode at format time).
struct x86RAOpcodeInfo
{
	uint8 opcode; // generally the opcode, but can have additional info
	bool needs16BitPrefix; // does the opcode need the 'force to 16 bit' opcode?
	raType type; // is it a move
	// eventually will be in DEBUG_LOG build only
	char* text; // string for fake disassembly
};

// Indexed by x86DoubleOpDirCode (the InsnDoubleOpDir 'codeType' member);
// order must match that enum.  Note raStoreH is the only entry that
// needs the 16-bit operand-size prefix.
x86RAOpcodeInfo raInfo[] =
{
	{ 0x01, false, kArith, "add " }, //raAdd
	{ 0x11, false, kArith, "adc " }, //raAdc
	{ 0x29, false, kArith, "sub " }, //raSub
	{ 0x19, false, kArith, "sbb " }, //raSbb
	{ 0x21, false, kArith, "and " }, //raAnd
	{ 0x09, false, kArith, "or " }, //raOr
	{ 0x31, false, kArith, "xor " }, //raXor
	{ 0x39, false, kArith, "cmp " }, //raCmp
	{ 0x89, false, kLoad, "mov(ld) " }, //raLoadI
	{ 0x89, false, kCopy, "mov(cp) " }, //raCopyI
	{ 0x89, false, kStore, "mov(st) " }, //raStoreI
	{ 0x89, false, kStore, "mov(sv) " }, //raSaveReg
	{ 0x88, false, kStore, "movB "}, //raStoreB
	{ 0x89, true, kStore, "movH "}, //raStoreH
};
// Construct a directional two-operand instruction; argument addressing
// kinds are given by inArgType1/inArgType2.  getDirectionBit() is invoked
// only for its asserts, which validate the argument-type combination.
// NOTE(review): the argument list is allocated with global new, not the
// pool — confirm ownership/lifetime against x86DoubleArgumentList.
InsnDoubleOpDir::
InsnDoubleOpDir(DataNode* inPrimitive, Pool& inPool, x86DoubleOpDirCode inCodeType,
	x86ArgumentType inArgType1, x86ArgumentType inArgType2,
	Uint8 uses, Uint8 defines ) :
	x86ArgListInstruction (inPrimitive, inPool, uses, defines ),
	codeType(inCodeType)
{
	iArgumentList = new x86DoubleArgumentList(inArgType1, inArgType2);
	DEBUG_ONLY(debugType = kInsnDoubleOpDir);
	// temp for asserts
	getDirectionBit();
}

// As above, but with an explicit displacement for a memory operand.
InsnDoubleOpDir::
InsnDoubleOpDir(DataNode* inPrimitive, Pool& inPool, x86DoubleOpDirCode inCodeType,
	Uint32 inDisplacement, x86ArgumentType inArgType1, x86ArgumentType inArgType2,
	Uint8 uses, Uint8 defines) :
	x86ArgListInstruction (inPrimitive, inPool, uses, defines ),
	codeType(inCodeType)
{
	iArgumentList = new x86DoubleArgumentList(inArgType1, inArgType2, inDisplacement);
	DEBUG_ONLY(debugType = kInsnDoubleOpDir);
	// temp for asserts
	getDirectionBit();
}
// copy flag should only be set if insn is a copy and the use and define are
// registerDirect -- no other flags can be set
InstructionFlags InsnDoubleOpDir::
getFlags() const
{
	if (codeType != raCopyI)
		return ifNone;
	return iArgumentList->alIsRegisterDirect() ? ifCopy : ifNone;
}
// Compute the opcode direction bit for this instruction.  When set
// (OR'd in as bit 1 by formatToMemory), the register-direct operand is
// the destination; when clear it is the source.  Also asserts that the
// argument-kind combination is legal for this codeType.
bool InsnDoubleOpDir::
getDirectionBit()
{
	x86DoubleArgumentList* arglist = (x86DoubleArgumentList*)iArgumentList;

	bool arg1isDirect = arglist->akIsArg1Direct();
	bool arg2isDirect = arglist->akIsArg2Direct();
	assert(arg1isDirect || arg2isDirect); // at least one has to be register direct

	bool dirBit;
	switch(codeType)
	{
	case raSaveReg:
		// Save Instructions
		// saves use (first arg) to stack slot (define)
		// mov r1 -> M => mov r1 -> M dir = false
		assert(arg1isDirect);
		assert(!arg2isDirect);
		dirBit = false;
		break;

	case raCopyI:
		// Copy Instructions
		/* OLD
		// copies use (first arg) to define (second arg)
		// mov r1 -> r2 => mov r1 -> r2 dir = false
		// mov r1 -> M => mov r1 -> M dir = false
		// mov M -> r2 => mov r2 <- M dir = true
		// ie direction bit is clear iff argument 1 is registerDirect
		dirBit = !arg1isDirect;
		break;
		*/
		// copies use (first arg) to define (second arg)
		// mov r1 -> r2 => mov r2 <- r1 dir = true
		// mov r1 -> M => mov r1 -> M dir = false
		// mov M -> r2 => mov r2 <- M dir = true
		// ie direction bit is set iff argument 2 is registerDirect
		dirBit = arg2isDirect;
		break;

	case raStoreB:
	case raStoreH:
	case raStoreI:
		// Store Instruction
		// stores second use into first use memory
		// mov M <- r2 => mov r2 -> M dir = false
		dirBit = false;
		assert(!arg1isDirect);
		assert(arg2isDirect);
		break;

	case raLoadI:
		// Load Instruction
		// loads from memory address (use, first arg) into register (define, second arg)
		// mov M -> r1 => mov r1 <- M dir = true
		dirBit = true;
		assert(!arg1isDirect);
		assert(arg2isDirect);
		break;

	default:
		// Arithmetic Instructions
		// add r1, r2 => add r1, r2 dir = true
		// add r1, M => add r1, M dir = true
		// add M, r2 => add r2, M dir = false
		// ie direction bit is set iff argument 1 is registerDirect
		assert(raInfo[codeType].type == kArith);
		dirBit = arg1isDirect;
	}
	return dirBit;
}
// Emit the instruction: optional 16-bit operand-size prefix, opcode byte
// with the direction bit OR'd in (bit 1), then the formatted argument list.
void InsnDoubleOpDir::
formatToMemory(void* inStart, Uint32 inOffset, MdFormatter& inFormatter)
{
	uint8* start = (uint8*) inStart;
	assert(iArgumentList);

	// Format the opcode to memory
	if(raInfo[codeType].needs16BitPrefix)
		*start++ = kPrefix_For_16_Bits;

	// calculate opcode (direction bit is bit 1)
	Uint8 direction = getDirectionBit() ? 2 : 0;
	Uint8 opcode = raInfo[codeType].opcode | direction;
	*start = opcode;

	// Find the location of the argument list and format it to memory
	Uint8* argLocation = (Uint8*)inStart + opcodeSize();
	iArgumentList->alFormatToMemory((void*)argLocation , inOffset, *this, inFormatter);
}
// Opcode length in bytes: two when the 16-bit operand-size prefix is
// required, else one.
Uint8 InsnDoubleOpDir::
opcodeSize()
{
	if (raInfo[codeType].needs16BitPrefix)
		return 2;
	return 1;
}
#ifdef DEBUG_LOG
// Print this instruction's mnemonic (left-padded) from the raInfo table.
void InsnDoubleOpDir::
printOpcode(LogModuleObject &f)
{
	const char* mnemonic = raInfo[codeType].text;
	UT_OBJECTLOG(f, PR_LOG_ALWAYS, ("%-8s ", mnemonic));
}
#endif
//================================================================================

Просмотреть файл

@ -0,0 +1,655 @@
/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*-
*
* The contents of this file are subject to the Netscape Public License
* Version 1.0 (the "NPL"); you may not use this file except in
* compliance with the NPL. You may obtain a copy of the NPL at
* http://www.mozilla.org/NPL/
*
* Software distributed under the NPL is distributed on an "AS IS" basis,
* WITHOUT WARRANTY OF ANY KIND, either express or implied. See the NPL
* for the specific language governing rights and limitations under the
* NPL.
*
* The Initial Developer of this code under the NPL is Netscape
* Communications Corporation. Portions created by Netscape are
* Copyright (C) 1998 Netscape Communications Corporation. All Rights
* Reserved.
*/
//
// x86Instruction.h
//
// Peter DeSantis
// Simon Holmes a Court
// Include guard renamed from X86_WIN32_INSTRUCTION: this header is generic
// x86 (file renamed x86Win32Instruction.h -> x86Instruction.h), not Win32.
#ifndef X86_INSTRUCTION
#define X86_INSTRUCTION

#include "CatchAssert.h"
#include <stdio.h>
#include "prtypes.h"
#include "Instruction.h"
#include "InstructionEmitter.h"
#include "VirtualRegister.h"
#include "Value.h"
#include "x86Opcode.h"
#include "x86ArgumentList.h"
#include "MemoryAccess.h"
#include "LogModule.h"
#include "NativeCodeCache.h"
#ifdef DEBUG
// Debug-only tag stored in each instruction object (see
// x86ArgListInstruction::debugType) so a debugger can tell which concrete
// instruction class an object belongs to.
enum instructionObjType
{
    kx86Instruction,
    kSetInstruction,
    kInsnDoubleOpDir,
    kInsnDoubleOp,
    kStandardArith,
    kStandardArithDisp,
    kNotKnown
};
#endif
//================================================================================
/*
x86Instruction(being eliminated) InsnSet
InsnDoubleOp InsnSwitch
InsnDoubleOpDir InsnSysCallCondBranch
| InsnCondBranch
| InsnNoArgs
x86ArglistInstruction Call
\ /
\ /
\ /
InsnUseXDefineYFromPool
|
Instruction (can't spill)
*/
//================================================================================
// x86ArgListInstruction
// Common base for x86 instructions whose operands are described by an
// x86ArgumentList.  Supplies default size calculation, spill handling and
// debug printing; concrete subclasses provide the opcode bytes.
class x86ArgListInstruction :
    public InsnUseXDefineYFromPool
{
public:
    x86ArgListInstruction(DataNode* inPrimitive, Pool& inPool, Uint8 inX, Uint8 inY) :
        InsnUseXDefineYFromPool (inPrimitive, inPool, inX, inY )
    {
        DEBUG_ONLY(debugType = kNotKnown);
    }

    // utility
    void x86StandardUseDefine(x86Emitter& inEmitter);

    // virtual methods that must be written
    virtual Uint8 opcodeSize() = 0;

    // defaults to (result of opcodeSize() + iArgumentList->alSize(*this))
    virtual size_t getFormattedSize(MdFormatter& /*inFormatter*/);
    virtual void formatToMemory(void * /*inStart*/, Uint32 /*inCurOffset*/, MdFormatter& /*inFormatter*/) = 0;

    // flags
    InstructionFlags getFlags() const { return ifNone; }

    // spilling: redirect a use/define to a spill location
    virtual bool switchUseToSpill(Uint8 inWhichUse, VirtualRegister&inVR);
    virtual bool switchDefineToSpill(Uint8 inWhichDefine, VirtualRegister& inVR);

    // Control Spilling
    // eg switch cannot spill (at least for now)
    // eg special mem-reg type does funky things when spilling
    virtual bool opcodeAcceptsSpill() { return true; }  // most instructions can spill
    virtual void switchOpcodeToSpill() { }              // most opcodes don't change on spilling

    // simple arithmetic and move instructions have a direction bit
    virtual bool canReverseOperands() { return false; } // most instructions can't reverse their operands

    // immediates
    virtual bool opcodeCanAccept1ByteImmediate() { return false; }
    virtual bool opcodeCanAccept4ByteImmediate() { return false; }
    virtual bool opcodeCanAcceptImmediate() { return (opcodeCanAccept1ByteImmediate() || opcodeCanAccept4ByteImmediate()); }

    // condensable
    virtual bool regOperandCanBeCondensed() { return false; }

protected:
    x86ArgumentList* iArgumentList;     // operand description; set by subclass constructors

public:
#ifdef DEBUG_LOG
    // Debugging Methods
    virtual void printPretty(LogModuleObject &f);
    virtual void printOpcode(LogModuleObject &f) = 0;
#endif // DEBUG_LOG

#ifdef DEBUG
    instructionObjType debugType;   // used so we know what type of object we are in the debugger
    void checkIntegrity() { iArgumentList->alCheckIntegrity(*this); }
    void printArgs() { iArgumentList->alPrintArgs(*this); }
#endif // DEBUG
};
// Default formatted size: the opcode bytes plus the encoded argument list.
inline size_t x86ArgListInstruction::
getFormattedSize(MdFormatter& inFormatter)
{
    assert(iArgumentList != NULL);
    return opcodeSize() + iArgumentList->alSize(*this, inFormatter);
}
#ifdef DEBUG_LOG
// Print the mnemonic followed by the pretty-printed argument list.
inline void x86ArgListInstruction::
printPretty(LogModuleObject &f)
{
    printOpcode(f);
    assert(iArgumentList != NULL);
    iArgumentList->alPrintPretty(f, *this );
}
#endif // DEBUG_LOG
//================================================================================
// x86Instruction
// This mega-class will eventually be phased out as it is split up into smaller classes
// Catch-all instruction class: pairs a polymorphic x86Opcode object with an
// x86ArgumentList chosen by the constructor overload.  (Being phased out in
// favour of the smaller Insn* classes — see file-header diagram.)
class x86Instruction :
    public x86ArgListInstruction
{
public:
    //--------------------------------------------------------------------------------
    // ImmediateArithType Instructions

    // Unconditional jump (iaJmp) to a control node.
    x86Instruction (DataNode* inPrimitive, Pool& inPool, ControlNode& inControlNode) :
        x86ArgListInstruction (inPrimitive, inPool, 0, 0 ) , flags(ifNone)
    {
        iOpcode = new x86Opcode_ImmArith( iaJmp );
        iArgumentList = new x86ControlNodeOffsetArgumentList( inControlNode );
    }

    // Immediate-arithmetic op: one operand plus an immediate constant.
    x86Instruction (DataNode* inPrimitive, Pool& inPool, x86ImmediateArithType inOpInfo, Uint32 inConstant, x86ArgumentType inArgType1, Uint8 inX, Uint8 inY) :
        x86ArgListInstruction (inPrimitive, inPool, inX, inY ) , flags(ifNone)
    {
        iOpcode = new x86Opcode_ImmArith( inOpInfo );
        iArgumentList = new x86ImmediateArgumentList( inArgType1, inConstant );
    }

    // As above, with a memory displacement as well.
    x86Instruction (DataNode* inPrimitive, Pool& inPool, x86ImmediateArithType inOpInfo, Uint32 inConstant, x86ArgumentType inArgType1, Uint32 inDisplacement, Uint8 inX, Uint8 inY) :
        x86ArgListInstruction (inPrimitive, inPool, inX, inY ) , flags(ifNone)
    {   iOpcode = new x86Opcode_ImmArith( inOpInfo );
        iArgumentList = new x86ImmediateArgumentList( inArgType1, inDisplacement, inConstant ); }

    //--------------------------------------------------------------------------------
    // ExtendedType Instructions (opcode extension encoded in the reg field)
    x86Instruction (DataNode* inPrimitive, Pool& inPool, x86ExtendedType inOpInfo, Uint32 inConstant, x86ArgumentType inArgType1, Uint8 inX, Uint8 inY) :
        x86ArgListInstruction (inPrimitive, inPool, inX, inY ) , flags(ifNone)
    {   iOpcode = new x86Opcode_Reg( inOpInfo );
        iArgumentList = new x86ImmediateArgumentList( inArgType1, inConstant ); }

    x86Instruction (DataNode* inPrimitive, Pool& inPool, x86ExtendedType inOpInfo, Uint32 inConstant, x86ArgumentType inArgType1, Uint32 inDisplacement, Uint8 inX, Uint8 inY) :
        x86ArgListInstruction (inPrimitive, inPool, inX, inY ) , flags(ifNone)
    {   iOpcode = new x86Opcode_Reg( inOpInfo );
        iArgumentList = new x86ImmediateArgumentList( inArgType1, inDisplacement, inConstant ); }

    x86Instruction (DataNode* inPrimitive, Pool& inPool, x86ExtendedType inOpInfo, x86ArgumentType inArgType1, Uint8 inX, Uint8 inY ) :
        x86ArgListInstruction (inPrimitive, inPool, inX, inY ) , flags(ifNone)
    {   iOpcode = new x86Opcode_Reg( inOpInfo );
        iArgumentList = new x86SingleArgumentList( inArgType1 ); }

    x86Instruction (DataNode* inPrimitive, Pool& inPool, x86ExtendedType inOpInfo, x86ArgumentType inArgType1, Uint32 inDisplacement, Uint8 inX, Uint8 inY) :
        x86ArgListInstruction (inPrimitive, inPool, inX, inY ) , flags(ifNone)
    {   iOpcode = new x86Opcode_Reg( inOpInfo );
        iArgumentList = new x86SingleArgumentList( inArgType1, inDisplacement ); }

    //--------------------------------------------------------------------------------
    // CondensableExtendedType Instructions (can condense to a shorter form)
    x86Instruction (DataNode* inPrimitive, Pool& inPool, x86CondensableExtendedType inOpInfo, Uint32 inConstant, x86ArgumentType inArgType1, Uint8 inX, Uint8 inY) :
        x86ArgListInstruction (inPrimitive, inPool, inX, inY ) , flags(ifNone)
    {   iOpcode = new x86Opcode_Condensable_Reg( inOpInfo );
        iArgumentList = new x86CondensableImmediateArgumentList( inArgType1, inConstant ); }

    x86Instruction (DataNode* inPrimitive, Pool& inPool, x86CondensableExtendedType inOpInfo, x86ArgumentType inArgType1, Uint8 inX, Uint8 inY) :
        x86ArgListInstruction (inPrimitive, inPool, inX, inY ) , flags(ifNone)
    {   iOpcode = new x86Opcode_Condensable_Reg( inOpInfo );
        iArgumentList = new x86SingleArgumentList( inArgType1 ); }

    //--------------------------------------------------------------------------------
    // SpecialRegMemType Instruction
    x86Instruction (DataNode* inPrimitive, Pool& inPool, x86SpecialRegMemType inType, Uint32 inImmediate, Uint8 inX, Uint8 inY ) :
        x86ArgListInstruction (inPrimitive, inPool, inX, inY ) , flags(ifNone)
    {   iOpcode = new x86Opcode_SpecialRegMem( inType );
        iArgumentList = new x86SpecialRegMemArgumentList( inImmediate ); }

    size_t getFormattedSize(MdFormatter& /*inFormatter*/);
    virtual void formatToMemory(void * /*inStart*/, Uint32 /*inCurOffset*/, MdFormatter& /*inFormatter*/);

    // access flags
    InstructionFlags getFlags() const { return flags; }
    void setFlags(InstructionFlags f) { flags = f; }

    // Allows ArgumentList access to opcode without passing the extra reference to the opcode.
    Uint8 opcodeSize(){ return iOpcode->opSize(); }
    bool opcodeCanAccept1ByteImmediate() { return iOpcode->opCanAccept1ByteImmediate(); }
    bool opcodeCanAccept4ByteImmediate() { return iOpcode->opCanAccept4ByteImmediate(); }
    bool regOperandCanBeCondensed() { return iOpcode->opRegOperandCanBeCondensed(); }

    virtual bool opcodeAcceptsSpill() { return true; }
    virtual void switchOpcodeToSpill() { iOpcode->opSwitchToRegisterIndirect(); }

protected:
    x86Opcode* iOpcode;         // polymorphic opcode object (chosen by the ctor)
    InstructionFlags flags;     // Used to mark copy instructions so the register allocator can remove them.

public:
#ifdef DEBUG_LOG
    void printOpcode(LogModuleObject &f);
#endif // DEBUG_LOG
};
//--------------------------------------------------------------------------------
// FIX these two methods should be removed (third method replaces them)
// Map an instruction use's allocated register color to its x86 GPR encoding.
inline Uint8 useToRegisterNumber(InstructionUse& inUse)
{
    Uint8 color = inUse.getVirtualRegister().getColor();
    return colorTox86GPR[color];
}
// Map an instruction define's allocated register color to its x86 GPR encoding.
inline Uint8 defineToRegisterNumber(InstructionDefine& inDefine)
{
    return colorTox86GPR[inDefine.getVirtualRegister().getColor()];
}
// Map an allocated virtual register to its x86 GPR encoding.
// Only six colors are valid here (the allocatable general-purpose registers).
inline Uint8 getRegisterNumber(VirtualRegister* vreg)
{
    Uint8 color = vreg->getColor();
    assert(color < 6);
    return colorTox86GPR[color];
}
//--------------------------------------------------------------------------------
// Total encoded size: opcode bytes plus argument-list bytes.
inline size_t x86Instruction::
getFormattedSize(MdFormatter& inFormatter)
{
    assert(iOpcode != NULL);
    assert(iArgumentList != NULL);
    size_t opcodeBytes = iOpcode->opSize();
    size_t argumentBytes = iArgumentList->alSize(*this, inFormatter);
    return opcodeBytes + argumentBytes;
}
#ifdef DEBUG_LOG
// Delegate mnemonic printing to the opcode object.
inline void x86Instruction::
printOpcode(LogModuleObject &f)
{
    iOpcode->opPrintPretty(f);
}
// Fixed: `#endif DEBUG_LOG` had extra tokens after #endif (ill-formed;
// gcc warns "extra tokens at end of #endif directive").  The rest of the
// file consistently uses the commented form.
#endif // DEBUG_LOG
//================================================================================
// InsnNoArgs
// One-byte instruction with no operand bytes; the x86NoArgsCode enum
// selects which opcode is emitted (see formatToMemory in the .cpp).
class InsnNoArgs :
    public InsnUseXDefineYFromPool
{
public:
    InsnNoArgs(DataNode* inPrimitive, Pool& inPool, x86NoArgsCode inCode, Uint8 inUses, Uint8 inDefines) :
        InsnUseXDefineYFromPool(inPrimitive, inPool, inUses, inDefines),
        code(inCode)
    {}

    virtual void formatToMemory(void* inStart, Uint32 /*inOffset*/, MdFormatter& /*inFormatter*/);
    // always exactly one byte
    virtual size_t getFormattedSize(MdFormatter& /*inFormatter*/) { return 1; }
    InstructionFlags getFlags() const { return ifNone; }

    // no explicit operands, so spilling is meaningless
    virtual bool opcodeAcceptsSpill() { return false; }
    virtual void switchOpcodeToSpill() { assert(false); }

protected:
    x86NoArgsCode code;     // which no-argument opcode to emit

#ifdef DEBUG_LOG
public:
    virtual void printPretty(LogModuleObject &f);
#endif
};
//================================================================================
// InsnSwitch
// Table-driven switch: an indirect jump through a per-switch jump table
// with one entry per case.  Formatting details live in the .cpp file.
class InsnSwitch :
    public InsnUseXDefineYFromPool
{
public:
    InsnSwitch(DataNode* inPrimitive, Pool& inPool);

    virtual void formatToMemory(void* inStart, Uint32 inOffset, MdFormatter& /*inFormatter*/);
    virtual size_t getFormattedSize(MdFormatter& /*inFormatter*/);
    InstructionFlags getFlags() const { return ifNone; }

    // can't spill
    virtual bool opcodeAcceptsSpill() { return false; }
    virtual void switchOpcodeToSpill() { assert(false); }

protected:
    Uint32 mNumCases;           // number of switch cases
    ControlNode* mControlNode;  // control node this switch belongs to
    Uint8* mTableAddress;       // jump-table address — NOTE(review): presumably set during formatting; confirm in the .cpp

#ifdef DEBUG_LOG
public:
    virtual void printPretty(LogModuleObject &f);
#endif
};
//================================================================================
// InsnCondBranch
// FIX For now all branches take 4 bytes immediates -- eventually we want this to take
// the minimum possible
// Conditional branch to another control node; consumes one condition use.
// Always formatted as 6 bytes (2-byte opcode + 4-byte displacement).
class InsnCondBranch :
    public InsnUseXDefineYFromPool
{
public:
    InsnCondBranch(DataNode* inPrimitive, Pool& inPool, x86ConditionCode condType, ControlNode& inControlNode) :
        InsnUseXDefineYFromPool(inPrimitive, inPool, 1, 0),
        condType(condType),
        cnoSource(inControlNode)
    {};

    virtual void formatToMemory(void* inStart, Uint32 inOffset, MdFormatter& /*inFormatter*/);
    virtual size_t getFormattedSize(MdFormatter& /*inFormatter*/) { return 6; }
    InstructionFlags getFlags() const { return ifNone; }

    virtual bool opcodeAcceptsSpill() { return false; }     // spilling makes no sense here
    virtual void switchOpcodeToSpill() { assert(false); }

protected:
    x86ConditionCode condType;  // x86 condition codes
    ControlNode& cnoSource;     // the source of the branch

#ifdef DEBUG_LOG
public:
    virtual void printPretty(LogModuleObject &f);
#endif
};
//================================================================================
// InsnSysCallCondBranch
// emit
// jcc OK
// call inFunc
// OK:
//
// Conditional call to a system function.  Emits:
//     jcc OK          (6 bytes — skips the call when the condition holds)
//     call inFunc     (5 bytes)
//   OK:
class InsnSysCallCondBranch :
    public InsnUseXDefineYFromPool
{
public:
    InsnSysCallCondBranch(DataNode* inPrimitive, Pool& inPool, x86ConditionCode inCondType, void (*inFunc)()) :
        InsnUseXDefineYFromPool(inPrimitive, inPool, 1, 0),
        functionAddress(inFunc),
        condType(inCondType)
    {};

    virtual void formatToMemory(void* inStart, Uint32 /*inOffset*/, MdFormatter& /*inFormatter*/)
    {
        Uint8* start = (Uint8*) inStart;

        // emit jump: 0x0f (0x80 | cc) rel32, where rel32 = 5 hops over the call
        *start++ = 0x0f;
        *start++ = 0x80 + condType;
        writeLittleWordUnaligned((void*)start, 5);
        start += 4;

        // emit call: 0xe8 rel32, relative to the end of the 5-byte call
        Uint8* callStart = start;
        *start++ = 0xe8;
        int32 branchOffset = (Uint32)functionAddress - (Uint32) callStart - 5;
        writeLittleWordUnaligned((void*)start, branchOffset);
    }

    // 6-byte jcc + 5-byte call
    virtual size_t getFormattedSize(MdFormatter& /*inFormatter*/) { return 6 + 5; }
    InstructionFlags getFlags() const { return ifNone; }

    virtual bool opcodeAcceptsSpill() { return false; }     // spilling makes no sense here
    virtual void switchOpcodeToSpill() { assert(false); }

protected:
    void* functionAddress;      // target of the emitted call
    x86ConditionCode condType;

#ifdef DEBUG_LOG
public:
    virtual void printPretty(LogModuleObject &f);
#endif
};
//================================================================================
// Set on condition flags
// cannot spill
// uses a condition
// Set-on-condition (setcc): materializes a condition flag into a register.
// Cannot spill; consumes a condition produced by an earlier instruction.
class InsnSet :
    public InsnUseXDefineYFromPool
{
public:
    InsnSet(DataNode* inPrimitive, Pool& inPool, x86ConditionCode condType, int inX = 2, int inY = 1) :
        InsnUseXDefineYFromPool(inPrimitive, inPool, inX, inY ),
        condType(condType)
    {}

    virtual void formatToMemory(void* inStart, Uint32 inOffset, MdFormatter& /* inFormatter */ );
    virtual size_t getFormattedSize(MdFormatter& /*inFormatter*/) { return 3; }     // 2-byte opcode + modrm byte
    virtual Uint8 opcodeSize() { return 2; }

protected:
    x86ConditionCode condType;  // x86 condition codes

#ifdef DEBUG_LOG
public:
    virtual void printPretty(LogModuleObject &f);
#endif
};
//================================================================================
/* InsnDoubleOp
   Two-operand instruction with a fixed operand order (no direction bit).
   Encodings handled (these are the imul forms):
	0110 1001:11 reg1 reg2: immdata		register1 with immediate to register2
	0110 1001:mod reg r/m: immdata		register with immediate to register
	0000 1111:1010 1111: 11 reg1 reg2	register1 with register2
	0000 1111:1010 1111: mod reg r/m	register with memory
*/
class InsnDoubleOp :
    public x86ArgListInstruction
{
public:
    InsnDoubleOp( DataNode* inPrimitive, Pool& inPool, x86DoubleOpCode inCodeType,
                  x86ArgumentType inArgType1 = atRegDirect, x86ArgumentType inArgType2 = atRegDirect,
                  Uint8 uses = 2, Uint8 defines = 1 );

    // Variant taking a memory displacement for the memory operand.
    InsnDoubleOp( DataNode* inPrimitive, Pool& inPool, x86DoubleOpCode inCodeType,
                  Uint32 inDisplacement,
                  x86ArgumentType inArgType1 = atRegDirect, x86ArgumentType inArgType2 = atRegDirect,
                  Uint8 uses = 2, Uint8 defines = 1 );

    // operand order is fixed for these opcodes
    virtual bool canReverseOperands() { return false; }

    virtual void formatToMemory(void* inStart, Uint32 inOffset, MdFormatter& inFormatter);
    virtual Uint8 opcodeSize();

protected:
    x86DoubleOpCode codeType;   // selects the opcode bytes

public:
#ifdef DEBUG_LOG
    virtual void printOpcode(LogModuleObject &f);
#endif
};
//================================================================================
// InsnDoubleOpDir
// use the default spilling behaviour for x86ArgListInstruction
// Two-operand instruction whose opcode carries a direction bit, so its
// operands can be reversed (reg,mem <-> mem,reg).  Uses the default
// spilling behaviour of x86ArgListInstruction.
class InsnDoubleOpDir :
    public x86ArgListInstruction
{
public:
    InsnDoubleOpDir(DataNode* inPrimitive, Pool& inPool, x86DoubleOpDirCode inCodeType,
                    x86ArgumentType inArgType1 = atRegDirect, x86ArgumentType inArgType2 = atRegDirect,
                    Uint8 uses = 2, Uint8 defines = 1 );

    // Variant taking a memory displacement for the memory operand.
    InsnDoubleOpDir(DataNode* inPrimitive, Pool& inPool, x86DoubleOpDirCode inCodeType,
                    Uint32 inDisplacement,
                    x86ArgumentType inArgType1 = atRegDirect, x86ArgumentType inArgType2 = atRegDirect,
                    Uint8 uses = 2, Uint8 defines = 1 );

    // the main feature of these instructions is their ability to reverse operands
    virtual bool canReverseOperands() { return true; }

    virtual void formatToMemory(void* inStart, Uint32 inOffset, MdFormatter& inFormatter);
    virtual Uint8 opcodeSize();

    InstructionFlags getFlags() const;  // ifCopy if we are an unspilt move, ifNone otherwise

protected:
    bool getDirectionBit();             // derives the opcode's direction bit from the operand kinds
    x86DoubleOpDirCode codeType;

public:
#ifdef DEBUG_LOG
    virtual void printOpcode(LogModuleObject &f);
#endif
};
//================================================================================
// Utililty
// Returns a new copy instruction
// Allocate (from inPool) a register-to-register integer copy instruction.
inline InsnDoubleOpDir&
newCopyInstruction(DataNode& inDataNode, Pool& inPool, Uint8 uses = 1, Uint8 defines = 1)
{
    InsnDoubleOpDir* copy =
        new(inPool) InsnDoubleOpDir(&inDataNode, inPool, raCopyI, atRegDirect, atRegDirect, uses, defines);
    return *copy;
}
//================================================================================
// Calls
// Direct call instruction, parameterized on the shape of the call primitive:
//   tHasIncomingStore   — inputs begin with a store (memory) edge
//   tHasOutgoingStore   — outputs begin with a store edge
//   tHasFunctionAddress — an input edge carries the callee address
//   tIsDynamic          — callee dispatched through a register (see CallD_)
template<bool tHasIncomingStore, bool tHasOutgoingStore, bool tHasFunctionAddress, bool tIsDynamic>
class Call :
    public InsnUseXDefineYFromPool
{
public:
    // does this call primitive produce a value?
    static inline bool hasReturnValue(DataNode& inDataNode);
    // number of actual arguments (excluding store/address edges)
    static inline Uint8 numberOfArguments(DataNode& inDataNode);

    Call( DataNode* inDataNode,
          Pool& inPool,
          Uint8 inRegisterArguments,
          bool inHasReturnValue,
          x86Emitter& inEmitter,
          void (*inFunc)() = NULL,
          DataNode* inUseDataNode = NULL );

public:
    virtual void formatToMemory(void* inStart, Uint32 inOffset, MdFormatter& /*inFormatter*/);
    // direct near call: 0xe8 + rel32 = 5 bytes
    virtual size_t getFormattedSize(MdFormatter& /*inFormatter*/) { return (5); }
    virtual InstructionFlags getFlags() const { return (ifCall); }

protected:
    Uint32 mCalleeAddress;      // absolute address of the callee

#ifdef DEBUG_LOG
public:
    virtual void printPretty(LogModuleObject &f)
    {
        // Resolve the target through the code cache for a readable name, if possible.
        CacheEntry* ce = NativeCodeCache::getCache().lookupByRange((Uint8*)mCalleeAddress);
        if(ce)
        {
            Method* method = ce->descriptor.method;
            assert(method);
            const char* name = method->getName();
            const char* tag = method->getHTMLName();
            UT_OBJECTLOG(f, PR_LOG_ALWAYS, (" call <A HREF=\"%s\">%s</A>", tag, name));
        }
        else
            UT_OBJECTLOG(f, PR_LOG_ALWAYS, (" call %p", (Uint32 *)mCalleeAddress));
    }
#endif
};
// Static call: no function-address input and not dynamic; the callee is
// supplied directly as a function pointer.
template<bool tHasIncomingStore, bool tHasOutgoingStore>
class CallS :
    public Call<tHasIncomingStore, tHasOutgoingStore, false, false>
{
public:
    inline CallS( DataNode* inDataNode,
                  Pool& inPool,
                  Uint8 inRegisterArguments,
                  bool inHasReturnValue,
                  x86Emitter& inEmitter,
                  void (*inFunc)(),
                  DataNode* inUseDataNode = NULL ) :
        Call<tHasIncomingStore, tHasOutgoingStore, false, false>(inDataNode, inPool, inRegisterArguments, inHasReturnValue, inEmitter, inFunc, inUseDataNode) { }
};

// Common instantiations (template args: <incoming store, outgoing store>).
typedef CallS<true, true> CallS_V;
typedef CallS<true, false> CallS_;
typedef CallS<false, false> CallS_C;
typedef Call<true, true, true, false> Call_;
// Dynamically dispatched call
// Dynamically dispatched call: the callee address arrives in a register and
// the instruction is emitted as a 2-byte indirect "call *%reg".
class CallD_ :
    public Call<true, true, true, true>
{
public:
    inline CallD_(DataNode* inDataNode, Pool& inPool, Uint8 inRegisterArguments, bool inHasReturnValue, x86Emitter& inEmitter) :
        Call<true, true, true, true>(inDataNode, inPool, inRegisterArguments, inHasReturnValue, inEmitter) { }

    inline virtual void formatToMemory(void* inStart, Uint32 /*inOffset*/, MdFormatter& inEmitter);
    // 0xff + modrm byte
    virtual size_t getFormattedSize(MdFormatter& /*inFormatter*/) { return (2); }

#ifdef DEBUG_LOG
    // target register is not known symbolically at print time
    virtual void printPretty(LogModuleObject &f) { UT_OBJECTLOG(f, PR_LOG_ALWAYS, (" call ???")); }
#endif
};
// A call produces a return value iff it has an outgoing edge beyond the
// (optional) outgoing store edge.
template<bool tHasIncomingStore, bool tHasOutgoingStore, bool tHasFunctionAddress, bool tIsDynamic> bool
Call<tHasIncomingStore, tHasOutgoingStore, tHasFunctionAddress, tIsDynamic>::
hasReturnValue(DataNode& inDataNode)
{
    return inDataNode.getOutgoingEdgesBegin() + tHasOutgoingStore < inDataNode.getOutgoingEdgesEnd();
}
// Count the actual arguments: all inputs minus the leading function-address
// and incoming-store edges (when present).
template<bool tHasIncomingStore, bool tHasOutgoingStore, bool tHasFunctionAddress, bool tIsDynamic> Uint8
Call<tHasIncomingStore, tHasOutgoingStore,tHasFunctionAddress, tIsDynamic>::
numberOfArguments(DataNode& inDataNode)
{
    assert(!(tHasFunctionAddress && !tHasIncomingStore)); // no such primitive
    DataConsumer* firstArg = inDataNode.getInputsBegin() + tHasFunctionAddress + tHasIncomingStore;
    DataConsumer* lastArg = inDataNode.getInputsEnd();
    return lastArg - firstArg;
}
// Emit a direct near call (0xe8 rel32) to the recorded callee address.
template<bool tHasIncomingStore, bool tHasOutgoingStore, bool tHasFunctionAddress, bool tIsDynamic>
void Call<tHasIncomingStore, tHasOutgoingStore, tHasFunctionAddress, tIsDynamic>::
formatToMemory(void* inStart, Uint32 /*inOffset*/, MdFormatter& /*inFormatter*/)
{
    Uint8* start = (Uint8*)inStart;
    // rel32 is relative to the end of the 5-byte call instruction
    int32 branchOffset = mCalleeAddress - (Uint32) inStart - 5;
    *start++ = 0xe8;    // call rel32
    writeLittleWordUnaligned((void*)start, branchOffset);
}
// Emit an indirect call through a register: 0xff with modrm (0xd0 | reg),
// i.e. "call *%reg" where reg holds the callee address (first use).
inline void CallD_::
formatToMemory(void* inStart, Uint32 /*inOffset*/, MdFormatter& /*inFormatter*/)
{
    Uint8 *curPC = (Uint8 *) inStart;
    *curPC++ = 0xff;
    *curPC++ = 0xd0 | useToRegisterNumber(getInstructionUseBegin()[0]);
}
#endif

Просмотреть файл

@ -1,596 +0,0 @@
/* -*- Mode: asm; tab-width:4; truncate-lines:t -*-
*
* The contents of this file are subject to the Netscape Public License
* Version 1.0 (the "NPL"); you may not use this file except in
* compliance with the NPL. You may obtain a copy of the NPL at
* http://www.mozilla.org/NPL/
*
* Software distributed under the NPL is distributed on an "AS IS" basis,
* WITHOUT WARRANTY OF ANY KIND, either express or implied. See the NPL
* for the specific language governing rights and limitations under the
* NPL.
*
* The Initial Developer of this code under the NPL is Netscape
* Communications Corporation. Portions created by Netscape are
* Copyright (C) 1998 Netscape Communications Corporation. All Rights
* Reserved.
*/
	.file "x86Linux.S"

/* ELF/gas glue macros (this file is run through the C preprocessor).
 * GLOBAL_ENTRY_START aligns an entry point to 16 bytes, exports it, and
 * marks it as a function; GLOBAL_ENTRY_END drops a local end label and
 * records the symbol's size for the ELF symbol table.
 */
#define ALIGN .align 16
#define SYMBOL_NAME(name) name
#define SYMBOL_NAME_LABEL(name) name##:

#define GLOBAL_ENTRY_START(name) \
ALIGN; \
.globl SYMBOL_NAME(name); \
.type SYMBOL_NAME(name),@function; \
SYMBOL_NAME_LABEL(name)

#define GLOBAL_ENTRY_END(name) \
SYMBOL_NAME_LABEL(.L##name); \
.size SYMBOL_NAME(name),SYMBOL_NAME(.L##name)-SYMBOL_NAME(name)
/*
 * staticCompileStub
 *
 * Reached with the cacheEntry in %eax and the caller's return address on
 * top of the stack.  Compiles and back-patches the target method, then
 * tail-jumps to the compiled code (address returned in %eax), leaving the
 * original return address on top of the stack.
 */
GLOBAL_ENTRY_START(staticCompileStub)
	push %ebp		/* Make stack frame */
	mov %esp,%ebp

/* Even though ESI and EDI are non-volatile (callee-saved) registers, we need to
   preserve them here in case an exception is thrown. The exception-handling
   code expects to encounter this specially-prepared stack "guard" frame when
   unwinding the stack. See x86ExceptionHandler.cpp. */
	push %edi
	push %esi
	push %ebx		/* XXX - Why push EBX ? It's caller-saved */

/* Call compileAndBackPatchMethod() with 2 args.  After the four pushes
   above the caller's return address sits at 16(%esp). */
	push 16(%esp)		/* Push the second argument to compileAndBackPatchMethod (return address) */
	push %eax		/* Push the first argument to compileAndBackPatchMethod (cacheEntry) */
	call compileAndBackPatchMethod

	pop %edx		/* Remove passed-in arguments to compileAndBackPatchMethod */
	pop %edx

	mov %ebp,%esp		/* Pop stack frame */
	pop %ebp
	jmp *%eax		/* Jump to function leaving the return address at the top of the stack */
GLOBAL_ENTRY_END(staticCompileStub)
/*
* 64bit Arithmetic Support Functions
*
* x86Extract64Bit
*
* Origin: Simon
* Purpose: signed right-aligned field extraction
* In: 64 bit source (on stack)
* 32 bit extraction size (on stack)
* Out: 64 bit result
* Note: Only works in range 1 <= b <= 63, b is extraction amount
*/
/* Signed right-aligned bit-field extraction from the 64-bit value at
 * 4..8(%esp); extraction size (1..63) at 12(%esp); result in %edx:%eax. */
GLOBAL_ENTRY_START(x86Extract64Bit)
	mov 4(%esp),%eax	/* load low word of a */
	mov 12(%esp),%ecx	/* load extraction size */
	cmp $0x20,%ecx
	jg greater32

	/* extract <= 32 bits: sign-extend the low ecx bits of the low word
	 * shift amount = 32 - extract
	 */
	neg %ecx
	add $0x20,%ecx		/* ecx = 32 - extract */
	shl %cl,%eax
	sar %cl,%eax
	cdq			/* sign extend into EDX:EAX */
	ret $12

greater32:
	/* extract > 32 bits: keep the whole low word, sign-extend the high word
	 * shift amount = 64 - extract
	 */
	mov 8(%esp),%edx	/* load high word of a */
	neg %ecx
	add $0x40,%ecx		/* ecx = 64 - extract */
	shl %cl,%edx
	sar %cl,%edx
	ret $12
GLOBAL_ENTRY_END(x86Extract64Bit)
/*
* 3WayCompare
*
* Origin: Symantec JIT
* Purpose: compare two longs
* In: two longs on the stack
* Out: depends on condition flags:
* less = -1
* equal = 0
* greater = 1
*/
/* Signed 64-bit three-way compare of a (4/8(%esp)) against b (12/16(%esp)):
 * returns -1 / 0 / 1 in %eax for a < b / a == b / a > b. */
GLOBAL_ENTRY_START(x86ThreeWayCMP_L)
	/* compare high words: signed */
	mov 8(%esp),%ecx
	mov 16(%esp),%edx
	cmp %edx,%ecx
	jl lcmp_m1
	jg lcmp_1

	/* high words equal — compare low words: unsigned */
	mov 4(%esp),%ecx
	mov 12(%esp),%edx
	cmp %edx,%ecx
	ja lcmp_1
	mov $0,%eax		/* mov leaves the flags intact for the jb below */
	jb lcmp_m1
	ret $16

	.align 4
lcmp_m1:
	mov $-1,%eax
	ret $16

	.align 4
lcmp_1:
	mov $1,%eax
	ret $16
GLOBAL_ENTRY_END(x86ThreeWayCMP_L)
/*
* llmul
*
* Origin: Intel Code (via MSDev)
* Purpose: long multiply (same for signed/unsigned)
* In: args are passed on the stack:
* 1st pushed: multiplier (QWORD)
* 2nd pushed: multiplicand (QWORD)
* Out: EDX:EAX - product of multiplier and multiplicand
* Note: parameters are removed from the stack
* Uses: ECX
*/
/* 64-bit multiply (same bits for signed/unsigned):
 * A at 4/8(%esp), B at 12/16(%esp); product returned in %edx:%eax.
 * A*B = (A.lo*B.lo) + ((A.lo*B.hi + A.hi*B.lo) << 32)
 *
 * BUG FIX: the hard-case cross product read `mull 14(%esp)` — a hex->decimal
 * mistranslation of the original Intel/MSDev `[esp+14h]`.  After `push %ebx`
 * B.hi sits at 20(%esp) (= 0x14); 14(%esp) straddled B.lo/B.hi and produced
 * wrong products whenever either high word was non-zero.
 */
GLOBAL_ENTRY_START(x86Mul64Bit)
	mov 8(%esp),%eax	/* A.hi */
	mov 16(%esp),%ecx	/* B.hi */
	or %eax,%ecx		/* test for both hiwords zero */
	mov 12(%esp),%ecx	/* B.lo (mov does not disturb the flags) */
	jnz hard

	/* easy case
	 * both hiwords are zero, just mult A.lo and B.lo
	 */
	mov 4(%esp),%eax	/* A.lo */
	mul %ecx		/* A.lo * B.lo */
	ret $16			/* callee restores the stack */

	/* hard case: argument offsets below are +4 because of the push */
hard:
	push %ebx
	mul %ecx		/* A.hi * B.lo */
	mov %eax,%ebx		/* save result */
	mov 8(%esp),%eax	/* A.lo */
	mull 20(%esp)		/* A.lo * B.hi (B.hi is at 20(%esp) after the push) */
	add %eax,%ebx		/* ebx = (A.lo * B.hi) + (A.hi * B.lo) */
	mov 8(%esp),%eax	/* A.lo */
	mul %ecx		/* edx:eax = A.lo * B.lo */
	add %ebx,%edx		/* now edx has all the LO*HI stuff */
	pop %ebx
	ret $16			/* callee restores the stack */
GLOBAL_ENTRY_END(x86Mul64Bit)
/*
* lldiv
*
* Origin: Intel Code (via MSDev)
* Purpose: signed long divide
* In: args are passed on the stack:
* 1st pushed: divisor (QWORD)
* 2nd pushed: dividend (QWORD)
* Out: EDX:EAX contains the quotient (dividend/divisor)
* Note: parameters are removed from the stack
* Uses: ECX
*/
/* Signed 64-bit divide.  After the three register saves below:
 * a (dividend) at 16/20(%esp), b (divisor) at 24/28(%esp).
 * Quotient returned in %edx:%eax; callee pops the 16 argument bytes. */
GLOBAL_ENTRY_START(x86Div64Bit)
	push %edi
	push %esi
	push %ebx

/* Determine sign of the result (%edi = 0 if result is positive, non-zero
 * otherwise) and make operands positive.
 */
	xor %edi,%edi		/* result sign assumed positive */
	mov 20(%esp),%eax	/* hi word of a */
	or %eax,%eax		/* test to see if signed */
	jge L1			/* skip rest if a is already positive */
	inc %edi		/* complement result sign flag */
	mov 16(%esp),%edx	/* lo word of a */
	neg %eax		/* make a positive (negate 64-bit value) */
	neg %edx
	sbb $0,%eax
	mov %eax,20(%esp)	/* save positive value */
	mov %edx,16(%esp)
L1:
	mov 28(%esp),%eax	/* hi word of b */
	or %eax,%eax		/* test to see if signed */
	jge L2			/* skip rest if b is already positive */
	inc %edi		/* complement the result sign flag */
	mov 24(%esp),%edx	/* lo word of b */
	neg %eax		/* make b positive */
	neg %edx
	sbb $0,%eax
	mov %eax,28(%esp)	/* save positive value */
	mov %edx,24(%esp)
L2:
/* Now do the divide. First look to see if the divisor is less than 4194304K.
 * If so, then we can use a simple algorithm with word divides, otherwise
 * things get a little more complex.
 * NOTE - %eax currently contains the high order word of DVSR
 */
	or %eax,%eax		/* check to see if divisor < 4194304K */
	jnz L3			/* nope, gotta do this the hard way */
	mov 24(%esp),%ecx	/* load divisor */
	mov 20(%esp),%eax	/* load high word of dividend */
	xor %edx,%edx
	div %ecx		/* %eax <- high order bits of quotient */
	mov %eax,%ebx		/* save high bits of quotient */
	mov 16(%esp),%eax	/* %edx:%eax <- remainder:lo word of dividend */
	div %ecx		/* %eax <- low order bits of quotient */
	mov %ebx,%edx		/* %edx:%eax <- quotient */
	jmp L4			/* set sign, restore stack and return */

/* Here we do it the hard way. Remember, %eax contains the high word of DVSR */
L3:
	mov %eax,%ebx		/* %ebx:%ecx <- divisor */
	mov 24(%esp),%ecx
	mov 20(%esp),%edx	/* %edx:%eax <- dividend */
	mov 16(%esp),%eax
L5:
	shr $1,%ebx		/* shift divisor right one bit */
	rcr $1,%ecx
	shr $1,%edx		/* shift dividend right one bit */
	rcr $1,%eax
	or %ebx,%ebx
	jnz L5			/* loop until divisor < 4194304K */
	div %ecx		/* now divide, ignore remainder */
	mov %eax,%esi		/* save quotient */

/* We may be off by one, so to check, we will multiply the quotient
 * by the divisor and check the result against the original dividend
 * Note that we must also check for overflow, which can occur if the
 * dividend is close to 2**64 and the quotient is off by 1.
 */
	mull 28(%esp)		/* QUOT * HIWORD(DVSR) */
	mov %eax,%ecx
	mov 24(%esp),%eax
	mul %esi		/* QUOT * LOWORD(DVSR) */
	add %ecx,%edx		/* %EDX:%EAX = QUOT * DVSR */
	jc L6			/* carry means Quotient is off by 1 */

/* do long compare here between original dividend and the result of the
 * multiply in %edx:%eax. If original is larger or equal, we are ok, otherwise
 * subtract one (1) from the quotient.
 */
	cmp 20(%esp),%edx	/* compare hi words of result and original */
	ja L6			/* if result > original, do subtract */
	jb L7			/* if result < original, we are ok */
	cmp 16(%esp),%eax	/* hi words are equal, compare lo words */
	jbe L7			/* if less or equal we are ok, else subtract */
L6:
	dec %esi		/* subtract 1 from quotient */
L7:
	xor %edx,%edx		/* %edx:%eax <- quotient */
	mov %esi,%eax

/* Just the cleanup left to do. %edx:%eax contains the quotient. Set the sign
 * according to the save value, cleanup the stack, and return.
 */
L4:
	dec %edi		/* %edi was 1 (exactly one operand negative) iff result is negative */
	jnz L8			/* non-zero after dec -> result stays positive */
	neg %edx		/* otherwise, negate the 64-bit result */
	neg %eax
	sbb $0,%edx

/* Restore the saved registers and return. */
L8:
	pop %ebx
	pop %esi
	pop %edi
	ret $16
GLOBAL_ENTRY_END(x86Div64Bit)
/*
 * llrem
 *
 * Origin: MSDev
 * Purpose: signed long remainder
 * In: args are passed on the stack:
 * 1st pushed: divisor (QWORD)
 * 2nd pushed: dividend (QWORD)
 * Out: %EDX:%EAX contains the remainder (dividend %% divisor); its sign
 *      follows the sign of the dividend
 * Note: parameters are removed from the stack
 * Uses: %ECX (%EBX and %EDI are also used, but saved and restored)
 */
GLOBAL_ENTRY_START(x86Mod64Bit)
push %ebx
push %edi
/* With %ebx/%edi pushed, the incoming arguments now live at:
 *   12(%esp)/16(%esp) = dividend (a) lo/hi
 *   20(%esp)/24(%esp) = divisor  (b) lo/hi
 */
/* Determine sign of the result (%edi = 0 if result is positive, non-zero
 * otherwise) and make operands positive.  Only the dividend's sign matters
 * for the remainder, so %edi is bumped only in the dividend branch.
 */
xor %edi,%edi /* result sign assumed positive */
mov 16(%esp),%eax /* hi word of a */
or %eax,%eax /* test to see if signed */
jge LL1 /* skip rest if a is already positive */
inc %edi /* complement result sign flag bit */
mov 12(%esp),%edx /* lo word of a */
neg %eax /* make a positive (two-word negate) */
neg %edx
sbb $0,%eax
mov %eax,16(%esp) /* save positive value */
mov %edx,12(%esp)
LL1:
mov 24(%esp),%eax /* hi word of b */
or %eax,%eax /* test to see if signed */
jge LL2 /* skip rest if b is already positive */
mov 20(%esp),%edx /* lo word of b */
neg %eax /* make b positive */
neg %edx
sbb $0,%eax
mov %eax,24(%esp) /* save positive value */
mov %edx,20(%esp)
LL2:
/* Now do the divide. First look to see if the divisor is less than 4194304K.
 * If so, then we can use a simple algorithm with word divides, otherwise
 * things get a little more complex.
 * NOTE - %eax currently contains the high order word of DVSR
 */
or %eax,%eax /* check to see if divisor < 4194304K */
jnz LL3 /* nope, gotta do this the hard way */
mov 20(%esp),%ecx /* load divisor */
mov 16(%esp),%eax /* load high word of dividend */
xor %edx,%edx
div %ecx /* %edx <- remainder */
mov 12(%esp),%eax /* %edx:%eax <- remainder:lo word of dividend */
div %ecx /* %edx <- final remainder */
mov %edx,%eax /* %edx:%eax <- remainder */
xor %edx,%edx
dec %edi /* check result sign flag */
jns LL4 /* negate result, restore stack and return */
jmp LL8 /* result sign ok, restore stack and return */
/* Here we do it the hard way. Remember, %eax contains the high word of DVSR */
LL3:
mov %eax,%ebx /* %ebx:%ecx <- divisor */
mov 20(%esp),%ecx
mov 16(%esp),%edx /* %edx:%eax <- dividend */
mov 12(%esp),%eax
LL5:
shr $1,%ebx /* shift divisor right one bit */
rcr $1,%ecx
shr $1,%edx /* shift dividend right one bit */
rcr $1,%eax
or %ebx,%ebx
jnz LL5 /* loop until divisor < 4194304K */
div %ecx /* now divide, ignore remainder */
/* We may be off by one, so to check, we will multiply the quotient
 * by the divisor and check the result against the original dividend
 * Note that we must also check for overflow, which can occur if the
 * dividend is close to 2**64 and the quotient is off by 1.
 */
mov %eax,%ecx /* save a copy of quotient in %ECX */
mull 24(%esp)
xchg %eax,%ecx /* save product, get quotient in %EAX */
mull 20(%esp)
add %ecx,%edx /* %EDX:%EAX = QUOT * DVSR */
jc LL6 /* carry means Quotient is off by 1 */
/* do long compare here between original dividend and the result of the
 * multiply in %edx:%eax. If original is larger or equal, we are ok, otherwise
 * subtract the original divisor from the result.
 */
cmp 16(%esp),%edx /* compare hi words of result and original */
ja LL6 /* if result > original, do subtract */
jb LL7 /* if result < original, we are ok */
cmp 12(%esp),%eax /* hi words are equal, compare lo words */
jbe LL7 /* if less or equal we are ok, else subtract */
LL6:
sub 20(%esp),%eax /* subtract divisor from result */
sbb 24(%esp),%edx
LL7:
/* Calculate remainder by subtracting the result from the original dividend.
 * Since the result is already in a register, we will do the subtract in the
 * opposite direction and negate the result if necessary.
 */
sub 12(%esp),%eax /* subtract dividend from result */
sbb 16(%esp),%edx
/* Now check the result sign flag to see if the result is supposed to be positive
 * or negative. It is currently negated (because we subtracted in the 'wrong'
 * direction), so if the sign flag is set we are done, otherwise we must negate
 * the result to make it positive again.
 */
dec %edi /* check result sign flag */
jns LL8 /* result is ok, restore stack and return */
LL4:
neg %edx /* otherwise, negate the result */
neg %eax
sbb $0,%edx
/* Just the cleanup left to do. %edx:%eax contains the remainder.
 * Restore the saved registers and return.
 */
LL8:
pop %edi
pop %ebx
ret $16 /* also pops the four argument words pushed by the caller */
GLOBAL_ENTRY_END(x86Mod64Bit)
/*
 * llshl
 *
 * Origin: MSDev. modified
 * Purpose: long shift left
 * In: args are passed on the stack: (FIX make fastcall)
 * 1st pushed: amount (int)
 * 2nd pushed: source (long)
 * Out: %EDX:%EAX contains the result; amounts of 64 or more yield 0
 * Note: parameters are removed from the stack
 * Uses: %ECX, destroyed
 */
GLOBAL_ENTRY_START(x86Shl64Bit)
/* prepare from stack */
mov 4(%esp),%eax /* source lo word */
mov 8(%esp),%edx /* source hi word */
mov 12(%esp),%ecx /* shift amount */
cmp $64,%cl
jae RETZERO /* everything shifted out -> 0 */
/* Handle shifts of between 0 and 31 bits */
cmp $32,%cl
jae MORE32
shld %eax,%edx /* %edx <<= %cl, filling from %eax (count implicit in %cl) */
shl %cl,%eax
ret $12
/* Handle shifts of between 32 and 63 bits */
MORE32:
mov %eax,%edx /* lo word moves to hi word... */
xor %eax,%eax /* ...lo word becomes zero... */
and $31,%cl /* ...then shift hi word by (amount mod 32) */
shl %cl,%edx
ret $12
/* return 0 in %edx:%eax */
RETZERO:
xor %eax,%eax
xor %edx,%edx
ret $12
GLOBAL_ENTRY_END(x86Shl64Bit)
/*
 * llshr
 *
 * Origin: MSDev. modified
 * Purpose: long shift right (logical / unsigned)
 * In: args are passed on the stack: (FIX make fastcall)
 * 1st pushed: amount (int)
 * 2nd pushed: source (long)
 * Out: %EDX:%EAX contains the result; amounts of 64 or more yield 0
 * Note: parameters are removed from the stack
 * Uses: %ECX, destroyed
 */
GLOBAL_ENTRY_START(x86Shr64Bit)
/* prepare from stack */
mov 4(%esp),%eax /* source lo word */
mov 8(%esp),%edx /* source hi word */
mov 12(%esp),%ecx /* shift amount */
cmp $64,%cl
jae RRETZERO /* everything shifted out -> 0 */
/* Handle shifts of between 0 and 31 bits */
cmp $32,%cl
jae MMORE32
shrd %edx,%eax /* %eax >>= %cl, filling from %edx (count implicit in %cl) */
shr %cl,%edx /* logical shift zero-fills the hi word */
ret $12
/* Handle shifts of between 32 and 63 bits */
MMORE32:
mov %edx,%eax /* hi word moves to lo word... */
xor %edx,%edx /* ...hi word becomes zero... */
and $31,%cl /* ...then shift lo word by (amount mod 32) */
shr %cl,%eax
ret $12
/* return 0 in %edx:%eax */
RRETZERO:
xor %eax,%eax
xor %edx,%edx
ret $12
GLOBAL_ENTRY_END(x86Shr64Bit)
/*
 * llsar
 *
 * Origin: MSDev. modified
 * Purpose: long shift right signed (arithmetic)
 * In: args are passed on the stack: (FIX make fastcall)
 * 1st pushed: amount (int)
 * 2nd pushed: source (long)
 * Out: %EDX:%EAX contains the result; amounts of 64 or more yield
 *      0 or -1 depending on the sign of the source
 * Note: parameters are removed from the stack
 * Uses: %ECX, destroyed
 */
GLOBAL_ENTRY_START(x86Sar64Bit)
/* prepare from stack */
mov 4(%esp),%eax
mov 8(%esp),%edx
mov 12(%esp),%ecx
/* Handle shifts of 64 bits or more (if shifting 64 bits or more, the result */
/* depends only on the high order bit of %edx). */
cmp $64,%cl
jae RETSIGN
/* Handle shifts of between 0 and 31 bits */
cmp $32,%cl
jae MMMORE32
shrd %edx,%eax /* %eax >>= %cl, filling from %edx (count implicit in %cl) */
sar %cl,%edx /* arithmetic shift propagates the sign in the hi word */
ret $12
/* Handle shifts of between 32 and 63 bits */
MMMORE32:
mov %edx,%eax /* hi word moves to lo word... */
sar $31,%edx /* ...hi word becomes all sign bits... */
and $31,%cl /* ...then shift lo word by (amount mod 32) */
sar %cl,%eax
ret $12
/* Return double precision 0 or -1, depending on the sign of %edx */
RETSIGN:
sar $31,%edx /* smear the sign bit across the hi word */
mov %edx,%eax
ret $12
GLOBAL_ENTRY_END(x86Sar64Bit)

Просмотреть файл

@ -1,128 +0,0 @@
/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*-
*
* The contents of this file are subject to the Netscape Public License
* Version 1.0 (the "NPL"); you may not use this file except in
* compliance with the NPL. You may obtain a copy of the NPL at
* http://www.mozilla.org/NPL/
*
* Software distributed under the NPL is distributed on an "AS IS" basis,
* WITHOUT WARRANTY OF ANY KIND, either express or implied. See the NPL
* for the specific language governing rights and limitations under the
* NPL.
*
* The Initial Developer of this code under the NPL is Netscape
* Communications Corporation. Portions created by Netscape are
* Copyright (C) 1998 Netscape Communications Corporation. All Rights
* Reserved.
*/
/*
x86Linux_Support.cpp
*/
#include "NativeCodeCache.h"
#include <string.h>
#include "Fundamentals.h"
#include "MemoryAccess.h"
#include "x86Linux_Support.h"
void* JNIenv = 0;
extern ClassWorld world;
#ifdef DEBUG
// Pointer to the instruction after the call (used by exception handler to check
// I wanted to use:
// void* compileStubReEntryPoint = (void*) ((Uint8*)staticCompileStub + 17);
// but MSDev appears to have a bug, in that compileStubReEntryPoint will be set == (void*)staticCompileStub
// which is clearly wrong.
void* compileStubAddress = (void*)staticCompileStub;
void* compileStubReEntryPoint = (Uint8*)compileStubAddress + 17;
#endif // DEBUG
// generateNativeStub
//
// Emit a 10-byte trampoline for a native method:
//   pushl $nativeFunction ; jmp sysInvokeNativeStubs[nWords]
// The stub pushes the target native function and tail-jumps to the
// system invoke stub that matches the argument size.
void *
generateNativeStub(NativeCodeCache& inCache, const CacheEntry& inCacheEntry, void *nativeFunction)
{
	Method* method = inCacheEntry.descriptor.method;

	// Index the invoke-stub table by the number of 32-bit argument words,
	// not by argument count: long/double arguments occupy two words each,
	// so nArguments under-counts for such signatures (the Win32 version of
	// this routine was already fixed this way).
	Uint32 nWords = method->getArgsSize() / sizeof(Int32);

	assert(method->getModifiers() & CR_METHOD_NATIVE);
	assert(nWords <= 256);

	extern Uint32 sysInvokeNativeStubs[];
	Uint8 stubSize = 10;	// pushl imm32 (5 bytes) + jmp rel32 (5 bytes)
	void* stub;

	// Write out the native stub
	stub = inCache.acquireMemory(stubSize);
	Uint8* where = (Uint8*)stub;
	*where++ = 0x68;	// pushl imm32
	writeLittleWordUnaligned(where, (uint32)(nativeFunction));
	where += 4;
	*where++ = 0xe9;	// jmp rel32 (displacement relative to end of instruction)
	writeLittleWordUnaligned(where, (Uint8 *) sysInvokeNativeStubs[nWords] - (where + 4));

	// Return the address of the stub.
	return ((void*)stub);
}
// generateJNIGlue
//
// Build a small JNI trampoline: discard the caller's return address,
// push the global JNI environment pointer in its place, then jump
// straight to the native code.
void *
generateJNIGlue(NativeCodeCache& inCache,
				const CacheEntry& inCacheEntry,
				void *nativeFunction)
{
	Method* method = inCacheEntry.descriptor.method;
	assert(method->getModifiers() & CR_METHOD_NATIVE);

	const Uint8 stubSize = 12;

	// Write out the JNI compile stub
	Uint8* stub = (Uint8*) inCache.acquireMemory(stubSize);
	Uint8* p = stub;

	*p++ = 0x58;							// popl %eax
	*p++ = 0x68;							// pushl imm32
	writeLittleWordUnaligned(p, (Uint32) JNIenv);
	p += 4;
	*p++ = 0xe9;							// jmp rel32
	writeLittleWordUnaligned(p, reinterpret_cast<Uint32>(nativeFunction) - Uint32(p + 4));

	return stub;
}
// generateCompileStub
//
// Emit the 10-byte dynamic compile stub:
//   movl $&inCacheEntry, %eax ; jmp staticCompileStub
// The first call through this stub compiles the method and back-patches
// the caller.
void *
generateCompileStub(NativeCodeCache& inCache, const CacheEntry& inCacheEntry)
{
	uint8 stubSize = 10;

	// Write out the dynamic compile stub
	Uint8* stub = (Uint8*) inCache.acquireMemory(stubSize);
	Uint8* p = stub;

	*p++ = 0xb8;							// movl imm32, %eax
	writeLittleWordUnaligned(p, (uint32)(&inCacheEntry));
	p += 4;
	*p++ = 0xe9;							// jmp rel32
	writeLittleWordUnaligned(p, (Uint8 *) staticCompileStub - (p + 4));

	// Return the address of the dynamic stub.
	return ((void*) stub);
}
void*
backPatchMethod(void* inMethodAddress, void* inLastPC, void* /*inUserDefined*/)
{
uint32 curAddress = (uint32) inLastPC;
uint32 methodAddress = (uint32) inMethodAddress;
// Compute the relative branch
uint32* relativeBranch = ((uint32*)inLastPC)-1;
int32 offset = methodAddress - curAddress;
// Backpatch the method.
writeLittleWordUnaligned((void*)relativeBranch, offset);
return (inMethodAddress);
}

Просмотреть файл

@ -20,11 +20,11 @@
//
#include "x86StdCall.h"
#include "x86Win32Emitter.h"
#include "x86Emitter.h"
// Utility function to push one function-call argument on the stack
void
pushCallArg(DataConsumer& argument, DataNode* usesNode, InstructionDefine*& orderingDependency, Pool& inPool, x86Win32Emitter& inEmitter)
pushCallArg(DataConsumer& argument, DataNode* usesNode, InstructionDefine*& orderingDependency, Pool& inPool, x86Emitter& inEmitter)
{
// Do the first argument specially because that sets up the chain of ordering dependencies
int numOrderingUses = orderingDependency ? 1 : 0;
@ -175,7 +175,7 @@ pushCallArg(DataConsumer& argument, DataNode* usesNode, InstructionDefine*& orde
}
void x86Win32Emitter::
void x86Emitter::
emitArguments(ControlNode::BeginExtra& inBeginNode)
{
if (inBeginNode.nArguments == 0)

Просмотреть файл

@ -19,10 +19,10 @@
// x86StdCall.h - StdCall calling convention
//
#include "x86Win32Instruction.h"
#include "x86Instruction.h"
// Utility function to push one function-call argument on the stack
void pushCallArg(DataConsumer &curArgument, DataNode* usesNode, InstructionDefine*& orderingDependency, Pool& inPool, x86Win32Emitter& inEmitter);
void pushCallArg(DataConsumer &curArgument, DataNode* usesNode, InstructionDefine*& orderingDependency, Pool& inPool, x86Emitter& inEmitter);
// -> mem regarg1 regarg2 regarg3 regarg3
// <- mem [returnval]
@ -35,7 +35,7 @@ Call( DataNode* inDataNode,
Pool& inPool,
Uint8 inRegisterArguments,
bool /*inHasReturnValue*/,
x86Win32Emitter& inEmitter,
x86Emitter& inEmitter,
void (*inFunc)(),
DataNode* inUseDataNode) :
InsnUseXDefineYFromPool(inDataNode, inPool, ((inRegisterArguments != 0) ? 1 : 0) + tHasIncomingStore + tIsDynamic, 3)

Просмотреть файл

Просмотреть файл

Просмотреть файл

Просмотреть файл

Просмотреть файл

@ -1,357 +0,0 @@
/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*-
*
* The contents of this file are subject to the Netscape Public License
* Version 1.0 (the "NPL"); you may not use this file except in
* compliance with the NPL. You may obtain a copy of the NPL at
* http://www.mozilla.org/NPL/
*
* Software distributed under the NPL is distributed on an "AS IS" basis,
* WITHOUT WARRANTY OF ANY KIND, either express or implied. See the NPL
* for the specific language governing rights and limitations under the
* NPL.
*
* The Initial Developer of this code under the NPL is Netscape
* Communications Corporation. Portions created by Netscape are
* Copyright (C) 1998 Netscape Communications Corporation. All Rights
* Reserved.
*/
//
// File: x86Win32_support.cpp
//
// Authors: Peter DeSantis
// Simon Holmes a Court
//
#include "NativeCodeCache.h"
#include <string.h>
#include "Fundamentals.h"
#include "MemoryAccess.h"
void* JNIenv = 0;
extern ClassWorld world;
#ifdef __GNUC__
/*
Following code written by rth@cygnus.org. Comment from fur@netscape.com.
I suspect that your code will work OK if there are no exceptions while a method is being
compiled, but you'll probably crash if the compilation terminates with an exception. That's
because the Java exception code relies on the presence of a "guard frame" when calling from
JIT'ed to native code so as to restore the callee-saved registers when unwinding the stack.
See x86ExceptionHandler.cpp for some details. This code is "temporary". What we
eventually hoped to do was to use a different calling convention (in terms of which registers
are volatile) for calls that make an exceptional return versus a normal return, so that the stack
unwinding code did not have to restore any registers. Until we can educate the register
allocator about these constraints, we'll need to retain this hacky guard frame and the various
bits of code that are used to create it. (See also x86Win32InvokeNative.cpp).
Some of the exception debugging code makes assumptions about the location of the call
instruction inside the static compile stub - assumptions that are broken by the new code. (And
the exception debugging code should probably be changed to get rid of that dependency.)
*/
/* Go through one level of extra indirection to isolate ourselves from name
mangling and cdecl vs stdcall changes. */
/* regparm(2): the first two arguments arrive in %eax and %edx, matching the
   hand-written compileStub below; the __asm__ label pins the unmangled name
   so the asm can call it. */
static void* compileStub_1(const CacheEntry&, void *) __attribute__((regparm(2), unused));
static void* compileStub_1(const CacheEntry&, void *) __asm__("compileStub_1");
/* Compile the method described by inCacheEntry and return its entry point;
   retAddr is the caller's return address, used to back-patch the call site. */
static void*
compileStub_1(const CacheEntry& inCacheEntry, void *retAddr)
{
return compileAndBackPatchMethod(inCacheEntry, retAddr);
}
extern void compileStub() __asm__("compileStub");
/* The stub proper: %eax already holds the cache entry (see
   generateCompileStub); load the return address into %edx, compile, then
   jump to the freshly compiled method returned in %eax.
   NOTE(review): no guard frame is built here -- see the exception-handling
   caveat in the comment block above. */
asm("\n\
compileStub:\n\
movl 0(%esp), %edx\n\
call compileStub_1\n\
jmpl *%eax");
#else /* !__GNUC__ */
/* MSVC flavor of the dynamic compile stub.  __declspec(naked): the compiler
   emits no prologue/epilogue, so the frame is built entirely by hand. */
static void __declspec( naked )
compileStub()
{
_asm
{
// eax contains the cache entry.
// make frame
push ebp
mov ebp,esp
// save all volatiles (especially for exception handler)
// ??? Um, these are _non_ volatile, ie callee saved.
// We shouldn't have to do anything with them.
push edi
push esi
push ebx
// call compileAndBackPatchMethod with args
// third argument is not used
push [esp + 16] // second argument -- return address (above the 4 pushes = 16 bytes)
push eax // first argument -- cacheEntry
call compileAndBackPatchMethod
// remove args
pop edx // <--- compileStubReEntryPoint
pop edx
pop ebx // Restore volatiles
pop esi
pop edi
// remove frame
mov esp,ebp
pop ebp
// jump to the compiled method (entry point returned in eax)
jmp eax
}
}
#endif /* __GNUC__ */
#ifdef DEBUG
// Pointer to the instruction after the call (used by exception handler to check
// I wanted to use:
// void* compileStubReEntryPoint = (void*) ((Uint8*)staticCompileStub + 17);
// but MSDev appears to have a bug, in that compileStubReEntryPoint will be set == (void*)staticCompileStub
// which is clearly wrong.
void* compileStubAddress = (void*)compileStub;
// void* compileStubReEntryPoint = (Uint8*)compileStubAddress + 15; // Correct address ?
void* compileStubReEntryPoint = NULL;
#endif // DEBUG
// generateNativeStub
//
// Build the 10-byte trampoline used to enter a native method:
//   pushl $nativeFunction ; jmp sysInvokeNativeStubs[nWords]
// where nWords is the method's argument size in 32-bit words (long and
// double arguments take two words each).
void *
generateNativeStub(NativeCodeCache& inCache, const CacheEntry& inCacheEntry, void *nativeFunction)
{
	Method* method = inCacheEntry.descriptor.method;
	Uint32 nWords = method->getArgsSize()/sizeof(Int32);

	assert(method->getModifiers() & CR_METHOD_NATIVE);
	assert(nWords <= 256);

	extern void *sysInvokeNativeStubs[];

	const Uint8 stubSize = 10;				// 5-byte push + 5-byte jmp
	Uint8* stub = (Uint8*) inCache.acquireMemory(stubSize);
	Uint8* p = stub;

	*p++ = 0x68;							// pushl imm32
	writeLittleWordUnaligned(p, (uint32)(nativeFunction));
	p += 4;
	*p++ = 0xe9;							// jmp rel32
	writeLittleWordUnaligned(p, (Uint8 *) sysInvokeNativeStubs[nWords] - (p + 4));

	return ((void*) stub);
}
// generateCompileStub
//
// Emit the 10-byte dynamic compile stub:
//   movl $&inCacheEntry, %eax ; jmp compileStub
// The first call through it triggers compilation and back-patching.
void*
generateCompileStub(NativeCodeCache& inCache, const CacheEntry& inCacheEntry)
{
	// Write out the dynamic compile stub
	uint8* stub = (uint8 *) inCache.acquireMemory(10);
	uint8* p = stub;

	// movl $inCacheEntry, %eax
	*p++ = 0xb8;
	writeLittleWordUnaligned(p, (uint32)&inCacheEntry);
	p += 4;

	// jmp compileStub
	*p++ = 0xe9;
	writeLittleWordUnaligned(p, (Uint8 *) compileStub - (p + 4));

	// Return the address of the dynamic stub.
	return (void*) stub;
}
void*
backPatchMethod(void* inMethodAddress, void* inLastPC, void* /*inUserDefined*/)
{
uint32 curAddress = (uint32) inLastPC;
uint32 methodAddress = (uint32) inMethodAddress;
// Compute the relative branch
uint32* relativeBranch = ((uint32*)inLastPC)-1;
int32 offset = methodAddress - curAddress;
// Backpatch the method.
writeLittleWordUnaligned((void*)relativeBranch, offset);
return (inMethodAddress);
}
// Warning silencing stuff
// #pragma warning( disable : 4035)
// #pragma warning( default : 4035)
//================================================================================
// 64bit Arithmetic Support Functions
// x86Extract64Bit
//
// Purpose: signed right-aligned field extraction
// In: 64 bit source (on stack)
// 32 bit extraction size (on stack)
// Out: 64 bit result: the low b bits of src, sign-extended to 64 bits
// Note: Only works in range 1 <= b <= 63, b is extraction amount
int64 __stdcall
x86Extract64Bit(int64 src, int b)
{
	if (b <= 32)
	{
		// Field fits in the low word: move its sign bit up to bit 31 of
		// an int, then arithmetic-shift back down to sign-extend.
		b = 32 - b;
		return (int)src << b >> b;
	}
	else
	{
		// Field spans both words: shift the full 64-bit value so the
		// field's sign bit lands in bit 63, then arithmetic-shift back.
		// (The previous code shifted only the high int, which discarded
		// the low 32 bits of the field.)
		b = 64 - b;
		return src << b >> b;
	}
}
// 3WayCompare
//
// Purpose: compare two longs
// In: two longs on the stack
// Out: -1 if a < b, 0 if a == b, +1 if a > b
int64 __stdcall
x86ThreeWayCMP_L(int64 a, int64 b)
{
	if (a < b)
		return -1;
	if (b < a)
		return 1;
	return 0;
}
// 3WayCompare (reversed sense)
//
// Purpose: compare two longs
// In: two longs on the stack
// Out: +1 if a < b, 0 if a == b, -1 if a > b
int64 __stdcall
x86ThreeWayCMPC_L(int64 a, int64 b)
{
	if (a < b)
		return 1;
	if (b < a)
		return -1;
	return 0;
}
// llmul
//
// Purpose: long multiply (same bit pattern for signed/unsigned)
// In: multiplicand and multiplier on the stack (QWORD each)
// Out: EDX:EAX - low 64 bits of the product
// Note: parameters are removed from the stack
int64 __stdcall
x86Mul64Bit(int64 a, int64 b)
{
	const int64 product = a * b;
	return product;
}
// lldiv
//
// Purpose: signed long divide
// In: dividend and divisor on the stack (QWORD each)
// Out: EDX:EAX contains the quotient (dividend/divisor)
// Note: parameters are removed from the stack
// NOTE(review): divisor == 0 is not handled here -- presumably the
// runtime checks before calling; verify against callers.
int64 __stdcall
x86Div64Bit(int64 dividend, int64 divisor)
{
	const int64 quotient = dividend / divisor;
	return quotient;
}
// llrem
//
// Purpose: signed long remainder
// In: dividend and divisor on the stack (QWORD each)
// Out: EDX:EAX contains the remainder (dividend%divisor)
// Note: parameters are removed from the stack
// NOTE(review): divisor == 0 is not handled here -- presumably the
// runtime checks before calling; verify against callers.
int64 __stdcall
x86Mod64Bit(int64 dividend, int64 divisor)
{
	const int64 remainder = dividend % divisor;
	return remainder;
}
// llshl
//
// Purpose: long shift left
// In: source (long) and amount (int) on the stack (FIX make fastcall)
// Out: EDX:EAX contains the result
// Note: parameters are removed from the stack
// NOTE(review): amount is assumed to be in 0..63 -- confirm callers
// mask the shift count.
int64 __stdcall
x86Shl64Bit(int64 src, int amount)
{
	const int64 shifted = src << amount;
	return shifted;
}
// llshr
//
// Purpose: long shift right (logical; operand is unsigned, so the
//          vacated high bits are zero-filled)
// In: source (long) and amount (int) on the stack (FIX make fastcall)
// Out: EDX:EAX contains the result
// Note: parameters are removed from the stack
// NOTE(review): amount is assumed to be in 0..63 -- confirm callers
// mask the shift count.
uint64 __stdcall
x86Shr64Bit(uint64 src, int amount)
{
	const uint64 shifted = src >> amount;
	return shifted;
}
// llsar
//
// Purpose: long shift right signed
// In: source (long) and amount (int) on the stack (FIX make fastcall)
// Out: EDX:EAX contains the result
// Note: parameters are removed from the stack
// NOTE(review): relies on signed >> being an arithmetic shift, which is
// implementation-defined in C++ but holds on the targeted compilers.
int64 __stdcall
x86Sar64Bit(int64 src, int amount)
{
	const int64 shifted = src >> amount;
	return shifted;
}
//================================================================================