Mirror of https://github.com/stride3d/xkslang.git

PP: Fix #1605: Paste tokens for ## through number->letter transitions.

Parent: a51d3d9f22
Commit: a84079dcf3
@@ -0,0 +1,23 @@
+cppMerge.frag
+Shader version: 450
+0:? Sequence
+0:22 Function Definition: main( ( global void)
+0:22 Function Parameters:
+0:? Linker Objects
+0:? 'dest1' (layout( set=0 binding=0) writeonly uniform image1D)
+0:? 'dest2' (layout( set=0 binding=0) writeonly uniform image1D)
+0:? 'dest3' (layout( set=0 binding=0) writeonly uniform image1D)
+
+
+Linked fragment stage:
+
+
+Shader version: 450
+0:? Sequence
+0:22 Function Definition: main( ( global void)
+0:22 Function Parameters:
+0:? Linker Objects
+0:? 'dest1' (layout( set=0 binding=0) writeonly uniform image1D)
+0:? 'dest2' (layout( set=0 binding=0) writeonly uniform image1D)
+0:? 'dest3' (layout( set=0 binding=0) writeonly uniform image1D)
+
@@ -0,0 +1,24 @@
+#version 450 core
+
+#define PASTER2(type, suffix) type##suffix
+#define PASTER3(type, suffix) type## suffix
+#define MAKE_TYPE1 image1D dest ## 1;
+#define MAKE_TYPE2(type, suffix) PASTER2(type, suffix)
+#define MAKE_TYPE3(type, suffix) PASTER3(type, suffix)
+
+#define PREFIX image
+#define PREFIX3 imag
+#define SUFFIX2 1D
+#define SUFFIX3 e1 D
+
+#define RESOURCE_TYPE1 MAKE_TYPE1
+#define RESOURCE_TYPE2 MAKE_TYPE2(PREFIX, SUFFIX2)
+#define RESOURCE_TYPE3 MAKE_TYPE3(PREFIX3, SUFFIX3)
+
+layout (set = 0, binding = 0) uniform writeonly RESOURCE_TYPE1
+layout (set = 0, binding = 0) uniform writeonly RESOURCE_TYPE2 dest2;
+layout (set = 0, binding = 0) uniform writeonly RESOURCE_TYPE3 dest3;
+
+void main()
+{
+}
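Per the baseline above, all three of dest1, dest2, and dest3 are expected to end up with type image1D. The case the commit title describes is RESOURCE_TYPE2: the replacement text 1D of SUFFIX2 is not a single token to glslang's preprocessor, which stops a numeric literal when it hits an invalid suffix (see the scanner comments later in this diff), so the ## in PASTER2 must keep pasting across the number->letter boundary. A rough hand-worked trace follows; it is illustrative only (not output of glslang) and assumes exactly that tokenization of 1D.

// Illustrative trace of the dest2 declaration above (not generated by glslang),
// assuming the scanner splits "1D" into "1" followed by a non-spaced "D":
//
//   RESOURCE_TYPE2
//   -> MAKE_TYPE2(PREFIX, SUFFIX2)
//   -> PASTER2(image, 1D)    // PREFIX and SUFFIX2 expand during argument prescan
//   -> image##1D             // but "1D" is really the two tokens "1" and "D"
//   -> image1D               // the fix keeps pasting across the 1 -> D boundary
//
// A trivial check that the accumulated spelling matches the baseline type:
#include <cassert>
#include <string>

int main() {
    const std::string pasted = std::string("image") + "1" + "D";
    assert(pasted == "image1D"); // 'dest2' is listed as image1D in the baseline above
    return 0;
}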
@@ -84,6 +84,7 @@ NVIDIA HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 #include <sstream>

 #include "../ParseHelper.h"
+#include "PpTokens.h"

 /* windows only pragma */
 #ifdef _MSC_VER
@@ -212,7 +213,8 @@ public:
     virtual int scan(TPpToken*) = 0;
     virtual int getch() = 0;
     virtual void ungetch() = 0;
     virtual bool peekPasting() { return false; } // true when about to see ##
+    virtual bool peekContinuedPasting(int) { return false; } // true when non-spaced tokens can paste
     virtual bool endOfReplacementList() { return false; } // true when at the end of a macro replacement list (RHS of #define)
     virtual bool isMacroInput() { return false; }

@@ -263,7 +265,9 @@ public:
             snprintf(ppToken.name, sizeof(ppToken.name), "%s", name.c_str());
             return atom;
         }
-        bool isAtom(int a) { return atom == a; }
+        bool isAtom(int a) const { return atom == a; }
+        int getAtom() const { return atom; }
+        bool nonSpaced() const { return !space; }
     protected:
         Token() {}
         int atom;
@@ -276,6 +280,35 @@ public:

     void putToken(int token, TPpToken* ppToken);
     bool peekToken(int atom) { return !atEnd() && stream[currentPos].isAtom(atom); }
+    bool peekContinuedPasting(int atom)
+    {
+        // This is basically necessary because, for example, the PP
+        // tokenizer only accepts valid numeric-literals plus suffixes, so
+        // separates numeric-literals plus bad suffix into two tokens, which
+        // should get both pasted together as one token when token pasting.
+        //
+        // The following code is a bit more generalized than the above example.
+        if (!atEnd() && atom == PpAtomIdentifier && stream[currentPos].nonSpaced()) {
+            switch(stream[currentPos].getAtom()) {
+            case PpAtomConstInt:
+            case PpAtomConstUint:
+            case PpAtomConstInt64:
+            case PpAtomConstUint64:
+            case PpAtomConstInt16:
+            case PpAtomConstUint16:
+            case PpAtomConstFloat:
+            case PpAtomConstDouble:
+            case PpAtomConstFloat16:
+            case PpAtomConstString:
+            case PpAtomIdentifier:
+                return true;
+            default:
+                break;
+            }
+        }
+
+        return false;
+    }
     int getToken(TParseContextBase&, TPpToken*);
     bool atEnd() { return currentPos >= stream.size(); }
     bool peekTokenizedPasting(bool lastTokenPastes);
@@ -344,6 +377,10 @@ protected:
     int getChar() { return inputStack.back()->getch(); }
     void ungetChar() { inputStack.back()->ungetch(); }
     bool peekPasting() { return !inputStack.empty() && inputStack.back()->peekPasting(); }
+    bool peekContinuedPasting(int a)
+    {
+        return !inputStack.empty() && inputStack.back()->peekContinuedPasting(a);
+    }
     bool endOfReplacementList() { return inputStack.empty() || inputStack.back()->endOfReplacementList(); }
     bool isMacroInput() { return inputStack.size() > 0 && inputStack.back()->isMacroInput(); }

@@ -368,6 +405,7 @@ protected:
         virtual int getch() override { assert(0); return EndOfInput; }
         virtual void ungetch() override { assert(0); }
         bool peekPasting() override { return prepaste; }
+        bool peekContinuedPasting(int a) override { return mac->body.peekContinuedPasting(a); }
         bool endOfReplacementList() override { return mac->body.atEnd(); }
         bool isMacroInput() override { return true; }

@@ -442,14 +480,18 @@ protected:

     class tTokenInput : public tInput {
     public:
-        tTokenInput(TPpContext* pp, TokenStream* t, bool prepasting) : tInput(pp), tokens(t), lastTokenPastes(prepasting) { }
+        tTokenInput(TPpContext* pp, TokenStream* t, bool prepasting) :
+            tInput(pp),
+            tokens(t),
+            lastTokenPastes(prepasting) { }
         virtual int scan(TPpToken *ppToken) override { return tokens->getToken(pp->parseContext, ppToken); }
         virtual int getch() override { assert(0); return EndOfInput; }
         virtual void ungetch() override { assert(0); }
         virtual bool peekPasting() override { return tokens->peekTokenizedPasting(lastTokenPastes); }
+        bool peekContinuedPasting(int a) override { return tokens->peekContinuedPasting(a); }
     protected:
         TokenStream* tokens;
         bool lastTokenPastes; // true if the last token in the input is to be pasted, rather than consumed as a token
     };

     class tUngotTokenInput : public tInput {
@@ -96,12 +96,19 @@ namespace glslang {
 /////////////////////////////////// Floating point constants: /////////////////////////////////
 ///////////////////////////////////////////////////////////////////////////////////////////////

-/*
-* lFloatConst() - Scan a single- or double-precision floating point constant. Assumes that the scanner
-*                 has seen at least one digit, followed by either a decimal '.' or the
-*                 letter 'e', or a precision ending (e.g., F or LF).
-*/
-
+//
+// Scan a single- or double-precision floating point constant.
+// Assumes that the scanner has seen at least one digit,
+// followed by either a decimal '.' or the letter 'e', or a
+// precision ending (e.g., F or LF).
+//
+// This is technically not correct, as the preprocessor should just
+// accept the numeric literal along with whatever suffix it has, but
+// currently, it stops on seeing a bad suffix, treating that as the
+// next token. This affects things like token pasting, where it is
+// relevant how many tokens something was broken into.
+//
+// See peekContinuedPasting().
 int TPpContext::lFloatConst(int len, int ch, TPpToken* ppToken)
 {
     const auto saveName = [&](int ch) {
@@ -435,6 +442,14 @@ int TPpContext::characterLiteral(TPpToken* ppToken)
 //
 // Scanner used to tokenize source stream.
 //
+// N.B. Invalid numeric suffixes are not consumed.
+// This is technically not correct, as the preprocessor should just
+// accept the numeric literal along with whatever suffix it has, but
+// currently, it stops on seeing a bad suffix, treating that as the
+// next token. This affects things like token pasting, where it is
+// relevant how many tokens something was broken into.
+// See peekContinuedPasting().
+//
 int TPpContext::tStringInput::scan(TPpToken* ppToken)
 {
     int AlreadyComplained = 0;
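The suffix behavior described above is easy to model in isolation. The following standalone sketch is not glslang's scanner (the names are invented, and valid suffixes such as u or f are ignored entirely for simplicity); it only illustrates how a literal like 1D from the test case becomes a number token followed by a non-spaced identifier token, while e1 D becomes an identifier followed by a spaced token:

// Toy scanner sketch (not glslang code): numbers stop at the first
// non-digit, so a literal with a trailing letter splits into two tokens,
// and the second one is marked as non-spaced.
#include <cassert>
#include <cctype>
#include <cstddef>
#include <string>
#include <vector>

struct ToyToken {
    std::string text;
    bool spaced; // true if whitespace preceded this token
};

static std::vector<ToyToken> toyScan(const std::string& src) {
    std::vector<ToyToken> out;
    std::size_t i = 0;
    bool sawSpace = false;
    while (i < src.size()) {
        unsigned char c = static_cast<unsigned char>(src[i]);
        if (std::isspace(c)) { sawSpace = true; ++i; continue; }
        std::size_t start = i;
        if (std::isdigit(c)) {
            // Stop the number at the first non-digit: no suffix is consumed.
            while (i < src.size() && std::isdigit(static_cast<unsigned char>(src[i]))) ++i;
        } else if (std::isalpha(c) || src[i] == '_') {
            while (i < src.size() &&
                   (std::isalnum(static_cast<unsigned char>(src[i])) || src[i] == '_')) ++i;
        } else {
            ++i; // single-character punctuation token
        }
        out.push_back({src.substr(start, i - start), sawSpace});
        sawSpace = false;
    }
    return out;
}

int main() {
    // SUFFIX2 from the test: "1D" splits into "1" and a non-spaced "D".
    std::vector<ToyToken> t2 = toyScan("1D");
    assert(t2.size() == 2 && t2[0].text == "1" && t2[1].text == "D" && !t2[1].spaced);

    // SUFFIX3 from the test: "e1 D" is an identifier, then a spaced "D".
    std::vector<ToyToken> t3 = toyScan("e1 D");
    assert(t3.size() == 2 && t3[0].text == "e1" && t3[1].spaced);
    return 0;
}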
@@ -1153,62 +1168,69 @@ int TPpContext::tokenPaste(int token, TPpToken& ppToken)
             break;
         }

-        // get the token after the ##
-        token = scanToken(&pastedPpToken);
+        // Get the token(s) after the ##.
+        // Because of "space" semantics, and prior tokenization, what
+        // appeared as a single token, e.g. "3A", might have been tokenized
+        // into two tokens "3" and "A", but the "A" will have 'space' set to
+        // false. Accumulate all of these to recreate the original lexically
+        // appearing token.
+        do {
+            token = scanToken(&pastedPpToken);

-        // This covers end of argument expansion
-        if (token == tMarkerInput::marker) {
-            parseContext.ppError(ppToken.loc, "unexpected location; end of argument", "##", "");
-            break;
-        }
+            // This covers end of argument expansion
+            if (token == tMarkerInput::marker) {
+                parseContext.ppError(ppToken.loc, "unexpected location; end of argument", "##", "");
+                return resultToken;
+            }

-        // get the token text
-        switch (resultToken) {
-        case PpAtomIdentifier:
-            // already have the correct text in token.names
-            break;
-        case '=':
-        case '!':
-        case '-':
-        case '~':
-        case '+':
-        case '*':
-        case '/':
-        case '%':
-        case '<':
-        case '>':
-        case '|':
-        case '^':
-        case '&':
-        case PpAtomRight:
-        case PpAtomLeft:
-        case PpAtomAnd:
-        case PpAtomOr:
-        case PpAtomXor:
-            snprintf(ppToken.name, sizeof(ppToken.name), "%s", atomStrings.getString(resultToken));
-            snprintf(pastedPpToken.name, sizeof(pastedPpToken.name), "%s", atomStrings.getString(token));
-            break;
-        default:
-            parseContext.ppError(ppToken.loc, "not supported for these tokens", "##", "");
-            return resultToken;
-        }
+            // get the token text
+            switch (resultToken) {
+            case PpAtomIdentifier:
+                // already have the correct text in token.names
+                break;
+            case '=':
+            case '!':
+            case '-':
+            case '~':
+            case '+':
+            case '*':
+            case '/':
+            case '%':
+            case '<':
+            case '>':
+            case '|':
+            case '^':
+            case '&':
+            case PpAtomRight:
+            case PpAtomLeft:
+            case PpAtomAnd:
+            case PpAtomOr:
+            case PpAtomXor:
+                snprintf(ppToken.name, sizeof(ppToken.name), "%s", atomStrings.getString(resultToken));
+                snprintf(pastedPpToken.name, sizeof(pastedPpToken.name), "%s", atomStrings.getString(token));
+                break;
+            default:
+                parseContext.ppError(ppToken.loc, "not supported for these tokens", "##", "");
+                return resultToken;
+            }

-        // combine the tokens
-        if (strlen(ppToken.name) + strlen(pastedPpToken.name) > MaxTokenLength) {
-            parseContext.ppError(ppToken.loc, "combined tokens are too long", "##", "");
-            return resultToken;
-        }
-        snprintf(&ppToken.name[0] + strlen(ppToken.name), sizeof(ppToken.name) - strlen(ppToken.name),
-                 "%s", pastedPpToken.name);
+            // combine the tokens
+            if (strlen(ppToken.name) + strlen(pastedPpToken.name) > MaxTokenLength) {
+                parseContext.ppError(ppToken.loc, "combined tokens are too long", "##", "");
+                return resultToken;
+            }
+            snprintf(&ppToken.name[0] + strlen(ppToken.name), sizeof(ppToken.name) - strlen(ppToken.name),
+                     "%s", pastedPpToken.name);

-        // correct the kind of token we are making, if needed (identifiers stay identifiers)
-        if (resultToken != PpAtomIdentifier) {
-            int newToken = atomStrings.getAtom(ppToken.name);
-            if (newToken > 0)
-                resultToken = newToken;
-            else
-                parseContext.ppError(ppToken.loc, "combined token is invalid", "##", "");
-        }
+            // correct the kind of token we are making, if needed (identifiers stay identifiers)
+            if (resultToken != PpAtomIdentifier) {
+                int newToken = atomStrings.getAtom(ppToken.name);
+                if (newToken > 0)
+                    resultToken = newToken;
+                else
+                    parseContext.ppError(ppToken.loc, "combined token is invalid", "##", "");
+            }
+        } while (peekContinuedPasting(resultToken));
     }

     return resultToken;
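In short, the loop added above keeps calling scanToken() and pasting for as long as peekContinuedPasting(resultToken) says the next queued token should be folded in: the accumulated result must still be an identifier, and the upcoming token must be a non-spaced constant or identifier (per the TokenStream helper added earlier in this diff). A minimal sketch of those two conditions, using toy types rather than glslang's, is:

// Toy model of the continued-pasting test (not glslang code): keep pasting
// only while the accumulated result is an identifier and the next token is a
// non-spaced numeric or identifier token.
#include <cassert>
#include <cctype>
#include <cstddef>
#include <string>
#include <vector>

enum class ToyKind { Identifier, Number, Operator };

struct ToyToken {
    std::string text;
    ToyKind kind;
    bool spaced; // preceded by whitespace?
};

static bool isIdentifier(const std::string& s) {
    return !s.empty() && (std::isalpha(static_cast<unsigned char>(s[0])) || s[0] == '_');
}

// Mirrors the two checks: the result so far must be an identifier, and the
// upcoming token must be non-spaced and of a pasteable kind.
static bool continuesPaste(const std::string& result, const ToyToken& next) {
    if (!isIdentifier(result) || next.spaced)
        return false;
    return next.kind == ToyKind::Identifier || next.kind == ToyKind::Number;
}

static std::string pasteAll(std::string result, const std::vector<ToyToken>& rest) {
    std::size_t i = 0;
    do {
        result += rest[i].text; // paste the token right after ## unconditionally
        ++i;
    } while (i < rest.size() && continuesPaste(result, rest[i]));
    return result;
}

int main() {
    // image ## 1D, where "1D" arrives as "1" then a non-spaced "D":
    assert(pasteAll("image", {{"1", ToyKind::Number, false},
                              {"D", ToyKind::Identifier, false}}) == "image1D");
    // A non-spaced operator does not continue the paste:
    assert(pasteAll("image", {{"1", ToyKind::Number, false},
                              {"+", ToyKind::Operator, false}}) == "image1");
    return 0;
}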
@@ -93,6 +93,7 @@ INSTANTIATE_TEST_CASE_P(
         "cppSimple.vert",
         "cppIndent.vert",
         "cppIntMinOverNegativeOne.frag",
+        "cppMerge.frag",
         "cppNest.vert",
         "cppBad.vert",
         "cppBad2.vert",