## @ GenCfgData.py
#
# Copyright (c) 2014 - 2021, Intel Corporation. All rights reserved.<BR>
# SPDX-License-Identifier: BSD-2-Clause-Patent
#
##

import os
import re
import sys
import marshal
from functools import reduce
from datetime import date

# Generated file copyright header

__copyright_tmp__ = """/** @file

Configuration %s File.

Copyright (c) %4d, Intel Corporation. All rights reserved.<BR>
SPDX-License-Identifier: BSD-2-Clause-Patent

This file is automatically generated. Please do NOT modify !!!

**/
"""

__copyright_dsc__ = """## @file
#
# Copyright (c) %04d, Intel Corporation. All rights reserved.<BR>
# SPDX-License-Identifier: BSD-2-Clause-Patent
#
##

[PcdsDynamicVpd.Upd]
#
# Global definitions in BSF
# !BSF BLOCK:{NAME:"FSP UPD Configuration", VER:"0.1"}
#

"""


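# Helper routines shared by the generators below: conversions between integer
# values, byte lists and the "{ 0x.., 0x.. }" array strings used in DSC/DLT
# files.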
def Bytes2Val(Bytes):
    return reduce(lambda x, y: (x << 8) | y, Bytes[::-1])


def Bytes2Str(Bytes):
    return '{ %s }' % (', '.join('0x%02X' % i for i in Bytes))


def Str2Bytes(Value, Blen):
    Result = bytearray(Value[1:-1], 'utf-8')  # Excluding quotes
    if len(Result) < Blen:
        Result.extend(b'\x00' * (Blen - len(Result)))
    return Result


def Val2Bytes(Value, Blen):
    return [(Value >> (i * 8) & 0xff) for i in range(Blen)]


def Array2Val(ValStr):
    ValStr = ValStr.strip()
    if ValStr.startswith('{'):
        ValStr = ValStr[1:]
    if ValStr.endswith('}'):
        ValStr = ValStr[:-1]
    if ValStr.startswith("'"):
        ValStr = ValStr[1:]
    if ValStr.endswith("'"):
        ValStr = ValStr[:-1]
    Value = 0
    for Each in ValStr.split(',')[::-1]:
        Each = Each.strip()
        if Each.startswith('0x'):
            Base = 16
        else:
            Base = 10
        Value = (Value << 8) | int(Each, Base)
    return Value


def GetCopyrightHeader(FileType, AllowModify=False):
    FileDescription = {
        'bsf': 'Boot Setting',
        'dsc': 'Definition',
        'dlt': 'Delta',
        'inc': 'C Binary Blob',
        'h': 'C Struct Header'
    }
    if FileType in ['bsf', 'dsc', 'dlt']:
        CommentChar = '#'
    else:
        CommentChar = ''
    Lines = __copyright_tmp__.split('\n')

    if AllowModify:
        Lines = [Line for Line in Lines if 'Please do NOT modify' not in Line]

    CopyrightHdr = '\n'.join('%s%s' % (
        CommentChar, Line) for Line in Lines)[:-1] + '\n'

    return CopyrightHdr % (FileDescription[FileType], date.today().year)


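# CLogicalExpression implements a small recursive-descent evaluator for the
# logical expressions used by DSC conditional directives: NOT/AND/OR/XOR
# keywords, comparison operators, parentheses and decimal/hex literals.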
class CLogicalExpression:
    def __init__(self):
        self.index = 0
        self.string = ''

    def errExit(self, err=''):
        print("ERROR: Expression parsing failed for:")
        print(" %s" % self.string)
        print(" %s^" % (' ' * self.index))
        if err:
            print("INFO : %s" % err)
        raise SystemExit

    def getNonNumber(self, n1, n2):
        if not n1.isdigit():
            return n1
        if not n2.isdigit():
            return n2
        return None

    def getCurr(self, lens=1):
        try:
            if lens == -1:
                return self.string[self.index:]
            else:
                if self.index + lens > len(self.string):
                    lens = len(self.string) - self.index
                return self.string[self.index: self.index + lens]
        except Exception:
            return ''

    def isLast(self):
        return self.index == len(self.string)

    def moveNext(self, len=1):
        self.index += len

    def skipSpace(self):
        while not self.isLast():
            if self.getCurr() in ' \t':
                self.moveNext()
            else:
                return

    def normNumber(self, val):
        return True if val else False

    def getNumber(self, var):
        var = var.strip()
        if re.match('^0x[a-fA-F0-9]+$', var):
            value = int(var, 16)
        elif re.match('^[+-]?\\d+$', var):
            value = int(var, 10)
        else:
            value = None
        return value

    def parseValue(self):
        self.skipSpace()
        var = ''
        while not self.isLast():
            char = self.getCurr()
            if re.match('^[\\w.]', char):
                var += char
                self.moveNext()
            else:
                break
        val = self.getNumber(var)
        if val is None:
            value = var
        else:
            value = "%d" % val
        return value

    def parseSingleOp(self):
        self.skipSpace()
        if re.match('^NOT\\W', self.getCurr(-1)):
            self.moveNext(3)
            op = self.parseBrace()
            val = self.getNumber(op)
            if val is None:
                self.errExit("'%s' is not a number" % op)
            return "%d" % (not self.normNumber(int(op)))
        else:
            return self.parseValue()

    def parseBrace(self):
        self.skipSpace()
        char = self.getCurr()
        if char == '(':
            self.moveNext()
            value = self.parseExpr()
            self.skipSpace()
            if self.getCurr() != ')':
                self.errExit("Expecting closing brace or operator")
            self.moveNext()
            return value
        else:
            value = self.parseSingleOp()
            return value

    def parseCompare(self):
        value = self.parseBrace()
        while True:
            self.skipSpace()
            char = self.getCurr()
            if char in ['<', '>']:
                self.moveNext()
                next = self.getCurr()
                if next == '=':
                    op = char + next
                    self.moveNext()
                else:
                    op = char
                result = self.parseBrace()
                test = self.getNonNumber(result, value)
                if test is None:
                    value = "%d" % self.normNumber(eval(value + op + result))
                else:
                    self.errExit("'%s' is not a valid number for comparison"
                                 % test)
            elif char in ['=', '!']:
                op = self.getCurr(2)
                if op in ['==', '!=']:
                    self.moveNext(2)
                    result = self.parseBrace()
                    test = self.getNonNumber(result, value)
                    if test is None:
                        value = "%d" % self.normNumber((eval(value + op
                                                             + result)))
                    else:
                        value = "%d" % self.normNumber(eval("'" + value +
                                                            "'" + op + "'" +
                                                            result + "'"))
                else:
                    break
            else:
                break
        return value

    def parseAnd(self):
        value = self.parseCompare()
        while True:
            self.skipSpace()
            if re.match('^AND\\W', self.getCurr(-1)):
                self.moveNext(3)
                result = self.parseCompare()
                test = self.getNonNumber(result, value)
                if test is None:
                    value = "%d" % self.normNumber(int(value) & int(result))
                else:
                    self.errExit("'%s' is not a valid operand for AND" %
                                 test)
            else:
                break
        return value

    def parseOrXor(self):
        value = self.parseAnd()
        op = None
        while True:
            self.skipSpace()
            op = None
            if re.match('^XOR\\W', self.getCurr(-1)):
                self.moveNext(3)
                op = '^'
            elif re.match('^OR\\W', self.getCurr(-1)):
                self.moveNext(2)
                op = '|'
            else:
                break
            if op:
                result = self.parseAnd()
                test = self.getNonNumber(result, value)
                if test is None:
                    value = "%d" % self.normNumber(eval(value + op + result))
                else:
                    self.errExit("'%s' is not a valid operand for XOR/OR" %
                                 test)
        return value

    def parseExpr(self):
        return self.parseOrXor()

    def getResult(self):
        value = self.parseExpr()
        self.skipSpace()
        if not self.isLast():
            self.errExit("Unexpected character found '%s'" % self.getCurr())
        test = self.getNumber(value)
        if test is None:
            self.errExit("Result '%s' is not a number" % value)
        return int(value)

    def evaluateExpress(self, Expr):
        self.index = 0
        self.string = Expr
        if self.getResult():
            Result = True
        else:
            Result = False
        return Result


class CFspBsf2Dsc:
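    # Converts an FSP BSF (Boot Setting File) description into DSC-style
    # configuration lines: parse_bsf() builds an option list from the BSF
    # text and generate_dsc() renders it, optionally writing a DSC file.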
|
|
|
|
def __init__(self, bsf_file):
|
|
self.cfg_list = CFspBsf2Dsc.parse_bsf(bsf_file)
|
|
|
|
def get_dsc_lines(self):
|
|
return CFspBsf2Dsc.generate_dsc(self.cfg_list)
|
|
|
|
def save_dsc(self, dsc_file):
|
|
return CFspBsf2Dsc.generate_dsc(self.cfg_list, dsc_file)
|
|
|
|
@staticmethod
|
|
def parse_bsf(bsf_file):
|
|
|
|
fd = open(bsf_file, 'r')
|
|
bsf_txt = fd.read()
|
|
fd.close()
|
|
|
|
find_list = []
|
|
regex = re.compile(r'\s+Find\s+"(.*?)"(.*?)^\s+(\$(.*?)|Skip)\s+',
|
|
re.S | re.MULTILINE)
|
|
for match in regex.finditer(bsf_txt):
|
|
find = match.group(1)
|
|
name = match.group(3)
|
|
line = bsf_txt[:match.end()].count("\n")
|
|
find_list.append((name, find, line))
|
|
|
|
idx = 0
|
|
count = 0
|
|
prefix = ''
|
|
chk_dict = {}
|
|
cfg_list = []
|
|
cfg_temp = {'find': '', 'cname': '', 'length': 0, 'value': '0',
|
|
'type': 'Reserved', 'isbit': False,
|
|
'embed': '', 'page': '', 'option': '', 'instance': 0}
|
|
regex = re.compile(
|
|
r'^\s+(\$(.*?)|Skip)\s+(\d+)\s+(bits|bytes)(\s+\$_DEFAULT_\s'
|
|
r'+=\s+(.+?))?$', re.S |
|
|
re.MULTILINE)
|
|
|
|
for match in regex.finditer(bsf_txt):
|
|
dlen = int(match.group(3))
|
|
if match.group(1) == 'Skip':
|
|
key = 'gPlatformFspPkgTokenSpaceGuid_BsfSkip%d' % idx
|
|
val = ', '.join(['%02X' % ord(i) for i in '\x00' * dlen])
|
|
idx += 1
|
|
option = '$SKIP'
|
|
else:
|
|
key = match.group(2)
|
|
val = match.group(6)
|
|
option = ''
|
|
is_bit = True if match.group(4) == 'bits' else False
|
|
|
|
cfg_item = dict(cfg_temp)
|
|
line = bsf_txt[:match.end()].count("\n")
|
|
finds = [i for i in find_list if line >= i[2]]
|
|
if len(finds) > 0:
|
|
prefix = finds[0][1]
|
|
cfg_item['embed'] = '%s:TAG_%03X:START' % \
|
|
(prefix, ord(prefix[-1]))
|
|
cfg_item['find'] = prefix
|
|
cfg_item['cname'] = 'Signature'
|
|
cfg_item['length'] = len(finds[0][1])
|
|
str2byte = Str2Bytes("'" + finds[0][1] + "'",
|
|
len(finds[0][1]))
|
|
cfg_item['value'] = '0x%X' % Bytes2Val(str2byte)
|
|
|
|
cfg_list.append(dict(cfg_item))
|
|
cfg_item = dict(cfg_temp)
|
|
find_list.pop(0)
|
|
count = 0
|
|
|
|
cfg_item['cname'] = key
|
|
cfg_item['length'] = dlen
|
|
cfg_item['value'] = val
|
|
cfg_item['option'] = option
|
|
cfg_item['isbit'] = is_bit
|
|
|
|
if key not in chk_dict.keys():
|
|
chk_dict[key] = 0
|
|
else:
|
|
chk_dict[key] += 1
|
|
cfg_item['instance'] = chk_dict[key]
|
|
|
|
cfg_list.append(cfg_item)
|
|
count += 1
|
|
|
|
if prefix:
|
|
cfg_item = dict(cfg_temp)
|
|
cfg_item['cname'] = 'Dummy'
|
|
cfg_item['embed'] = '%s:%03X:END' % (prefix, ord(prefix[-1]))
|
|
cfg_list.append(cfg_item)
|
|
|
|
option_dict = {}
|
|
selreg = re.compile(
|
|
r'\s+Selection\s*(.+?)\s*,\s*"(.*?)"$', re.S |
|
|
re.MULTILINE)
|
|
regex = re.compile(
|
|
r'^List\s&(.+?)$(.+?)^EndList$', re.S | re.MULTILINE)
|
|
for match in regex.finditer(bsf_txt):
|
|
key = match.group(1)
|
|
option_dict[key] = []
|
|
for select in selreg.finditer(match.group(2)):
|
|
option_dict[key].append(
|
|
(int(select.group(1), 0), select.group(2)))
|
|
|
|
chk_dict = {}
|
|
pagereg = re.compile(
|
|
r'^Page\s"(.*?)"$(.+?)^EndPage$', re.S | re.MULTILINE)
|
|
for match in pagereg.finditer(bsf_txt):
|
|
page = match.group(1)
|
|
for line in match.group(2).splitlines():
|
|
match = re.match(
|
|
r'\s+(Combo|EditNum)\s\$(.+?),\s"(.*?)",\s(.+?),$', line)
|
|
if match:
|
|
cname = match.group(2)
|
|
if cname not in chk_dict.keys():
|
|
chk_dict[cname] = 0
|
|
else:
|
|
chk_dict[cname] += 1
|
|
instance = chk_dict[cname]
|
|
cfg_idxs = [i for i, j in enumerate(cfg_list)
|
|
if j['cname'] == cname and
|
|
j['instance'] == instance]
|
|
if len(cfg_idxs) != 1:
|
|
raise Exception(
|
|
"Multiple CFG item '%s' found !" % cname)
|
|
cfg_item = cfg_list[cfg_idxs[0]]
|
|
cfg_item['page'] = page
|
|
cfg_item['type'] = match.group(1)
|
|
cfg_item['prompt'] = match.group(3)
|
|
cfg_item['range'] = None
|
|
if cfg_item['type'] == 'Combo':
|
|
cfg_item['option'] = option_dict[match.group(4)[1:]]
|
|
elif cfg_item['type'] == 'EditNum':
|
|
cfg_item['option'] = match.group(4)
|
|
match = re.match(r'\s+ Help\s"(.*?)"$', line)
|
|
if match:
|
|
cfg_item['help'] = match.group(1)
|
|
|
|
match = re.match(r'\s+"Valid\srange:\s(.*)"$', line)
|
|
if match:
|
|
parts = match.group(1).split()
|
|
cfg_item['option'] = (
|
|
(int(parts[0], 0), int(parts[2], 0),
|
|
cfg_item['option']))
|
|
|
|
return cfg_list
|
|
|
|
@staticmethod
|
|
def generate_dsc(option_list, dsc_file=None):
|
|
dsc_lines = []
|
|
header = '%s' % (__copyright_dsc__ % date.today().year)
|
|
dsc_lines.extend(header.splitlines())
|
|
|
|
pages = []
|
|
for cfg_item in option_list:
|
|
if cfg_item['page'] and (cfg_item['page'] not in pages):
|
|
pages.append(cfg_item['page'])
|
|
|
|
page_id = 0
|
|
for page in pages:
|
|
dsc_lines.append(' # !BSF PAGES:{PG%02X::"%s"}' % (page_id, page))
|
|
page_id += 1
|
|
dsc_lines.append('')
|
|
|
|
last_page = ''
|
|
|
|
is_bit = False
|
|
dlen = 0
|
|
dval = 0
|
|
bit_fields = []
|
|
for idx, option in enumerate(option_list):
|
|
if not is_bit and option['isbit']:
|
|
is_bit = True
|
|
dlen = 0
|
|
dval = 0
|
|
idxs = idx
|
|
if is_bit and not option['isbit']:
|
|
is_bit = False
|
|
if dlen % 8 != 0:
|
|
raise Exception("Bit fields are not aligned at "
|
|
"byte boundary !")
|
|
bit_fields.append((idxs, idx, dlen, dval))
|
|
if is_bit:
|
|
blen = option['length']
|
|
bval = int(option['value'], 0)
|
|
dval = dval + ((bval & ((1 << blen) - 1)) << dlen)
|
|
print(dlen, blen, bval, hex(dval))
|
|
dlen += blen
|
|
|
|
struct_idx = 0
|
|
for idx, option in enumerate(option_list):
|
|
dsc_lines.append('')
|
|
default = option['value']
|
|
pos = option['cname'].find('_')
|
|
name = option['cname'][pos + 1:]
|
|
|
|
for start_idx, end_idx, bits_len, bits_val in bit_fields:
|
|
if idx == start_idx:
|
|
val_str = Bytes2Str(Val2Bytes(bits_val, bits_len // 8))
|
|
dsc_lines.append(' # !HDR STRUCT:{BIT_FIELD_DATA_%d}'
|
|
% struct_idx)
|
|
dsc_lines.append(' # !BSF NAME:{BIT_FIELD_STRUCT}')
|
|
dsc_lines.append(' gCfgData.BitFiledStruct%d '
|
|
' | * | 0x%04X | %s' %
|
|
(struct_idx, bits_len // 8, val_str))
|
|
dsc_lines.append('')
|
|
struct_idx += 1
|
|
|
|
if option['find']:
|
|
dsc_lines.append(' # !BSF FIND:{%s}' % option['find'])
|
|
dsc_lines.append('')
|
|
|
|
if option['instance'] > 0:
|
|
name = name + '_%s' % option['instance']
|
|
|
|
if option['embed']:
|
|
dsc_lines.append(' # !HDR EMBED:{%s}' % option['embed'])
|
|
|
|
if option['type'] == 'Reserved':
|
|
dsc_lines.append(' # !BSF NAME:{Reserved} TYPE:{Reserved}')
|
|
if option['option'] == '$SKIP':
|
|
dsc_lines.append(' # !BSF OPTION:{$SKIP}')
|
|
else:
|
|
prompt = option['prompt']
|
|
|
|
if last_page != option['page']:
|
|
last_page = option['page']
|
|
dsc_lines.append(' # !BSF PAGE:{PG%02X}' %
|
|
(pages.index(option['page'])))
|
|
|
|
if option['type'] == 'Combo':
|
|
dsc_lines.append(' # !BSF NAME:{%s} TYPE:{%s}' %
|
|
(prompt, option['type']))
|
|
ops = []
|
|
for val, text in option['option']:
|
|
ops.append('0x%x:%s' % (val, text))
|
|
dsc_lines.append(' # !BSF OPTION:{%s}' % (', '.join(ops)))
|
|
elif option['type'] == 'EditNum':
|
|
cfg_len = option['length']
|
|
if ',' in default and cfg_len > 8:
|
|
dsc_lines.append(' # !BSF NAME:{%s} TYPE:{Table}' %
|
|
(prompt))
|
|
if cfg_len > 16:
|
|
cfg_len = 16
|
|
ops = []
|
|
for i in range(cfg_len):
|
|
ops.append('%X:1:HEX' % i)
|
|
dsc_lines.append(' # !BSF OPTION:{%s}' %
|
|
(', '.join(ops)))
|
|
else:
|
|
dsc_lines.append(
|
|
' # !BSF NAME:{%s} TYPE:{%s, %s, (0x%X, 0x%X)}' %
|
|
(prompt, option['type'], option['option'][2],
|
|
option['option'][0], option['option'][1]))
|
|
dsc_lines.append(' # !BSF HELP:{%s}' % option['help'])
|
|
|
|
if ',' in default:
|
|
default = '{%s}' % default
|
|
|
|
if option['isbit']:
|
|
dsc_lines.append(' # !BSF FIELD:{%s:%db}'
|
|
% (name, option['length']))
|
|
else:
|
|
dsc_lines.append(' gCfgData.%-30s | * | 0x%04X | %s' %
|
|
(name, option['length'], default))
|
|
|
|
if dsc_file:
|
|
fd = open(dsc_file, 'w')
|
|
fd.write('\n'.join(dsc_lines))
|
|
fd.close()
|
|
|
|
return dsc_lines
|
|
|
|
|
|
class CGenCfgData:
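    # Core configuration data generator: parses a configuration DSC, applies
    # DLT overrides, and emits C headers, binary blobs and .inc data files.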
|
|
def __init__(self, Mode=''):
|
|
self.Debug = False
|
|
self.Error = ''
|
|
self.ReleaseMode = True
|
|
self.Mode = Mode
|
|
self._GlobalDataDef = """
|
|
GlobalDataDef
|
|
SKUID = 0, "DEFAULT"
|
|
EndGlobalData
|
|
|
|
"""
|
|
self._BuidinOptionTxt = """
|
|
List &EN_DIS
|
|
Selection 0x1 , "Enabled"
|
|
Selection 0x0 , "Disabled"
|
|
EndList
|
|
|
|
"""
|
|
self._StructType = ['UINT8', 'UINT16', 'UINT32', 'UINT64']
|
|
self._BsfKeyList = ['FIND', 'NAME', 'HELP', 'TYPE', 'PAGE', 'PAGES',
|
|
'BLOCK', 'OPTION', 'CONDITION', 'ORDER', 'MARKER',
|
|
'SUBT']
|
|
self._HdrKeyList = ['HEADER', 'STRUCT', 'EMBED', 'COMMENT']
|
|
self._BuidinOption = {'$EN_DIS': 'EN_DIS'}
|
|
|
|
self._MacroDict = {}
|
|
self._VarDict = {}
|
|
self._PcdsDict = {}
|
|
self._CfgBlkDict = {}
|
|
self._CfgPageDict = {}
|
|
self._CfgOptsDict = {}
|
|
self._BsfTempDict = {}
|
|
self._CfgItemList = []
|
|
self._DscLines = []
|
|
self._DscFile = ''
|
|
self._CfgPageTree = {}
|
|
|
|
self._MapVer = 0
|
|
self._MinCfgTagId = 0x100
|
|
|
|
def ParseMacros(self, MacroDefStr):
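        # Build self._MacroDict from '-D NAME=VALUE' style arguments.
        # Returns 0 on success, or 1 if no macro could be parsed.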
|
|
# ['-DABC=1', '-D', 'CFG_DEBUG=1', '-D', 'CFG_OUTDIR=Build']
|
|
self._MacroDict = {}
|
|
IsExpression = False
|
|
for Macro in MacroDefStr:
|
|
if Macro.startswith('-D'):
|
|
IsExpression = True
|
|
if len(Macro) > 2:
|
|
Macro = Macro[2:]
|
|
else:
|
|
continue
|
|
if IsExpression:
|
|
IsExpression = False
|
|
Match = re.match("(\\w+)=(.+)", Macro)
|
|
if Match:
|
|
self._MacroDict[Match.group(1)] = Match.group(2)
|
|
else:
|
|
Match = re.match("(\\w+)", Macro)
|
|
if Match:
|
|
self._MacroDict[Match.group(1)] = ''
|
|
if len(self._MacroDict) == 0:
|
|
Error = 1
|
|
else:
|
|
Error = 0
|
|
if self.Debug:
|
|
print("INFO : Macro dictionary:")
|
|
for Each in self._MacroDict:
|
|
print(" $(%s) = [ %s ]" % (Each,
|
|
self._MacroDict[Each]))
|
|
return Error
|
|
|
|
def EvaulateIfdef(self, Macro):
|
|
Result = Macro in self._MacroDict
|
|
if self.Debug:
|
|
print("INFO : Eval Ifdef [%s] : %s" % (Macro, Result))
|
|
return Result
|
|
|
|
def ExpandMacros(self, Input, Preserve=False):
|
|
Line = Input
|
|
Match = re.findall("\\$\\(\\w+\\)", Input)
|
|
if Match:
|
|
for Each in Match:
|
|
Variable = Each[2:-1]
|
|
if Variable in self._MacroDict:
|
|
Line = Line.replace(Each, self._MacroDict[Variable])
|
|
else:
|
|
if self.Debug:
|
|
print("WARN : %s is not defined" % Each)
|
|
if not Preserve:
|
|
Line = Line.replace(Each, Each[2:-1])
|
|
return Line
|
|
|
|
def ExpandPcds(self, Input):
|
|
Line = Input
|
|
Match = re.findall("(\\w+\\.\\w+)", Input)
|
|
if Match:
|
|
for PcdName in Match:
|
|
if PcdName in self._PcdsDict:
|
|
Line = Line.replace(PcdName, self._PcdsDict[PcdName])
|
|
else:
|
|
if self.Debug:
|
|
print("WARN : %s is not defined" % PcdName)
|
|
return Line
|
|
|
|
def EvaluateExpress(self, Expr):
|
|
ExpExpr = self.ExpandPcds(Expr)
|
|
ExpExpr = self.ExpandMacros(ExpExpr)
|
|
LogExpr = CLogicalExpression()
|
|
Result = LogExpr.evaluateExpress(ExpExpr)
|
|
if self.Debug:
|
|
print("INFO : Eval Express [%s] : %s" % (Expr, Result))
|
|
return Result
|
|
|
|
def ValueToByteArray(self, ValueStr, Length):
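        # Convert a DSC value string into a bytearray padded with zeros to
        # exactly 'Length' bytes. A '{ FILE:name }' value loads the bytes
        # from binary files relative to the DSC directory; other formats are
        # handled by ValueToList().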
|
|
Match = re.match("\\{\\s*FILE:(.+)\\}", ValueStr)
|
|
if Match:
|
|
FileList = Match.group(1).split(',')
|
|
Result = bytearray()
|
|
for File in FileList:
|
|
File = File.strip()
|
|
BinPath = os.path.join(os.path.dirname(self._DscFile), File)
|
|
Result.extend(bytearray(open(BinPath, 'rb').read()))
|
|
else:
|
|
try:
|
|
Result = bytearray(self.ValueToList(ValueStr, Length))
|
|
except ValueError:
|
|
raise Exception("Bytes in '%s' must be in range 0~255 !" %
|
|
ValueStr)
|
|
if len(Result) < Length:
|
|
Result.extend(b'\x00' * (Length - len(Result)))
|
|
elif len(Result) > Length:
|
|
raise Exception("Value '%s' is too big to fit into %d bytes !" %
|
|
(ValueStr, Length))
|
|
|
|
return Result[:Length]
|
|
|
|
def ValueToList(self, ValueStr, Length):
|
|
if ValueStr[0] == '{':
|
|
Result = []
|
|
BinList = ValueStr[1:-1].split(',')
|
|
InBitField = False
|
|
LastInBitField = False
|
|
Value = 0
|
|
BitLen = 0
|
|
for Element in BinList:
|
|
InBitField = False
|
|
Each = Element.strip()
|
|
if len(Each) == 0:
|
|
pass
|
|
else:
|
|
if Each[0] in ['"', "'"]:
|
|
Result.extend(list(bytearray(Each[1:-1], 'utf-8')))
|
|
elif ':' in Each:
|
|
Match = re.match("(.+):(\\d+)b", Each)
|
|
if Match is None:
|
|
raise Exception("Invald value list format '%s' !"
|
|
% Each)
|
|
InBitField = True
|
|
CurrentBitLen = int(Match.group(2))
|
|
CurrentValue = ((self.EvaluateExpress(Match.group(1))
|
|
& (1 << CurrentBitLen) - 1)) << BitLen
|
|
else:
|
|
Result.append(self.EvaluateExpress(Each.strip()))
|
|
if InBitField:
|
|
Value += CurrentValue
|
|
BitLen += CurrentBitLen
|
|
if LastInBitField and ((not InBitField) or (Element ==
|
|
BinList[-1])):
|
|
if BitLen % 8 != 0:
|
|
raise Exception("Invald bit field length!")
|
|
Result.extend(Val2Bytes(Value, BitLen // 8))
|
|
Value = 0
|
|
BitLen = 0
|
|
LastInBitField = InBitField
|
|
elif ValueStr.startswith("'") and ValueStr.endswith("'"):
|
|
Result = Str2Bytes(ValueStr, Length)
|
|
elif ValueStr.startswith('"') and ValueStr.endswith('"'):
|
|
Result = Str2Bytes(ValueStr, Length)
|
|
else:
|
|
Result = Val2Bytes(self.EvaluateExpress(ValueStr), Length)
|
|
return Result
|
|
|
|
def FormatDeltaValue(self, ConfigDict):
|
|
ValStr = ConfigDict['value']
|
|
if ValStr[0] == "'":
|
|
# Remove padding \x00 in the value string
|
|
ValStr = "'%s'" % ValStr[1:-1].rstrip('\x00')
|
|
|
|
Struct = ConfigDict['struct']
|
|
if Struct in self._StructType:
|
|
# Format the array using its struct type
|
|
Unit = int(Struct[4:]) // 8
|
|
Value = Array2Val(ConfigDict['value'])
|
|
Loop = ConfigDict['length'] // Unit
|
|
Values = []
|
|
for Each in range(Loop):
|
|
Values.append(Value & ((1 << (Unit * 8)) - 1))
|
|
Value = Value >> (Unit * 8)
|
|
ValStr = '{ ' + ', '.join([('0x%%0%dX' % (Unit * 2)) %
|
|
x for x in Values]) + ' }'
|
|
|
|
return ValStr
|
|
|
|
def FormatListValue(self, ConfigDict):
|
|
Struct = ConfigDict['struct']
|
|
if Struct not in self._StructType:
|
|
return
|
|
|
|
DataList = self.ValueToList(ConfigDict['value'], ConfigDict['length'])
|
|
Unit = int(Struct[4:]) // 8
|
|
if int(ConfigDict['length']) != Unit * len(DataList):
|
|
# Fallback to byte array
|
|
Unit = 1
|
|
if int(ConfigDict['length']) != len(DataList):
|
|
raise Exception("Array size is not proper for '%s' !" %
|
|
ConfigDict['cname'])
|
|
|
|
ByteArray = []
|
|
for Value in DataList:
|
|
for Loop in range(Unit):
|
|
ByteArray.append("0x%02X" % (Value & 0xFF))
|
|
Value = Value >> 8
|
|
NewValue = '{' + ','.join(ByteArray) + '}'
|
|
ConfigDict['value'] = NewValue
|
|
|
|
return ""
|
|
|
|
def GetOrderNumber(self, Offset, Order, BitOff=0):
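        # Build a sort key for a config item: an order of -1 uses the item
        # offset as the major part, otherwise a 'Major.Minor' hex string is
        # encoded as (Major << 16) | (Minor << 8), plus the bit offset.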
|
|
if isinstance(Order, int):
|
|
if Order == -1:
|
|
Order = Offset << 16
|
|
else:
|
|
(Major, Minor) = Order.split('.')
|
|
Order = (int(Major, 16) << 16) + ((int(Minor, 16) & 0xFF) << 8)
|
|
return Order + (BitOff & 0xFF)
|
|
|
|
def SubtituteLine(self, Line, Args):
|
|
Args = Args.strip()
|
|
Vars = Args.split(':')
|
|
Line = self.ExpandMacros(Line, True)
|
|
for Idx in range(len(Vars)-1, 0, -1):
|
|
Line = Line.replace('$(%d)' % Idx, Vars[Idx].strip())
|
|
return Line
|
|
|
|
def CfgDuplicationCheck(self, CfgDict, Name):
|
|
if not self.Debug:
|
|
return
|
|
|
|
if Name == 'Dummy':
|
|
return
|
|
|
|
if Name not in CfgDict:
|
|
CfgDict[Name] = 1
|
|
else:
|
|
print("WARNING: Duplicated item found '%s' !" %
|
|
CfgDict['cname'])
|
|
|
|
def AddBsfChildPage(self, Child, Parent='root'):
|
|
def AddBsfChildPageRecursive(PageTree, Parent, Child):
|
|
Key = next(iter(PageTree))
|
|
if Parent == Key:
|
|
PageTree[Key].append({Child: []})
|
|
return True
|
|
else:
|
|
Result = False
|
|
for Each in PageTree[Key]:
|
|
if AddBsfChildPageRecursive(Each, Parent, Child):
|
|
Result = True
|
|
break
|
|
return Result
|
|
|
|
return AddBsfChildPageRecursive(self._CfgPageTree, Parent, Child)
|
|
|
|
def ParseDscFile(self, DscFile):
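        # Parse a configuration DSC file (or a list of DSC lines) and
        # populate the config item list, macro/PCD dictionaries, BSF
        # templates and page tree. Handles !if/!ifdef/!include directives
        # and '# !BSF'/'# !HDR' annotations.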
|
|
self._DscLines = []
|
|
self._CfgItemList = []
|
|
self._CfgPageDict = {}
|
|
self._CfgBlkDict = {}
|
|
self._BsfTempDict = {}
|
|
self._CfgPageTree = {'root': []}
|
|
|
|
CfgDict = {}
|
|
|
|
SectionNameList = ["Defines".lower(), "PcdsFeatureFlag".lower(),
|
|
"PcdsDynamicVpd.Tmp".lower(),
|
|
"PcdsDynamicVpd.Upd".lower()]
|
|
|
|
IsDefSect = False
|
|
IsPcdSect = False
|
|
IsUpdSect = False
|
|
IsTmpSect = False
|
|
|
|
TemplateName = ''
|
|
|
|
IfStack = []
|
|
ElifStack = []
|
|
Error = 0
|
|
ConfigDict = {}
|
|
|
|
if type(DscFile) is list:
|
|
# it is DSC lines already
|
|
DscLines = DscFile
|
|
self._DscFile = '.'
|
|
else:
|
|
DscFd = open(DscFile, "r")
|
|
DscLines = DscFd.readlines()
|
|
DscFd.close()
|
|
self._DscFile = DscFile
|
|
|
|
BsfRegExp = re.compile("(%s):{(.+?)}(?:$|\\s+)" % '|'.
|
|
join(self._BsfKeyList))
|
|
HdrRegExp = re.compile("(%s):{(.+?)}" % '|'.join(self._HdrKeyList))
|
|
CfgRegExp = re.compile("^([_a-zA-Z0-9]+)\\s*\\|\\s*\
|
|
(0x[0-9A-F]+|\\*)\\s*\\|\\s*(\\d+|0x[0-9a-fA-F]+)\\s*\\|\\s*(.+)")
|
|
TksRegExp = re.compile("^(g[_a-zA-Z0-9]+\\.)(.+)")
|
|
SkipLines = 0
|
|
while len(DscLines):
|
|
DscLine = DscLines.pop(0).strip()
|
|
if SkipLines == 0:
|
|
self._DscLines.append(DscLine)
|
|
else:
|
|
SkipLines = SkipLines - 1
|
|
if len(DscLine) == 0:
|
|
continue
|
|
|
|
Handle = False
|
|
Match = re.match("^\\[(.+)\\]", DscLine)
|
|
if Match is not None:
|
|
IsDefSect = False
|
|
IsPcdSect = False
|
|
IsUpdSect = False
|
|
IsTmpSect = False
|
|
SectionName = Match.group(1).lower()
|
|
if SectionName == SectionNameList[0]:
|
|
IsDefSect = True
|
|
if SectionName == SectionNameList[1]:
|
|
IsPcdSect = True
|
|
elif SectionName == SectionNameList[2]:
|
|
IsTmpSect = True
|
|
elif SectionName == SectionNameList[3]:
|
|
ConfigDict = {
|
|
'header': 'ON',
|
|
'page': '',
|
|
'name': '',
|
|
'find': '',
|
|
'struct': '',
|
|
'embed': '',
|
|
'marker': '',
|
|
'option': '',
|
|
'comment': '',
|
|
'condition': '',
|
|
'order': -1,
|
|
'subreg': []
|
|
}
|
|
IsUpdSect = True
|
|
Offset = 0
|
|
else:
|
|
if IsDefSect or IsPcdSect or IsUpdSect or IsTmpSect:
|
|
Match = False if DscLine[0] != '!' else True
|
|
if Match:
|
|
Match = re.match("^!(else|endif|ifdef|ifndef|if|elseif\
|
|
|include)\\s*(.+)?$", DscLine.split("#")[0])
|
|
Keyword = Match.group(1) if Match else ''
|
|
Remaining = Match.group(2) if Match else ''
|
|
Remaining = '' if Remaining is None else Remaining.strip()
|
|
|
|
if Keyword in ['if', 'elseif', 'ifdef', 'ifndef', 'include'
|
|
] and not Remaining:
|
|
raise Exception("ERROR: Expression is expected after \
|
|
'!if' or '!elseif' for line '%s'" % DscLine)
|
|
|
|
if Keyword == 'else':
|
|
if IfStack:
|
|
IfStack[-1] = not IfStack[-1]
|
|
else:
|
|
raise Exception("ERROR: No paired '!if' found for \
|
|
'!else' for line '%s'" % DscLine)
|
|
elif Keyword == 'endif':
|
|
if IfStack:
|
|
IfStack.pop()
|
|
Level = ElifStack.pop()
|
|
if Level > 0:
|
|
del IfStack[-Level:]
|
|
else:
|
|
raise Exception("ERROR: No paired '!if' found for \
|
|
'!endif' for line '%s'" % DscLine)
|
|
elif Keyword == 'ifdef' or Keyword == 'ifndef':
|
|
Result = self.EvaulateIfdef(Remaining)
|
|
if Keyword == 'ifndef':
|
|
Result = not Result
|
|
IfStack.append(Result)
|
|
ElifStack.append(0)
|
|
elif Keyword == 'if' or Keyword == 'elseif':
|
|
Result = self.EvaluateExpress(Remaining)
|
|
if Keyword == "if":
|
|
ElifStack.append(0)
|
|
IfStack.append(Result)
|
|
else: # elseif
|
|
if IfStack:
|
|
IfStack[-1] = not IfStack[-1]
|
|
IfStack.append(Result)
|
|
ElifStack[-1] = ElifStack[-1] + 1
|
|
else:
|
|
raise Exception("ERROR: No paired '!if' found for \
|
|
'!elseif' for line '%s'" % DscLine)
|
|
else:
|
|
if IfStack:
|
|
Handle = reduce(lambda x, y: x and y, IfStack)
|
|
else:
|
|
Handle = True
|
|
if Handle:
|
|
if Keyword == 'include':
|
|
Remaining = self.ExpandMacros(Remaining)
|
|
# Relative to DSC filepath
|
|
IncludeFilePath = os.path.join(
|
|
os.path.dirname(self._DscFile), Remaining)
|
|
if not os.path.exists(IncludeFilePath):
|
|
# Relative to repository to find \
|
|
# dsc in common platform
|
|
IncludeFilePath = os.path.join(
|
|
os.path.dirname(self._DscFile), "..",
|
|
Remaining)
|
|
|
|
try:
|
|
IncludeDsc = open(IncludeFilePath, "r")
|
|
except Exception:
|
|
raise Exception("ERROR: Cannot open \
|
|
file '%s'." % IncludeFilePath)
|
|
NewDscLines = IncludeDsc.readlines()
|
|
IncludeDsc.close()
|
|
DscLines = NewDscLines + DscLines
|
|
del self._DscLines[-1]
|
|
else:
|
|
if DscLine.startswith('!'):
|
|
raise Exception("ERROR: Unrecoginized \
|
|
directive for line '%s'" % DscLine)
|
|
|
|
if not Handle:
|
|
del self._DscLines[-1]
|
|
continue
|
|
|
|
if IsDefSect:
|
|
Match = re.match("^\\s*(?:DEFINE\\s+)*(\\w+)\\s*=\\s*(.+)",
|
|
DscLine)
|
|
if Match:
|
|
self._MacroDict[Match.group(1)] = Match.group(2)
|
|
if self.Debug:
|
|
print("INFO : DEFINE %s = [ %s ]" % (Match.group(1),
|
|
Match.group(2)))
|
|
|
|
elif IsPcdSect:
|
|
Match = re.match("^\\s*([\\w\\.]+)\\s*\\|\\s*(\\w+)", DscLine)
|
|
if Match:
|
|
self._PcdsDict[Match.group(1)] = Match.group(2)
|
|
if self.Debug:
|
|
print("INFO : PCD %s = [ %s ]" % (Match.group(1),
|
|
Match.group(2)))
|
|
|
|
elif IsTmpSect:
|
|
# !BSF DEFT:{GPIO_TMPL:START}
|
|
Match = re.match("^\\s*#\\s+(!BSF)\\s+DEFT:{(.+?):\
|
|
(START|END)}", DscLine)
|
|
if Match:
|
|
if Match.group(3) == 'START' and not TemplateName:
|
|
TemplateName = Match.group(2).strip()
|
|
self._BsfTempDict[TemplateName] = []
|
|
if Match.group(3) == 'END' and (
|
|
TemplateName == Match.group(2).strip()
|
|
) and TemplateName:
|
|
TemplateName = ''
|
|
else:
|
|
if TemplateName:
|
|
Match = re.match("^!include\\s*(.+)?$", DscLine)
|
|
if Match:
|
|
continue
|
|
self._BsfTempDict[TemplateName].append(DscLine)
|
|
|
|
else:
|
|
Match = re.match("^\\s*#\\s+(!BSF|!HDR)\\s+(.+)", DscLine)
|
|
if Match:
|
|
Remaining = Match.group(2)
|
|
if Match.group(1) == '!BSF':
|
|
Result = BsfRegExp.findall(Remaining)
|
|
if Result:
|
|
for Each in Result:
|
|
Key = Each[0]
|
|
Remaining = Each[1]
|
|
|
|
if Key == 'BLOCK':
|
|
Match = re.match(
|
|
"NAME:\"(.+)\"\\s*,\\s*\
|
|
VER:\"(.+)\"\\s*", Remaining)
|
|
if Match:
|
|
self._CfgBlkDict['name'] = \
|
|
Match.group(1)
|
|
self._CfgBlkDict['ver'] = Match.group(2
|
|
)
|
|
|
|
elif Key == 'SUBT':
|
|
# GPIO_TMPL:1:2:3
|
|
Remaining = Remaining.strip()
|
|
Match = re.match("(\\w+)\\s*:", Remaining)
|
|
if Match:
|
|
TemplateName = Match.group(1)
|
|
for Line in self._BsfTempDict[
|
|
TemplateName][::-1]:
|
|
NewLine = self.SubtituteLine(
|
|
Line, Remaining)
|
|
DscLines.insert(0, NewLine)
|
|
SkipLines += 1
|
|
|
|
elif Key == 'PAGES':
|
|
# !BSF PAGES:{HSW:"Haswell System Agent", \
|
|
# LPT:"Lynx Point PCH"}
|
|
PageList = Remaining.split(',')
|
|
for Page in PageList:
|
|
Page = Page.strip()
|
|
Match = re.match('(\\w+):\
|
|
(\\w*:)?\\"(.+)\\"', Page)
|
|
if Match:
|
|
PageName = Match.group(1)
|
|
ParentName = Match.group(2)
|
|
if not ParentName or \
|
|
ParentName == ':':
|
|
ParentName = 'root'
|
|
else:
|
|
ParentName = ParentName[:-1]
|
|
if not self.AddBsfChildPage(
|
|
PageName, ParentName):
|
|
raise Exception("Cannot find \
|
|
parent page '%s'!" % ParentName)
|
|
self._CfgPageDict[
|
|
PageName] = Match.group(3)
|
|
else:
|
|
raise Exception("Invalid page \
|
|
definitions '%s'!" % Page)
|
|
|
|
elif Key in ['NAME', 'HELP', 'OPTION'
|
|
] and Remaining.startswith('+'):
|
|
# Allow certain options to be extended \
|
|
# to multiple lines
|
|
ConfigDict[Key.lower()] += Remaining[1:]
|
|
|
|
else:
|
|
if Key == 'NAME':
|
|
Remaining = Remaining.strip()
|
|
elif Key == 'CONDITION':
|
|
Remaining = self.ExpandMacros(
|
|
Remaining.strip())
|
|
ConfigDict[Key.lower()] = Remaining
|
|
else:
|
|
Match = HdrRegExp.match(Remaining)
|
|
if Match:
|
|
Key = Match.group(1)
|
|
Remaining = Match.group(2)
|
|
if Key == 'EMBED':
|
|
Parts = Remaining.split(':')
|
|
Names = Parts[0].split(',')
|
|
DummyDict = ConfigDict.copy()
|
|
if len(Names) > 1:
|
|
Remaining = Names[0] + ':' + ':'.join(
|
|
Parts[1:])
|
|
DummyDict['struct'] = Names[1]
|
|
else:
|
|
DummyDict['struct'] = Names[0]
|
|
DummyDict['cname'] = 'Dummy'
|
|
DummyDict['name'] = ''
|
|
DummyDict['embed'] = Remaining
|
|
DummyDict['offset'] = Offset
|
|
DummyDict['length'] = 0
|
|
DummyDict['value'] = '0'
|
|
DummyDict['type'] = 'Reserved'
|
|
DummyDict['help'] = ''
|
|
DummyDict['subreg'] = []
|
|
self._CfgItemList.append(DummyDict)
|
|
else:
|
|
ConfigDict[Key.lower()] = Remaining
|
|
# Check CFG line
|
|
# gCfgData.VariableName | * | 0x01 | 0x1
|
|
Clear = False
|
|
|
|
Match = TksRegExp.match(DscLine)
|
|
if Match:
|
|
DscLine = 'gCfgData.%s' % Match.group(2)
|
|
|
|
if DscLine.startswith('gCfgData.'):
|
|
Match = CfgRegExp.match(DscLine[9:])
|
|
else:
|
|
Match = None
|
|
if Match:
|
|
ConfigDict['space'] = 'gCfgData'
|
|
ConfigDict['cname'] = Match.group(1)
|
|
if Match.group(2) != '*':
|
|
Offset = int(Match.group(2), 16)
|
|
ConfigDict['offset'] = Offset
|
|
ConfigDict['order'] = self.GetOrderNumber(
|
|
ConfigDict['offset'], ConfigDict['order'])
|
|
|
|
Value = Match.group(4).strip()
|
|
if Match.group(3).startswith("0x"):
|
|
Length = int(Match.group(3), 16)
|
|
else:
|
|
Length = int(Match.group(3))
|
|
|
|
Offset += Length
|
|
|
|
ConfigDict['length'] = Length
|
|
Match = re.match("\\$\\((\\w+)\\)", Value)
|
|
if Match:
|
|
if Match.group(1) in self._MacroDict:
|
|
Value = self._MacroDict[Match.group(1)]
|
|
|
|
ConfigDict['value'] = Value
|
|
if re.match("\\{\\s*FILE:(.+)\\}", Value):
|
|
# Expand embedded binary file
|
|
ValArray = self.ValueToByteArray(ConfigDict['value'],
|
|
ConfigDict['length'])
|
|
NewValue = Bytes2Str(ValArray)
|
|
self._DscLines[-1] = re.sub(r'(.*)(\{\s*FILE:.+\})',
|
|
r'\1 %s' % NewValue,
|
|
self._DscLines[-1])
|
|
ConfigDict['value'] = NewValue
|
|
|
|
if ConfigDict['name'] == '':
|
|
# Clear BSF specific items
|
|
ConfigDict['bsfname'] = ''
|
|
ConfigDict['help'] = ''
|
|
ConfigDict['type'] = ''
|
|
ConfigDict['option'] = ''
|
|
|
|
self.CfgDuplicationCheck(CfgDict, ConfigDict['cname'])
|
|
self._CfgItemList.append(ConfigDict.copy())
|
|
Clear = True
|
|
|
|
else:
|
|
# It could be a virtual item as below
|
|
# !BSF FIELD:{SerialDebugPortAddress0:1}
|
|
# or
|
|
# @Bsf FIELD:{SerialDebugPortAddress0:1b}
|
|
Match = re.match(r"^\s*#\s+(!BSF)\s+FIELD:{(.+)}", DscLine)
|
|
if Match:
|
|
BitFieldTxt = Match.group(2)
|
|
Match = re.match("(.+):(\\d+)b([BWDQ])?", BitFieldTxt)
|
|
if not Match:
|
|
raise Exception("Incorrect bit field \
|
|
format '%s' !" % BitFieldTxt)
|
|
UnitBitLen = 1
|
|
SubCfgDict = ConfigDict.copy()
|
|
SubCfgDict['cname'] = Match.group(1)
|
|
SubCfgDict['bitlength'] = int(
|
|
Match.group(2)) * UnitBitLen
|
|
if SubCfgDict['bitlength'] > 0:
|
|
LastItem = self._CfgItemList[-1]
|
|
if len(LastItem['subreg']) == 0:
|
|
SubOffset = 0
|
|
else:
|
|
SubOffset = \
|
|
LastItem['subreg'][-1]['bitoffset'] \
|
|
+ LastItem['subreg'][-1]['bitlength']
|
|
if Match.group(3) == 'B':
|
|
SubCfgDict['bitunit'] = 1
|
|
elif Match.group(3) == 'W':
|
|
SubCfgDict['bitunit'] = 2
|
|
elif Match.group(3) == 'Q':
|
|
SubCfgDict['bitunit'] = 8
|
|
else:
|
|
SubCfgDict['bitunit'] = 4
|
|
SubCfgDict['bitoffset'] = SubOffset
|
|
SubCfgDict['order'] = self.GetOrderNumber(
|
|
SubCfgDict['offset'], SubCfgDict['order'],
|
|
SubOffset)
|
|
SubCfgDict['value'] = ''
|
|
SubCfgDict['cname'] = '%s_%s' % (LastItem['cname'],
|
|
Match.group(1))
|
|
self.CfgDuplicationCheck(CfgDict,
|
|
SubCfgDict['cname'])
|
|
LastItem['subreg'].append(SubCfgDict.copy())
|
|
Clear = True
|
|
|
|
if Clear:
|
|
ConfigDict['name'] = ''
|
|
ConfigDict['find'] = ''
|
|
ConfigDict['struct'] = ''
|
|
ConfigDict['embed'] = ''
|
|
ConfigDict['marker'] = ''
|
|
ConfigDict['comment'] = ''
|
|
ConfigDict['order'] = -1
|
|
ConfigDict['subreg'] = []
|
|
ConfigDict['option'] = ''
|
|
ConfigDict['condition'] = ''
|
|
|
|
return Error
|
|
|
|
def GetBsfBitFields(self, subitem, bytes):
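        # Extract the sub-register bit field from the little-endian byte
        # array and return its value as a hex string.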
|
|
start = subitem['bitoffset']
|
|
end = start + subitem['bitlength']
|
|
bitsvalue = ''.join('{0:08b}'.format(i) for i in bytes[::-1])
|
|
bitsvalue = bitsvalue[::-1]
|
|
bitslen = len(bitsvalue)
|
|
if start > bitslen or end > bitslen:
|
|
raise Exception("Invalid bits offset [%d,%d] %d for %s" %
|
|
(start, end, bitslen, subitem['name']))
|
|
return '0x%X' % (int(bitsvalue[start:end][::-1], 2))
|
|
|
|
def UpdateBsfBitFields(self, SubItem, NewValue, ValueArray):
|
|
Start = SubItem['bitoffset']
|
|
End = Start + SubItem['bitlength']
|
|
Blen = len(ValueArray)
|
|
BitsValue = ''.join('{0:08b}'.format(i) for i in ValueArray[::-1])
|
|
BitsValue = BitsValue[::-1]
|
|
BitsLen = len(BitsValue)
|
|
if Start > BitsLen or End > BitsLen:
|
|
raise Exception("Invalid bits offset [%d,%d] %d for %s" %
|
|
(Start, End, BitsLen, SubItem['name']))
|
|
BitsValue = BitsValue[:Start] + '{0:0{1}b}'.format(
|
|
NewValue, SubItem['bitlength'])[::-1] + BitsValue[End:]
|
|
ValueArray[:] = bytearray.fromhex(
|
|
'{0:0{1}x}'.format(int(BitsValue[::-1], 2), Blen * 2))[::-1]
|
|
|
|
def CreateVarDict(self):
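        # Build the internal variable dictionary (_LENGTH_, _START_xxx_,
        # _END_xxx_, _LENGTH_xxx_, _TAG_xxx_ and _OFFSET_xxx_) from the EMBED
        # markers so that expressions can reference structure boundaries.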
|
|
Error = 0
|
|
self._VarDict = {}
|
|
if len(self._CfgItemList) > 0:
|
|
Item = self._CfgItemList[-1]
|
|
self._VarDict['_LENGTH_'] = '%d' % (Item['offset'] +
|
|
Item['length'])
|
|
for Item in self._CfgItemList:
|
|
Embed = Item['embed']
|
|
Match = re.match("^(\\w+):(\\w+):(START|END)", Embed)
|
|
if Match:
|
|
StructName = Match.group(1)
|
|
VarName = '_%s_%s_' % (Match.group(3), StructName)
|
|
if Match.group(3) == 'END':
|
|
self._VarDict[VarName] = Item['offset'] + Item['length']
|
|
self._VarDict['_LENGTH_%s_' % StructName] = \
|
|
self._VarDict['_END_%s_' % StructName] - \
|
|
self._VarDict['_START_%s_' % StructName]
|
|
if Match.group(2).startswith('TAG_'):
|
|
if (self.Mode != 'FSP') and (self._VarDict
|
|
['_LENGTH_%s_' %
|
|
StructName] % 4):
|
|
raise Exception("Size of structure '%s' is %d, \
|
|
not DWORD aligned !" % (StructName, self._VarDict['_LENGTH_%s_' % StructName]))
|
|
self._VarDict['_TAG_%s_' % StructName] = int(
|
|
Match.group(2)[4:], 16) & 0xFFF
|
|
else:
|
|
self._VarDict[VarName] = Item['offset']
|
|
if Item['marker']:
|
|
self._VarDict['_OFFSET_%s_' % Item['marker'].strip()] = \
|
|
Item['offset']
|
|
return Error
|
|
|
|
def UpdateBsfBitUnit(self, Item):
|
|
BitTotal = 0
|
|
BitOffset = 0
|
|
StartIdx = 0
|
|
Unit = None
|
|
UnitDec = {1: 'BYTE', 2: 'WORD', 4: 'DWORD', 8: 'QWORD'}
|
|
for Idx, SubItem in enumerate(Item['subreg']):
|
|
if Unit is None:
|
|
Unit = SubItem['bitunit']
|
|
BitLength = SubItem['bitlength']
|
|
BitTotal += BitLength
|
|
BitOffset += BitLength
|
|
|
|
if BitOffset > 64 or BitOffset > Unit * 8:
|
|
break
|
|
|
|
if BitOffset == Unit * 8:
|
|
for SubIdx in range(StartIdx, Idx + 1):
|
|
Item['subreg'][SubIdx]['bitunit'] = Unit
|
|
BitOffset = 0
|
|
StartIdx = Idx + 1
|
|
Unit = None
|
|
|
|
if BitOffset > 0:
|
|
raise Exception("Bit fields cannot fit into %s for \
|
|
'%s.%s' !" % (UnitDec[Unit], Item['cname'], SubItem['cname']))
|
|
|
|
ExpectedTotal = Item['length'] * 8
|
|
if Item['length'] * 8 != BitTotal:
|
|
raise Exception("Bit fields total length (%d) does not match \
|
|
length (%d) of '%s' !" % (BitTotal, ExpectedTotal, Item['cname']))
|
|
|
|
def UpdateDefaultValue(self):
|
|
Error = 0
|
|
for Idx, Item in enumerate(self._CfgItemList):
|
|
if len(Item['subreg']) == 0:
|
|
Value = Item['value']
|
|
if (len(Value) > 0) and (Value[0] == '{' or Value[0] == "'" or
|
|
Value[0] == '"'):
|
|
# {XXX} or 'XXX' strings
|
|
self.FormatListValue(self._CfgItemList[Idx])
|
|
else:
|
|
Match = re.match("(0x[0-9a-fA-F]+|[0-9]+)", Value)
|
|
if not Match:
|
|
NumValue = self.EvaluateExpress(Value)
|
|
Item['value'] = '0x%X' % NumValue
|
|
else:
|
|
ValArray = self.ValueToByteArray(Item['value'], Item['length'])
|
|
for SubItem in Item['subreg']:
|
|
SubItem['value'] = self.GetBsfBitFields(SubItem, ValArray)
|
|
self.UpdateBsfBitUnit(Item)
|
|
return Error
|
|
|
|
@staticmethod
|
|
def ExpandIncludeFiles(FilePath, CurDir=''):
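        # Read a file and recursively expand '!include' lines, returning a
        # list of (line, source file path, line number) tuples.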
|
|
if CurDir == '':
|
|
CurDir = os.path.dirname(FilePath)
|
|
FilePath = os.path.basename(FilePath)
|
|
|
|
InputFilePath = os.path.join(CurDir, FilePath)
|
|
File = open(InputFilePath, "r")
|
|
Lines = File.readlines()
|
|
File.close()
|
|
|
|
NewLines = []
|
|
for LineNum, Line in enumerate(Lines):
|
|
Match = re.match("^!include\\s*(.+)?$", Line)
|
|
if Match:
|
|
IncPath = Match.group(1)
|
|
TmpPath = os.path.join(CurDir, IncPath)
|
|
OrgPath = TmpPath
|
|
if not os.path.exists(TmpPath):
|
|
CurDir = os.path.join(os.path.dirname(
|
|
os.path.realpath(__file__)), "..", "..")
|
|
TmpPath = os.path.join(CurDir, IncPath)
|
|
if not os.path.exists(TmpPath):
|
|
raise Exception("ERROR: Cannot open include file '%s'." %
|
|
OrgPath)
|
|
else:
|
|
NewLines.append(('# Included from file: %s\n' %
|
|
IncPath, TmpPath, 0))
|
|
NewLines.append(('# %s\n' % ('=' * 80), TmpPath, 0))
|
|
NewLines.extend(CGenCfgData.ExpandIncludeFiles
|
|
(IncPath, CurDir))
|
|
else:
|
|
NewLines.append((Line, InputFilePath, LineNum))
|
|
|
|
return NewLines
|
|
|
|
def OverrideDefaultValue(self, DltFile):
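        # Apply a delta (.dlt) file: each 'STRUCT.Item[.BitField] | value'
        # line overrides the default value of the matching config item.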
|
|
Error = 0
|
|
DltLines = CGenCfgData.ExpandIncludeFiles(DltFile)
|
|
|
|
PlatformId = None
|
|
for Line, FilePath, LineNum in DltLines:
|
|
Line = Line.strip()
|
|
if not Line or Line.startswith('#'):
|
|
continue
|
|
Match = re.match("\\s*(\\w+)\\.(\\w+)(\\.\\w+)?\\s*\\|\\s*(.+)",
|
|
Line)
|
|
if not Match:
|
|
raise Exception("Unrecognized line '%s' (File:'%s' Line:%d) !"
|
|
% (Line, FilePath, LineNum + 1))
|
|
|
|
Found = False
|
|
InScope = False
|
|
for Idx, Item in enumerate(self._CfgItemList):
|
|
if not InScope:
|
|
if not (Item['embed'].endswith(':START') and
|
|
Item['embed'].startswith(Match.group(1))):
|
|
continue
|
|
InScope = True
|
|
if Item['cname'] == Match.group(2):
|
|
Found = True
|
|
break
|
|
if Item['embed'].endswith(':END') and \
|
|
Item['embed'].startswith(Match.group(1)):
|
|
break
|
|
Name = '%s.%s' % (Match.group(1), Match.group(2))
|
|
if not Found:
|
|
ErrItem = Match.group(2) if InScope else Match.group(1)
|
|
raise Exception("Invalid configuration '%s' in '%s' \
|
|
(File:'%s' Line:%d) !" % (ErrItem, Name, FilePath, LineNum + 1))
|
|
|
|
ValueStr = Match.group(4).strip()
|
|
if Match.group(3) is not None:
|
|
# This is a subregion item
|
|
BitField = Match.group(3)[1:]
|
|
Found = False
|
|
if len(Item['subreg']) > 0:
|
|
for SubItem in Item['subreg']:
|
|
if SubItem['cname'] == '%s_%s' % \
|
|
(Item['cname'], BitField):
|
|
Found = True
|
|
break
|
|
if not Found:
|
|
raise Exception("Invalid configuration bit field \
|
|
'%s' in '%s.%s' (File:'%s' Line:%d) !" % (BitField, Name, BitField,
|
|
FilePath, LineNum + 1))
|
|
|
|
try:
|
|
Value = int(ValueStr, 16) if ValueStr.startswith('0x') \
|
|
else int(ValueStr, 10)
|
|
except Exception:
|
|
raise Exception("Invalid value '%s' for bit field '%s.%s' \
|
|
(File:'%s' Line:%d) !" % (ValueStr, Name, BitField, FilePath, LineNum + 1))
|
|
|
|
if Value >= 2 ** SubItem['bitlength']:
|
|
raise Exception("Invalid configuration bit field value \
|
|
'%s' for '%s.%s' (File:'%s' Line:%d) !" % (Value, Name, BitField,
|
|
FilePath, LineNum + 1))
|
|
|
|
ValArray = self.ValueToByteArray(Item['value'], Item['length'])
|
|
self.UpdateBsfBitFields(SubItem, Value, ValArray)
|
|
|
|
if Item['value'].startswith('{'):
|
|
Item['value'] = '{' + ', '.join('0x%02X' % i
|
|
for i in ValArray) + '}'
|
|
else:
|
|
BitsValue = ''.join('{0:08b}'.format(i)
|
|
for i in ValArray[::-1])
|
|
Item['value'] = '0x%X' % (int(BitsValue, 2))
|
|
else:
|
|
if Item['value'].startswith('{') and \
|
|
not ValueStr.startswith('{'):
|
|
raise Exception("Data array required for '%s' \
|
|
(File:'%s' Line:%d) !" % (Name, FilePath, LineNum + 1))
|
|
Item['value'] = ValueStr
|
|
|
|
if Name == 'PLATFORMID_CFG_DATA.PlatformId':
|
|
PlatformId = ValueStr
|
|
|
|
if (PlatformId is None) and (self.Mode != 'FSP'):
|
|
raise Exception("PLATFORMID_CFG_DATA.PlatformId is missing \
|
|
in file '%s' !" % (DltFile))
|
|
|
|
return Error
|
|
|
|
def ProcessMultilines(self, String, MaxCharLength):
|
|
Multilines = ''
|
|
StringLength = len(String)
|
|
CurrentStringStart = 0
|
|
StringOffset = 0
|
|
BreakLineDict = []
|
|
if len(String) <= MaxCharLength:
|
|
while (StringOffset < StringLength):
|
|
if StringOffset >= 1:
|
|
if String[StringOffset - 1] == '\\' and \
|
|
String[StringOffset] == 'n':
|
|
BreakLineDict.append(StringOffset + 1)
|
|
StringOffset += 1
|
|
if BreakLineDict != []:
|
|
for Each in BreakLineDict:
|
|
Multilines += " %s\n" % String[CurrentStringStart:Each].\
|
|
lstrip()
|
|
CurrentStringStart = Each
|
|
if StringLength - CurrentStringStart > 0:
|
|
Multilines += " %s\n" % String[CurrentStringStart:].\
|
|
lstrip()
|
|
else:
|
|
Multilines = " %s\n" % String
|
|
else:
|
|
NewLineStart = 0
|
|
NewLineCount = 0
|
|
FoundSpaceChar = False
|
|
while(StringOffset < StringLength):
|
|
if StringOffset >= 1:
|
|
if NewLineCount >= MaxCharLength - 1:
|
|
if String[StringOffset] == ' ' and \
|
|
StringLength - StringOffset > 10:
|
|
BreakLineDict.append(NewLineStart + NewLineCount)
|
|
NewLineStart = NewLineStart + NewLineCount
|
|
NewLineCount = 0
|
|
FoundSpaceChar = True
|
|
elif StringOffset == StringLength - 1 \
|
|
and FoundSpaceChar is False:
|
|
BreakLineDict.append(0)
|
|
if String[StringOffset - 1] == '\\' and \
|
|
String[StringOffset] == 'n':
|
|
BreakLineDict.append(StringOffset + 1)
|
|
NewLineStart = StringOffset + 1
|
|
NewLineCount = 0
|
|
StringOffset += 1
|
|
NewLineCount += 1
|
|
if BreakLineDict != []:
|
|
BreakLineDict.sort()
|
|
for Each in BreakLineDict:
|
|
if Each > 0:
|
|
Multilines += " %s\n" % String[
|
|
CurrentStringStart:Each].lstrip()
|
|
CurrentStringStart = Each
|
|
if StringLength - CurrentStringStart > 0:
|
|
Multilines += " %s\n" % String[CurrentStringStart:].\
|
|
lstrip()
|
|
return Multilines
|
|
|
|
def CreateField(self, Item, Name, Length, Offset, Struct,
|
|
BsfName, Help, Option, BitsLength=None):
|
|
PosName = 28
|
|
NameLine = ''
|
|
HelpLine = ''
|
|
OptionLine = ''
|
|
|
|
if Length == 0 and Name == 'Dummy':
|
|
return '\n'
|
|
|
|
IsArray = False
|
|
if Length in [1, 2, 4, 8]:
|
|
Type = "UINT%d" % (Length * 8)
|
|
else:
|
|
IsArray = True
|
|
Type = "UINT8"
|
|
|
|
if Item and Item['value'].startswith('{'):
|
|
Type = "UINT8"
|
|
IsArray = True
|
|
|
|
if Struct != '':
|
|
Type = Struct
|
|
if Struct in ['UINT8', 'UINT16', 'UINT32', 'UINT64']:
|
|
IsArray = True
|
|
Unit = int(Type[4:]) // 8
|
|
                Length = Length // Unit
|
|
else:
|
|
IsArray = False
|
|
|
|
if IsArray:
|
|
Name = Name + '[%d]' % Length
|
|
|
|
if len(Type) < PosName:
|
|
Space1 = PosName - len(Type)
|
|
else:
|
|
Space1 = 1
|
|
|
|
if BsfName != '':
|
|
NameLine = " %s\n" % BsfName
|
|
else:
|
|
NameLine = "\n"
|
|
|
|
if Help != '':
|
|
HelpLine = self.ProcessMultilines(Help, 80)
|
|
|
|
if Option != '':
|
|
OptionLine = self.ProcessMultilines(Option, 80)
|
|
|
|
if BitsLength is None:
|
|
BitsLength = ''
|
|
else:
|
|
BitsLength = ' : %d' % BitsLength
|
|
|
|
return "\n/** %s%s%s**/\n %s%s%s%s;\n" % \
|
|
(NameLine, HelpLine, OptionLine, Type, ' ' * Space1, Name,
|
|
BitsLength)
|
|
|
|
def SplitTextBody(self, TextBody):
|
|
Marker1 = '{ /* _COMMON_STRUCT_START_ */'
|
|
Marker2 = '; /* _COMMON_STRUCT_END_ */'
|
|
ComBody = []
|
|
TxtBody = []
|
|
IsCommon = False
|
|
for Line in TextBody:
|
|
if Line.strip().endswith(Marker1):
|
|
Line = Line.replace(Marker1[1:], '')
|
|
IsCommon = True
|
|
if Line.strip().endswith(Marker2):
|
|
Line = Line.replace(Marker2[1:], '')
|
|
if IsCommon:
|
|
ComBody.append(Line)
|
|
IsCommon = False
|
|
continue
|
|
if IsCommon:
|
|
ComBody.append(Line)
|
|
else:
|
|
TxtBody.append(Line)
|
|
return ComBody, TxtBody
|
|
|
|
def GetStructArrayInfo(self, Input):
|
|
ArrayStr = Input.split('[')
|
|
Name = ArrayStr[0]
|
|
if len(ArrayStr) > 1:
|
|
NumStr = ''.join(c for c in ArrayStr[-1] if c.isdigit())
|
|
NumStr = '1000' if len(NumStr) == 0 else NumStr
|
|
ArrayNum = int(NumStr)
|
|
else:
|
|
ArrayNum = 0
|
|
return Name, ArrayNum
|
|
|
|
def PostProcessBody(self, TextBody, IncludeEmbedOnly=True):
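        # Convert the EMBED_STRUCT markers emitted by CreateHeaderFile() into
        # nested 'typedef struct' definitions, collapsing repeated entries
        # into arrays and separating common structures from platform ones.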
|
|
NewTextBody = []
|
|
OldTextBody = []
|
|
IncTextBody = []
|
|
StructBody = []
|
|
IncludeLine = False
|
|
EmbedFound = False
|
|
StructName = ''
|
|
ArrayVarName = ''
|
|
VariableName = ''
|
|
Count = 0
|
|
Level = 0
|
|
IsCommonStruct = False
|
|
|
|
for Line in TextBody:
|
|
if Line.startswith('#define '):
|
|
IncTextBody.append(Line)
|
|
continue
|
|
|
|
if not Line.startswith('/* EMBED_STRUCT:'):
|
|
Match = False
|
|
else:
|
|
Match = re.match("^/\\*\\sEMBED_STRUCT:([\\w\\[\\]\\*]+):\
|
|
([\\w\\[\\]\\*]+):(\\w+):(START|END)([\\s\\d]+)\\*/([\\s\\S]*)", Line)
|
|
|
|
if Match:
|
|
ArrayMarker = Match.group(5)
|
|
if Match.group(4) == 'END':
|
|
Level -= 1
|
|
if Level == 0:
|
|
Line = Match.group(6)
|
|
else: # 'START'
|
|
Level += 1
|
|
if Level == 1:
|
|
Line = Match.group(6)
|
|
else:
|
|
EmbedFound = True
|
|
TagStr = Match.group(3)
|
|
if TagStr.startswith('TAG_'):
|
|
try:
|
|
TagVal = int(TagStr[4:], 16)
|
|
except Exception:
|
|
TagVal = -1
|
|
if (TagVal >= 0) and (TagVal < self._MinCfgTagId):
|
|
IsCommonStruct = True
|
|
|
|
if Level == 1:
|
|
if IsCommonStruct:
|
|
Suffix = ' /* _COMMON_STRUCT_START_ */'
|
|
else:
|
|
Suffix = ''
|
|
StructBody = ['typedef struct {%s' % Suffix]
|
|
StructName = Match.group(1)
|
|
StructType = Match.group(2)
|
|
VariableName = Match.group(3)
|
|
MatchOffset = re.search('/\\*\\*\\sOffset\\s0x\
|
|
([a-fA-F0-9]+)', Line)
|
|
if MatchOffset:
|
|
Offset = int(MatchOffset.group(1), 16)
|
|
else:
|
|
Offset = None
|
|
IncludeLine = True
|
|
|
|
ModifiedStructType = StructType.rstrip()
|
|
if ModifiedStructType.endswith(']'):
|
|
Idx = ModifiedStructType.index('[')
|
|
if ArrayMarker != ' ':
|
|
# Auto array size
|
|
OldTextBody.append('')
|
|
ArrayVarName = VariableName
|
|
if int(ArrayMarker) == 1000:
|
|
Count = 1
|
|
else:
|
|
Count = int(ArrayMarker) + 1000
|
|
else:
|
|
if Count < 1000:
|
|
Count += 1
|
|
|
|
VariableTemp = ArrayVarName + '[%d]' % (
|
|
Count if Count < 1000 else Count - 1000)
|
|
OldTextBody[-1] = self.CreateField(
|
|
None, VariableTemp, 0, Offset,
|
|
ModifiedStructType[:Idx], '',
|
|
'Structure Array', '')
|
|
else:
|
|
ArrayVarName = ''
|
|
OldTextBody.append(self.CreateField(
|
|
None, VariableName, 0, Offset,
|
|
ModifiedStructType, '', '', ''))
|
|
|
|
if IncludeLine:
|
|
StructBody.append(Line)
|
|
else:
|
|
OldTextBody.append(Line)
|
|
|
|
if Match and Match.group(4) == 'END':
|
|
if Level == 0:
|
|
if (StructType != Match.group(2)) or \
|
|
(VariableName != Match.group(3)):
|
|
print("Unmatched struct name '%s' and '%s' !" %
|
|
(StructName, Match.group(2)))
|
|
else:
|
|
if IsCommonStruct:
|
|
Suffix = ' /* _COMMON_STRUCT_END_ */'
|
|
else:
|
|
Suffix = ''
|
|
Line = '} %s;%s\n\n\n' % (StructName, Suffix)
|
|
StructBody.append(Line)
|
|
if (Line not in NewTextBody) and \
|
|
(Line not in OldTextBody):
|
|
NewTextBody.extend(StructBody)
|
|
IncludeLine = False
|
|
IsCommonStruct = False
|
|
|
|
if not IncludeEmbedOnly:
|
|
NewTextBody.extend(OldTextBody)
|
|
|
|
if EmbedFound:
|
|
NewTextBody = self.PostProcessBody(NewTextBody, False)
|
|
|
|
NewTextBody = IncTextBody + NewTextBody
|
|
return NewTextBody
|
|
|
|
def WriteHeaderFile(self, TxtBody, FileName, Type='h'):
|
|
FileNameDef = os.path.basename(FileName).replace('.', '_')
|
|
FileNameDef = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', FileNameDef)
|
|
FileNameDef = re.sub('([a-z0-9])([A-Z])', r'\1_\2',
|
|
FileNameDef).upper()
|
|
|
|
Lines = []
|
|
Lines.append("%s\n" % GetCopyrightHeader(Type))
|
|
Lines.append("#ifndef __%s__\n" % FileNameDef)
|
|
Lines.append("#define __%s__\n\n" % FileNameDef)
|
|
if Type == 'h':
|
|
Lines.append("#pragma pack(1)\n\n")
|
|
Lines.extend(TxtBody)
|
|
if Type == 'h':
|
|
Lines.append("#pragma pack()\n\n")
|
|
Lines.append("#endif\n")
|
|
|
|
# Don't rewrite if the contents are the same
|
|
Create = True
|
|
if os.path.exists(FileName):
|
|
HdrFile = open(FileName, "r")
|
|
OrgTxt = HdrFile.read()
|
|
HdrFile.close()
|
|
|
|
NewTxt = ''.join(Lines)
|
|
if OrgTxt == NewTxt:
|
|
Create = False
|
|
|
|
if Create:
|
|
HdrFile = open(FileName, "w")
|
|
HdrFile.write(''.join(Lines))
|
|
HdrFile.close()
|
|
|
|
def CreateHeaderFile(self, HdrFileName, ComHdrFileName=''):
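        # Generate the C header describing the configuration layout: a packed
        # struct with one field per visible config item (plus reserved and
        # unused gap fields) and CDATA_*_TAG defines for every CFGDATA tag.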
|
|
LastStruct = ''
|
|
SpaceIdx = 0
|
|
Offset = 0
|
|
FieldIdx = 0
|
|
LastFieldIdx = 0
|
|
ResvOffset = 0
|
|
ResvIdx = 0
|
|
TxtBody = []
|
|
LineBuffer = []
|
|
CfgTags = []
|
|
LastVisible = True
|
|
|
|
TxtBody.append("typedef struct {\n")
|
|
for Item in self._CfgItemList:
|
|
# Search for CFGDATA tags
|
|
Embed = Item["embed"].upper()
|
|
if Embed.endswith(':START'):
|
|
Match = re.match(r'(\w+)_CFG_DATA:TAG_([0-9A-F]+):START',
|
|
Embed)
|
|
if Match:
|
|
TagName = Match.group(1)
|
|
TagId = int(Match.group(2), 16)
|
|
CfgTags.append((TagId, TagName))
|
|
|
|
# Only process visible items
|
|
NextVisible = LastVisible
|
|
|
|
if LastVisible and (Item['header'] == 'OFF'):
|
|
NextVisible = False
|
|
ResvOffset = Item['offset']
|
|
elif (not LastVisible) and Item['header'] == 'ON':
|
|
NextVisible = True
|
|
Name = "ReservedUpdSpace%d" % ResvIdx
|
|
ResvIdx = ResvIdx + 1
|
|
TxtBody.append(self.CreateField(
|
|
Item, Name, Item["offset"] - ResvOffset,
|
|
ResvOffset, '', '', '', ''))
|
|
FieldIdx += 1
|
|
|
|
if Offset < Item["offset"]:
|
|
if LastVisible:
|
|
Name = "UnusedUpdSpace%d" % SpaceIdx
|
|
LineBuffer.append(self.CreateField
|
|
(Item, Name, Item["offset"] -
|
|
Offset, Offset, '', '', '', ''))
|
|
FieldIdx += 1
|
|
SpaceIdx = SpaceIdx + 1
|
|
Offset = Item["offset"]
|
|
|
|
LastVisible = NextVisible
|
|
|
|
Offset = Offset + Item["length"]
|
|
if LastVisible:
|
|
for Each in LineBuffer:
|
|
TxtBody.append(Each)
|
|
LineBuffer = []
|
|
Embed = Item["embed"].upper()
|
|
if Embed.endswith(':START') or Embed.endswith(':END'):
|
|
# EMBED_STRUCT: StructName : \
|
|
# ItemName : VariableName : START|END
|
|
Name, ArrayNum = self.GetStructArrayInfo(Item["struct"])
|
|
Remaining = Item["embed"]
|
|
if (LastFieldIdx + 1 == FieldIdx) and (LastStruct == Name):
|
|
ArrayMarker = ' '
|
|
else:
|
|
ArrayMarker = '%d' % ArrayNum
|
|
LastFieldIdx = FieldIdx
|
|
LastStruct = Name
|
|
Marker = '/* EMBED_STRUCT:%s:%s%s*/ ' % (Name, Remaining,
|
|
ArrayMarker)
|
|
# if Embed.endswith(':START') and Comment != '':
|
|
# Marker = '/* COMMENT:%s */ \n' % Item["comment"] + Marker
|
|
else:
|
|
if Embed == '':
|
|
Marker = ''
|
|
else:
|
|
self.Error = "Invalid embedded structure \
|
|
format '%s'!\n" % Item["embed"]
|
|
return 4
|
|
|
|
# Generate bit fields for structure
|
|
if len(Item['subreg']) > 0 and Item["struct"]:
|
|
StructType = Item["struct"]
|
|
StructName, ArrayNum = self.GetStructArrayInfo(StructType)
|
|
if (LastFieldIdx + 1 == FieldIdx) and \
|
|
(LastStruct == Item["struct"]):
|
|
ArrayMarker = ' '
|
|
else:
|
|
ArrayMarker = '%d' % ArrayNum
|
|
TxtBody.append('/* EMBED_STRUCT:%s:%s:%s:START%s*/\n' %
|
|
(StructName, StructType, Item["cname"],
|
|
ArrayMarker))
|
|
for SubItem in Item['subreg']:
|
|
Name = SubItem["cname"]
|
|
if Name.startswith(Item["cname"]):
|
|
Name = Name[len(Item["cname"]) + 1:]
|
|
Line = self.CreateField(
|
|
SubItem, Name, SubItem["bitunit"],
|
|
SubItem["offset"], SubItem['struct'],
|
|
SubItem['name'], SubItem['help'],
|
|
SubItem['option'], SubItem['bitlength'])
|
|
TxtBody.append(Line)
|
|
TxtBody.append('/* EMBED_STRUCT:%s:%s:%s:END%s*/\n' %
|
|
(StructName, StructType, Item["cname"],
|
|
ArrayMarker))
|
|
LastFieldIdx = FieldIdx
|
|
LastStruct = Item["struct"]
|
|
FieldIdx += 1
|
|
else:
|
|
FieldIdx += 1
|
|
Line = Marker + self.CreateField(
|
|
Item, Item["cname"], Item["length"], Item["offset"],
|
|
Item['struct'], Item['name'], Item['help'],
|
|
Item['option'])
|
|
TxtBody.append(Line)
|
|
|
|
TxtBody.append("}\n\n")
|
|
|
|
# Handle the embedded data structure
|
|
TxtBody = self.PostProcessBody(TxtBody)
|
|
ComBody, TxtBody = self.SplitTextBody(TxtBody)
|
|
|
|
# Prepare TAG defines
|
|
PltTagDefTxt = ['\n']
|
|
ComTagDefTxt = ['\n']
|
|
for TagId, TagName in sorted(CfgTags):
|
|
TagLine = '#define %-30s 0x%03X\n' % ('CDATA_%s_TAG' %
|
|
TagName, TagId)
|
|
if TagId < self._MinCfgTagId:
|
|
# TAG ID < 0x100, it is a generic TAG
|
|
ComTagDefTxt.append(TagLine)
|
|
else:
|
|
PltTagDefTxt.append(TagLine)
|
|
PltTagDefTxt.append('\n\n')
|
|
ComTagDefTxt.append('\n\n')
|
|
|
|
# Write file back
|
|
self.WriteHeaderFile(PltTagDefTxt + TxtBody, HdrFileName)
|
|
if ComHdrFileName:
|
|
self.WriteHeaderFile(ComTagDefTxt + ComBody, ComHdrFileName)
|
|
|
|
return 0
|
|
|
|
def UpdateConfigItemValue(self, Item, ValueStr):
|
|
IsArray = True if Item['value'].startswith('{') else False
|
|
IsString = True if Item['value'].startswith("'") else False
|
|
Bytes = self.ValueToByteArray(ValueStr, Item['length'])
|
|
if IsString:
|
|
NewValue = "'%s'" % Bytes.decode("utf-8")
|
|
elif IsArray:
|
|
NewValue = Bytes2Str(Bytes)
|
|
else:
|
|
Fmt = '0x%X' if Item['value'].startswith('0x') else '%d'
|
|
NewValue = Fmt % Bytes2Val(Bytes)
|
|
Item['value'] = NewValue
|
|
|
|
def LoadDefaultFromBinaryArray(self, BinDat, IgnoreFind=False):
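        # Load item values back from a binary blob, using each item's 'find'
        # signature (unless IgnoreFind) to locate its region in the data.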
|
|
FindOff = 0
|
|
StartOff = 0
|
|
for Item in self._CfgItemList:
|
|
if Item['length'] == 0:
|
|
continue
|
|
if not IgnoreFind and Item['find']:
|
|
FindBin = Item['find'].encode()
|
|
Offset = BinDat.find(FindBin)
|
|
if Offset >= 0:
|
|
TestOff = BinDat[Offset+len(FindBin):].find(FindBin)
|
|
if TestOff >= 0:
|
|
raise Exception('Multiple match found for "%s" !' %
|
|
Item['find'])
|
|
FindOff = Offset + len(FindBin)
|
|
StartOff = Item['offset']
|
|
else:
|
|
raise Exception('Could not find "%s" !' % Item['find'])
|
|
if Item['offset'] + Item['length'] > len(BinDat):
|
|
raise Exception('Mismatching format between DSC \
|
|
and BIN files !')
|
|
Offset = FindOff + (Item['offset'] - StartOff)
|
|
ValStr = Bytes2Str(BinDat[Offset: Offset + Item['length']])
|
|
self.UpdateConfigItemValue(Item, ValStr)
|
|
|
|
self.UpdateDefaultValue()
|
|
|
|
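    # Patch a binary image in place: gather contiguous runs of config bytes
    # into (file offset, data) pairs and write them back, keeping the file's
    # original bytes for Reserved items marked with $SKIP.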
    def PatchBinaryArray(self, BinDat):
        FileOff = 0
        Offset = 0
        FindOff = 0

        PatchList = []
        CfgBin = bytearray()
        for Item in self._CfgItemList:
            if Item['length'] == 0:
                continue

            if Item['find']:
                if len(CfgBin) > 0:
                    PatchList.append((FileOff, CfgBin))
                FindBin = Item['find'].encode()
                FileOff = BinDat.find(FindBin)
                if FileOff < 0:
                    raise Exception('Could not find "%s" !' % Item['find'])
                else:
                    TestOff = BinDat[FileOff+len(FindBin):].find(FindBin)
                    if TestOff >= 0:
                        raise Exception('Multiple match found for "%s" !' %
                                        Item['find'])
                FileOff += len(FindBin)
                Offset = Item['offset']
                FindOff = Offset
                CfgBin = bytearray()

            if Item['offset'] > Offset:
                Gap = Item['offset'] - Offset
                CfgBin.extend(b'\x00' * Gap)

            if Item['type'] == 'Reserved' and Item['option'] == '$SKIP':
                # keep old data
                NewOff = FileOff + (Offset - FindOff)
                FileData = bytearray(BinDat[NewOff: NewOff + Item['length']])
                CfgBin.extend(FileData)
            else:
                CfgBin.extend(self.ValueToByteArray(Item['value'],
                                                    Item['length']))
            Offset = Item['offset'] + Item['length']

        if len(CfgBin) > 0:
            PatchList.append((FileOff, CfgBin))

        for FileOff, CfgBin in PatchList:
            Length = len(CfgBin)
            if FileOff + Length < len(BinDat):
                BinDat[FileOff:FileOff+Length] = CfgBin[:]

        return BinDat

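    # Serialize all config items into one byte array, zero-filling any gaps
    # between item offsets.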
    def GenerateBinaryArray(self):
        Offset = 0
        BinDat = bytearray()
        for Item in self._CfgItemList:
            if Item['offset'] > Offset:
                Gap = Item['offset'] - Offset
                BinDat.extend(b'\x00' * Gap)
            BinDat.extend(self.ValueToByteArray(Item['value'], Item['length']))
            Offset = Item['offset'] + Item['length']
        return BinDat

    def GenerateBinary(self, BinFileName):
        BinFile = open(BinFileName, "wb")
        BinFile.write(self.GenerateBinaryArray())
        BinFile.close()
        return 0

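    # Emit the CFGDATA blob as a C byte array (mConfigDataBlob), prefixed
    # with a GUID marker so the blob can be located later in the final image.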
    def GenerateDataIncFile(self, DatIncFileName, BinFile=None):
        # Put a prefix GUID before CFGDATA so that it can be located later on
        Prefix = b'\xa7\xbd\x7f\x73\x20\x1e\x46\xd6\xbe\x8f\
x64\x12\x05\x8d\x0a\xa8'
        if BinFile:
            Fin = open(BinFile, 'rb')
            BinDat = Prefix + bytearray(Fin.read())
            Fin.close()
        else:
            BinDat = Prefix + self.GenerateBinaryArray()

        FileName = os.path.basename(DatIncFileName).upper()
        FileName = FileName.replace('.', '_')

        TxtLines = []

        TxtLines.append("UINT8 mConfigDataBlob[%d] = {\n" % len(BinDat))
        Count = 0
        Line = ['  ']
        for Each in BinDat:
            Line.append('0x%02X, ' % Each)
            Count = Count + 1
            if (Count & 0x0F) == 0:
                Line.append('\n')
                TxtLines.append(''.join(Line))
                Line = ['  ']
        if len(Line) > 1:
            TxtLines.append(''.join(Line) + '\n')

        TxtLines.append("};\n\n")

        self.WriteHeaderFile(TxtLines, DatIncFileName, 'inc')

        return 0

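    # Detect duplicated cnames across items and their bit-field sub-items,
    # ignoring the generic placeholder names.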
    def CheckCfgData(self):
        # Check if CfgData contains any duplicated name
        def AddItem(Item, ChkList):
            Name = Item['cname']
            if Name in ChkList:
                return Item
            if Name not in ['Dummy', 'Reserved', 'CfgHeader', 'CondValue']:
                ChkList.append(Name)
            return None

        Duplicate = None
        ChkList = []
        for Item in self._CfgItemList:
            Duplicate = AddItem(Item, ChkList)
            if not Duplicate:
                for SubItem in Item['subreg']:
                    Duplicate = AddItem(SubItem, ChkList)
                    if Duplicate:
                        break
            if Duplicate:
                break
        if Duplicate:
            self.Error = "Duplicated CFGDATA '%s' found !\n" % \
                         Duplicate['cname']
            return -1
        return 0

    def PrintData(self):
        for Item in self._CfgItemList:
            if not Item['length']:
                continue
            print("%-10s @Offset:0x%04X Len:%3d Val:%s" %
                  (Item['cname'], Item['offset'], Item['length'],
                   Item['value']))
            for SubItem in Item['subreg']:
                print(" %-20s BitOff:0x%04X BitLen:%-3d Val:%s" %
                      (SubItem['cname'], SubItem['bitoffset'],
                       SubItem['bitlength'], SubItem['value']))

    def FormatArrayValue(self, Input, Length):
        Dat = self.ValueToByteArray(Input, Length)
        return ','.join('0x%02X' % Each for Each in Dat)

    def GetItemOptionList(self, Item):
        TmpList = []
        if Item['type'] == "Combo":
            if not Item['option'] in self._BuidinOption:
                OptList = Item['option'].split(',')
                for Option in OptList:
                    Option = Option.strip()
                    try:
                        (OpVal, OpStr) = Option.split(':')
                    except Exception:
                        raise Exception("Invalid option format '%s' !" %
                                        Option)
                    TmpList.append((OpVal, OpStr))
        return TmpList

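    # Write one StructDef entry for an item into the BSF file and return its
    # option list so the caller can emit the matching List block later.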
    def WriteBsfStruct(self, BsfFd, Item):
        if Item['type'] == "None":
            Space = "gPlatformFspPkgTokenSpaceGuid"
        else:
            Space = Item['space']
        Line = " $%s_%s" % (Space, Item['cname'])
        Match = re.match("\\s*(\\{.+\\})\\s*", Item['value'])
        if Match:
            DefaultValue = self.FormatArrayValue(Match.group(1).strip(),
                                                 Item['length'])
        else:
            DefaultValue = Item['value'].strip()
        if 'bitlength' in Item:
            if Item['bitlength']:
                BsfFd.write(" %s%s%4d bits $_DEFAULT_ = %s\n" %
                            (Line, ' ' * (64 - len(Line)), Item['bitlength'],
                             DefaultValue))
        else:
            if Item['length']:
                BsfFd.write(" %s%s%4d bytes $_DEFAULT_ = %s\n" %
                            (Line, ' ' * (64 - len(Line)), Item['length'],
                             DefaultValue))

        return self.GetItemOptionList(Item)

    def GetBsfOption(self, OptionName):
        if OptionName in self._CfgOptsDict:
            return self._CfgOptsDict[OptionName]
        else:
            return OptionName

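    # Emit a single BSF page entry: the control line (Combo, EditNum,
    # EditText or Table), its Help text, and any condition lines taken from
    # the item's 'condition' field.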
    def WriteBsfOption(self, BsfFd, Item):
        PcdName = Item['space'] + '_' + Item['cname']
        WriteHelp = 0
        BsfLines = []
        if Item['type'] == "Combo":
            if Item['option'] in self._BuidinOption:
                Options = self._BuidinOption[Item['option']]
            else:
                Options = self.GetBsfOption(PcdName)
            BsfLines.append(' %s $%s, "%s", &%s,\n' % (
                Item['type'], PcdName, Item['name'], Options))
            WriteHelp = 1
        elif Item['type'].startswith("EditNum"):
            Match = re.match("EditNum\\s*,\\s*(HEX|DEC)\\s*,\\s*\\(\
(\\d+|0x[0-9A-Fa-f]+)\\s*,\\s*(\\d+|0x[0-9A-Fa-f]+)\\)", Item['type'])
            if Match:
                BsfLines.append(' EditNum $%s, "%s", %s,\n' % (
                    PcdName, Item['name'], Match.group(1)))
                WriteHelp = 2
        elif Item['type'].startswith("EditText"):
            BsfLines.append(' %s $%s, "%s",\n' % (Item['type'], PcdName,
                                                  Item['name']))
            WriteHelp = 1
        elif Item['type'] == "Table":
            Columns = Item['option'].split(',')
            if len(Columns) != 0:
                BsfLines.append(' %s $%s "%s",' % (Item['type'], PcdName,
                                                   Item['name']))
                for Col in Columns:
                    Fmt = Col.split(':')
                    if len(Fmt) != 3:
                        raise Exception("Column format '%s' is invalid !" %
                                        Fmt)
                    try:
                        Dtype = int(Fmt[1].strip())
                    except Exception:
                        raise Exception("Column size '%s' is invalid !" %
                                        Fmt[1])
                    BsfLines.append('\n Column "%s", %d bytes, %s' %
                                    (Fmt[0].strip(), Dtype, Fmt[2].strip()))
                BsfLines.append(',\n')
                WriteHelp = 1

        if WriteHelp > 0:
            HelpLines = Item['help'].split('\\n\\r')
            FirstLine = True
            for HelpLine in HelpLines:
                if FirstLine:
                    FirstLine = False
                    BsfLines.append(' Help "%s"\n' % (HelpLine))
                else:
                    BsfLines.append(' "%s"\n' % (HelpLine))
            if WriteHelp == 2:
                BsfLines.append(' "Valid range: %s ~ %s"\n' %
                                (Match.group(2), Match.group(3)))

        if len(Item['condition']) > 4:
            CondList = Item['condition'].split(',')
            Idx = 0
            for Cond in CondList:
                Cond = Cond.strip()
                if Cond.startswith('#'):
                    BsfLines.insert(Idx, Cond + '\n')
                    Idx += 1
                elif Cond.startswith('@#'):
                    BsfLines.append(Cond[1:] + '\n')

        for Line in BsfLines:
            BsfFd.write(Line)

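    # Recursively emit Page/EndPage blocks for the page tree, writing the
    # items that belong to each page sorted by their 'order' key.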
    def WriteBsfPages(self, PageTree, BsfFd):
        BsfFd.write('\n')
        Key = next(iter(PageTree))
        for Page in PageTree[Key]:
            PageName = next(iter(Page))
            BsfFd.write('Page "%s"\n' % self._CfgPageDict[PageName])
            if len(PageTree[Key]):
                self.WriteBsfPages(Page, BsfFd)

            BsfItems = []
            for Item in self._CfgItemList:
                if Item['name'] != '':
                    if Item['page'] != PageName:
                        continue
                    if len(Item['subreg']) > 0:
                        for SubItem in Item['subreg']:
                            if SubItem['name'] != '':
                                BsfItems.append(SubItem)
                    else:
                        BsfItems.append(Item)

            BsfItems.sort(key=lambda x: x['order'])

            for Item in BsfItems:
                self.WriteBsfOption(BsfFd, Item)
            BsfFd.write("EndPage\n\n")

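    # Produce the full BSF file: copyright header, global definitions, the
    # StructDef section with Find/Skip directives, reusable option lists,
    # the info block, and the page tree.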
    def GenerateBsfFile(self, BsfFile):

        if BsfFile == '':
            self.Error = "BSF output file '%s' is invalid" % BsfFile
            return 1

        Error = 0
        OptionDict = {}
        BsfFd = open(BsfFile, "w")
        BsfFd.write("%s\n" % GetCopyrightHeader('bsf'))
        BsfFd.write("%s\n" % self._GlobalDataDef)
        BsfFd.write("StructDef\n")
        NextOffset = -1
        for Item in self._CfgItemList:
            if Item['find'] != '':
                BsfFd.write('\n Find "%s"\n' % Item['find'])
                NextOffset = Item['offset'] + Item['length']
            if Item['name'] != '':
                if NextOffset != Item['offset']:
                    BsfFd.write(" Skip %d bytes\n" %
                                (Item['offset'] - NextOffset))
                if len(Item['subreg']) > 0:
                    NextOffset = Item['offset']
                    BitsOffset = NextOffset * 8
                    for SubItem in Item['subreg']:
                        BitsOffset += SubItem['bitlength']
                        if SubItem['name'] == '':
                            if 'bitlength' in SubItem:
                                BsfFd.write(" Skip %d bits\n" %
                                            (SubItem['bitlength']))
                            else:
                                BsfFd.write(" Skip %d bytes\n" %
                                            (SubItem['length']))
                        else:
                            Options = self.WriteBsfStruct(BsfFd, SubItem)
                            if len(Options) > 0:
                                OptionDict[SubItem['space'] + '_' +
                                           SubItem['cname']] = Options

                    NextBitsOffset = (Item['offset'] + Item['length']) * 8
                    if NextBitsOffset > BitsOffset:
                        BitsGap = NextBitsOffset - BitsOffset
                        BitsRemain = BitsGap % 8
                        if BitsRemain:
                            BsfFd.write(" Skip %d bits\n" % BitsRemain)
                            BitsGap -= BitsRemain
                        BytesRemain = BitsGap // 8
                        if BytesRemain:
                            BsfFd.write(" Skip %d bytes\n" %
                                        BytesRemain)
                    NextOffset = Item['offset'] + Item['length']
                else:
                    NextOffset = Item['offset'] + Item['length']
                    Options = self.WriteBsfStruct(BsfFd, Item)
                    if len(Options) > 0:
                        OptionDict[Item['space']+'_'+Item['cname']] = Options
        BsfFd.write("\nEndStruct\n\n")

        BsfFd.write("%s" % self._BuidinOptionTxt)

        NameList = []
        OptionList = []
        for Each in sorted(OptionDict):
            if OptionDict[Each] not in OptionList:
                NameList.append(Each)
                OptionList.append(OptionDict[Each])
                BsfFd.write("List &%s\n" % Each)
                for Item in OptionDict[Each]:
                    BsfFd.write(' Selection %s , "%s"\n' %
                                (self.EvaluateExpress(Item[0]), Item[1]))
                BsfFd.write("EndList\n\n")
            else:
                # Item has identical options to another item;
                # try to reuse the previous options instead
                Idx = OptionList.index(OptionDict[Each])
                self._CfgOptsDict[Each] = NameList[Idx]

        BsfFd.write("BeginInfoBlock\n")
        BsfFd.write(' PPVer "%s"\n' % (self._CfgBlkDict['ver']))
        BsfFd.write(' Description "%s"\n' % (self._CfgBlkDict['name']))
        BsfFd.write("EndInfoBlock\n\n")

        self.WriteBsfPages(self._CfgPageTree, BsfFd)

        BsfFd.close()
        return Error

    def WriteDeltaLine(self, OutLines, Name, ValStr, IsArray):
        if IsArray:
            Output = '%s | { %s }' % (Name, ValStr)
        else:
            Output = '%s | 0x%X' % (Name, Array2Val(ValStr))
        OutLines.append(Output)

    def WriteDeltaFile(self, OutFile, PlatformId, OutLines):
        DltFd = open(OutFile, "w")
        DltFd.write("%s\n" % GetCopyrightHeader('dlt', True))
        if PlatformId is not None:
            DltFd.write('#\n')
            DltFd.write('# Delta configuration values \
for platform ID 0x%04X\n' % PlatformId)
            DltFd.write('#\n\n')
        for Line in OutLines:
            DltFd.write('%s\n' % Line)
        DltFd.close()

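    # Compare as-built values parsed from an ABSF file against the current
    # DSC defaults and write only the differing items to a delta (.dlt) file.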
    def GenerateDeltaFile(self, OutFile, AbsfFile):
        # Parse ABSF Build in dict
        if not os.path.exists(AbsfFile):
            Lines = []
        else:
            with open(AbsfFile) as Fin:
                Lines = Fin.readlines()

        AbsfBuiltValDict = {}
        Process = False
        for Line in Lines:
            Line = Line.strip()
            if Line.startswith('StructDef'):
                Process = True
            if Line.startswith('EndStruct'):
                break
            if not Process:
                continue
            Match = re.match('\\s*\\$gCfgData_(\\w+)\\s+\
(\\d+)\\s+(bits|bytes)\\s+\\$_AS_BUILT_\\s+=\\s+(.+)\\$', Line)
            if Match:
                if Match.group(1) not in AbsfBuiltValDict:
                    AbsfBuiltValDict[Match.group(1)] = Match.group(4).strip()
                else:
                    raise Exception("Duplicated configuration \
name '%s' found !" % Match.group(1))

        # Match config item in DSC
        PlatformId = None
        OutLines = []
        TagName = ''
        Level = 0
        for Item in self._CfgItemList:
            Name = None
            if Level == 0 and Item['embed'].endswith(':START'):
                TagName = Item['embed'].split(':')[0]
                Level += 1
            if Item['cname'] in AbsfBuiltValDict:
                ValStr = AbsfBuiltValDict[Item['cname']]
                Name = '%s.%s' % (TagName, Item['cname'])
                if not Item['subreg'] and Item['value'].startswith('{'):
                    Value = Array2Val(Item['value'])
                    IsArray = True
                else:
                    Value = int(Item['value'], 16)
                    IsArray = False
                AbsfVal = Array2Val(ValStr)
                if AbsfVal != Value:
                    if 'PLATFORMID_CFG_DATA.PlatformId' == Name:
                        PlatformId = AbsfVal
                    self.WriteDeltaLine(OutLines, Name, ValStr, IsArray)
                else:
                    if 'PLATFORMID_CFG_DATA.PlatformId' == Name:
                        raise Exception("'PlatformId' has the \
same value as DSC default !")

            if Item['subreg']:
                for SubItem in Item['subreg']:
                    if SubItem['cname'] in AbsfBuiltValDict:
                        ValStr = AbsfBuiltValDict[SubItem['cname']]
                        if Array2Val(ValStr) == int(SubItem['value'], 16):
                            continue
                        Name = '%s.%s.%s' % (TagName, Item['cname'],
                                             SubItem['cname'])
                        self.WriteDeltaLine(OutLines, Name, ValStr, False)

            if Item['embed'].endswith(':END'):
                Level -= 1

        if PlatformId is None and Lines:
            raise Exception("'PlatformId' configuration \
is missing in ABSF file!")
        else:
            PlatformId = 0

        self.WriteDeltaFile(OutFile, PlatformId, OutLines)

        return 0

    def GenerateDscFile(self, OutFile):
        DscFd = open(OutFile, "w")
        for Line in self._DscLines:
            DscFd.write(Line + '\n')
        DscFd.close()
        return 0


def Usage():
    print('\n'.join([
        "GenCfgData Version 0.01",
        "Usage:",
        " GenCfgData GENINC BinFile \
IncOutFile [-D Macros]",
        " GenCfgData GENPKL DscFile \
PklOutFile [-D Macros]",
        " GenCfgData GENINC DscFile[;DltFile] \
IncOutFile [-D Macros]",
        " GenCfgData GENBIN DscFile[;DltFile] \
BinOutFile [-D Macros]",
        " GenCfgData GENBSF DscFile[;DltFile] \
BsfOutFile [-D Macros]",
        " GenCfgData GENDLT DscFile[;AbsfFile] \
DltOutFile [-D Macros]",
        " GenCfgData GENDSC DscFile \
DscOutFile [-D Macros]",
        " GenCfgData GENHDR DscFile[;DltFile] \
HdrOutFile[;ComHdrOutFile] [-D Macros]"
    ]))


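# Command line entry point: parse -D macros and the ';'-separated input
# files, then dispatch GENINC/GENPKL/GENBIN/GENBSF/GENDLT/GENDSC/GENHDR to
# the corresponding CGenCfgData generator.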
def Main():
    #
    # Parse the options and args
    #
    argc = len(sys.argv)
    if argc < 4:
        Usage()
        return 1

    GenCfgData = CGenCfgData()
    Command = sys.argv[1].upper()
    OutFile = sys.argv[3]

    if argc > 5 and GenCfgData.ParseMacros(sys.argv[4:]) != 0:
        raise Exception("ERROR: Macro parsing failed !")

    FileList = sys.argv[2].split(';')
    if len(FileList) == 2:
        DscFile = FileList[0]
        DltFile = FileList[1]
    elif len(FileList) == 1:
        DscFile = FileList[0]
        DltFile = ''
    else:
        raise Exception("ERROR: Invalid parameter '%s' !" % sys.argv[2])

    if Command == "GENDLT" and DscFile.endswith('.dlt'):
        # It needs to expand an existing DLT file
        DltFile = DscFile
        Lines = CGenCfgData.ExpandIncludeFiles(DltFile)
        OutTxt = ''.join([x[0] for x in Lines])
        OutFile = open(OutFile, "w")
        OutFile.write(OutTxt)
        OutFile.close()
        return 0

    if not os.path.exists(DscFile):
        raise Exception("ERROR: Cannot open file '%s' !" % DscFile)

    CfgBinFile = ''
    if DltFile:
        if not os.path.exists(DltFile):
            raise Exception("ERROR: Cannot open file '%s' !" % DltFile)
        if Command == "GENDLT":
            CfgBinFile = DltFile
            DltFile = ''

    BinFile = ''
    if (DscFile.lower().endswith('.bin')) and (Command == "GENINC"):
        # It is binary file
        BinFile = DscFile
        DscFile = ''

    if BinFile:
        if GenCfgData.GenerateDataIncFile(OutFile, BinFile) != 0:
            raise Exception(GenCfgData.Error)
        return 0

    if DscFile.lower().endswith('.pkl'):
        with open(DscFile, "rb") as PklFile:
            GenCfgData.__dict__ = marshal.load(PklFile)
    else:
        if GenCfgData.ParseDscFile(DscFile) != 0:
            raise Exception(GenCfgData.Error)

    # if GenCfgData.CheckCfgData() != 0:
    #     raise Exception(GenCfgData.Error)

    if GenCfgData.CreateVarDict() != 0:
        raise Exception(GenCfgData.Error)

    if Command == 'GENPKL':
        with open(OutFile, "wb") as PklFile:
            marshal.dump(GenCfgData.__dict__, PklFile)
        return 0

    if DltFile and Command in ['GENHDR', 'GENBIN', 'GENINC', 'GENBSF']:
        if GenCfgData.OverrideDefaultValue(DltFile) != 0:
            raise Exception(GenCfgData.Error)

    if GenCfgData.UpdateDefaultValue() != 0:
        raise Exception(GenCfgData.Error)

    # GenCfgData.PrintData ()

    if sys.argv[1] == "GENBIN":
        if GenCfgData.GenerateBinary(OutFile) != 0:
            raise Exception(GenCfgData.Error)

    elif sys.argv[1] == "GENHDR":
        OutFiles = OutFile.split(';')
        BrdOutFile = OutFiles[0].strip()
        if len(OutFiles) > 1:
            ComOutFile = OutFiles[1].strip()
        else:
            ComOutFile = ''
        if GenCfgData.CreateHeaderFile(BrdOutFile, ComOutFile) != 0:
            raise Exception(GenCfgData.Error)

    elif sys.argv[1] == "GENBSF":
        if GenCfgData.GenerateBsfFile(OutFile) != 0:
            raise Exception(GenCfgData.Error)

    elif sys.argv[1] == "GENINC":
        if GenCfgData.GenerateDataIncFile(OutFile) != 0:
            raise Exception(GenCfgData.Error)

    elif sys.argv[1] == "GENDLT":
        if GenCfgData.GenerateDeltaFile(OutFile, CfgBinFile) != 0:
            raise Exception(GenCfgData.Error)

    elif sys.argv[1] == "GENDSC":
        if GenCfgData.GenerateDscFile(OutFile) != 0:
            raise Exception(GenCfgData.Error)

    else:
        raise Exception("Unsupported command '%s' !" % Command)

    return 0


if __name__ == '__main__':
    sys.exit(Main())