#!/usr/bin/env ruby

# Creates the data structures needed by Oniguruma to map Unicode codepoints to
# property names and POSIX character classes.
#
# To use this, get UnicodeData.txt, Scripts.txt, PropList.txt,
# PropertyAliases.txt, PropertyValueAliases.txt, DerivedCoreProperties.txt,
# DerivedAge.txt and Blocks.txt from unicode.org
# (http://unicode.org/Public/UNIDATA/) and run the following command:
#
#   ruby tool/enc-unicode.rb data_dir emoji_data_dir > enc/unicode/name2ctype.kwd
#
# This gives you the source file for gperf. After that, simply make ruby.

if ARGV[0] == "--header"
  header = true
  ARGV.shift
end
unless ARGV.size == 2
  abort "Usage: #{$0} data_directory emoji_data_directory"
end
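
# If a data directory name ends in a version number (e.g. ".../15.0.0"),
# record that trailing component; it is checked later against the versions
# declared in the data files themselves.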
pat = /(?:\A|\/)([.\d]+)\z/
$versions = {
  :Unicode => ARGV[0][pat, 1],
  :Emoji => ARGV[1][pat, 1],
}

POSIX_NAMES = %w[NEWLINE Alpha Blank Cntrl Digit Graph Lower Print XPosixPunct Space Upper XDigit Word Alnum ASCII Punct]

def pair_codepoints(codepoints)

  # We have a sorted Array of codepoints that we wish to partition into
  # ranges such that the start- and endpoints form an inclusive set of
  # codepoints with property _property_. Note: It is intended that some ranges
  # will begin with the value with which they end, e.g. 0x0020 -> 0x0020

  codepoints.sort!
  last_cp = codepoints.first
  pairs = [[last_cp, nil]]
  codepoints[1..-1].each do |codepoint|
    next if last_cp == codepoint

    # If the current codepoint does not follow directly on from the last
    # codepoint, the last codepoint represents the end of the current range,
    # and the current codepoint represents the start of the next range.
    if last_cp.next != codepoint
      pairs[-1][-1] = last_cp
      pairs << [codepoint, nil]
    end
    last_cp = codepoint
  end

  # The final pair has as its endpoint the last codepoint for this property
  pairs[-1][-1] = codepoints.last
  pairs
end
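
# e.g. pair_codepoints([0x61, 0x62, 0x63, 0x41]) #=> [[0x41, 0x41], [0x61, 0x63]]

# Parses UnicodeData.txt (fields are separated by ';'; field 0 is the
# codepoint, field 2 the General Category) and returns the derived property
# names (General/Major Categories, Any, Assigned, Cn, ...) together with a
# hash mapping each property name to its codepoints.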
def parse_unicode_data(file)
  last_cp = 0
  data = {'Any' => (0x0000..0x10ffff).to_a, 'Assigned' => [],
    'ASCII' => (0..0x007F).to_a, 'NEWLINE' => [0x0a], 'Cn' => []}
  beg_cp = nil
  IO.foreach(file) do |line|
    fields = line.split(';')
    cp = fields[0].to_i(16)

    case fields[1]
    when /\A<(.*),\s*First>\z/
      beg_cp = cp
      next
    when /\A<(.*),\s*Last>\z/
      cps = (beg_cp..cp).to_a
    else
      beg_cp = cp
      cps = [cp]
    end

    # The Cn category represents unassigned characters. These are not listed in
    # UnicodeData.txt so we must derive them by looking for 'holes' in the range
    # of listed codepoints. We increment the last codepoint seen and compare it
    # with the current codepoint. If the current codepoint is greater than
    # last_cp.next we have found a hole, so we add the missing codepoints to the
    # Cn category.
    data['Cn'].concat((last_cp.next...beg_cp).to_a)

    # Assigned - Defined in unicode.c; interpreted as every character in the
    # Unicode range minus the unassigned characters
    data['Assigned'].concat(cps)

    # The third field denotes the 'General' category, e.g. Lu
    (data[fields[2]] ||= []).concat(cps)

    # The 'Major' category is the first letter of the 'General' category,
    # e.g. 'Lu' -> 'L'
    (data[fields[2][0,1]] ||= []).concat(cps)
    last_cp = cp
  end

  # The last Cn codepoint should be 0x10ffff. If it's not, append the missing
  # codepoints to Cn and C
  cn_remainder = (last_cp.next..0x10ffff).to_a
  data['Cn'] += cn_remainder
  data['C'] += data['Cn']

  # Special case for LC (Cased_Letter). LC = Ll + Lt + Lu
  data['LC'] = data['Ll'] + data['Lt'] + data['Lu']

  # Define General Category properties
  gcps = data.keys.sort - POSIX_NAMES

  # Returns General Category Property names and the data
  [gcps, data]
end

def define_posix_props(data)
  # We now derive the character classes (POSIX brackets), e.g. [[:alpha:]]
  #

  data['Alpha'] = data['Alphabetic']
  data['Upper'] = data['Uppercase']
  data['Lower'] = data['Lowercase']
  data['Punct'] = data['Punctuation']
  data['XPosixPunct'] = data['Punctuation'] + [0x24, 0x2b, 0x3c, 0x3d, 0x3e, 0x5e, 0x60, 0x7c, 0x7e]
  data['Digit'] = data['Decimal_Number']
  data['XDigit'] = (0x0030..0x0039).to_a + (0x0041..0x0046).to_a +
                   (0x0061..0x0066).to_a
  data['Alnum'] = data['Alpha'] + data['Digit']
  data['Space'] = data['White_Space']
  data['Blank'] = data['Space_Separator'] + [0x0009]
  data['Cntrl'] = data['Cc']
  data['Word'] = data['Alpha'] + data['Mark'] + data['Digit'] + data['Connector_Punctuation']
  data['Graph'] = data['Any'] - data['Space'] - data['Cntrl'] -
                  data['Surrogate'] - data['Unassigned']
  data['Print'] = data['Graph'] + data['Space_Separator']
end
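
# Reads DerivedCoreProperties.txt, Scripts.txt, PropList.txt and
# emoji/emoji-data.txt, filling +data+ with the codepoints of each property
# and +categories+ with the kind of property; returns all property names,
# including the derived script value Unknown.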
def parse_scripts(data, categories)
  files = [
    {:fn => 'DerivedCoreProperties.txt', :title => 'Derived Property'},
    {:fn => 'Scripts.txt', :title => 'Script'},
    {:fn => 'PropList.txt', :title => 'Binary Property'},
    {:fn => 'emoji/emoji-data.txt', :title => 'Emoji'}
  ]
  current = nil
  cps = []
  names = {}
  files.each do |file|
    data_foreach(file[:fn]) do |line|
      if /^# Total (?:code points|elements): / =~ line
        data[current] = cps
        categories[current] = file[:title]
        (names[file[:title]] ||= []) << current
        cps = []
      elsif /^([0-9a-fA-F]+)(?:\.\.([0-9a-fA-F]+))?\s*;\s*(\w+)/ =~ line
        current = $3
        $2 ? cps.concat(($1.to_i(16)..$2.to_i(16)).to_a) : cps.push($1.to_i(16))
      end
    end
  end
  # All code points not explicitly listed for Script
  # have the value Unknown (Zzzz).
  data['Unknown'] = (0..0x10ffff).to_a - data.values_at(*names['Script']).flatten
  categories['Unknown'] = 'Script'
  names.values.flatten << 'Unknown'
end
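
# Reads PropertyAliases.txt and PropertyValueAliases.txt so that aliased
# names share the same codepoint data; returns a hash mapping each normalized
# alias to the normalized name under which the codepoints are stored.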
def parse_aliases(data)
  kv = {}
  data_foreach('PropertyAliases.txt') do |line|
    next unless /^(\w+)\s*; (\w+)/ =~ line
    data[$1] = data[$2]
    kv[normalize_propname($1)] = normalize_propname($2)
  end
  data_foreach('PropertyValueAliases.txt') do |line|
    next unless /^(sc|gc)\s*; (\w+)\s*; (\w+)(?:\s*; (\w+))?/ =~ line
    if $1 == 'gc'
      data[$3] = data[$2]
      data[$4] = data[$2]
      kv[normalize_propname($3)] = normalize_propname($2)
      kv[normalize_propname($4)] = normalize_propname($2) if $4
    else
      data[$2] = data[$3]
      data[$4] = data[$3]
      kv[normalize_propname($2)] = normalize_propname($3)
      kv[normalize_propname($4)] = normalize_propname($3) if $4
    end
  end
  kv
end

# According to Unicode6.0.0/ch03.pdf, Section 3.1, "An update version
# never involves any additions to the character repertoire." Versions
# in DerivedAge.txt should always be /\d+\.\d+/
def parse_age(data)
  current = nil
  last_constname = nil
  cps = []
  ages = []
  data_foreach('DerivedAge.txt') do |line|
    if /^# Total code points: / =~ line
      constname = constantize_agename(current)
      # each version matches all previous versions
      cps.concat(data[last_constname]) if last_constname
      data[constname] = cps
      make_const(constname, cps, "Derived Age #{current}")
      ages << current
      last_constname = constname
      cps = []
    elsif /^([0-9a-fA-F]+)(?:\.\.([0-9a-fA-F]+))?\s*;\s*(\d+\.\d+)/ =~ line
      current = $3
      $2 ? cps.concat(($1.to_i(16)..$2.to_i(16)).to_a) : cps.push($1.to_i(16))
    end
  end
  ages
end
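
# Reads auxiliary/GraphemeBreakProperty.txt and emits one constant per
# Grapheme_Cluster_Break value; returns the list of values seen.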
def parse_GraphemeBreakProperty(data)
  current = nil
  cps = []
  ages = []
  data_foreach('auxiliary/GraphemeBreakProperty.txt') do |line|
    if /^# Total code points: / =~ line
      constname = constantize_Grapheme_Cluster_Break(current)
      data[constname] = cps
      make_const(constname, cps, "Grapheme_Cluster_Break=#{current}")
      ages << current
      cps = []
    elsif /^([0-9a-fA-F]+)(?:\.\.([0-9a-fA-F]+))?\s*;\s*(\w+)/ =~ line
      current = $3
      $2 ? cps.concat(($1.to_i(16)..$2.to_i(16)).to_a) : cps.push($1.to_i(16))
    end
  end
  ages
end
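
# Reads Blocks.txt and emits one constant per block, plus the derived
# No_Block range; returns the list of block constant names.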
def parse_block(data)
  cps = []
  blocks = []
  data_foreach('Blocks.txt') do |line|
    if /^([0-9a-fA-F]+)\.\.([0-9a-fA-F]+);\s*(.*)/ =~ line
      cps = ($1.to_i(16)..$2.to_i(16)).to_a
      constname = constantize_blockname($3)
      data[constname] = cps
      make_const(constname, cps, "Block")
      blocks << constname
    end
  end

  # All code points not belonging to any of the named blocks
  # have the value No_Block.
  no_block = (0..0x10ffff).to_a - data.values_at(*blocks).flatten
  constname = constantize_blockname("No_Block")
  make_const(constname, no_block, "Block")
  blocks << constname
end

# shim for Ruby 1.8
unless {}.respond_to?(:key)
  class Hash
    alias key index
  end
end

$const_cache = {}
# make_const(property, pairs, name): Prints a 'static const' structure for a
# given property, group of paired codepoints, and a human-friendly name for
# the group
def make_const(prop, data, name)
  if name.empty?
    puts "\n/* '#{prop}' */"
  else
    puts "\n/* '#{prop}': #{name} */"
  end
  if origprop = $const_cache.key(data)
    puts "#define CR_#{prop} CR_#{origprop}"
  else
    $const_cache[prop] = data
    pairs = pair_codepoints(data)
    puts "static const OnigCodePoint CR_#{prop}[] = {"
    # The first element of the constant is the number of pairs of codepoints
    puts "\t#{pairs.size},"
    pairs.each do |pair|
      pair.map! { |c| c == 0 ? '0x0000' : sprintf("%0#6x", c) }
      puts "\t#{pair.first}, #{pair.last},"
    end
    puts "}; /* CR_#{prop} */"
  end
end
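
# For example, make_const('NEWLINE', [0x0a], '[[:NEWLINE:]]') prints
#
#   /* 'NEWLINE': [[:NEWLINE:]] */
#   static const OnigCodePoint CR_NEWLINE[] = {
#       1,
#       0x000a, 0x000a,
#   }; /* CR_NEWLINE */
#
# A codepoint set that was already emitted is aliased with #define instead.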

def normalize_propname(name)
  name = name.downcase
  name.delete!('- _')
  name
end
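
# e.g. normalize_propname('Line_Break') #=> "linebreak"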

def constantize_agename(name)
  "Age_#{name.sub(/\./, '_')}"
end

def constantize_Grapheme_Cluster_Break(name)
  "Grapheme_Cluster_Break_#{name}"
end

def constantize_blockname(name)
  "In_#{name.gsub(/\W/, '_')}"
end
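
# e.g. constantize_agename('6.0')           #=> "Age_6_0"
#      constantize_blockname('Basic Latin') #=> "In_Basic_Latin"

# Resolves a data file name against the right directory: names matching the
# emoji prefix are looked up in the emoji data directory (ARGV[1]),
# everything else in the Unicode data directory (ARGV[0]).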
def get_file(name)
  File.join(ARGV[name.start_with?("emoji-[stz]") ? 1 : 0], name)
end
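
# Opens a data file, extracts the Unicode (or Emoji) version from its header,
# records it in $versions (raising on a mismatch with a previously seen
# version), then yields the remaining lines to the block.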
def data_foreach(name, &block)
  fn = get_file(name)
  warn "Reading #{name}"
  File.open(fn, 'rb') do |f|
    if /^emoji/ =~ name
      line = f.gets("")
      # Headers till Emoji 13
      version = line[/^# #{Regexp.quote(File.basename(name))}.*^# Version: ([\d.]+)/m, 1]
      type = :Emoji
    else
      # Headers since Emoji 14 or other Unicode data
      line = f.gets("\n")
      type = :Unicode
    end
    version ||= line[/^# #{File.basename(name).sub(/\./, '-([\\d.]+)\\.')}/, 1]
    unless version
      raise ArgumentError, <<-ERROR
        #{name}: no #{type} version
        #{line.gsub(/^/, '> ')}
      ERROR
    end
    if !(v = $versions[type])
      $versions[type] = version
    elsif v != version and "#{v}.0" != version
      raise ArgumentError, <<-ERROR
        #{name}: #{type} version mismatch: #{version} to #{v}
        #{line.gsub(/^/, '> ')}
      ERROR
    end
    f.each(&block)
  end
end

# Write Data
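
# Unifdef temporarily replaces $stdout so that everything the generator
# prints is captured into a tree of sections tagged by ifdef symbols.
# With kwdonly set, plain #ifdef/#endif markers are written straight through
# instead; otherwise show() replays only the sections whose symbols are
# requested.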
class Unifdef
  attr_accessor :output, :top, :stack, :stdout, :kwdonly
  def initialize(out)
    @top = @output = []
    @stack = []
    $stdout, @stdout = self, out
  end
  def restore
    $stdout = @stdout
  end
  def ifdef(sym)
    if @kwdonly
      @stdout.puts "#ifdef #{sym}"
    else
      @stack << @top
      @top << tmp = [sym]
      @top = tmp
    end
    if block_given?
      begin
        return yield
      ensure
        endif(sym)
      end
    end
  end
  def endif(sym)
    if @kwdonly
      @stdout.puts "#endif /* #{sym} */"
    else
      unless sym == @top[0]
        restore
        raise ArgumentError, "#{sym} does not match #{@top[0]}"
      end
      @top = @stack.pop
    end
  end
  def show(dest, *syms)
    _show(dest, @output, syms)
  end
  def _show(dest, ary, syms)
    if Symbol === (sym = ary[0])
      unless syms.include?(sym)
        return
      end
    end
    ary.each do |e|
      case e
      when Array
        _show(dest, e, syms)
      when String
        dest.print e
      end
    end
  end
  def write(str)
    if @kwdonly
      @stdout.write(str)
    else
      @top << str
    end
    self
  end
  alias << write
end

output = Unifdef.new($stdout)
output.kwdonly = !header
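
# Everything below emits the gperf input: a %{ ... %} C prologue holding the
# CR_* codepoint-range tables and the CodeRanges[] index, followed by the
# keyword declarations and the name-to-ctype keyword list.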
puts '%{'
props, data = parse_unicode_data(get_file('UnicodeData.txt'))
categories = {}
props.concat parse_scripts(data, categories)
aliases = parse_aliases(data)
ages = blocks = graphemeBreaks = nil
define_posix_props(data)
POSIX_NAMES.each do |name|
  if name == 'XPosixPunct'
    make_const(name, data[name], "[[:Punct:]]")
  elsif name == 'Punct'
    make_const(name, data[name], "")
  else
    make_const(name, data[name], "[[:#{name}:]]")
  end
end
output.ifdef :USE_UNICODE_PROPERTIES do
  props.each do |name|
    category = categories[name] ||
      case name.size
      when 1 then 'Major Category'
      when 2 then 'General Category'
      else '-'
      end
    make_const(name, data[name], category)
  end
  output.ifdef :USE_UNICODE_AGE_PROPERTIES do
    ages = parse_age(data)
  end
  graphemeBreaks = parse_GraphemeBreakProperty(data)
  blocks = parse_block(data)
end
puts(<<'__HEREDOC')

static const OnigCodePoint* const CodeRanges[] = {
__HEREDOC
POSIX_NAMES.each {|name| puts " CR_#{name},"}
output.ifdef :USE_UNICODE_PROPERTIES do
  props.each {|name| puts " CR_#{name},"}
  output.ifdef :USE_UNICODE_AGE_PROPERTIES do
    ages.each {|name| puts " CR_#{constantize_agename(name)},"}
  end
  graphemeBreaks.each {|name| puts " CR_#{constantize_Grapheme_Cluster_Break(name)},"}
  blocks.each {|name| puts " CR_#{name},"}
end

puts(<<'__HEREDOC')
};
struct uniname2ctype_struct {
  short name;
  unsigned short ctype;
};
#define uniname2ctype_offset(str) offsetof(struct uniname2ctype_pool_t, uniname2ctype_pool_##str)

static const struct uniname2ctype_struct *uniname2ctype_p(
#if !(/*ANSI*/+0) /* if ANSI, old style not to conflict with generated prototype */
    const char *, unsigned int
#endif
);
%}
struct uniname2ctype_struct;
%%
__HEREDOC
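
# Each keyword line below is "name, index": a normalized property name and
# its position in the CodeRanges[] table above, which is the ctype value that
# uniname2ctype() returns.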
i = -1
name_to_index = {}
POSIX_NAMES.each do |name|
  i += 1
  next if name == 'NEWLINE'
  name = normalize_propname(name)
  name_to_index[name] = i
  puts "%-40s %3d" % [name + ',', i]
end
output.ifdef :USE_UNICODE_PROPERTIES do
  props.each do |name|
    i += 1
    name = normalize_propname(name)
    name_to_index[name] = i
    puts "%-40s %3d" % [name + ',', i]
  end
  aliases.each_pair do |k, v|
    next if name_to_index[k]
    next unless v = name_to_index[v]
    puts "%-40s %3d" % [k + ',', v]
  end
  output.ifdef :USE_UNICODE_AGE_PROPERTIES do
    ages.each do |name|
      i += 1
      name = "age=#{name}"
      name_to_index[name] = i
      puts "%-40s %3d" % [name + ',', i]
    end
  end
  graphemeBreaks.each do |name|
    i += 1
    name = "graphemeclusterbreak=#{name.delete('_').downcase}"
    name_to_index[name] = i
    puts "%-40s %3d" % [name + ',', i]
  end
  blocks.each do |name|
    i += 1
    name = normalize_propname(name)
    name_to_index[name] = i
    puts "%-40s %3d" % [name + ',', i]
  end
end
puts(<<'__HEREDOC')
%%
static int
uniname2ctype(const UChar *name, unsigned int len)
{
  const struct uniname2ctype_struct *p = uniname2ctype_p((const char *)name, len);
  if (p) return p->ctype;
  return -1;
}
__HEREDOC
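
# Emit the Unicode and Emoji versions as ONIG_UNICODE_VERSION_* and
# ONIG_UNICODE_EMOJI_VERSION_* macros; the #if guard turns an already-defined
# but mismatching *_STRING into a compile-time error.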
$versions.each do |type, ver|
  name = type == :Unicode ? "ONIG_UNICODE_VERSION" : "ONIG_UNICODE_EMOJI_VERSION"
  versions = ver.scan(/\d+/)
  print("#if defined #{name}_STRING && !( \\\n")
  versions.zip(%w[MAJOR MINOR TEENY]) do |v, n|
    print(" #{name}_#{n} == #{v} && \\\n")
  end
  print(" 1)\n")
  print("# error #{name}_STRING mismatch\n")
  print("#endif\n")
  print("#define #{name}_STRING #{ver.dump}\n")
  versions.zip(%w[MAJOR MINOR TEENY]) do |v, n|
    print("#define #{name}_#{n} #{v}\n")
  end
end

output.restore
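
# In --header mode the captured output is fed to gperf three times (with
# both, one, and none of the USE_UNICODE_* symbols enabled) and the results
# are merged back together with `diff -D`, which re-creates the #ifdef
# blocks; a few gperf artifacts are patched up on the way through.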
if header
  require 'tempfile'

  NAME2CTYPE = %w[gperf -7 -c -j1 -i1 -t -C -P -T -H uniname2ctype_hash -Q uniname2ctype_pool -N uniname2ctype_p]

  fds = []
  syms = %i[USE_UNICODE_PROPERTIES USE_UNICODE_AGE_PROPERTIES]
  begin
    fds << (tmp = Tempfile.new(%w"name2ctype .h"))
    IO.popen([*NAME2CTYPE, out: tmp], "w") {|f| output.show(f, *syms)}
  end while syms.pop
  fds.each(&:close)
  ff = nil
  IO.popen(%W[diff -DUSE_UNICODE_AGE_PROPERTIES #{fds[1].path} #{fds[0].path}], "r") {|age|
    IO.popen(%W[diff -DUSE_UNICODE_PROPERTIES #{fds[2].path} -], "r", in: age) {|f|
      ansi = false
      f.each {|line|
        if /ANSI-C code produced by gperf/ =~ line
          ansi = true
        end
        line.sub!(/\/\*ANSI\*\//, '1') if ansi
        line.gsub!(/\(int\)\((?:long|size_t)\)&\(\(struct uniname2ctype_pool_t \*\)0\)->uniname2ctype_pool_(str\d+),\s+/,
                   'uniname2ctype_offset(\1), ')
        if ff = (!ff ? /^(uniname2ctype_hash) / =~ line : /^\}/ !~ line) # no line can match both, exclusive flip-flop
          line.sub!(/^( *(?:register\s+)?(.*\S)\s+hval\s*=\s*)(?=len;)/, '\1(\2)')
        end
        puts line
      }
    }
  }
end