Mirror of https://github.com/mozilla/gecko-dev.git
Bug 1842991 - Update harfbuzz to 8.0.1 r=gfx-reviewers,lsalzman
Differential Revision: https://phabricator.services.mozilla.com/D184487
Parent: e7ab74f30a
Commit: 5f01f95435
@@ -1,3 +1,14 @@
Overview of changes leading to 8.0.1
Wednesday, July 12, 2023
====================================
- Build fix on 32-bit arm.

- More speed optimizations:
  - 60% speedup in retaingids subsetting SourceHanSans-VF.
  - 38% speed up in subsetting (beyond-64k) mega-merged Noto.
  - 16% speed up in retain-gid (used for IFT) subsetting of NotoSansCJKkr.


Overview of changes leading to 8.0.0
Sunday, July 9, 2023
====================================

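For context on the retain-gid subsetting mentioned in the changelog, the sketch below shows how such a subsetting run is typically driven through HarfBuzz's public subset API. It is illustrative only and not part of this commit; the font path and codepoint range are invented placeholders, and error handling is minimal.

    #include <hb.h>
    #include <hb-subset.h>

    // Illustrative only: drive the subsetter in retain-gids mode, the path the
    // 8.0.1 changelog above reports speedups for.
    static hb_blob_t *
    subset_retain_gids (const char *font_path)
    {
      hb_blob_t *blob = hb_blob_create_from_file (font_path);
      hb_face_t *face = hb_face_create (blob, 0);
      hb_blob_destroy (blob);

      hb_subset_input_t *input = hb_subset_input_create_or_fail ();
      if (!input) { hb_face_destroy (face); return nullptr; }

      hb_subset_input_set_flags (input, HB_SUBSET_FLAGS_RETAIN_GIDS);
      hb_set_add_range (hb_subset_input_unicode_set (input), 0x0020u, 0x007Eu);

      hb_face_t *subset = hb_subset_or_fail (face, input);
      hb_blob_t *result = subset ? hb_face_reference_blob (subset) : nullptr;

      if (subset) hb_face_destroy (subset);
      hb_subset_input_destroy (input);
      hb_face_destroy (face);
      return result; // caller owns the blob; nullptr on failure
    }
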
@@ -1,6 +1,6 @@
AC_PREREQ([2.64])
AC_INIT([HarfBuzz],
[8.0.0],
[8.0.1],
[https://github.com/harfbuzz/harfbuzz/issues/new],
[harfbuzz],
[http://harfbuzz.org/])

@@ -20,11 +20,11 @@ origin:

# Human-readable identifier for this version/release
# Generally "version NNN", "tag SSS", "bookmark SSS"
release: 8.0.0 (2023-07-09T02:54:30+03:00).
release: 8.0.1 (2023-07-12T08:27:25+03:00).

# Revision to pull in
# Must be a long or short commit SHA (long preferred)
revision: 8.0.0
revision: 8.0.1

# The package's license, where possible using the mnemonic from
# https://spdx.org/licenses/

@@ -22,7 +22,8 @@ struct PairValueRecord
ValueRecord values; /* Positioning data for the first glyph
* followed by for second glyph */
public:
DEFINE_SIZE_ARRAY (Types::size, values);
DEFINE_SIZE_ARRAY (Types::HBGlyphID::static_size, values);
DEFINE_SIZE_MAX (Types::HBGlyphID::static_size + 2 * Value::static_size * 8 * sizeof (ValueFormat));

int cmp (hb_codepoint_t k) const
{ return secondGlyph.cmp (k); }

@@ -283,8 +283,8 @@ HB_FUNCOBJ (hb_bool);
// Compression function for Merkle-Damgard construction.
// This function is generated using the framework provided.
#define mix(h) ( \
void((h) ^= (h) >> 23), \
void((h) *= 0x2127599bf4325c37ULL), \
(h) ^= (h) >> 23, \
(h) *= 0x2127599bf4325c37ULL, \
(h) ^= (h) >> 47)

static inline uint64_t fasthash64(const void *buf, size_t len, uint64_t seed)

@@ -362,10 +362,10 @@ struct
// https://github.com/harfbuzz/harfbuzz/pull/4228#issuecomment-1565079537
template <typename T,
hb_enable_if (std::is_integral<T>::value && sizeof (T) <= sizeof (uint32_t))> constexpr auto
impl (const T& v, hb_priority<1>) const HB_RETURN (uint32_t, v * 2654435761u /* Knuth's multiplicative hash */)
impl (const T& v, hb_priority<1>) const HB_RETURN (uint32_t, (uint32_t) v * 2654435761u /* Knuth's multiplicative hash */)
template <typename T,
hb_enable_if (std::is_integral<T>::value && sizeof (T) > sizeof (uint32_t))> constexpr auto
impl (const T& v, hb_priority<1>) const HB_RETURN (uint32_t, (v ^ (v >> 32)) * 2654435761u /* Knuth's multiplicative hash */)
impl (const T& v, hb_priority<1>) const HB_RETURN (uint32_t, (uint32_t) (v ^ (v >> 32)) * 2654435761u /* Knuth's multiplicative hash */)

template <typename T> constexpr auto
impl (const T& v, hb_priority<0>) const HB_RETURN (uint32_t, std::hash<hb_decay<decltype (hb_deref (v))>>{} (hb_deref (v)))

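The second hunk above adds explicit uint32_t casts around Knuth's multiplicative hash. As a rough standalone illustration (not HarfBuzz's code), the constant 2654435761 is roughly 2^32 divided by the golden ratio, and the cast makes the 64-to-32-bit truncation deliberate rather than implicit:

    #include <cstdint>

    // Sketch of Knuth's multiplicative hash as referenced in the comment above:
    // fold a 64-bit key to 32 bits, then multiply by 2654435761 (~2^32 / phi).
    static inline uint32_t
    knuth_hash_u64 (uint64_t v)
    {
      uint32_t folded = (uint32_t) (v ^ (v >> 32)); // explicit cast: truncation is intentional
      return folded * 2654435761u;
    }
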
@@ -143,6 +143,7 @@ struct hb_inc_bimap_t
hb_codepoint_t skip (unsigned count)
{
hb_codepoint_t start = back_map.length;
back_map.alloc (back_map.length + count);
for (unsigned i = 0; i < count; i++)
back_map.push (HB_MAP_VALUE_INVALID);
return start;

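The one line added above reserves space in back_map before the push loop. The same idea with a standard container, purely as an illustration of why it helps:

    #include <cstdint>
    #include <vector>

    // Reserving up front turns 'count' pushes into at most one reallocation,
    // which is what the added back_map.alloc () call buys hb_inc_bimap_t::skip ().
    static void
    append_invalid (std::vector<uint32_t> &back_map, unsigned count)
    {
      back_map.reserve (back_map.size () + count);
      for (unsigned i = 0; i < count; i++)
        back_map.push_back (UINT32_MAX); // stand-in for HB_MAP_VALUE_INVALID
    }
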
@@ -89,14 +89,17 @@ struct hb_vector_size_t

struct hb_bit_page_t
{
void init0 () { v.init0 (); }
void init1 () { v.init1 (); }
void init0 () { v.init0 (); population = 0; }
void init1 () { v.init1 (); population = PAGE_BITS; }

void dirty () { population = UINT_MAX; }

static inline constexpr unsigned len ()
{ return ARRAY_LENGTH_CONST (v); }

bool is_empty () const
{
if (has_population ()) return !population;
return
+ hb_iter (v)
| hb_none

@@ -107,8 +110,8 @@ struct hb_bit_page_t
return hb_bytes_t ((const char *) &v, sizeof (v)).hash ();
}

void add (hb_codepoint_t g) { elt (g) |= mask (g); }
void del (hb_codepoint_t g) { elt (g) &= ~mask (g); }
void add (hb_codepoint_t g) { elt (g) |= mask (g); dirty (); }
void del (hb_codepoint_t g) { elt (g) &= ~mask (g); dirty (); }
void set (hb_codepoint_t g, bool value) { if (value) add (g); else del (g); }
bool get (hb_codepoint_t g) const { return elt (g) & mask (g); }

@@ -120,20 +123,21 @@ struct hb_bit_page_t
*la |= (mask (b) << 1) - mask(a);
else
{
*la |= ~(mask (a) - 1);
*la |= ~(mask (a) - 1llu);
la++;

hb_memset (la, 0xff, (char *) lb - (char *) la);

*lb |= ((mask (b) << 1) - 1);
*lb |= ((mask (b) << 1) - 1llu);
}
dirty ();
}
void del_range (hb_codepoint_t a, hb_codepoint_t b)
{
elt_t *la = &elt (a);
elt_t *lb = &elt (b);
if (la == lb)
*la &= ~((mask (b) << 1) - mask(a));
*la &= ~((mask (b) << 1llu) - mask(a));
else
{
*la &= mask (a) - 1;

@@ -141,8 +145,9 @@ struct hb_bit_page_t

hb_memset (la, 0, (char *) lb - (char *) la);

*lb &= ~((mask (b) << 1) - 1);
*lb &= ~((mask (b) << 1) - 1llu);
}
dirty ();
}
void set_range (hb_codepoint_t a, hb_codepoint_t b, bool v)
{ if (v) add_range (a, b); else del_range (a, b); }

@@ -222,18 +227,25 @@ struct hb_bit_page_t
}
bool is_subset (const hb_bit_page_t &larger_page) const
{
if (has_population () && larger_page.has_population () &&
population > larger_page.population)
return false;

for (unsigned i = 0; i < len (); i++)
if (~larger_page.v[i] & v[i])
return false;
return true;
}

bool has_population () const { return population != UINT_MAX; }
unsigned int get_population () const
{
return
if (has_population ()) return population;
population =
+ hb_iter (v)
| hb_reduce ([] (unsigned pop, const elt_t &_) { return pop + hb_popcount (_); }, 0u)
;
return population;
}

bool next (hb_codepoint_t *codepoint) const

@@ -329,9 +341,9 @@ struct hb_bit_page_t
const elt_t& elt (hb_codepoint_t g) const { return v[(g & MASK) / ELT_BITS]; }
static constexpr elt_t mask (hb_codepoint_t g) { return elt_t (1) << (g & ELT_MASK); }

mutable unsigned population;
vector_t v;
};
static_assert (hb_bit_page_t::PAGE_BITS == sizeof (hb_bit_page_t) * 8, "");


#endif /* HB_BIT_PAGE_HH */

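The hb_bit_page_t hunks above cache each page's population (count of set bits) and invalidate the cache with a UINT_MAX sentinel whenever the bits change. A self-contained sketch of that scheme, using plain standard types rather than the HarfBuzz ones:

    #include <array>
    #include <bitset>
    #include <climits>
    #include <cstdint>

    // Sketch: a fixed-size bit page that caches its popcount and recomputes it
    // lazily. UINT_MAX marks the cache as stale, mirroring dirty () above.
    struct bit_page_sketch
    {
      void dirty () { population = UINT_MAX; }
      bool has_population () const { return population != UINT_MAX; }

      void add (unsigned bit)          // caller keeps bit < 512
      { words[bit / 64] |= 1ull << (bit % 64); dirty (); }

      unsigned get_population () const
      {
        if (has_population ()) return population;
        unsigned pop = 0;
        for (uint64_t w : words)
          pop += (unsigned) std::bitset<64> (w).count ();
        return population = pop;       // cache for next time (mutable member)
      }

      mutable unsigned population = 0; // a freshly zeroed page has population 0
      std::array<uint64_t, 8> words {};// 512-bit page; size chosen arbitrarily
    };

Operations that cannot cheaply keep the count correct simply mark the cache stale, which is what add_range / del_range do with dirty () in the patch above.
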
@@ -553,6 +553,7 @@ struct hb_bit_set_t
count--;
page_map.arrayZ[count] = page_map.arrayZ[a];
page_at (count).v = op (page_at (a).v, other.page_at (b).v);
page_at (count).dirty ();
}
else if (page_map.arrayZ[a - 1].major > other.page_map.arrayZ[b - 1].major)
{

@@ -571,7 +572,7 @@ struct hb_bit_set_t
count--;
page_map.arrayZ[count].major = other.page_map.arrayZ[b].major;
page_map.arrayZ[count].index = next_page++;
page_at (count).v = other.page_at (b).v;
page_at (count) = other.page_at (b);
}
}
}

@@ -589,7 +590,7 @@ struct hb_bit_set_t
count--;
page_map.arrayZ[count].major = other.page_map.arrayZ[b].major;
page_map.arrayZ[count].index = next_page++;
page_at (count).v = other.page_at (b).v;
page_at (count) = other.page_at (b);
}
assert (!count);
resize (newCount);

@@ -94,10 +94,10 @@ struct CFFIndex
for (const auto &_ : +it)
{
unsigned len = _.length;
if (!len)
continue;
if (len <= 1)
{
if (!len)
continue;
*ret++ = *_.arrayZ;
continue;
}

@@ -1061,6 +1061,8 @@ struct cff1
template <typename PRIVOPSET, typename PRIVDICTVAL>
struct accelerator_templ_t
{
static constexpr hb_tag_t tableTag = cff1::tableTag;

accelerator_templ_t (hb_face_t *face)
{
if (!face) return;

@@ -390,6 +390,8 @@ struct cff2
template <typename PRIVOPSET, typename PRIVDICTVAL>
struct accelerator_templ_t
{
static constexpr hb_tag_t tableTag = cff2::tableTag;

accelerator_templ_t (hb_face_t *face)
{
if (!face) return;

@@ -179,6 +179,7 @@ struct hmtxvmtx
lm.advance = mtx.first;
lm.sb = mtx.second;
}
// TODO(beyond-64k): This assumes that maxp.numGlyphs is 0xFFFF.
else if (gid < 0x10000u)
short_metrics[gid] = mtx.second;
else

@@ -199,6 +200,8 @@ struct hmtxvmtx
/* Determine num_long_metrics to encode. */
auto& plan = c->plan;

// TODO Don't consider retaingid holes here.

num_long_metrics = hb_min (plan->num_output_glyphs (), 0xFFFFu);
unsigned int last_advance = get_new_gid_advance_unscaled (plan, mtx_map, num_long_metrics - 1, _mtx);
while (num_long_metrics > 1 &&

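The hmtx/vmtx hunk above decides how many long metrics to encode: trailing glyphs whose advance equals the last long metric can be stored as side-bearing-only entries. A simplified version of that trimming on a bare advance array (a hypothetical helper, not the table code):

    #include <cstdint>
    #include <vector>

    // 'hmtx' stores numberOfHMetrics (advance, sideBearing) pairs; glyphs past
    // that index implicitly reuse the last advance, so a trailing run of equal
    // advances does not need long metrics.
    static unsigned
    count_long_metrics (const std::vector<uint16_t> &advances)
    {
      unsigned n = (unsigned) advances.size ();
      while (n > 1 && advances[n - 2] == advances[n - 1])
        n--;
      return n;
    }
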
@@ -60,18 +60,21 @@ struct DeltaSetIndexMapFormat01

entryFormat = ((width-1)<<4)|(inner_bit_count-1);
mapCount = output_map.length;
HBUINT8 *p = c->allocate_size<HBUINT8> (width * output_map.length, false);
HBUINT8 *p = c->allocate_size<HBUINT8> (width * output_map.length);
if (unlikely (!p)) return_trace (false);
for (unsigned int i = 0; i < output_map.length; i++)
{
unsigned int v = output_map[i];
unsigned int outer = v >> 16;
unsigned int inner = v & 0xFFFF;
unsigned int u = (outer << inner_bit_count) | inner;
for (unsigned int w = width; w > 0;)
unsigned int v = output_map.arrayZ[i];
if (v)
{
p[--w] = u;
u >>= 8;
unsigned int outer = v >> 16;
unsigned int inner = v & 0xFFFF;
unsigned int u = (outer << inner_bit_count) | inner;
for (unsigned int w = width; w > 0;)
{
p[--w] = u;
u >>= 8;
}
}
p += width;
}

@@ -755,14 +758,14 @@ struct tuple_delta_t

while (run_length >= 64)
{
*it++ = (DELTAS_ARE_ZERO | 63);
*it++ = char (DELTAS_ARE_ZERO | 63);
run_length -= 64;
encoded_len++;
}

if (run_length)
{
*it++ = (DELTAS_ARE_ZERO | (run_length - 1));
*it++ = char (DELTAS_ARE_ZERO | (run_length - 1));
encoded_len++;
}
return encoded_len;

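Both hunks above are in the variation-data serializers. The DeltaSetIndexMap one now zero-fills the output array and only writes non-zero entries; each entry is (outer << inner_bit_count) | inner packed into `width` big-endian bytes. A small sketch of that packing step with standard containers:

    #include <cstddef>
    #include <cstdint>
    #include <vector>

    // Append one DeltaSetIndexMap-style entry as 'width' big-endian bytes.
    // Zero entries are skipped because the resize already zero-filled the slot,
    // mirroring the allocate_size () change above.
    static void
    pack_entry (std::vector<uint8_t> &out, unsigned width,
                unsigned outer, unsigned inner, unsigned inner_bit_count)
    {
      uint32_t u = (outer << inner_bit_count) | inner;
      size_t pos = out.size ();
      out.resize (pos + width, 0);
      if (!u) return;
      for (unsigned w = width; w > 0; )
      {
        out[pos + --w] = (uint8_t) u;   // least significant byte goes last
        u >>= 8;
      }
    }
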
@@ -45,7 +45,8 @@ struct index_map_subset_plan_t
void init (const DeltaSetIndexMap &index_map,
hb_inc_bimap_t &outer_map,
hb_vector_t<hb_set_t *> &inner_sets,
const hb_subset_plan_t *plan)
const hb_subset_plan_t *plan,
bool bypass_empty = true)
{
map_count = 0;
outer_bit_count = 0;

@@ -53,11 +54,10 @@ struct index_map_subset_plan_t
max_inners.init ();
output_map.init ();

if (&index_map == &Null (DeltaSetIndexMap)) return;
if (bypass_empty && !index_map.get_map_count ()) return;

unsigned int last_val = (unsigned int)-1;
hb_codepoint_t last_gid = HB_CODEPOINT_INVALID;
hb_codepoint_t num_gid = (hb_codepoint_t) hb_min (index_map.get_map_count (), plan->num_output_glyphs ());

outer_bit_count = (index_map.get_width () * 8) - index_map.get_inner_bit_count ();
max_inners.resize (inner_sets.length);

@@ -68,24 +68,17 @@ struct index_map_subset_plan_t
unsigned count = new_to_old_gid_list.length;
for (unsigned j = count; j; j--)
{
hb_codepoint_t gid = new_to_old_gid_list[j - 1].first;
if (gid >= num_gid) continue;

hb_codepoint_t old_gid = new_to_old_gid_list[j - 1].second;
hb_codepoint_t gid = new_to_old_gid_list.arrayZ[j - 1].first;
hb_codepoint_t old_gid = new_to_old_gid_list.arrayZ[j - 1].second;

unsigned int v = index_map.map (old_gid);
if (last_gid == HB_CODEPOINT_INVALID)
{
if (gid + 1 != num_gid)
{
last_gid = gid + 1;
break;
}
last_val = v;
last_gid = gid;
continue;
}
if (v != last_val || gid + 1 != last_gid)
if (v != last_val)
break;

last_gid = gid;

@@ -120,8 +113,6 @@ struct index_map_subset_plan_t
const hb_vector_t<hb_inc_bimap_t> &inner_maps,
const hb_subset_plan_t *plan)
{
if (input_map == &Null (DeltaSetIndexMap)) return;

for (unsigned int i = 0; i < max_inners.length; i++)
{
if (inner_maps[i].get_population () == 0) continue;

@@ -129,18 +120,17 @@ struct index_map_subset_plan_t
if (bit_count > inner_bit_count) inner_bit_count = bit_count;
}

output_map.resize (map_count);
for (hb_codepoint_t gid = 0; gid < output_map.length; gid++)
if (unlikely (!output_map.resize (map_count))) return;
for (const auto &_ : plan->new_to_old_gid_list)
{
hb_codepoint_t old_gid;
if (plan->old_gid_for_new_gid (gid, &old_gid))
{
uint32_t v = input_map->map (old_gid);
unsigned int outer = v >> 16;
output_map[gid] = (outer_map[outer] << 16) | (inner_maps[outer][v & 0xFFFF]);
}
else
output_map[gid] = 0; /* Map unused glyph to outer/inner=0/0 */
hb_codepoint_t new_gid = _.first;
hb_codepoint_t old_gid = _.second;

if (unlikely (new_gid >= map_count)) break;

uint32_t v = input_map->map (old_gid);
unsigned int outer = v >> 16;
output_map.arrayZ[new_gid] = (outer_map[outer] << 16) | (inner_maps[outer][v & 0xFFFF]);
}
}

@@ -184,7 +174,7 @@ struct hvarvvar_subset_plan_t
if (unlikely (!index_map_plans.length || !inner_sets.length || !inner_maps.length)) return;

bool retain_adv_map = false;
index_map_plans[0].init (*index_maps[0], outer_map, inner_sets, plan);
index_map_plans[0].init (*index_maps[0], outer_map, inner_sets, plan, false);
if (index_maps[0] == &Null (DeltaSetIndexMap))
{
retain_adv_map = plan->flags & HB_SUBSET_FLAGS_RETAIN_GIDS;

@@ -201,13 +191,10 @@ struct hvarvvar_subset_plan_t

if (retain_adv_map)
{
unsigned num_glyphs = plan->num_output_glyphs ();
for (hb_codepoint_t gid = 0; gid < num_glyphs; gid++)
for (const auto &_ : plan->new_to_old_gid_list)
{
if (inner_sets[0]->has (gid))
inner_maps[0].add (gid);
else
inner_maps[0].skip ();
hb_codepoint_t old_gid = _.second;
inner_maps[0].add (old_gid);
}
}
else

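The rewritten loops above walk the plan's sorted (new gid, old gid) list directly instead of probing every possible output glyph, and leave untouched slots at 0 (outer/inner = 0/0). A schematic version with plain containers; the names are illustrative and not the subset plan's real API:

    #include <cstdint>
    #include <utility>
    #include <vector>

    // Fill a dense per-new-gid table from a sparse, sorted list of
    // (new_gid, old_gid) pairs; slots without an entry keep the zero
    // written by the vector constructor, as in the patch.
    static std::vector<uint32_t>
    remap_from_gid_list (const std::vector<std::pair<uint32_t, uint32_t>> &new_to_old,
                         const std::vector<uint32_t> &old_values,
                         unsigned map_count)
    {
      std::vector<uint32_t> out (map_count, 0);
      for (const auto &entry : new_to_old)
      {
        uint32_t new_gid = entry.first;
        uint32_t old_gid = entry.second;
        if (new_gid >= map_count) break;            // list is sorted by new gid
        if (old_gid < old_values.size ())
          out[new_gid] = old_values[old_gid];
      }
      return out;
    }
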
@@ -61,6 +61,9 @@ struct hb_priority_queue_t
bubble_up (heap.length - 1);
}

#ifndef HB_OPTIMIZE_SIZE
HB_ALWAYS_INLINE
#endif
item_t pop_minimum ()
{
assert (!is_empty ());

@@ -106,8 +109,10 @@ struct hb_priority_queue_t
return 2 * index + 2;
}

HB_ALWAYS_INLINE
void bubble_down (unsigned index)
{
repeat:
assert (index < heap.length);

unsigned left = left_child (index);

@@ -123,19 +128,20 @@ struct hb_priority_queue_t
&& (!has_right || heap.arrayZ[index].first <= heap.arrayZ[right].first))
return;

unsigned child;
if (!has_right || heap.arrayZ[left].first < heap.arrayZ[right].first)
{
swap (index, left);
bubble_down (left);
return;
}
child = left;
else
child = right;

swap (index, right);
bubble_down (right);
swap (index, child);
index = child;
goto repeat;
}

void bubble_up (unsigned index)
{
repeat:
assert (index < heap.length);

if (index == 0) return;

@@ -145,7 +151,8 @@ struct hb_priority_queue_t
return;

swap (index, parent_index);
bubble_up (parent_index);
index = parent_index;
goto repeat;
}

void swap (unsigned a, unsigned b)

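bubble_down and bubble_up above are converted from self-recursion to a loop (the repeat:/goto pattern), so the hot path costs no call frames even when the compiler declines to inline. The same transformation on a generic binary min-heap, for illustration only:

    #include <cstddef>
    #include <utility>
    #include <vector>

    // Iterative sift-down on a binary min-heap stored in a vector
    // (children of node i live at 2i+1 and 2i+2).
    template <typename T>
    static void
    bubble_down (std::vector<T> &heap, size_t index)
    {
      for (;;)
      {
        size_t left = 2 * index + 1;
        size_t right = 2 * index + 2;
        size_t smallest = index;
        if (left < heap.size () && heap[left] < heap[smallest]) smallest = left;
        if (right < heap.size () && heap[right] < heap[smallest]) smallest = right;
        if (smallest == index) return;      // heap property restored
        std::swap (heap[index], heap[smallest]);
        index = smallest;                   // loop instead of recursing
      }
    }
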
@@ -266,7 +266,7 @@ struct hb_serialize_context_t
propagate_error (std::forward<Ts> (os)...); }

/* To be called around main operation. */
template <typename Type>
template <typename Type=char>
__attribute__((returns_nonnull))
Type *start_serialize ()
{

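The default template argument added above is what lets callers drop the explicit type, as the _try_subset hunk further down does with start_serialize (). A minimal sketch of the pattern with an invented type:

    // Sketch: a member function template with a defaulted type parameter.
    // serializer_sketch is an invented stand-in, not hb_serialize_context_t.
    struct serializer_sketch
    {
      template <typename Type = char>
      Type *start_serialize ()
      {
        // ... set up the serialization run ...
        return static_cast<Type *> (nullptr); // placeholder return
      }
    };

    // Both forms now compile:
    //   s.start_serialize<SomeTable> ();
    //   s.start_serialize ();               // Type defaults to char
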
@@ -773,7 +773,7 @@ struct subr_subsetter_t
}
}

/* Doing this here one by one instead of compacting all at the en
/* Doing this here one by one instead of compacting all at the end
* has massive peak-memory saving.
*
* The compacting both saves memory and makes further operations

@@ -551,14 +551,12 @@ struct cff1_subset_plan
sid = sidmap.add (sid);

if (sid != last_sid + 1)
{
subset_charset_ranges.push (code_pair_t {sid, glyph});

if (glyph == old_glyph && skip)
{
glyph = hb_min (_.first - 1, glyph_to_sid_map->arrayZ[old_glyph].glyph);
sid += glyph - old_glyph;
}
if (glyph == old_glyph && skip)
{
glyph = hb_min (_.first - 1, glyph_to_sid_map->arrayZ[old_glyph].glyph);
sid += glyph - old_glyph;
}
last_sid = sid;
}

@@ -273,7 +273,7 @@ _try_subset (const TableType *table,
hb_vector_t<char>* buf,
hb_subset_context_t* c /* OUT */)
{
c->serializer->start_serialize<TableType> ();
c->serializer->start_serialize ();
if (c->serializer->in_error ()) return false;

bool needed = table->subset (c);

@@ -53,14 +53,14 @@ HB_BEGIN_DECLS
*
* The micro component of the library version available at compile-time.
*/
#define HB_VERSION_MICRO 0
#define HB_VERSION_MICRO 1

/**
* HB_VERSION_STRING:
*
* A string literal containing the library version available at compile-time.
*/
#define HB_VERSION_STRING "8.0.0"
#define HB_VERSION_STRING "8.0.1"

/**
* HB_VERSION_ATLEAST:

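With the version macros above bumped to 8.0.1, downstream code can gate on the release at both compile time and run time; a typical, purely illustrative check:

    #include <hb.h>

    // Gate a code path on HarfBuzz >= 8.0.1 using the macros updated above;
    // hb_version_atleast () additionally checks the library linked at run time.
    static bool
    have_harfbuzz_8_0_1 ()
    {
    #if HB_VERSION_ATLEAST (8, 0, 1)
      return hb_version_atleast (8, 0, 1);
    #else
      return false;
    #endif
    }
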
@@ -64,6 +64,7 @@
#pragma GCC diagnostic error "-Wbitwise-instead-of-logical"
#pragma GCC diagnostic error "-Wcast-align"
#pragma GCC diagnostic error "-Wcast-function-type"
#pragma GCC diagnostic error "-Wconstant-conversion"
#pragma GCC diagnostic error "-Wcomma"
#pragma GCC diagnostic error "-Wdelete-non-virtual-dtor"
#pragma GCC diagnostic error "-Wembedded-directive"