[BugFix] Using the builtin types like `int`, `bool` and `float`. (#5620)

This commit is contained in:
Hzbeta 2023-06-29 16:37:59 +08:00 committed by GitHub
Parent 750546b111
Commit 60c9459205
No known key found for this signature
GPG key ID: 4AEE18F83AFDEB23
13 changed files with 23 additions and 24 deletions

View file

@@ -303,7 +303,7 @@ class SubPolicy(object):
"translateY": np.linspace(0, 150 / 331, 10), "translateY": np.linspace(0, 150 / 331, 10),
"rotate": np.linspace(0, 30, 10), "rotate": np.linspace(0, 30, 10),
"color": np.linspace(0.0, 0.9, 10), "color": np.linspace(0.0, 0.9, 10),
"posterize": np.round(np.linspace(8, 4, 10), 0).astype(np.int), "posterize": np.round(np.linspace(8, 4, 10), 0).astype(int),
"solarize": np.linspace(256, 0, 10), "solarize": np.linspace(256, 0, 10),
"contrast": np.linspace(0.0, 0.9, 10), "contrast": np.linspace(0.0, 0.9, 10),
"sharpness": np.linspace(0.0, 0.9, 10), "sharpness": np.linspace(0.0, 0.9, 10),

View file

@@ -46,8 +46,8 @@ def create_mnist_model(hyper_params, input_shape=(H, W, 1), num_classes=NUM_CLAS
def load_mnist_data(args): def load_mnist_data(args):
(x_train, y_train), (x_test, y_test) = mnist.load_data() (x_train, y_train), (x_test, y_test) = mnist.load_data()
x_train = (np.expand_dims(x_train, -1).astype(np.float) / 255.)[:args.num_train] x_train = (np.expand_dims(x_train, -1).astype(float) / 255.)[:args.num_train]
x_test = (np.expand_dims(x_test, -1).astype(np.float) / 255.)[:args.num_test] x_test = (np.expand_dims(x_test, -1).astype(float) / 255.)[:args.num_test]
y_train = keras.utils.to_categorical(y_train, NUM_CLASSES)[:args.num_train] y_train = keras.utils.to_categorical(y_train, NUM_CLASSES)[:args.num_train]
y_test = keras.utils.to_categorical(y_test, NUM_CLASSES)[:args.num_test] y_test = keras.utils.to_categorical(y_test, NUM_CLASSES)[:args.num_test]
@@ -216,8 +216,8 @@ def load_mnist_data(args):
''' '''
(x_train, y_train), (x_test, y_test) = mnist.load_data() (x_train, y_train), (x_test, y_test) = mnist.load_data()
x_train = (np.expand_dims(x_train, -1).astype(np.float) / 255.)[:args.num_train] x_train = (np.expand_dims(x_train, -1).astype(float) / 255.)[:args.num_train]
x_test = (np.expand_dims(x_test, -1).astype(np.float) / 255.)[:args.num_test] x_test = (np.expand_dims(x_test, -1).astype(float) / 255.)[:args.num_test]
y_train = keras.utils.to_categorical(y_train, NUM_CLASSES)[:args.num_train] y_train = keras.utils.to_categorical(y_train, NUM_CLASSES)[:args.num_train]
y_test = keras.utils.to_categorical(y_test, NUM_CLASSES)[:args.num_test] y_test = keras.utils.to_categorical(y_test, NUM_CLASSES)[:args.num_test]

View file

@@ -46,8 +46,8 @@ def create_mnist_model(hyper_params, input_shape=(H, W, 1), num_classes=NUM_CLAS
def load_mnist_data(args): def load_mnist_data(args):
(x_train, y_train), (x_test, y_test) = mnist.load_data() (x_train, y_train), (x_test, y_test) = mnist.load_data()
x_train = (np.expand_dims(x_train, -1).astype(np.float) / 255.)[:args.num_train] x_train = (np.expand_dims(x_train, -1).astype(float) / 255.)[:args.num_train]
x_test = (np.expand_dims(x_test, -1).astype(np.float) / 255.)[:args.num_test] x_test = (np.expand_dims(x_test, -1).astype(float) / 255.)[:args.num_test]
y_train = keras.utils.to_categorical(y_train, NUM_CLASSES)[:args.num_train] y_train = keras.utils.to_categorical(y_train, NUM_CLASSES)[:args.num_train]
y_test = keras.utils.to_categorical(y_test, NUM_CLASSES)[:args.num_test] y_test = keras.utils.to_categorical(y_test, NUM_CLASSES)[:args.num_test]
@@ -213,8 +213,8 @@ def load_mnist_data(args):
''' '''
(x_train, y_train), (x_test, y_test) = mnist.load_data() (x_train, y_train), (x_test, y_test) = mnist.load_data()
x_train = (np.expand_dims(x_train, -1).astype(np.float) / 255.)[:args.num_train] x_train = (np.expand_dims(x_train, -1).astype(float) / 255.)[:args.num_train]
x_test = (np.expand_dims(x_test, -1).astype(np.float) / 255.)[:args.num_test] x_test = (np.expand_dims(x_test, -1).astype(float) / 255.)[:args.num_test]
y_train = keras.utils.to_categorical(y_train, NUM_CLASSES)[:args.num_train] y_train = keras.utils.to_categorical(y_train, NUM_CLASSES)[:args.num_train]
y_test = keras.utils.to_categorical(y_test, NUM_CLASSES)[:args.num_test] y_test = keras.utils.to_categorical(y_test, NUM_CLASSES)[:args.num_test]

View file

@@ -65,8 +65,8 @@ def load_mnist_data(args):
''' '''
(x_train, y_train), (x_test, y_test) = mnist.load_data() (x_train, y_train), (x_test, y_test) = mnist.load_data()
x_train = (np.expand_dims(x_train, -1).astype(np.float) / 255.)[:args.num_train] x_train = (np.expand_dims(x_train, -1).astype(float) / 255.)[:args.num_train]
x_test = (np.expand_dims(x_test, -1).astype(np.float) / 255.)[:args.num_test] x_test = (np.expand_dims(x_test, -1).astype(float) / 255.)[:args.num_test]
y_train = keras.utils.to_categorical(y_train, NUM_CLASSES)[:args.num_train] y_train = keras.utils.to_categorical(y_train, NUM_CLASSES)[:args.num_train]
y_test = keras.utils.to_categorical(y_test, NUM_CLASSES)[:args.num_test] y_test = keras.utils.to_categorical(y_test, NUM_CLASSES)[:args.num_test]

View file

@@ -67,8 +67,8 @@ def load_mnist_data(args):
(x_train, y_train), (x_test, y_test) = mnist.load_data(path=mnist_path) (x_train, y_train), (x_test, y_test) = mnist.load_data(path=mnist_path)
os.remove(mnist_path) os.remove(mnist_path)
x_train = (np.expand_dims(x_train, -1).astype(np.float) / 255.)[:args.num_train] x_train = (np.expand_dims(x_train, -1).astype(float) / 255.)[:args.num_train]
x_test = (np.expand_dims(x_test, -1).astype(np.float) / 255.)[:args.num_test] x_test = (np.expand_dims(x_test, -1).astype(float) / 255.)[:args.num_test]
y_train = keras.utils.to_categorical(y_train, NUM_CLASSES)[:args.num_train] y_train = keras.utils.to_categorical(y_train, NUM_CLASSES)[:args.num_train]
y_test = keras.utils.to_categorical(y_test, NUM_CLASSES)[:args.num_test] y_test = keras.utils.to_categorical(y_test, NUM_CLASSES)[:args.num_test]

View file

@@ -67,8 +67,8 @@ def load_mnist_data(args):
""" """
(x_train, y_train), (x_test, y_test) = mnist.load_data() (x_train, y_train), (x_test, y_test) = mnist.load_data()
x_train = (np.expand_dims(x_train, -1).astype(np.float) / 255.)[:args.num_train] x_train = (np.expand_dims(x_train, -1).astype(float) / 255.)[:args.num_train]
x_test = (np.expand_dims(x_test, -1).astype(np.float) / 255.)[:args.num_test] x_test = (np.expand_dims(x_test, -1).astype(float) / 255.)[:args.num_test]
y_train = keras.utils.to_categorical(y_train, NUM_CLASSES)[:args.num_train] y_train = keras.utils.to_categorical(y_train, NUM_CLASSES)[:args.num_train]
y_test = keras.utils.to_categorical(y_test, NUM_CLASSES)[:args.num_test] y_test = keras.utils.to_categorical(y_test, NUM_CLASSES)[:args.num_test]

View file

@@ -287,7 +287,7 @@ class CurveModel:
------- -------
None None
""" """
init_weight = np.ones((self.effective_model_num), dtype=np.float) / self.effective_model_num init_weight = np.ones((self.effective_model_num), dtype=float) / self.effective_model_num
self.weight_samples = np.broadcast_to(init_weight, (NUM_OF_INSTANCE, self.effective_model_num)) self.weight_samples = np.broadcast_to(init_weight, (NUM_OF_INSTANCE, self.effective_model_num))
for _ in range(NUM_OF_SIMULATION_TIME): for _ in range(NUM_OF_SIMULATION_TIME):
# sample new value from Q(i, j) # sample new value from Q(i, j)
@@ -298,7 +298,7 @@ class CurveModel:
# sample u # sample u
u = np.random.rand(NUM_OF_INSTANCE) u = np.random.rand(NUM_OF_INSTANCE)
# new value # new value
change_value_flag = (u < alpha).astype(np.int) change_value_flag = (u < alpha).astype(int)
for j in range(NUM_OF_INSTANCE): for j in range(NUM_OF_INSTANCE):
new_values[j] = self.weight_samples[j] * (1 - change_value_flag[j]) + new_values[j] * change_value_flag[j] new_values[j] = self.weight_samples[j] * (1 - change_value_flag[j]) + new_values[j] * change_value_flag[j]
self.weight_samples = new_values self.weight_samples = new_values

View file

@@ -219,7 +219,7 @@ class IncrementalGaussianProcess:
self._l_matrix.shape[0])) self._l_matrix.shape[0]))
k_inv = l_inv.dot(l_inv.T) k_inv = l_inv.dot(l_inv.T)
# Compute variance of predictive distribution # Compute variance of predictive distribution
y_var = np.ones(len(train_x), dtype=np.float) y_var = np.ones(len(train_x), dtype=float)
y_var -= np.einsum("ij,ij->i", np.dot(k_trans, k_inv), k_trans) y_var -= np.einsum("ij,ij->i", np.dot(k_trans, k_inv), k_trans)
# Check if any of the variances is negative because of # Check if any of the variances is negative because of

View file

@@ -209,7 +209,7 @@ class PPOModel:
mb_actions = np.asarray(mb_actions) mb_actions = np.asarray(mb_actions)
mb_values = np.asarray(mb_values, dtype=np.float32) mb_values = np.asarray(mb_values, dtype=np.float32)
mb_neglogpacs = np.asarray(mb_neglogpacs, dtype=np.float32) mb_neglogpacs = np.asarray(mb_neglogpacs, dtype=np.float32)
mb_dones = np.asarray(mb_dones, dtype=np.bool) mb_dones = np.asarray(mb_dones, dtype=bool)
last_values = self.model.value(np_obs, S=states, M=dones) last_values = self.model.value(np_obs, S=states, M=dones)
return mb_obs, mb_actions, mb_values, mb_neglogpacs, mb_dones, last_values return mb_obs, mb_actions, mb_values, mb_neglogpacs, mb_dones, last_values
@@ -231,7 +231,7 @@ class PPOModel:
mb_returns = np.zeros_like(mb_rewards) mb_returns = np.zeros_like(mb_rewards)
mb_advs = np.zeros_like(mb_rewards) mb_advs = np.zeros_like(mb_rewards)
lastgaelam = 0 lastgaelam = 0
last_dones = np.asarray([True for _ in trials_result], dtype=np.bool) # ugly last_dones = np.asarray([True for _ in trials_result], dtype=bool) # ugly
for t in reversed(range(self.model_config.nsteps)): for t in reversed(range(self.model_config.nsteps)):
if t == self.model_config.nsteps - 1: if t == self.model_config.nsteps - 1:
nextnonterminal = 1.0 - last_dones nextnonterminal = 1.0 - last_dones

View file

@@ -13,7 +13,7 @@ def _labeling_from_architecture(architecture, vertices):
def _adjancency_matrix_from_architecture(architecture, vertices): def _adjancency_matrix_from_architecture(architecture, vertices):
matrix = np.zeros((vertices, vertices), dtype=np.bool) # type: ignore matrix = np.zeros((vertices, vertices), dtype=bool) # type: ignore
for i in range(1, vertices): for i in range(1, vertices):
for k in architecture['input{}'.format(i)]: for k in architecture['input{}'.format(i)]:
matrix[k, i] = 1 matrix[k, i] = 1

View file

@@ -70,7 +70,7 @@ def _eliminate_list_slice(shape: tuple, slice_: multidim_slice) -> multidim_slic
for i in range(len(slice_)): for i in range(len(slice_)):
if isinstance(slice_[i], list): if isinstance(slice_[i], list):
# convert list of slices to mask # convert list of slices to mask
mask = np.zeros(shape[i], dtype=np.bool) # type: ignore mask = np.zeros(shape[i], dtype=bool) # type: ignore
for sl in cast(List[slice], slice_[i]): for sl in cast(List[slice], slice_[i]):
mask[sl] = 1 mask[sl] = 1
result.append(mask) result.append(mask)

View file

@@ -9,5 +9,4 @@ filterwarnings =
ignore:Using key to access the identifier of:DeprecationWarning ignore:Using key to access the identifier of:DeprecationWarning
ignore:layer_choice.choices is deprecated.:DeprecationWarning ignore:layer_choice.choices is deprecated.:DeprecationWarning
ignore:The truth value of an empty array is ambiguous.:DeprecationWarning ignore:The truth value of an empty array is ambiguous.:DeprecationWarning
ignore:`np.bool` is a deprecated alias for the builtin `bool`:DeprecationWarning
ignore:nni.retiarii.serialize is deprecated and will be removed in future release.:DeprecationWarning ignore:nni.retiarii.serialize is deprecated and will be removed in future release.:DeprecationWarning

View file

@@ -35,7 +35,7 @@ class TestCurveFittingAssessor(unittest.TestCase):
test_model.point_num = 9 test_model.point_num = 9
test_model.target_pos = 20 test_model.target_pos = 20
test_model.trial_history = ([1, 1, 1, 1, 1, 1, 1, 1, 1]) test_model.trial_history = ([1, 1, 1, 1, 1, 1, 1, 1, 1])
test_model.weight_samples = np.ones((test_model.effective_model_num), dtype=np.float) / test_model.effective_model_num test_model.weight_samples = np.ones((test_model.effective_model_num), dtype=float) / test_model.effective_model_num
self.assertAlmostEqual(test_model.predict_y('vap', 9), 0.5591906328335763) self.assertAlmostEqual(test_model.predict_y('vap', 9), 0.5591906328335763)
self.assertAlmostEqual(test_model.predict_y('logx_linear', 15), 1.0704360293379522) self.assertAlmostEqual(test_model.predict_y('logx_linear', 15), 1.0704360293379522)
self.assertAlmostEqual(test_model.f_comb(9, test_model.weight_samples), 1.1543379521172443) self.assertAlmostEqual(test_model.f_comb(9, test_model.weight_samples), 1.1543379521172443)