From 8ef46ea767a7c87cf55d1ff27f0712e0f9d21645 Mon Sep 17 00:00:00 2001 From: nissymori Date: Thu, 1 Sep 2022 16:23:51 +0900 Subject: [PATCH 01/12] implement train.py --- .../tests/test_train_helper.py | 22 ++++++++++-- workspace/suphnx-reward-shaping/train.py | 13 ++++--- .../suphnx-reward-shaping/train_helper.py | 34 +++++++++++++++++-- workspace/suphnx-reward-shaping/utils.py | 8 +++-- 4 files changed, 67 insertions(+), 10 deletions(-) diff --git a/workspace/suphnx-reward-shaping/tests/test_train_helper.py b/workspace/suphnx-reward-shaping/tests/test_train_helper.py index 47326c2e..47e25478 100644 --- a/workspace/suphnx-reward-shaping/tests/test_train_helper.py +++ b/workspace/suphnx-reward-shaping/tests/test_train_helper.py @@ -6,12 +6,14 @@ import optax sys.path.append("../") -from train_helper import evaluate, initializa_params, train +from train_helper import evaluate, initializa_params, plot_result, save_params, train from utils import to_data layer_sizes = [3, 4, 5, 1] feature_size = 6 seed = jax.random.PRNGKey(42) +save_dir = os.path.join(os.pardir, "trained_model/test_param.pickle") +result_dir = os.path.join(os.pardir, "result") mjxprotp_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), "resources") @@ -36,5 +38,21 @@ def test_evaluate(): assert loss >= 0 +def test_save_model(): + params = initializa_params(layer_sizes, feature_size, seed) + featurs, scores = to_data(mjxprotp_dir) + optimizer = optax.adam(0.05) + params = train(params, optimizer, featurs, scores, epochs=1, batch_size=1) + save_params(params, save_dir) + + +def test_plot_result(): + params = initializa_params(layer_sizes, feature_size, seed) + featurs, scores = to_data(mjxprotp_dir) + optimizer = optax.adam(0.05) + params = train(params, optimizer, featurs, scores, epochs=1, batch_size=1) + plot_result(params, featurs, scores, result_dir) + + if __name__ == "__main__": - test_train() + test_plot_result() diff --git a/workspace/suphnx-reward-shaping/train.py 
b/workspace/suphnx-reward-shaping/train.py index c3134555..e411802f 100644 --- a/workspace/suphnx-reward-shaping/train.py +++ b/workspace/suphnx-reward-shaping/train.py @@ -6,13 +6,15 @@ import jax import jax.numpy as jnp import optax -from train_helper import evaluate, initializa_params, train +from train_helper import evaluate, initializa_params, plot_result, train from utils import normalize, to_data mjxprotp_dir = os.path.join( os.path.dirname(os.path.abspath(__file__)), "resources/mjxproto" ) # please specify your mjxproto dir +result_dir = os.path.join(os.pardir, "suphnx-reward-shaping/result") + if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument("lr", help="Enter learning rate", type=float) @@ -21,9 +23,10 @@ args = parser.parse_args() - X, Y = to_data(mjxprotp_dir) - X = normalize(X) - Y = normalize(Y) + _X, _Y = to_data(mjxprotp_dir) + print(_X.mean(axis=0), _X.std(axis=0), _Y.mean(axis=0), _Y.std(axis=0)) + X = normalize(_X) + Y = normalize(_Y) train_x = X[: math.floor(len(X) * 0.8)] train_y = Y[: math.floor(len(X) * 0.8)] @@ -39,3 +42,5 @@ params = train(params, optimizer, train_x, train_y, args.epochs, args.batch_size) print(evaluate(params, test_x, test_y, args.batch_size)) + + plot_result(params, _X, _Y, result_dir) diff --git a/workspace/suphnx-reward-shaping/train_helper.py b/workspace/suphnx-reward-shaping/train_helper.py index f2618001..4ce50517 100644 --- a/workspace/suphnx-reward-shaping/train_helper.py +++ b/workspace/suphnx-reward-shaping/train_helper.py @@ -1,8 +1,12 @@ +import os +import pickle +from re import I from typing import Dict, List import jax import jax.nn as nn import jax.numpy as jnp +import matplotlib.pyplot as plt import numpy as np import optax import tensorflow as tf @@ -96,6 +100,32 @@ def evaluate(params: optax.Params, X: jnp.ndarray, Y: jnp.ndarray, batch_size: i dataset = tf.data.Dataset.from_tensor_slices((X, Y)) batched_dataset = dataset.batch(batch_size, drop_remainder=True) cum_loss = 0 
- for batch_x, batch_y in batched_dataset: - cum_loss += loss(params, batch_x.numpy(), batch_y.numpy()) + for batched_x, batched_y in batched_dataset: + cum_loss += loss(params, batched_x.numpy(), batched_y.numpy()) + print(cum_loss / len(batched_dataset)) return cum_loss / len(batched_dataset) + + +def save_params(params: optax.Params, save_dir): + with open(save_dir, "wb") as f: + pickle.dump(params, f) + + +def plot_result(params: optax.Params, X, Y, result_dir): + x_mean, x_std = X.mean(axis=0), X.std(axis=0) + y_mean, y_std = Y.mean(axis=0), Y.std(axis=0) + for i in range(8): # 通常の局数分 + log_score = [] + log_pred = [] + for j in range(60): + score_mean, score_std = x_mean[0], x_std[0] + x = jnp.array([(j * 1000 - score_mean) / score_std, 0, 0, i, 0, 0]) + pred = net(x, params) + if i == 7: + print(pred) + print(y_mean, y_std) + log_score.append(j * 1000) + log_pred.append(pred * y_std + y_mean) + plt.plot(log_score, log_pred) + save_dir = os.path.join(result_dir, "prediction_at_round" + str(i) + ".png") + plt.savefig(save_dir) diff --git a/workspace/suphnx-reward-shaping/utils.py b/workspace/suphnx-reward-shaping/utils.py index 7ac64b26..419249b8 100644 --- a/workspace/suphnx-reward-shaping/utils.py +++ b/workspace/suphnx-reward-shaping/utils.py @@ -13,7 +13,7 @@ sys.path.append("../../../") import mjxproto -oka = [90, 40, 0, -130] +game_rewards = [90, 45, 0, -135] def to_data(mjxprotp_dir: str) -> Tuple[jnp.ndarray, jnp.ndarray]: @@ -43,6 +43,10 @@ def normalize(array: jnp.ndarray) -> jnp.ndarray: return (array - mean) / std +def inv_normalize(array: jnp.ndarray, mean: jnp.ndarray, std: jnp.ndarray) -> jnp.ndarray: + return array * std + mean + + def _select_one_round(states: List[mjxproto.State]) -> mjxproto.State: """ データセットに本質的で無い相関が生まれることを防ぐために一半荘につき1ペアのみを使う. 
@@ -75,4 +79,4 @@ def to_final_scores(states: List[mjxproto.State], target) -> List[int]: target_score = final_scores[target] sorted_scores = sorted(final_scores) rank = sorted_scores.index(target_score) - return [oka[rank]] + return [game_rewards[rank]] From 370054725b48f06509da484466d5a204250ce7c0 Mon Sep 17 00:00:00 2001 From: nissymori Date: Sun, 4 Sep 2022 18:23:30 +0900 Subject: [PATCH 02/12] fix feature --- .../suphnx-reward-shaping/tests/test_utils.py | 21 ++++++++++++++++--- 1 file changed, 18 insertions(+), 3 deletions(-) diff --git a/workspace/suphnx-reward-shaping/tests/test_utils.py b/workspace/suphnx-reward-shaping/tests/test_utils.py index 0a6f7cfa..cd85629d 100644 --- a/workspace/suphnx-reward-shaping/tests/test_utils.py +++ b/workspace/suphnx-reward-shaping/tests/test_utils.py @@ -8,13 +8,28 @@ import mjxproto sys.path.append("../") -from utils import to_data +from utils import _preprocess_scores, _to_one_hot, to_data mjxprotp_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), "resources") -def test_to_dataset(): +def test_preprocess(): + scores = [0, 100000, 200000, 300000] + print(_preprocess_scores(scores, 1)) + assert _preprocess_scores(scores, 0) == [0, 3, 2, 1] + assert _preprocess_scores(scores, 1) == [1, 0, 3, 2] + assert _preprocess_scores(scores, 2) == [2, 1, 0, 3] + assert _preprocess_scores(scores, 3) == [3, 2, 1, 0] + + +def test_to_data(): num_resources = len(os.listdir(mjxprotp_dir)) features, scores = to_data(mjxprotp_dir) - assert features.shape == (num_resources, 6) + print(features) + assert features.shape == (num_resources, 15) assert scores.shape == (num_resources, 1) + + +if __name__ == "__main__": + test_preprocess() + test_to_data() From a04bc14cdf714de4689fc5dd27faf5186025e2bf Mon Sep 17 00:00:00 2001 From: nissymori Date: Wed, 7 Sep 2022 17:32:51 +0900 Subject: [PATCH 03/12] fix --- .gitignore | 3 + .../tests/test_train_helper.py | 12 ++- .../suphnx-reward-shaping/tests/test_utils.py | 7 +- 
workspace/suphnx-reward-shaping/train.py | 21 +++-- .../suphnx-reward-shaping/train_helper.py | 51 +++++++----- workspace/suphnx-reward-shaping/utils.py | 77 +++++++++++++------ 6 files changed, 111 insertions(+), 60 deletions(-) diff --git a/.gitignore b/.gitignore index 150c879a..f3ff48bc 100644 --- a/.gitignore +++ b/.gitignore @@ -20,8 +20,11 @@ mjx-py/.vscode/* dist .pytest_cache .cache + .ipynb_checkpoints workspace/suphnx-reward-shaping/resources/* +workspace/suphnx-reward-shaping/trained_model/* +workspace/suphnx-reward-shaping/result/* .DS_Store .vscode/ .python_versions diff --git a/workspace/suphnx-reward-shaping/tests/test_train_helper.py b/workspace/suphnx-reward-shaping/tests/test_train_helper.py index 47e25478..24b9b385 100644 --- a/workspace/suphnx-reward-shaping/tests/test_train_helper.py +++ b/workspace/suphnx-reward-shaping/tests/test_train_helper.py @@ -6,11 +6,11 @@ import optax sys.path.append("../") -from train_helper import evaluate, initializa_params, plot_result, save_params, train +from train_helper import evaluate, initializa_params, net, plot_result, save_params, train from utils import to_data layer_sizes = [3, 4, 5, 1] -feature_size = 6 +feature_size = 15 seed = jax.random.PRNGKey(42) save_dir = os.path.join(os.pardir, "trained_model/test_param.pickle") result_dir = os.path.join(os.pardir, "result") @@ -54,5 +54,11 @@ def test_plot_result(): plot_result(params, featurs, scores, result_dir) +def test_net(): + params = initializa_params(layer_sizes, feature_size, seed) + features, scores = to_data(mjxprotp_dir) + print(net(features[0], params), features, params) + + if __name__ == "__main__": - test_plot_result() + test_net() diff --git a/workspace/suphnx-reward-shaping/tests/test_utils.py b/workspace/suphnx-reward-shaping/tests/test_utils.py index cd85629d..1dae8d70 100644 --- a/workspace/suphnx-reward-shaping/tests/test_utils.py +++ b/workspace/suphnx-reward-shaping/tests/test_utils.py @@ -8,7 +8,7 @@ import mjxproto 
sys.path.append("../") -from utils import _preprocess_scores, _to_one_hot, to_data +from utils import _preprocess_scores, to_data, to_final_game_reward mjxprotp_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), "resources") @@ -28,8 +28,3 @@ def test_to_data(): print(features) assert features.shape == (num_resources, 15) assert scores.shape == (num_resources, 1) - - -if __name__ == "__main__": - test_preprocess() - test_to_data() diff --git a/workspace/suphnx-reward-shaping/train.py b/workspace/suphnx-reward-shaping/train.py index e411802f..57e11071 100644 --- a/workspace/suphnx-reward-shaping/train.py +++ b/workspace/suphnx-reward-shaping/train.py @@ -6,8 +6,8 @@ import jax import jax.numpy as jnp import optax -from train_helper import evaluate, initializa_params, plot_result, train -from utils import normalize, to_data +from train_helper import evaluate, initializa_params, plot_result, save_params, train +from utils import to_data mjxprotp_dir = os.path.join( os.path.dirname(os.path.abspath(__file__)), "resources/mjxproto" @@ -15,18 +15,17 @@ result_dir = os.path.join(os.pardir, "suphnx-reward-shaping/result") + if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument("lr", help="Enter learning rate", type=float) parser.add_argument("epochs", help="Enter epochs", type=int) parser.add_argument("batch_size", help="Enter batch_size", type=int) + parser.add_argument("is_round_one_hot", nargs="?", default="0") args = parser.parse_args() - _X, _Y = to_data(mjxprotp_dir) - print(_X.mean(axis=0), _X.std(axis=0), _Y.mean(axis=0), _Y.std(axis=0)) - X = normalize(_X) - Y = normalize(_Y) + X, Y = to_data(mjxprotp_dir) train_x = X[: math.floor(len(X) * 0.8)] train_y = Y[: math.floor(len(X) * 0.8)] @@ -36,11 +35,17 @@ layer_size = [32, 32, 1] seed = jax.random.PRNGKey(42) - params = initializa_params(layer_size, 6, seed) + if args.is_round_one_hot == "0": + params = initializa_params(layer_size, 15, seed) + else: + params = 
initializa_params(layer_size, 22, seed) # featureでroundがone-hotになっている. + optimizer = optax.adam(learning_rate=args.lr) params = train(params, optimizer, train_x, train_y, args.epochs, args.batch_size) print(evaluate(params, test_x, test_y, args.batch_size)) - plot_result(params, _X, _Y, result_dir) + save_params(params, result_dir) + + plot_result(params, X, Y, result_dir) diff --git a/workspace/suphnx-reward-shaping/train_helper.py b/workspace/suphnx-reward-shaping/train_helper.py index 4ce50517..a25b8c32 100644 --- a/workspace/suphnx-reward-shaping/train_helper.py +++ b/workspace/suphnx-reward-shaping/train_helper.py @@ -1,5 +1,6 @@ import os import pickle +from cProfile import label from re import I from typing import Dict, List @@ -29,7 +30,7 @@ def initializa_params(layer_sizes: List[int], features: int, seed) -> Dict: key=seed, shape=(features, units), minval=-np.sqrt(6) / np.sqrt(units), - maxval=-np.sqrt(6) / np.sqrt(units), + maxval=np.sqrt(6) / np.sqrt(units), dtype=jnp.float32, ) else: @@ -49,9 +50,10 @@ def relu(x: jnp.ndarray) -> jnp.ndarray: def net(x: jnp.ndarray, params: optax.Params) -> jnp.ndarray: - for k, param in params.items(): + for i, param in enumerate(params.values()): x = jnp.dot(x, param) - x = jax.nn.relu(x) + if i + 1 < len(params.values()): + x = jax.nn.relu(x) return x @@ -61,11 +63,21 @@ def loss(params: optax.Params, batched_x: jnp.ndarray, batched_y: jnp.ndarray) - return loss_value.mean() +@jax.jit +def step(params, opt_state, batch, labels, optimizer): + loss_value, grads = jax.value_and_grad(loss)(params, batch, labels) + updates, opt_state = optimizer.update(grads, opt_state, params) + params = optax.apply_updates(params, updates) + return params, opt_state, loss_value + + def train( params: optax.Params, optimizer: optax.GradientTransformation, - X: jnp.ndarray, - Y: jnp.ndarray, + X_train: jnp.ndarray, + Y_train: jnp.ndarray, + X_test: jnp.ndarray, + Y_test: jnp.ndarray, epochs: int, batch_size: int, buffer_size=3, @@ -73,7 
+85,7 @@ def train( """ 学習用の関数. 線形層を前提としており, バッチ処理やシャッフルのためにtensorflowを使っている. """ - dataset = tf.data.Dataset.from_tensor_slices((X, Y)) + dataset = tf.data.Dataset.from_tensor_slices((X_train, Y_train)) batched_dataset = dataset.shuffle(buffer_size=buffer_size).batch( batch_size, drop_remainder=True ) @@ -92,7 +104,9 @@ def step(params, opt_state, batch, labels): params, opt_state, batched_x.numpy(), batched_y.numpy() ) if i % 100 == 0: # print MSE every 100 epochs - print(f"step {i}, loss: {loss_value}") + pred = net(batched_x[0].numpy(), params) + print(f"step {i}, loss: {loss_value}, pred {pred}, actual {batched_y[0]}") + evaluate(params, X_test, Y_test, batch_size) return params @@ -102,30 +116,31 @@ def evaluate(params: optax.Params, X: jnp.ndarray, Y: jnp.ndarray, batch_size: i cum_loss = 0 for batched_x, batched_y in batched_dataset: cum_loss += loss(params, batched_x.numpy(), batched_y.numpy()) - print(cum_loss / len(batched_dataset)) return cum_loss / len(batched_dataset) def save_params(params: optax.Params, save_dir): - with open(save_dir, "wb") as f: + with open(save_dir + "params.pickle", "wb") as f: pickle.dump(params, f) def plot_result(params: optax.Params, X, Y, result_dir): - x_mean, x_std = X.mean(axis=0), X.std(axis=0) - y_mean, y_std = Y.mean(axis=0), Y.std(axis=0) for i in range(8): # 通常の局数分 log_score = [] log_pred = [] for j in range(60): - score_mean, score_std = x_mean[0], x_std[0] - x = jnp.array([(j * 1000 - score_mean) / score_std, 0, 0, i, 0, 0]) + x = jnp.array(_create_data_for_plot(j * 1000, i)) pred = net(x, params) - if i == 7: - print(pred) - print(y_mean, y_std) log_score.append(j * 1000) - log_pred.append(pred * y_std + y_mean) - plt.plot(log_score, log_pred) + log_pred.append(pred * 100) + plt.plot(log_score, log_pred, label="round_" + str(i)) + plt.legend() save_dir = os.path.join(result_dir, "prediction_at_round" + str(i) + ".png") plt.savefig(save_dir) + + +def _create_data_for_plot(score, round) -> List: + scores = [score / 
100000] + [(100000 - score) / 300000] * 3 + wind = [1, 0, 0, 0] + oya = [1, 0, 0, 0] + return scores + wind + oya + [round / 7, 0, 0] diff --git a/workspace/suphnx-reward-shaping/utils.py b/workspace/suphnx-reward-shaping/utils.py index 419249b8..94d17196 100644 --- a/workspace/suphnx-reward-shaping/utils.py +++ b/workspace/suphnx-reward-shaping/utils.py @@ -2,7 +2,7 @@ import os import random import sys -from typing import Dict, Iterator, List, Optional, Tuple +from typing import Dict, Iterator, List, Tuple import jax import jax.numpy as jnp @@ -31,22 +31,12 @@ def to_data(mjxprotp_dir: str) -> Tuple[jnp.ndarray, jnp.ndarray]: states = [json_format.ParseDict(d, mjxproto.State()) for d in _dicts] target: int = random.randint(0, 3) features.append(to_feature(states, target)) - scores.append(to_final_scores(states, target)) + scores.append(to_final_game_reward(states, target)) features_array: jnp.ndarray = jnp.array(features) scores_array: jnp.ndarray = jnp.array(scores) return features_array, scores_array -def normalize(array: jnp.ndarray) -> jnp.ndarray: - mean = array.mean(axis=0) - std = array.mean(axis=0) - return (array - mean) / std - - -def inv_normalize(array: jnp.ndarray, mean: jnp.ndarray, std: jnp.ndarray) -> jnp.ndarray: - return array * std + mean - - def _select_one_round(states: List[mjxproto.State]) -> mjxproto.State: """ データセットに本質的で無い相関が生まれることを防ぐために一半荘につき1ペアのみを使う. @@ -56,27 +46,64 @@ def _select_one_round(states: List[mjxproto.State]) -> mjxproto.State: def _calc_curr_pos(init_pos: int, round: int) -> int: - return init_pos + round % 4 + pos = (init_pos + round) % 4 + assert 0 <= pos <= 3 + return pos + + +def _to_one_hot(total_num: int, idx: int) -> List[int]: + _l = [0] * total_num + _l[idx] = 1 + return _l + + +def _clip_round(round: int, lim=7) -> int: + """ + 天鳳ではてんほうでは最長西4局まで行われるが何四局以降はサドンデスなので同一視. 
+ """ + if round < 7: + return round + else: + return 7 + + +def _preprocess_scores(scores, target: int) -> List: + """ + 局終了時の点数を100000で割って自家, 下家, 対面, 上家の順に並び替える. + """ + _self: int = scores[target] / 100000 + _left: int = scores[target - 1] / 100000 + _front: int = scores[target - 2] / 100000 + _right: int = scores[target - 3] / 100000 + return [_self, _left, _front, _right] -def to_feature(states: List[mjxproto.State], target) -> List[int]: +def to_feature(states: List[mjxproto.State], target, is_round_one_hot=False) -> List: """ - 特徴量 = [終了時の点数, 自風, 親, 局, 本場, 詰み棒] + 特徴量 = [4playerの点数, 自風:one-hot, 親:one-hot, 局, 本場, 詰み棒] """ state = _select_one_round(states) - ten: int = state.round_terminal.final_score.tens[target] + scores: List = _preprocess_scores(state.round_terminal.final_score.tens, target) honba: int = state.round_terminal.final_score.honba tsumibo: int = state.round_terminal.final_score.riichi - round: int = state.round_terminal.final_score.round - wind: int = _calc_curr_pos(target, round) - oya: int = _calc_curr_pos(0, round) - return [ten, honba, tsumibo, round, wind, oya] - - -def to_final_scores(states: List[mjxproto.State], target) -> List[int]: + round: int = _clip_round(state.round_terminal.final_score.round) + wind: List[int] = _to_one_hot(4, _calc_curr_pos(target, round)) + oya: List[int] = _to_one_hot(4, _calc_curr_pos(0, round)) + if is_round_one_hot: + one_hot_round: List[int] = _to_one_hot(8, round) + feature = ( + scores + wind + oya + one_hot_round + [honba / 4, tsumibo / 4] + ) # len(feature) = 22 + else: + feature = scores + wind + oya + [round / 7, honba / 4, tsumibo / 4] # len(feature) = 15 + return feature + + +def to_final_game_reward(states: List[mjxproto.State], target) -> List: + """ """ final_state = states[-1] final_scores = final_state.round_terminal.final_score.tens target_score = final_scores[target] - sorted_scores = sorted(final_scores) + sorted_scores = sorted(final_scores, reverse=True) rank = 
sorted_scores.index(target_score) - return [game_rewards[rank]] + return [game_rewards[rank] / 100] From 0f0525aa8f4bfdace379f2dcb0031cb089a3648a Mon Sep 17 00:00:00 2001 From: nissymori Date: Thu, 8 Sep 2022 15:12:06 +0900 Subject: [PATCH 04/12] fix --- .../tests/test_train_helper.py | 25 +++---- workspace/suphnx-reward-shaping/train.py | 24 ++++-- .../suphnx-reward-shaping/train_helper.py | 75 +++++++++++++------ workspace/suphnx-reward-shaping/utils.py | 4 +- 4 files changed, 82 insertions(+), 46 deletions(-) diff --git a/workspace/suphnx-reward-shaping/tests/test_train_helper.py b/workspace/suphnx-reward-shaping/tests/test_train_helper.py index 24b9b385..24a9fc93 100644 --- a/workspace/suphnx-reward-shaping/tests/test_train_helper.py +++ b/workspace/suphnx-reward-shaping/tests/test_train_helper.py @@ -6,7 +6,7 @@ import optax sys.path.append("../") -from train_helper import evaluate, initializa_params, net, plot_result, save_params, train +from train_helper import initializa_params, net, plot_result, save_params, train from utils import to_data layer_sizes = [3, 4, 5, 1] @@ -25,33 +25,28 @@ def test_initialize_params(): def test_train(): params = initializa_params(layer_sizes, feature_size, seed) - featurs, scores = to_data(mjxprotp_dir) + features, scores = to_data(mjxprotp_dir) optimizer = optax.adam(0.05) - params = train(params, optimizer, featurs, scores, epochs=1, batch_size=1) + params, train_log, test_log = train( + params, optimizer, features, scores, features, scores, epochs=1, batch_size=1 + ) assert len(params) == 4 -def test_evaluate(): - params = initializa_params(layer_sizes, feature_size, seed) - featurs, scores = to_data(mjxprotp_dir) - loss = evaluate(params, featurs, scores, batch_size=2) - assert loss >= 0 - - def test_save_model(): params = initializa_params(layer_sizes, feature_size, seed) - featurs, scores = to_data(mjxprotp_dir) + features, scores = to_data(mjxprotp_dir) optimizer = optax.adam(0.05) - params = train(params, optimizer, 
featurs, scores, epochs=1, batch_size=1) + params = train(params, optimizer, features, scores, features, scores, epochs=1, batch_size=1) save_params(params, save_dir) def test_plot_result(): params = initializa_params(layer_sizes, feature_size, seed) - featurs, scores = to_data(mjxprotp_dir) + features, scores = to_data(mjxprotp_dir) optimizer = optax.adam(0.05) - params = train(params, optimizer, featurs, scores, epochs=1, batch_size=1) - plot_result(params, featurs, scores, result_dir) + params = train(params, optimizer, features, scores, features, scores, epochs=1, batch_size=1) + plot_result(params, features, scores, result_dir) def test_net(): diff --git a/workspace/suphnx-reward-shaping/train.py b/workspace/suphnx-reward-shaping/train.py index 57e11071..ea19909e 100644 --- a/workspace/suphnx-reward-shaping/train.py +++ b/workspace/suphnx-reward-shaping/train.py @@ -1,12 +1,13 @@ import argparse import math import os -import sys +import pickle import jax import jax.numpy as jnp +import matplotlib.pyplot as plt import optax -from train_helper import evaluate, initializa_params, plot_result, save_params, train +from train_helper import initializa_params, plot_result, save_params, train from utils import to_data mjxprotp_dir = os.path.join( @@ -22,10 +23,16 @@ parser.add_argument("epochs", help="Enter epochs", type=int) parser.add_argument("batch_size", help="Enter batch_size", type=int) parser.add_argument("is_round_one_hot", nargs="?", default="0") + parser.add_argument("--use_saved_data", nargs="?", default="0") args = parser.parse_args() - - X, Y = to_data(mjxprotp_dir) + if args.use_saved_data == "0": + X, Y = to_data(mjxprotp_dir) + jnp.save(os.path.join(result_dir, "features"), X) + jnp.save(os.path.join(result_dir, "labels"), Y) + else: + X: jnp.ndarray = jnp.load(os.path.join(result_dir, "features.npy")) + Y: jnp.ndarray = jnp.load(os.path.join(result_dir, "labels.npy")) train_x = X[: math.floor(len(X) * 0.8)] train_y = Y[: math.floor(len(X) * 0.8)] @@ 
-42,9 +49,14 @@ optimizer = optax.adam(learning_rate=args.lr) - params = train(params, optimizer, train_x, train_y, args.epochs, args.batch_size) + params, train_log, test_log = train( + params, optimizer, train_x, train_y, test_x, test_y, args.epochs, args.batch_size + ) - print(evaluate(params, test_x, test_y, args.batch_size)) + plt.plot(train_log, label="train") + plt.plot(test_log, label="val") + plt.legend() + plt.savefig(os.path.join(result_dir, "log/leaning_curve.png")) save_params(params, result_dir) diff --git a/workspace/suphnx-reward-shaping/train_helper.py b/workspace/suphnx-reward-shaping/train_helper.py index a25b8c32..22264fa2 100644 --- a/workspace/suphnx-reward-shaping/train_helper.py +++ b/workspace/suphnx-reward-shaping/train_helper.py @@ -2,7 +2,7 @@ import pickle from cProfile import label from re import I -from typing import Dict, List +from typing import Dict, List, Optional import jax import jax.nn as nn @@ -59,16 +59,35 @@ def net(x: jnp.ndarray, params: optax.Params) -> jnp.ndarray: def loss(params: optax.Params, batched_x: jnp.ndarray, batched_y: jnp.ndarray) -> jnp.ndarray: preds = net(batched_x, params) - loss_value = optax.l2_loss(preds, batched_y).sum(axis=-1) + loss_value = optax.l2_loss(preds, batched_y) return loss_value.mean() -@jax.jit -def step(params, opt_state, batch, labels, optimizer): - loss_value, grads = jax.value_and_grad(loss)(params, batch, labels) - updates, opt_state = optimizer.update(grads, opt_state, params) - params = optax.apply_updates(params, updates) - return params, opt_state, loss_value +def train_one_step(params: optax.Params, opt_state, batched_dataset, optimizer, epoch): + @jax.jit + def step(params: optax.Params, opt_state, batch, labels): + loss_value, grads = jax.value_and_grad(loss)(params, batch, labels) + updates, opt_state = optimizer.update(grads, opt_state, params) + params = optax.apply_updates(params, updates) + return params, opt_state, loss_value + + cum_loss = 0 + for batched_x, batched_y 
in batched_dataset: + params, opt_state, loss_value = step( + params, opt_state, batched_x.numpy(), batched_y.numpy(), optimizer + ) + cum_loss += loss_value + if epoch % 100 == 0: # print MSE every 100 epochs + pred = net(batched_x[0].numpy(), params) + print(f"step {epoch}, pred {pred}, actual {batched_y[0]}") + return params, cum_loss / len(batched_dataset) + + +def evaluate_one_step(params: optax.Params, batched_dataset) -> float: + cum_loss = 0 + for batched_x, batched_y in batched_dataset: + cum_loss += loss(params, batched_x.numpy(), batched_y.numpy()) + return cum_loss / len(batched_dataset) def train( @@ -85,12 +104,16 @@ def train( """ 学習用の関数. 線形層を前提としており, バッチ処理やシャッフルのためにtensorflowを使っている. """ - dataset = tf.data.Dataset.from_tensor_slices((X_train, Y_train)) - batched_dataset = dataset.shuffle(buffer_size=buffer_size).batch( + dataset_train = tf.data.Dataset.from_tensor_slices((X_train, Y_train)) + batched_dataset_train = dataset_train.shuffle(buffer_size=buffer_size).batch( batch_size, drop_remainder=True ) + dataset_test = tf.data.Dataset.from_tensor_slices((X_test, Y_test)) + batched_dataset_test = dataset_test.batch(batch_size, drop_remainder=True) opt_state = optimizer.init(params) + train_log, test_log = [], [] + @jax.jit def step(params, opt_state, batch, labels): loss_value, grads = jax.value_and_grad(loss)(params, batch, labels) @@ -99,24 +122,23 @@ def step(params, opt_state, batch, labels): return params, opt_state, loss_value for i in range(epochs): - for batched_x, batched_y in batched_dataset: + cum_loss = 0 + for batched_x, batched_y in batched_dataset_train: params, opt_state, loss_value = step( params, opt_state, batched_x.numpy(), batched_y.numpy() ) + cum_loss += loss_value if i % 100 == 0: # print MSE every 100 epochs pred = net(batched_x[0].numpy(), params) print(f"step {i}, loss: {loss_value}, pred {pred}, actual {batched_y[0]}") - evaluate(params, X_test, Y_test, batch_size) - return params + mean_train_loss = cum_loss / 
len(batched_dataset_train) + mean_test_loss = evaluate_one_step(params, batched_dataset_test) -def evaluate(params: optax.Params, X: jnp.ndarray, Y: jnp.ndarray, batch_size: int) -> float: - dataset = tf.data.Dataset.from_tensor_slices((X, Y)) - batched_dataset = dataset.batch(batch_size, drop_remainder=True) - cum_loss = 0 - for batched_x, batched_y in batched_dataset: - cum_loss += loss(params, batched_x.numpy(), batched_y.numpy()) - return cum_loss / len(batched_dataset) + # record mean of train loss and test loss per epoch + train_log.append(float(np.array(mean_train_loss).item(0))) + test_log.append(float(np.array(mean_test_loss).item(0))) + return params, train_log, test_log def save_params(params: optax.Params, save_dir): @@ -124,12 +146,12 @@ def save_params(params: optax.Params, save_dir): pickle.dump(params, f) -def plot_result(params: optax.Params, X, Y, result_dir): +def plot_result(params: optax.Params, X, Y, result_dir, is_round_one_hot=False): for i in range(8): # 通常の局数分 log_score = [] log_pred = [] for j in range(60): - x = jnp.array(_create_data_for_plot(j * 1000, i)) + x = jnp.array(_create_data_for_plot(j * 1000, i, is_round_one_hot)) pred = net(x, params) log_score.append(j * 1000) log_pred.append(pred * 100) @@ -139,8 +161,13 @@ def plot_result(params: optax.Params, X, Y, result_dir): plt.savefig(save_dir) -def _create_data_for_plot(score, round) -> List: +def _create_data_for_plot(score, round, is_round_one_hot) -> List: scores = [score / 100000] + [(100000 - score) / 300000] * 3 wind = [1, 0, 0, 0] oya = [1, 0, 0, 0] - return scores + wind + oya + [round / 7, 0, 0] + if is_round_one_hot: + rounds = [0] * 8 + rounds[round] = 1 + return scores + wind + oya + rounds + [0, 0] + else: + return scores + wind + oya + [round / 7, 0, 0] diff --git a/workspace/suphnx-reward-shaping/utils.py b/workspace/suphnx-reward-shaping/utils.py index 94d17196..ee32dea0 100644 --- a/workspace/suphnx-reward-shaping/utils.py +++ 
b/workspace/suphnx-reward-shaping/utils.py @@ -100,7 +100,9 @@ def to_feature(states: List[mjxproto.State], target, is_round_one_hot=False) -> def to_final_game_reward(states: List[mjxproto.State], target) -> List: - """ """ + """ + 順位点. + """ final_state = states[-1] final_scores = final_state.round_terminal.final_score.tens target_score = final_scores[target] From 992b01e20ed1ad93455e02359b8bc12c715d1efa Mon Sep 17 00:00:00 2001 From: nissymori Date: Thu, 8 Sep 2022 19:12:33 +0900 Subject: [PATCH 05/12] fix --- workspace/suphnx-reward-shaping/train.py | 23 +++++++++++--- .../suphnx-reward-shaping/train_helper.py | 5 ++- workspace/suphnx-reward-shaping/utils.py | 31 ++++++++++++++----- 3 files changed, 45 insertions(+), 14 deletions(-) diff --git a/workspace/suphnx-reward-shaping/train.py b/workspace/suphnx-reward-shaping/train.py index ea19909e..c7a8167d 100644 --- a/workspace/suphnx-reward-shaping/train.py +++ b/workspace/suphnx-reward-shaping/train.py @@ -24,15 +24,28 @@ parser.add_argument("batch_size", help="Enter batch_size", type=int) parser.add_argument("is_round_one_hot", nargs="?", default="0") parser.add_argument("--use_saved_data", nargs="?", default="0") + parser.add_argument("--round_candidates", type=int, default=None) args = parser.parse_args() if args.use_saved_data == "0": - X, Y = to_data(mjxprotp_dir) - jnp.save(os.path.join(result_dir, "features"), X) - jnp.save(os.path.join(result_dir, "labels"), Y) + X, Y = to_data(mjxprotp_dir, round_candidates=[args.round_candidates]) + if args.round_candidates: + jnp.save(os.path.join(result_dir, "features" + str(args.round_candidates)), X) + jnp.save(os.path.join(result_dir, "labels" + str(args.round_candidates)), Y) + else: + jnp.save(os.path.join(result_dir, "features"), X) + jnp.save(os.path.join(result_dir, "labels"), Y) else: - X: jnp.ndarray = jnp.load(os.path.join(result_dir, "features.npy")) - Y: jnp.ndarray = jnp.load(os.path.join(result_dir, "labels.npy")) + if args.round_candidates: + X: 
jnp.ndarray = jnp.load( + os.path.join(result_dir, "features" + str(args.round_candidates) + ".npy") + ) + Y: jnp.ndarray = jnp.load( + os.path.join(result_dir, "labels" + str(args.round_candidates) + ".npy") + ) + else: + X: jnp.ndarray = jnp.load(os.path.join(result_dir, "features.npy")) + Y: jnp.ndarray = jnp.load(os.path.join(result_dir, "labels.npy")) train_x = X[: math.floor(len(X) * 0.8)] train_y = Y[: math.floor(len(X) * 0.8)] diff --git a/workspace/suphnx-reward-shaping/train_helper.py b/workspace/suphnx-reward-shaping/train_helper.py index 22264fa2..4d57a9e5 100644 --- a/workspace/suphnx-reward-shaping/train_helper.py +++ b/workspace/suphnx-reward-shaping/train_helper.py @@ -147,6 +147,8 @@ def save_params(params: optax.Params, save_dir): def plot_result(params: optax.Params, X, Y, result_dir, is_round_one_hot=False): + fig = plt.figure(figsize=(10, 5)) + axes = fig.subplots(1, 2) for i in range(8): # 通常の局数分 log_score = [] log_pred = [] @@ -155,7 +157,8 @@ def plot_result(params: optax.Params, X, Y, result_dir, is_round_one_hot=False): pred = net(x, params) log_score.append(j * 1000) log_pred.append(pred * 100) - plt.plot(log_score, log_pred, label="round_" + str(i)) + axes[0].plot(log_score, log_pred, label="round_" + str(i)) + axes[1].plot(log_score, log_pred, ".", label="round_" + str(i)) plt.legend() save_dir = os.path.join(result_dir, "prediction_at_round" + str(i) + ".png") plt.savefig(save_dir) diff --git a/workspace/suphnx-reward-shaping/utils.py b/workspace/suphnx-reward-shaping/utils.py index ee32dea0..5cc2e923 100644 --- a/workspace/suphnx-reward-shaping/utils.py +++ b/workspace/suphnx-reward-shaping/utils.py @@ -2,7 +2,7 @@ import os import random import sys -from typing import Dict, Iterator, List, Tuple +from typing import Dict, Iterator, List, Optional, Tuple import jax import jax.numpy as jnp @@ -16,7 +16,9 @@ game_rewards = [90, 45, 0, -135] -def to_data(mjxprotp_dir: str) -> Tuple[jnp.ndarray, jnp.ndarray]: +def to_data( + mjxprotp_dir: 
str, round_candidates: Optional[List[int]] = None +) -> Tuple[jnp.ndarray, jnp.ndarray]: """ jsonが入っているディレクトリを引数としてjax.numpyのデータセットを作る. """ @@ -30,19 +32,27 @@ def to_data(mjxprotp_dir: str) -> Tuple[jnp.ndarray, jnp.ndarray]: _dicts = [json.loads(round) for round in lines] states = [json_format.ParseDict(d, mjxproto.State()) for d in _dicts] target: int = random.randint(0, 3) - features.append(to_feature(states, target)) + features.append(to_feature(states, target, round_candidates=round_candidates)) scores.append(to_final_game_reward(states, target)) features_array: jnp.ndarray = jnp.array(features) scores_array: jnp.ndarray = jnp.array(scores) return features_array, scores_array -def _select_one_round(states: List[mjxproto.State]) -> mjxproto.State: +def _select_one_round( + states: List[mjxproto.State], candidates: Optional[List[int]] = None +) -> mjxproto.State: """ データセットに本質的で無い相関が生まれることを防ぐために一半荘につき1ペアのみを使う. """ - idx: int = random.randint(0, len(states) - 1) - return states[idx] + if candidates: + if min(candidates) > len(states) - 1: # 候補のと対応する局がない場合, 一番近いものを返す. 
+ return states[len(states) - 1] + idx = random.choice(candidates) + return states[idx] + else: + idx: int = random.randint(0, len(states) - 1) + return states[idx] def _calc_curr_pos(init_pos: int, round: int) -> int: @@ -78,11 +88,16 @@ def _preprocess_scores(scores, target: int) -> List: return [_self, _left, _front, _right] -def to_feature(states: List[mjxproto.State], target, is_round_one_hot=False) -> List: +def to_feature( + states: List[mjxproto.State], + target, + is_round_one_hot=False, + round_candidates: Optional[List[int]] = None, +) -> List: """ 特徴量 = [4playerの点数, 自風:one-hot, 親:one-hot, 局, 本場, 詰み棒] """ - state = _select_one_round(states) + state = _select_one_round(states, candidates=round_candidates) scores: List = _preprocess_scores(state.round_terminal.final_score.tens, target) honba: int = state.round_terminal.final_score.honba tsumibo: int = state.round_terminal.final_score.riichi From 8d35afd4c4e045a10bbc5512bf829fbcf2a76a5d Mon Sep 17 00:00:00 2001 From: nissymori Date: Thu, 8 Sep 2022 19:53:04 +0900 Subject: [PATCH 06/12] add readme --- workspace/suphnx-reward-shaping/README.md | 30 +++++++++++++++++++++++ workspace/suphnx-reward-shaping/train.py | 11 +++++++++ 2 files changed, 41 insertions(+) create mode 100644 workspace/suphnx-reward-shaping/README.md diff --git a/workspace/suphnx-reward-shaping/README.md b/workspace/suphnx-reward-shaping/README.md new file mode 100644 index 00000000..53ef36ea --- /dev/null +++ b/workspace/suphnx-reward-shaping/README.md @@ -0,0 +1,30 @@ +## Suphx-like reward shaping + +## How to train the model + +Prepare the directories for data and result under this directory. After that, we can train the model through the CLI. + +``` +$python train.py 0.001 10 16 --use_saved_data 0 --data_path resources/mjxproto --result_path result +``` + +Here is the information about the arguments. + +The first three are learning rate, epochs, batch size respectively. 
+ +`--use_saved_data` 0 means that the dataset is rebuilt from the mjxproto data under `--data_path`; any other value means that previously saved data is reused. The default is 0. + +`--round_candidates` Specifies which rounds are used for training. + +`--data_path` Please specify the data path. + +`--result_path` Please specify the result path. + + + + + + + + + diff --git a/workspace/suphnx-reward-shaping/train.py b/workspace/suphnx-reward-shaping/train.py index c7a8167d..9ff1867f 100644 --- a/workspace/suphnx-reward-shaping/train.py +++ b/workspace/suphnx-reward-shaping/train.py @@ -25,8 +25,19 @@ parser.add_argument("is_round_one_hot", nargs="?", default="0") parser.add_argument("--use_saved_data", nargs="?", default="0") parser.add_argument("--round_candidates", type=int, default=None) + parser.add_argument("--data_path", default="resources/mjxproto") + parser.add_argument("--result_path", default="result") args = parser.parse_args() + + mjxprotp_dir = os.path.join( + os.path.dirname(os.path.abspath(__file__)), args.data_path + ) # please specify your mjxproto dir + + result_dir = os.path.join( + os.path.dirname(os.path.abspath(__file__)), args.result_path + ) # please specify your mjxproto dir + if args.use_saved_data == "0": X, Y = to_data(mjxprotp_dir, round_candidates=[args.round_candidates]) if args.round_candidates: From 3e4f326daf82ff6bb464071b8cb5ab2f5080dac9 Mon Sep 17 00:00:00 2001 From: nissymori Date: Thu, 8 Sep 2022 20:00:30 +0900 Subject: [PATCH 07/12] fix --- workspace/suphnx-reward-shaping/train.py | 15 ++------------- 1 file changed, 2 insertions(+), 13 deletions(-) diff --git a/workspace/suphnx-reward-shaping/train.py b/workspace/suphnx-reward-shaping/train.py index 9ff1867f..35bbff68 100644 --- a/workspace/suphnx-reward-shaping/train.py +++ b/workspace/suphnx-reward-shaping/train.py @@ -10,13 +10,6 @@ from train_helper import initializa_params, plot_result, save_params, train from utils import to_data -mjxprotp_dir = os.path.join( - os.path.dirname(os.path.abspath(__file__)), "resources/mjxproto" -) # 
please specify your mjxproto dir - -result_dir = os.path.join(os.pardir, "suphnx-reward-shaping/result") - - if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument("lr", help="Enter learning rate", type=float) @@ -30,13 +23,9 @@ args = parser.parse_args() - mjxprotp_dir = os.path.join( - os.path.dirname(os.path.abspath(__file__)), args.data_path - ) # please specify your mjxproto dir + mjxprotp_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), args.data_path) - result_dir = os.path.join( - os.path.dirname(os.path.abspath(__file__)), args.result_path - ) # please specify your mjxproto dir + result_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), args.result_path) if args.use_saved_data == "0": X, Y = to_data(mjxprotp_dir, round_candidates=[args.round_candidates]) From 77704e97b69462dc76a64f895bf613ef31d7d699 Mon Sep 17 00:00:00 2001 From: nissymori Date: Thu, 15 Sep 2022 17:51:22 +0900 Subject: [PATCH 08/12] fix features --- workspace/suphnx-reward-shaping/train.py | 2 +- .../suphnx-reward-shaping/train_helper.py | 8 ++++-- workspace/suphnx-reward-shaping/utils.py | 28 +++++++++++++++---- 3 files changed, 29 insertions(+), 9 deletions(-) diff --git a/workspace/suphnx-reward-shaping/train.py b/workspace/suphnx-reward-shaping/train.py index 35bbff68..6fcd9d1c 100644 --- a/workspace/suphnx-reward-shaping/train.py +++ b/workspace/suphnx-reward-shaping/train.py @@ -73,4 +73,4 @@ save_params(params, result_dir) - plot_result(params, X, Y, result_dir) + plot_result(params, X, Y, result_dir, round_candidates=[args.round_candidates]) diff --git a/workspace/suphnx-reward-shaping/train_helper.py b/workspace/suphnx-reward-shaping/train_helper.py index 4d57a9e5..437de3a5 100644 --- a/workspace/suphnx-reward-shaping/train_helper.py +++ b/workspace/suphnx-reward-shaping/train_helper.py @@ -146,10 +146,14 @@ def save_params(params: optax.Params, save_dir): pickle.dump(params, f) -def plot_result(params: optax.Params, X, Y, 
result_dir, is_round_one_hot=False): +def plot_result( + params: optax.Params, X, Y, result_dir, is_round_one_hot=False, round_candidates=None +): fig = plt.figure(figsize=(10, 5)) axes = fig.subplots(1, 2) - for i in range(8): # 通常の局数分 + if not round_candidates: + round_candidates = [i for i in range(8)] + for i in round_candidates: # 通常の局数分 log_score = [] log_pred = [] for j in range(60): diff --git a/workspace/suphnx-reward-shaping/utils.py b/workspace/suphnx-reward-shaping/utils.py index 5cc2e923..57ab4fbe 100644 --- a/workspace/suphnx-reward-shaping/utils.py +++ b/workspace/suphnx-reward-shaping/utils.py @@ -61,6 +61,15 @@ def _calc_curr_pos(init_pos: int, round: int) -> int: return pos +def _calc_wind(init_pos: int, round: int) -> int: + pos = (init_pos + round) % 4 + if pos == 1: + return 3 + if pos == 3: + return 1 + return pos + + def _to_one_hot(total_num: int, idx: int) -> List[int]: _l = [0] * total_num _l[idx] = 1 @@ -88,6 +97,10 @@ def _preprocess_scores(scores, target: int) -> List: return [_self, _left, _front, _right] +def _remaining_oya(round: int): # 局終了時の残りの親の数 + return [2 - (round // 4 + ((round % 4) >= i)) for i in range(4)] + + def to_feature( states: List[mjxproto.State], target, @@ -95,22 +108,25 @@ def to_feature( round_candidates: Optional[List[int]] = None, ) -> List: """ - 特徴量 = [4playerの点数, 自風:one-hot, 親:one-hot, 局, 本場, 詰み棒] + 特徴量 = [4playerの点数, 起家の風:one-hot, 親:one-hot, 残りの親の数, 局, 本場, 詰み棒] """ state = _select_one_round(states, candidates=round_candidates) - scores: List = _preprocess_scores(state.round_terminal.final_score.tens, target) + scores: List = [i / 100000 for i in state.round_terminal.final_score.tens] honba: int = state.round_terminal.final_score.honba tsumibo: int = state.round_terminal.final_score.riichi round: int = _clip_round(state.round_terminal.final_score.round) - wind: List[int] = _to_one_hot(4, _calc_curr_pos(target, round)) + wind: List[int] = _to_one_hot(4, _calc_wind(0, round)) # 起家の風のみを入力 oya: List[int] = 
_to_one_hot(4, _calc_curr_pos(0, round)) + remainning_oya = _remaining_oya(round) if is_round_one_hot: one_hot_round: List[int] = _to_one_hot(8, round) feature = ( - scores + wind + oya + one_hot_round + [honba / 4, tsumibo / 4] - ) # len(feature) = 22 + scores + wind + oya + remainning_oya + one_hot_round + [honba / 4, tsumibo / 4] + ) # len(feature) = 26 else: - feature = scores + wind + oya + [round / 7, honba / 4, tsumibo / 4] # len(feature) = 15 + feature = ( + scores + wind + oya + remainning_oya + [round / 7, honba / 4, tsumibo / 4] + ) # len(feature) = 19 return feature From 9bd627af06d6bf739787d5b78412e14ff9cf1583 Mon Sep 17 00:00:00 2001 From: nissymori Date: Thu, 15 Sep 2022 17:54:54 +0900 Subject: [PATCH 09/12] fix --- workspace/suphnx-reward-shaping/utils.py | 16 +++++++--------- 1 file changed, 7 insertions(+), 9 deletions(-) diff --git a/workspace/suphnx-reward-shaping/utils.py b/workspace/suphnx-reward-shaping/utils.py index 57ab4fbe..868134de 100644 --- a/workspace/suphnx-reward-shaping/utils.py +++ b/workspace/suphnx-reward-shaping/utils.py @@ -31,9 +31,9 @@ def to_data( lines = f.readlines() _dicts = [json.loads(round) for round in lines] states = [json_format.ParseDict(d, mjxproto.State()) for d in _dicts] - target: int = random.randint(0, 3) - features.append(to_feature(states, target, round_candidates=round_candidates)) - scores.append(to_final_game_reward(states, target)) + + features.append(to_feature(states, round_candidates=round_candidates)) + scores.append(to_final_game_reward(states)) features_array: jnp.ndarray = jnp.array(features) scores_array: jnp.ndarray = jnp.array(scores) return features_array, scores_array @@ -103,7 +103,6 @@ def _remaining_oya(round: int): # 局終了時の残りの親の数 def to_feature( states: List[mjxproto.State], - target, is_round_one_hot=False, round_candidates: Optional[List[int]] = None, ) -> List: @@ -130,13 +129,12 @@ def to_feature( return feature -def to_final_game_reward(states: List[mjxproto.State], target) -> List: 
+def to_final_game_reward(states: List[mjxproto.State]) -> List: """ - 順位点. + 順位点. 起家から順番に. 4次元. """ final_state = states[-1] final_scores = final_state.round_terminal.final_score.tens - target_score = final_scores[target] sorted_scores = sorted(final_scores, reverse=True) - rank = sorted_scores.index(target_score) - return [game_rewards[rank] / 100] + ranks = [sorted_scores.index(final_scores[i]) for i in range(4)] + return [game_rewards[i] / 100 for i in ranks] From 55cf17c7e882a173883af15812d77f874f9ec233 Mon Sep 17 00:00:00 2001 From: nissymori Date: Fri, 16 Sep 2022 00:04:59 +0900 Subject: [PATCH 10/12] fix --- .gitignore | 2 +- workspace/.DS_Store | Bin 6148 -> 6148 bytes .../tests/test_train_helper.py | 13 +++++++-- .../suphnx-reward-shaping/tests/test_utils.py | 27 +++++++++++++++--- workspace/suphnx-reward-shaping/train.py | 6 ++-- workspace/suphnx-reward-shaping/utils.py | 15 ++++++---- 6 files changed, 46 insertions(+), 17 deletions(-) diff --git a/.gitignore b/.gitignore index f3ff48bc..b73a88ba 100644 --- a/.gitignore +++ b/.gitignore @@ -27,4 +27,4 @@ workspace/suphnx-reward-shaping/trained_model/* workspace/suphnx-reward-shaping/result/* .DS_Store .vscode/ -.python_versions +.python_version diff --git a/workspace/.DS_Store b/workspace/.DS_Store index 304a472acf898c08aaed3fe1a50add69c96672e5..13486b380e70182d3e804578f1c12d2f327a7ee3 100644 GIT binary patch delta 164 zcmZoMXfc=|#>B!ku~2NHo+2ar#(>?7iyN4k7}+QPWzySh#iYR~kdtm0oSdIqzyJj4 zPxB){G9tP8E-pzq`AI-Aj(#CK?Nyma9H9~^SS17sG7!dOKM!C4Tad)OlyNgV2R{eU b4Vw#@zcWwf7jfiZWME(d*|s@CWDPR_!=Nt& delta 71 zcmZoMXfc=|#>B)qu~2NHo+2aj#(>?7jLeh&vgmELV$opSoW!=2abW}VW_AvK4xqBl Zf*jwOC-aLqaxee^BLf4=<_M8B%mAo65f}gf diff --git a/workspace/suphnx-reward-shaping/tests/test_train_helper.py b/workspace/suphnx-reward-shaping/tests/test_train_helper.py index 24a9fc93..c7260aa4 100644 --- a/workspace/suphnx-reward-shaping/tests/test_train_helper.py +++ b/workspace/suphnx-reward-shaping/tests/test_train_helper.py @@ -6,11 +6,11 @@ 
import optax sys.path.append("../") -from train_helper import initializa_params, net, plot_result, save_params, train +from train_helper import initializa_params, loss, net, plot_result, save_params, train from utils import to_data -layer_sizes = [3, 4, 5, 1] -feature_size = 15 +layer_sizes = [3, 4, 5, 4] +feature_size = 19 seed = jax.random.PRNGKey(42) save_dir = os.path.join(os.pardir, "trained_model/test_param.pickle") result_dir = os.path.join(os.pardir, "result") @@ -55,5 +55,12 @@ def test_net(): print(net(features[0], params), features, params) +def test_loss(): + params = initializa_params(layer_sizes, feature_size, seed) + features, scores = to_data(mjxprotp_dir) + print(loss(params, features, scores)) + + if __name__ == "__main__": test_net() + test_loss() diff --git a/workspace/suphnx-reward-shaping/tests/test_utils.py b/workspace/suphnx-reward-shaping/tests/test_utils.py index 1dae8d70..e61a98d6 100644 --- a/workspace/suphnx-reward-shaping/tests/test_utils.py +++ b/workspace/suphnx-reward-shaping/tests/test_utils.py @@ -8,7 +8,7 @@ import mjxproto sys.path.append("../") -from utils import _preprocess_scores, to_data, to_final_game_reward +from utils import _calc_wind, _preprocess_scores, to_data, to_final_game_reward mjxprotp_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), "resources") @@ -22,9 +22,28 @@ def test_preprocess(): assert _preprocess_scores(scores, 3) == [3, 2, 1, 0] +def test_calc_wind(): + assert _calc_wind(1, 0) == 1 + assert _calc_wind(1, 3) == 2 + + +def test_to_final_game_reward(): + _dir = os.path.join(mjxprotp_dir, os.listdir(mjxprotp_dir)[0]) + with open(_dir, "r") as f: + lines = f.readlines() + _dicts = [json.loads(round) for round in lines] + states = [json_format.ParseDict(d, mjxproto.State()) for d in _dicts] + assert to_final_game_reward(states) == [0.9, 0.0, -1.35, 0.45] + + def test_to_data(): num_resources = len(os.listdir(mjxprotp_dir)) features, scores = to_data(mjxprotp_dir) - print(features) - assert 
features.shape == (num_resources, 15) - assert scores.shape == (num_resources, 1) + assert features.shape == (num_resources, 19) + assert scores.shape == (num_resources, 4) + + +if __name__ == "__main__": + test_to_data() + test_to_final_game_reward() + test_calc_wind() diff --git a/workspace/suphnx-reward-shaping/train.py b/workspace/suphnx-reward-shaping/train.py index 6fcd9d1c..30b44242 100644 --- a/workspace/suphnx-reward-shaping/train.py +++ b/workspace/suphnx-reward-shaping/train.py @@ -52,13 +52,13 @@ test_x = X[math.floor(len(X) * 0.8) :] test_y = Y[math.floor(len(X) * 0.8) :] - layer_size = [32, 32, 1] + layer_size = [32, 32, 4] seed = jax.random.PRNGKey(42) if args.is_round_one_hot == "0": - params = initializa_params(layer_size, 15, seed) + params = initializa_params(layer_size, 19, seed) else: - params = initializa_params(layer_size, 22, seed) # featureでroundがone-hotになっている. + params = initializa_params(layer_size, 26, seed) # featureでroundがone-hotになっている. optimizer = optax.adam(learning_rate=args.lr) diff --git a/workspace/suphnx-reward-shaping/utils.py b/workspace/suphnx-reward-shaping/utils.py index 868134de..9c51c644 100644 --- a/workspace/suphnx-reward-shaping/utils.py +++ b/workspace/suphnx-reward-shaping/utils.py @@ -17,7 +17,7 @@ def to_data( - mjxprotp_dir: str, round_candidates: Optional[List[int]] = None + mjxprotp_dir: str, round_candidates: Optional[List[int]] = None, model=None, use_model=False ) -> Tuple[jnp.ndarray, jnp.ndarray]: """ jsonが入っているディレクトリを引数としてjax.numpyのデータセットを作る. 
@@ -31,9 +31,12 @@ def to_data( lines = f.readlines() _dicts = [json.loads(round) for round in lines] states = [json_format.ParseDict(d, mjxproto.State()) for d in _dicts] - - features.append(to_feature(states, round_candidates=round_candidates)) - scores.append(to_final_game_reward(states)) + features = to_feature(states, round_candidates=round_candidates) + features.append(features) + if use_model: + scores.append(model(jnp.array(features))) + else: + scores.append(to_final_game_reward(states)) features_array: jnp.ndarray = jnp.array(features) scores_array: jnp.ndarray = jnp.array(scores) return features_array, scores_array @@ -56,13 +59,13 @@ def _select_one_round( def _calc_curr_pos(init_pos: int, round: int) -> int: - pos = (init_pos + round) % 4 + pos = (-init_pos + round) % 4 assert 0 <= pos <= 3 return pos def _calc_wind(init_pos: int, round: int) -> int: - pos = (init_pos + round) % 4 + pos = (-init_pos + round) % 4 if pos == 1: return 3 if pos == 3: From 0b89531d25f253089674a12145e245ad317d865f Mon Sep 17 00:00:00 2001 From: nissymori Date: Fri, 16 Sep 2022 19:11:01 +0900 Subject: [PATCH 11/12] fix typo --- .gitignore | 6 +++--- .../README.md | 0 workspace/suphx-reward-shaping/tests/.python-version | 1 + .../tests/resources/2022060100gm-00a9-0000-3e8b8aaf.json | 0 .../tests/resources/2022060100gm-00a9-0000-3ffa4858.json | 0 .../tests/resources/2022060100gm-00a9-0000-6db179be.json | 0 .../tests/resources/2022060100gm-00a9-0000-7c8869db.json | 0 .../tests/test_train_helper.py | 0 .../tests/test_utils.py | 0 .../train.py | 0 .../train_helper.py | 0 .../utils.py | 0 12 files changed, 4 insertions(+), 3 deletions(-) rename workspace/{suphnx-reward-shaping => suphx-reward-shaping}/README.md (100%) create mode 100644 workspace/suphx-reward-shaping/tests/.python-version rename workspace/{suphnx-reward-shaping => suphx-reward-shaping}/tests/resources/2022060100gm-00a9-0000-3e8b8aaf.json (100%) rename workspace/{suphnx-reward-shaping => 
suphx-reward-shaping}/tests/resources/2022060100gm-00a9-0000-3ffa4858.json (100%) rename workspace/{suphnx-reward-shaping => suphx-reward-shaping}/tests/resources/2022060100gm-00a9-0000-6db179be.json (100%) rename workspace/{suphnx-reward-shaping => suphx-reward-shaping}/tests/resources/2022060100gm-00a9-0000-7c8869db.json (100%) rename workspace/{suphnx-reward-shaping => suphx-reward-shaping}/tests/test_train_helper.py (100%) rename workspace/{suphnx-reward-shaping => suphx-reward-shaping}/tests/test_utils.py (100%) rename workspace/{suphnx-reward-shaping => suphx-reward-shaping}/train.py (100%) rename workspace/{suphnx-reward-shaping => suphx-reward-shaping}/train_helper.py (100%) rename workspace/{suphnx-reward-shaping => suphx-reward-shaping}/utils.py (100%) diff --git a/.gitignore b/.gitignore index b73a88ba..751f4b68 100644 --- a/.gitignore +++ b/.gitignore @@ -22,9 +22,9 @@ dist .cache .ipynb_checkpoints -workspace/suphnx-reward-shaping/resources/* -workspace/suphnx-reward-shaping/trained_model/* -workspace/suphnx-reward-shaping/result/* +workspace/suphx-reward-shaping/resources/* +workspace/suphx-reward-shaping/trained_model/* +workspace/suphx-reward-shaping/result/* .DS_Store .vscode/ .python_version diff --git a/workspace/suphnx-reward-shaping/README.md b/workspace/suphx-reward-shaping/README.md similarity index 100% rename from workspace/suphnx-reward-shaping/README.md rename to workspace/suphx-reward-shaping/README.md diff --git a/workspace/suphx-reward-shaping/tests/.python-version b/workspace/suphx-reward-shaping/tests/.python-version new file mode 100644 index 00000000..c1e43e6d --- /dev/null +++ b/workspace/suphx-reward-shaping/tests/.python-version @@ -0,0 +1 @@ +3.7.3 diff --git a/workspace/suphnx-reward-shaping/tests/resources/2022060100gm-00a9-0000-3e8b8aaf.json b/workspace/suphx-reward-shaping/tests/resources/2022060100gm-00a9-0000-3e8b8aaf.json similarity index 100% rename from 
workspace/suphnx-reward-shaping/tests/resources/2022060100gm-00a9-0000-3e8b8aaf.json rename to workspace/suphx-reward-shaping/tests/resources/2022060100gm-00a9-0000-3e8b8aaf.json diff --git a/workspace/suphnx-reward-shaping/tests/resources/2022060100gm-00a9-0000-3ffa4858.json b/workspace/suphx-reward-shaping/tests/resources/2022060100gm-00a9-0000-3ffa4858.json similarity index 100% rename from workspace/suphnx-reward-shaping/tests/resources/2022060100gm-00a9-0000-3ffa4858.json rename to workspace/suphx-reward-shaping/tests/resources/2022060100gm-00a9-0000-3ffa4858.json diff --git a/workspace/suphnx-reward-shaping/tests/resources/2022060100gm-00a9-0000-6db179be.json b/workspace/suphx-reward-shaping/tests/resources/2022060100gm-00a9-0000-6db179be.json similarity index 100% rename from workspace/suphnx-reward-shaping/tests/resources/2022060100gm-00a9-0000-6db179be.json rename to workspace/suphx-reward-shaping/tests/resources/2022060100gm-00a9-0000-6db179be.json diff --git a/workspace/suphnx-reward-shaping/tests/resources/2022060100gm-00a9-0000-7c8869db.json b/workspace/suphx-reward-shaping/tests/resources/2022060100gm-00a9-0000-7c8869db.json similarity index 100% rename from workspace/suphnx-reward-shaping/tests/resources/2022060100gm-00a9-0000-7c8869db.json rename to workspace/suphx-reward-shaping/tests/resources/2022060100gm-00a9-0000-7c8869db.json diff --git a/workspace/suphnx-reward-shaping/tests/test_train_helper.py b/workspace/suphx-reward-shaping/tests/test_train_helper.py similarity index 100% rename from workspace/suphnx-reward-shaping/tests/test_train_helper.py rename to workspace/suphx-reward-shaping/tests/test_train_helper.py diff --git a/workspace/suphnx-reward-shaping/tests/test_utils.py b/workspace/suphx-reward-shaping/tests/test_utils.py similarity index 100% rename from workspace/suphnx-reward-shaping/tests/test_utils.py rename to workspace/suphx-reward-shaping/tests/test_utils.py diff --git a/workspace/suphnx-reward-shaping/train.py 
b/workspace/suphx-reward-shaping/train.py similarity index 100% rename from workspace/suphnx-reward-shaping/train.py rename to workspace/suphx-reward-shaping/train.py diff --git a/workspace/suphnx-reward-shaping/train_helper.py b/workspace/suphx-reward-shaping/train_helper.py similarity index 100% rename from workspace/suphnx-reward-shaping/train_helper.py rename to workspace/suphx-reward-shaping/train_helper.py diff --git a/workspace/suphnx-reward-shaping/utils.py b/workspace/suphx-reward-shaping/utils.py similarity index 100% rename from workspace/suphnx-reward-shaping/utils.py rename to workspace/suphx-reward-shaping/utils.py From af20f238dcf61a1d40d66ac8c525495b63f75222 Mon Sep 17 00:00:00 2001 From: nissymori Date: Fri, 16 Sep 2022 19:11:33 +0900 Subject: [PATCH 12/12] fix --- workspace/suphx-reward-shaping/tests/.python-version | 1 - 1 file changed, 1 deletion(-) delete mode 100644 workspace/suphx-reward-shaping/tests/.python-version diff --git a/workspace/suphx-reward-shaping/tests/.python-version b/workspace/suphx-reward-shaping/tests/.python-version deleted file mode 100644 index c1e43e6d..00000000 --- a/workspace/suphx-reward-shaping/tests/.python-version +++ /dev/null @@ -1 +0,0 @@ -3.7.3