diff --git a/Tizen.platform/training_offloading_service/gen_training_offloading_service_rpm.sh b/Tizen.platform/training_offloading_service/gen_training_offloading_service_rpm.sh
new file mode 100755
index 00000000..691e0299
--- /dev/null
+++ b/Tizen.platform/training_offloading_service/gen_training_offloading_service_rpm.sh
@@ -0,0 +1,8 @@
+#!/usr/bin/env bash
+
+git init
+git add *
+git commit -m 'Initial commit'
+#gbs build -A armv7l --include-all --clean
+gbs build --include-all -A armv7l -P public_9.0_arm
+rm -rf .git
diff --git a/Tizen.platform/training_offloading_service/main.c b/Tizen.platform/training_offloading_service/main.c
new file mode 100644
index 00000000..4c69e316
--- /dev/null
+++ b/Tizen.platform/training_offloading_service/main.c
@@ -0,0 +1,260 @@
+/**
+ * @file main.c
+ * @date 2 May 2024
+ * @brief Training offloading service app.
+ * @see https://github.com/nnstreamer/nnstreamer
+ * @author
+ * @bug No known bugs.
+ */
+
+#include <glib.h>
+#include <stdio.h>
+#include <string.h>
+#include <unistd.h>
+#include <ml-api-service.h>
+
+#define MAX_STRING_LEN 2048
+
+enum {
+  CURRENT_STATUS_MAINMENU,
+  CURRENT_STATUS_INPUT_FILENAME,
+  CURRENT_STATUS_INPUT_RW_PATH,
+};
+
+int g_menu_state = CURRENT_STATUS_MAINMENU;
+GMainLoop *loop;
+static gchar g_config_path[MAX_STRING_LEN];
+static gchar g_path[MAX_STRING_LEN];
+ml_service_h service_h;
+#define ML_SERVICE_EVENT_REPLY 4
+
+/**
+ * @brief Callback function for reply test.
+ */
+static void
+_receive_trained_model_cb (ml_service_event_e event, ml_information_h event_data, void *user_data)
+{
+  switch ((int) event) {
+    case ML_SERVICE_EVENT_REPLY:
+    {
+      g_warning ("Get ML_SERVICE_EVENT_REPLY and received trained model");
+      break;
+    }
+    default:
+      break;
+  }
+}
+
+/**
+ * @brief Callback function for sink node.
+ */
+static void
+_sink_register_cb (ml_service_event_e event, ml_information_h event_data, void *user_data)
+{
+  ml_tensors_data_h data = NULL;
+  double *output = NULL;
+  size_t data_size = 0;
+  int status, i;
+  double result_data[4];
+  char *output_node_name = NULL;
+
+  switch (event) {
+    case ML_SERVICE_EVENT_NEW_DATA:
+      g_return_if_fail (event_data != NULL);
+
+      status = ml_information_get (event_data, "name", (void **) &output_node_name);
+      if (status != ML_ERROR_NONE)
+        return;
+      if (!output_node_name)
+        return;
+
+      status = ml_information_get (event_data, "data", &data);
+      if (status != ML_ERROR_NONE)
+        return;
+
+      status = ml_tensors_data_get_tensor_data (data, 0, (void **) &output, &data_size);
+      if (status != ML_ERROR_NONE)
+        return;
+      break;
+    default:
+      break;
+  }
+
+  if (output) {
+    for (i = 0; i < 4; i++)
+      result_data[i] = output[i];
+
+    g_print ("name:%s >> [training_loss: %f, training_accuracy: %f, validation_loss: %f, validation_accuracy: %f]\n",
+        output_node_name, result_data[0], result_data[1], result_data[2],
+        result_data[3]);
+  }
+}
+
+/**
+ * @brief Copy the given path into the destination buffer after checking its length.
+ */
+static void
+input_filepath (gchar *dest, gchar *path)
+{
+  gsize len = 0;
+  gsize size = 0;
+  g_return_if_fail (path != NULL);
+
+  len = strlen (path);
+  if (len == 0 || len > MAX_STRING_LEN - 1)
+    return;
+  size = g_strlcpy (dest, path, MAX_STRING_LEN);
+  if (len != size)
+    return;
+}
+
+/**
+ * @brief Print the main menu.
+ */
+void
+main_menu ()
+{
+  g_print ("\n");
+  g_print ("============================================================================\n");
+  g_print ("        Training Offloading Services Test (press q to quit)                \n");
+  g_print ("----------------------------------------------------------------------------\n");
+  g_print ("a. Create \n");
+  g_print ("b. Set R/W path \n");
+  g_print ("c. Set callback for receiver \n");
+  g_print ("d. Set callback for sender \n");
+  g_print ("e. Start \n");
+  g_print ("f. Stop \n");
+  g_print ("g. End \n");
+  g_print ("q. Quit \n");
+  g_print ("============================================================================\n");
+}
+
+/**
+ * @brief Quit the main loop.
+ */
+static void
+quit_program ()
+{
+  g_main_loop_quit (loop);
+}
+
+/**
+ * @brief Reset the menu state to the main menu.
+ */
+void
+reset_menu_status (void)
+{
+  g_menu_state = CURRENT_STATUS_MAINMENU;
+}
+
+/**
+ * @brief Display the menu for the current state.
+ */
+static void
+display_menu ()
+{
+  if (g_menu_state == CURRENT_STATUS_MAINMENU) {
+    main_menu ();
+  } else if (g_menu_state == CURRENT_STATUS_INPUT_FILENAME) {
+    g_print ("*** input config file path.\n");
+  } else if (g_menu_state == CURRENT_STATUS_INPUT_RW_PATH) {
+    g_print ("*** input path for reading or writing. e.g. /opt \n");
+  } else {
+    g_print ("*** Unknown status. \n");
+  }
+  g_print (" >>> ");
+}
+
+/**
+ * @brief Handle a command entered in the main menu.
+ */
+static void
+interpret_main_menu (char *cmd)
+{
+  int ret = ML_ERROR_NONE;
+
+  if (!g_strcmp0 (cmd, "a")) {
+    g_menu_state = CURRENT_STATUS_INPUT_FILENAME;
+  } else if (!g_strcmp0 (cmd, "b")) {
+    g_menu_state = CURRENT_STATUS_INPUT_RW_PATH;
+  } else if (!g_strcmp0 (cmd, "c")) {
+    ret = ml_service_set_event_cb (service_h, _sink_register_cb, NULL);
+  } else if (!g_strcmp0 (cmd, "d")) {
+    ret = ml_service_set_event_cb (service_h, _receive_trained_model_cb, NULL);
+  } else if (!g_strcmp0 (cmd, "e")) {
+    ret = ml_service_start (service_h);
+  } else if (!g_strcmp0 (cmd, "f")) {
+    ret = ml_service_stop (service_h);
+  } else if (!g_strcmp0 (cmd, "g")) {
+    ret = ml_service_destroy (service_h);
+  } else if (!g_strcmp0 (cmd, "q")) {
+    quit_program ();
+    return;
+  }
+
+  if (ret == ML_ERROR_NONE)
+    g_print ("*** Success *** \n");
+  else
+    g_print ("*** Failed *** \n");
+}
+
+/**
+ * @brief Timeout callback that redraws the menu once.
+ */
+gboolean
+timeout_menu_display (void *data)
+{
+  display_menu ();
+  return FALSE;
+}
+
+/**
+ * @brief Interpret user input according to the current menu state.
+ */
+static void
+interpret (gchar *cmd)
+{
+  switch (g_menu_state) {
+    case CURRENT_STATUS_MAINMENU:
+      interpret_main_menu (cmd);
+      break;
+    case CURRENT_STATUS_INPUT_FILENAME:
+      input_filepath (g_config_path, cmd);
+      reset_menu_status ();
+      g_print ("config path=%s\n", g_config_path);
+      if (ML_ERROR_NONE == ml_service_new (g_config_path, &service_h))
+        g_print ("Success to create service(handle=%p)\n", service_h);
+      else
+        g_print ("Failed to create service \n");
+      break;
+    case CURRENT_STATUS_INPUT_RW_PATH:
+      input_filepath (g_path, cmd);
+      reset_menu_status ();
+      if (ML_ERROR_NONE == ml_service_set_information (service_h, "path", g_path))
+        g_print ("Success to set path \n");
+      else
+        g_print ("Failed to set path \n");
+      break;
+    default:
+      break;
+  }
+  g_timeout_add (100, timeout_menu_display, 0);
+}
+
+/**
+ * @brief Read a command line from stdin and pass it to the interpreter.
+ */
+gboolean
+input (GIOChannel *channel)
+{
+  gchar buf[MAX_STRING_LEN];
+  ssize_t cnt;
+
+  memset (buf, 0, MAX_STRING_LEN);
+  cnt = read (0, (void *) buf, MAX_STRING_LEN);
+  if (cnt <= 0)
+    return TRUE;
+  buf[cnt - 1] = 0;
+
+  interpret (buf);
+
+  return TRUE;
+}
+
+/**
+ * @brief Main function.
+ */
+int
+main (int argc, char **argv)
+{
+  GIOChannel *stdin_channel;
+
+  stdin_channel = g_io_channel_unix_new (0);
+  g_io_channel_set_flags (stdin_channel, G_IO_FLAG_NONBLOCK, NULL);
+  g_io_add_watch (stdin_channel, G_IO_IN, (GIOFunc) input, NULL);
+
+  loop = g_main_loop_new (NULL, FALSE);
+
+  g_print ("running\n");
+  display_menu ();
+  g_main_loop_run (loop);
+  g_print ("exit training offloading services\n");
+  g_main_loop_unref (loop);
+
+  return 0;
+}
diff --git a/Tizen.platform/training_offloading_service/meson.build b/Tizen.platform/training_offloading_service/meson.build
new file mode 100644
index 00000000..61b5eda5
--- /dev/null
+++ b/Tizen.platform/training_offloading_service/meson.build
@@ -0,0 +1,30 @@
+project('nnstreamer-example', 'c', 'cpp',
+  version: '0.1.0',
+  license: ['LGPL-2.1'],
+  meson_version: '>=0.50.0',
+  default_options: [
+    'werror=true',
+    'warning_level=1',
+    'c_std=gnu89',
+    'cpp_std=c++11'
+  ]
+)
+
+# Set install path
+nnst_exam_prefix = get_option('prefix')
+nnst_exam_bindir = join_paths(nnst_exam_prefix, get_option('bindir'))
+examples_install_dir = nnst_exam_bindir
+
+# Dependencies
+glib_dep = dependency('glib-2.0')
+nns_edge_dep = dependency('nnstreamer-edge', required: false)
+nns_capi_common_dep = dependency('capi-ml-common', required: false)
+ml_service_dep = dependency('capi-ml-service', required: false)
+
+executable('training_offloading_service',
+  'main.c',
+  dependencies: [glib_dep, nns_edge_dep, nns_capi_common_dep, ml_service_dep],
+  install: true,
+  install_dir: examples_install_dir
+)
+
diff --git a/Tizen.platform/training_offloading_service/packaging/training_offloading_service.manifest b/Tizen.platform/training_offloading_service/packaging/training_offloading_service.manifest
new file mode 100644
index 00000000..017d22d3
--- /dev/null
+++ b/Tizen.platform/training_offloading_service/packaging/training_offloading_service.manifest
@@ -0,0 +1,5 @@
+<manifest>
+  <request>
+    <domain name="_"/>
+  </request>
+</manifest>
diff --git a/Tizen.platform/training_offloading_service/packaging/training_offloading_service.spec b/Tizen.platform/training_offloading_service/packaging/training_offloading_service.spec
new file mode 100644
index 00000000..d068cd7e
--- /dev/null
+++ b/Tizen.platform/training_offloading_service/packaging/training_offloading_service.spec
@@ -0,0 +1,40 @@
+%define nnstexampledir /usr/lib/nnstreamer/bin
+
+Name:    training_offloading_service
+Summary: Training offloading service app
+Version: 1.0.0
+Release: 0
+Group:   Machine Learning/ML Framework
+Packager: Hyunil Park
+License: LGPL-2.1
+Source0: %{name}-%{version}.tar.gz
+Source1001: %{name}.manifest
+
+Requires: nnstreamer
+Requires: nnstreamer-edge
+BuildRequires: meson
+BuildRequires: pkgconfig(glib-2.0)
+BuildRequires: pkgconfig(capi-ml-service)
+
+%description
+Training offloading service sample app for the Tizen IoT platform.
+
+%prep
+%setup -q
+cp %{SOURCE1001} .
+
+%build
+mkdir -p build
+
+meson --buildtype=plain --prefix=%{_prefix} --libdir=%{_libdir} --bindir=%{nnstexampledir} --includedir=%{_includedir} build
+ninja -C build %{?_smp_mflags}
+
+%install
+DESTDIR=%{buildroot} ninja -C build install
+
+%files
+%manifest %{name}.manifest
+%defattr(-,root,root,-)
+%{nnstexampledir}/*
+
diff --git a/Tizen.platform/training_offloading_service/res/mnist.data b/Tizen.platform/training_offloading_service/res/mnist.data
new file mode 100644
index 00000000..df8456e5
Binary files /dev/null and b/Tizen.platform/training_offloading_service/res/mnist.data differ
diff --git a/Tizen.platform/training_offloading_service/res/mnist.ini b/Tizen.platform/training_offloading_service/res/mnist.ini
new file mode 100644
index 00000000..4dda4017
--- /dev/null
+++ b/Tizen.platform/training_offloading_service/res/mnist.ini
@@ -0,0 +1,76 @@
+# Network Section : Network
+
+[Model]
+Type = NeuralNetwork    # Network Type : Regression, KNN, NeuralNetwork
+Epochs = 10             # Epochs
+Loss = cross            # Loss function : mse (mean squared error)
+                        #                 cross (for cross entropy)
+#Save_Path = "mnist_model_91_59.bin"    # model path to save / read
+batch_size = 1          # batch size
+
+[Optimizer]
+Type = adam
+beta1 = 0.9             # beta 1 for adam
+beta2 = 0.999           # beta 2 for adam
+epsilon = 1e-7          # epsilon for adam
+
+[LearningRateScheduler]
+type = constant
+Learning_rate = 1e-4    # Learning Rate
+
+# Layer Section : Name
+[inputlayer]
+Type = input
+Input_Shape = 1:28:28
+
+# Layer Section : Name
+[conv2d_c1_layer]
+Type = conv2d
+input_layers = inputlayer
+kernel_size = 5,5
+bias_initializer = zeros
+Activation = sigmoid
+weight_initializer = xavier_uniform
+filters = 6
+stride = 1,1
+padding = 0,0
+
+[pooling2d_p1]
+Type = pooling2d
+input_layers = conv2d_c1_layer
+pool_size = 2,2
+stride = 2,2
+padding = 0,0
+pooling = average
+
+[conv2d_c2_layer]
+Type = conv2d
+input_layers = pooling2d_p1
+kernel_size = 5,5
+bias_initializer = zeros
+Activation = sigmoid
+weight_initializer = xavier_uniform
+filters = 12
+stride = 1,1
+padding = 0,0
+
+[pooling2d_p2]
+Type = pooling2d
+input_layers = conv2d_c2_layer
+pool_size = 2,2
+stride = 2,2
+padding = 0,0
+pooling = average
+
+[flatten]
+Type = flatten
+input_layers = pooling2d_p2
+
+[outputlayer]
+Type = fully_connected
+input_layers = flatten
+Unit = 10               # Output Layer Dimension ( = Weight Width )
+weight_initializer = xavier_uniform
+bias_initializer = zeros
+Activation = softmax    # activation : sigmoid, softmax
+
diff --git a/Tizen.platform/training_offloading_service/res/mnist.json b/Tizen.platform/training_offloading_service/res/mnist.json
new file mode 100644
index 00000000..f2d8ea90
--- /dev/null
+++ b/Tizen.platform/training_offloading_service/res/mnist.json
@@ -0,0 +1,5 @@
+{
+  "gst_caps":"other/tensors, format=(string)static, framerate=(fraction)30/1, num_tensors=(int)2, dimensions=(string)28:28:1:1.10:1:1:1, types=(string)float32.float32",
+  "total_samples":1000,
+  "sample_size":3176
+}
diff --git a/Tizen.platform/training_offloading_service/res/pretrained-mnist.bin b/Tizen.platform/training_offloading_service/res/pretrained-mnist.bin
new file mode 100644
index 00000000..524e3900
Binary files /dev/null and b/Tizen.platform/training_offloading_service/res/pretrained-mnist.bin differ
diff --git a/Tizen.platform/training_offloading_service/res/receiver.conf b/Tizen.platform/training_offloading_service/res/receiver.conf
new file mode 100644
index 00000000..3356c2ae
--- /dev/null
+++ b/Tizen.platform/training_offloading_service/res/receiver.conf
@@ -0,0 +1,29 @@
+{
+  "offloading" :
+  {
+    "node-type" : "receiver",
+    "host" : "192.168.0.7",
+    "port" : "3003",
+    "connect-type" : "TCP",
+    "topic" : "offloading_service_test_topic",
+    "training" :
+    {
+      "time-limit" : 10,
+      "transfer-data" :
+      {
+        "service_1" : "@APP_RW_PATH@/trained-model.bin"
+      }
+    }
+  },
+  "services" :
+  {
+    "service_1" :
+    {
+      "name" : "registered-trained-model.bin",
+      "service-type" : "reply",
+      "service-key" : "trained_model_registration_key",
+      "activate" : "true",
+      "description" : "registered trained model file"
+    }
+  }
+}
diff --git a/Tizen.platform/training_offloading_service/res/sender.conf b/Tizen.platform/training_offloading_service/res/sender.conf
new file mode 100644
index 00000000..7d5ee223
--- /dev/null
+++ b/Tizen.platform/training_offloading_service/res/sender.conf
@@ -0,0 +1,68 @@
+{
+  "offloading" :
+  {
+    "node-type" : "sender",
+    "dest-host" : "192.168.0.7",
+    "dest-port" : "3003",
+    "host" : "192.168.0.5",
+    "connect-type" : "TCP",
+    "topic" : "offloading_service_test_topic",
+    "training" :
+    {
+      "sender-pipeline" : "datareposrc location=@APP_RW_PATH@/mnist.data json=@APP_RW_PATH@/mnist.json epochs=1 tensors-sequence=0,1 stop-sample-index=9 ! edgesink port=0 connect-type=TCP topic=training_topic host=192.168.0.5 port=3110 wait-connection=true connection-timeout=10000000",
+      "transfer-data" :
+      {
+        "service_1" : "@APP_RW_PATH@/pretrained-mnist.bin",
+        "service_2" : "@APP_RW_PATH@/mnist.ini",
+        "service_3" :
+        {
+          "pipeline" :
+          {
+            "description" : "edgesrc dest-host=192.168.0.5 dest-port=3110 connect-type=TCP topic=training_topic ! queue ! other/tensors, format=static, num_tensors=2, framerate=0/1, dimensions=28:28:1:1.1:1:10:1, types=float32.float32 ! tensor_trainer name=trainer_nntrainer framework=nntrainer model-save-path=@TRAINED_MODEL_FILE@ model-config=@REMOTE_APP_RW_PATH@/registered-mnist.ini model-load-path=@REMOTE_APP_RW_PATH@/registered-mnist.bin num-training-samples=5 num-validation-samples=5 epochs=1 ! tensor_sink name=training_result async=true",
+            "output_node" : [
+              {
+                "name" : "training_result",
+                "info" : [
+                  {
+                    "type" : "float64",
+                    "dimension" : "1:1:4:1"
+                  }
+                ]
+              }
+            ],
+            "training_node" : [
+              {
+                "name" : "trainer_nntrainer"
+              }
+            ]
+          }
+        }
+      }
+    }
+  },
+  "services" :
+  {
+    "service_1" :
+    {
+      "name" : "registered-mnist.bin",
+      "service-type" : "model_raw",
+      "service-key" : "model_registration_key",
+      "activate" : "true",
+      "description" : "pretrained model"
+    },
+    "service_2" :
+    {
+      "name" : "registered-mnist.ini",
+      "service-type" : "model_raw",
+      "service-key" : "model_config_registration_key",
+      "activate" : "true",
+      "description" : "model configuration file"
+    },
+    "service_3" :
+    {
+      "service-type" : "pipeline_raw",
+      "service-key" : "pipeline_registration_key",
+      "description" : "remote pipeline registration test using raw file transmission"
+    }
+  }
+}