+$ git clone https://github.com/mnms/LightningDB
+
+$ ./build.sh compile
+
+$ cd nvkvs
+$ docker build . -t harbor.k8s.lightningdb/ltdb/nvkvs:v1.4.3
+$ docker push harbor.k8s.lightningdb/ltdb/nvkvs:v1.4.3
+
+$ ./build.sh compile debug
+
+$ cd nvkvs
+$ docker build . -t harbor.k8s.lightningdb/ltdb/nvkvs:v2.0.0
+$ docker push harbor.k8s.lightningdb/ltdb/nvkvs:v2.0.0
+
+$ yum install -y kmod-devel rubygem-asciidoctor.noarch iniparser-devel.x86_64 meson.noarch
+
+// json-c (version: json-c-0.14-20200419)
+
+$ git clone https://github.com/json-c/json-c.git
+$ cd json-c
+$ git checkout json-c-0.14-20200419 -b json-c-0.14-20200419
+$ mkdir json-c-build
+$ cd json-c-build/
+$ cmake ../
+$ make -j48
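If ndctl (built next) is expected to pick up this json-c build, the compiled library typically still needs to be installed; a minimal sketch, assuming the default /usr/local prefix is acceptable:
$ make install    # install json-c so later builds can find it (adjust the prefix if needed)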
+
+//Required Min Version: v75 (latest version: v78)
+
+$ git clone https://github.com/pmem/ndctl
+$ cd ndctl
+$ git checkout v75 -b v75
+$ meson setup build;
+$ meson compile -C build;
+$ meson install -C build;
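Since v75 is the required minimum, the installed version can be confirmed quickly (assuming the binaries are now on PATH; the --version flag is standard in ndctl/daxctl builds):
$ ndctl --version     # should report 75 or newer
$ daxctl --version    # daxctl comes from the same source tree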
+
+$ ./build.sh compile debug
+
+// dax-ctl must be installed into the image base path
+// Assume the compile working directory is "/root/ldb/LightningDB_v2_cxl/nvkvs/debug/output"
+// Move into the ndctl compile directory cloned from GitHub
+
+$ cd ndctl
+$ rm -rf build
+$ meson -Drootprefix=/root/ldb/LightningDB_v2_cxl/nvkvs/debug/output -Dlibdir=/root/ldb/LightningDB_v2_cxl/nvkvs/debug/output/lib build -Dprefix=/root/ldb/LightningDB_v2_cxl/nvkvs/debug/output
+$ meson compile -C build;
+$ meson install -C build;
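To sanity-check that the dax-ctl artifacts actually landed under the image base path (the path below follows the working-directory assumption stated in the comments above):
$ ls /root/ldb/LightningDB_v2_cxl/nvkvs/debug/output/lib | grep -i daxctl    # expect the libdaxctl shared objects
$ find /root/ldb/LightningDB_v2_cxl/nvkvs/debug/output -maxdepth 3 -name 'daxctl*'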
+
+$ cd nvkvs
+$ docker build . -t harbor.k8s.lightningdb/ltdb/nvkvs:v2-cms-integration
+$ docker push harbor.k8s.lightningdb/ltdb/nvkvs:v2-cms-integration
+
+Tip
+How to compile with the maximum number of CPU cores (e.g. 56 cores):
+In 'build.sh', use cmake --build . --target install -- -j56
+and mvn clean install -DskipTests -P $RELEASE_MODE $MAVEN_OPTS -T 56, as sketched below.
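A rough sketch of what those build.sh lines could look like with the core count factored out (NPROC is a name introduced here for illustration; the actual script may differ):
# hypothetical excerpt from build.sh
NPROC=56                                                  # number of CPU cores to use for the build
cmake --build . --target install -- -j${NPROC}            # parallel native build and install
mvn clean install -DskipTests -P $RELEASE_MODE $MAVEN_OPTS -T ${NPROC}   # parallel Maven build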
$ git clone https://github.com/mnms/ltdb-http
+
+$ mvn clean package -DskipTests -P release-k8s,dist-k8s,tgz -Dsite-spec=k8s -Dk8s.namespace=metavision
+$ cd target-k8s
+$ tar xzvf ltdb-http-1.0-k8s-xxx_xxx.tar.gz
+$ cd ltdb-http
+$ docker build . -t harbor.k8s.lightningdb/ltdb/ltdb-http:develop
+$ docker push harbor.k8s.lightningdb/ltdb/ltdb-http:develop
+
+$ mvn clean package -DskipTests -P release-k8s,dist-k8s,tgz -Dsite-spec=k8s -Dk8s.namespace=metavision
+$ cd target-k8s
+$ tar xzvf ltdb-http-1.0-k8s-xxx_xxx.tar.gz
+$ cd ltdb-http
+$ docker build . -t harbor.k8s.lightningdb/ltdb/ltdb-http:develop-v2
+$ docker push harbor.k8s.lightningdb/ltdb/ltdb-http:develop-v2
+
+$ git clone https://github.com/mnms/thunderquery_api
+$ git clone https://github.com/mnms/thunderquery-cli
+
+$ yum install -y kmod-devel rubygem-asciidoctor.noarch iniparser-devel.x86_64 meson.noarch
+
+$ vi /etc/yum.repos.d/cert-forensics-tools.repo
+
+[cert-forensics-tools]
+name=Cert Forensics Tools Repository
+baseurl=https://forensics.cert.org/centos/cert/8/x86_64/
+enabled=1
+gpgcheck=1
+gpgkey=https://forensics.cert.org/forensics.asc
+
+$ yum clean all
+$ yum makecache
+$ yum install musl-gcc.x86_64
+
+$ cat ~/.ssh/id_rsa.pub
+
+$ vi ~/.cargo/config.toml
+
+[net]
+git-fetch-with-cli = true
+
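If the musl target has not been added to the Rust toolchain yet, it may need to be installed first (this assumes a rustup-managed toolchain; skip if the target is already present):
$ rustup target add x86_64-unknown-linux-musl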
+$ cd thunderquery_api
+$ cargo install --path . --target=x86_64-unknown-linux-musl
+$ cd thunderquery-cli
+$ cargo install --path . --target=x86_64-unknown-linux-musl
+
+
+$ cd thunderquery_api
+
+## Copy the thunderquery-cli binary into the api directory ##
+$ cp ../thunderquery-cli/target/x86_64-unknown-linux-musl/release/thunderquery-cli target/x86_64-unknown-linux-musl/release
+
+$ docker build . -t harbor.k8s.lightningdb/ltdb/thunderquery_api:develop
+$ docker push harbor.k8s.lightningdb/ltdb/thunderquery_api:develop
+
+
+
+
+
+
+
+You can use the ping command to check the status of the nodes.
Options
+cli ping --all
cli ping {hostname} {port}
Examples
+matthew@lightningdb:21> cli ping --all
+alive redis 12/12
+
+matthew@lightningdb:21> cli ping myServer 20101
+PONG
+
+You can read or write the configuration values of the current cluster.
+Options
+cli config get {feature name} --all
cli config get -h {hostname} -p {port}
cli config set {feature name} {value} --all
cli config set {feature name} {value} -h {hostname} -p {port}
Examples
+matthew@lightningdb:21> cli config get maxmemory --all
++--------+----------------------+--------+
+| TYPE | ADDR | RESULT |
++--------+----------------------+--------+
+| Master | 192.168.111.41:20100 | 300mb |
+| Master | 192.168.111.41:20101 | 300mb |
+| Master | 192.168.111.41:20102 | 300mb |
+| Master | 192.168.111.44:20100 | 300mb |
+| Master | 192.168.111.44:20101 | 300mb |
+| Master | 192.168.111.44:20102 | 300mb |
+| Slave | 192.168.111.41:20150 | 300mb |
+| Slave | 192.168.111.41:20151 | 300mb |
+| Slave | 192.168.111.41:20152 | 300mb |
+| Slave | 192.168.111.44:20150 | 300mb |
+| Slave | 192.168.111.44:20151 | 300mb |
+| Slave | 192.168.111.44:20152 | 300mb |
++--------+----------------------+--------+
+matthew@lightningdb:21> cli config set maxmemory 500mb --all
+success 12/12
+matthew@lightningdb:21> cli config get maxmemory --all
++--------+----------------------+--------+
+| TYPE | ADDR | RESULT |
++--------+----------------------+--------+
+| Master | 192.168.111.41:20100 | 500mb |
+| Master | 192.168.111.41:20101 | 500mb |
+| Master | 192.168.111.41:20102 | 500mb |
+| Master | 192.168.111.44:20100 | 500mb |
+| Master | 192.168.111.44:20101 | 500mb |
+| Master | 192.168.111.44:20102 | 500mb |
+| Slave | 192.168.111.41:20150 | 500mb |
+| Slave | 192.168.111.41:20151 | 500mb |
+| Slave | 192.168.111.41:20152 | 500mb |
+| Slave | 192.168.111.44:20150 | 500mb |
+| Slave | 192.168.111.44:20151 | 500mb |
+| Slave | 192.168.111.44:20152 | 500mb |
++--------+----------------------+--------+
+
+matthew@lightningdb:21> cli config get maxmemory -h myServer -p 20101
+500mb
+matthew@lightningdb:21> cli config set maxmemory 300mb -h myServer -p 20101
+OK
+matthew@lightningdb:21> cli config get maxmemory -h myServer -p 20101
+300mb
+matthew@lightningdb:21>
+
+You can get the information and stats of the current cluster.
+matthew@lightningdb:21> cli cluster info
+cluster_state:ok
+cluster_slots_assigned:16384
+cluster_slots_ok:16384
+cluster_slots_pfail:0
+cluster_slots_fail:0
+cluster_known_nodes:12
+cluster_size:6
+cluster_current_epoch:14
+cluster_my_epoch:6
+cluster_stats_messages_ping_sent:953859
+cluster_stats_messages_pong_sent:917798
+cluster_stats_messages_meet_sent:10
+cluster_stats_messages_sent:1871667
+cluster_stats_messages_ping_received:917795
+cluster_stats_messages_pong_received:951370
+cluster_stats_messages_meet_received:3
+cluster_stats_messages_received:1869168
+
+You can get the distribution and status of each node.
+matthew@lightningdb:21> cli cluster nodes
+4b8fe9d135670daabe19437e3b840b1c770ffa2f 192.168.111.44:20151 slave 985a2215d2acb3f1612751a13e0d7466d874cfe5 0 1604891127367 10 connected
+4dd5dff5008ccd89cf18faef736fe6492eb34d05 192.168.111.41:20152 slave 9bff873f9f5f84cd3b78288524230b5cd1c6190f 0 1604891128000 8 connected
+15b3c06c1edeb5d2eeb6c0f35c9f27cf616acd11 192.168.111.44:20101 myself,slave 4b6bc980b33dd1eecc87babfb5762eda9e7921e7 0 1604891118000 13 connected
+8a800fbf3518e1a0e6b332516455ef4aa6bb3be9 192.168.111.41:20100 master - 0 1604891130372 1 connected 0-2730
+9bff873f9f5f84cd3b78288524230b5cd1c6190f 192.168.111.44:20102 master - 0 1604891126000 6 connected 8193-10923
+60f88a9db445997112cf8947931988152767878f 192.168.111.44:20152 slave 974c0540741d89c7569b63345faa852361043e8b 0 1604891122000 11 connected
+985a2215d2acb3f1612751a13e0d7466d874cfe5 192.168.111.41:20101 master - 0 1604891125365 5 connected 2731-5461
+85de73ca2aa668a79fe5636ec74e68dee6f9b36a 192.168.111.44:20100 master - 0 1604891129371 4 connected 13654-16383
+974c0540741d89c7569b63345faa852361043e8b 192.168.111.41:20102 master - 0 1604891124363 2 connected 5462-8192
+9c6aef212b6d68d2a0298c1902629e1fdc95f943 192.168.111.41:20150 slave 85de73ca2aa668a79fe5636ec74e68dee6f9b36a 0 1604891128370 4 connected
+474303b3b9e9f7b84b157ecf52ce11e153a28716 192.168.111.44:20150 slave 8a800fbf3518e1a0e6b332516455ef4aa6bb3be9 0 1604891126366 13 connected
+4b6bc980b33dd1eecc87babfb5762eda9e7921e7 192.168.111.41:20151 master - 0 1604891131375 14 connected 10924-13653
+
+You can get the slot information.
+matthew@lightningdb:21> cli cluster slots
++-------+-------+----------------+--------+----------------+----------+
+| start | end | m_ip | m_port | s_ip_0 | s_port_0 |
++-------+-------+----------------+--------+----------------+----------+
+| 0 | 2730 | 192.168.111.41 | 20100 | 192.168.111.44 | 20150 |
+| 2731 | 5461 | 192.168.111.41 | 20101 | 192.168.111.44 | 20151 |
+| 5462 | 8192 | 192.168.111.41 | 20102 | 192.168.111.44 | 20152 |
+| 8193 | 10923 | 192.168.111.44 | 20102 | 192.168.111.41 | 20152 |
+| 10924 | 13653 | 192.168.111.41 | 20151 | 192.168.111.44 | 20101 |
+| 13654 | 16383 | 192.168.111.44 | 20100 | 192.168.111.41 | 20150 |
++-------+-------+----------------+--------+----------------+----------+
+
+
+
+
+
+
+
+ Note
+By default, all of the features provided in LightningDB v1.x are still supported; this page only covers the commands that have been added or changed.
+127.0.0.1:7389> help "TABLE.META.WRITE" "createTable"
+
+ TABLE.META.WRITE createTable catalog.namespace.table arrow::schema
+ summary: Create a new table
+ since: 2.0.0
+ group: table.meta
+
+127.0.0.1:7389> "TABLE.META.WRITE" "createTable" "cat_1.test.table" "\x10\x00\x00\x00\x00\x00\n\x00\x0e\x00\x06\x00\r\x00\b\x00\n\x00\x00\x00\x00\x00\x04\x00\x10\x00\x00\x00\x00\x01\n\x00\x0c\x00\x00\x00\b\x00\x04\x00\n\x00\x00\x00\b\x00\x00\x00\xc4\x01\x00\x00\t\x00\x00\x00\x80\x01\x00\x00D\x01\x00\x00\x18\x01\x00\x00\xec\x00\x00\x00\xc0\x00\x00\x00\x98\x00\x00\x00h\x00\x00\x00@\x00\x00\x00\x04\x00\x00\x00\xac\xfe\xff\xff\b\x00\x00\x00\x18\x00\x00\x00\x0e\x00\x00\x00127.0.0.1:7389\x00\x00\x13\x00\x00\x00properties.location\x00\xe4\xfe\xff\xff\b\x00\x00\x00\x0c\x00\x00\x00\x03\x00\x00\x00job\x00\x0b\x00\x00\x00partition.1\x00\b\xff\xff\xff\b\x00\x00\x00\x0c\x00\x00\x00\x01\x00\x00\x001\x00\x00\x00\x10\x00\x00\x00internal.version\x00\x00\x00\x004\xff\xff\xff\b\x00\x00\x00\x0c\x00\x00\x00\x03\x00\x00\x00age\x00\x0b\x00\x00\x00partition.0\x00X\xff\xff\xff\b\x00\x00\x00\x0c\x00\x00\x00\x01\x00\x00\x002\x00\x00\x00\x0e\x00\x00\x00partition.size\x00\x00\x80\xff\xff\xff\b\x00\x00\x00\x0c\x00\x00\x00\x03\x00\x00\x00512\x00\x0c\x00\x00\x00cva.capacity\x00\x00\x00\x00\xa8\xff\xff\xff\b\x00\x00\x00\x0c\x00\x00\x00\x02\x00\x00\x0024\x00\x00\x0e\x00\x00\x00properties.ttl\x00\x00\xd0\xff\xff\xff\b\x00\x00\x00\x10\x00\x00\x00\x04\x00\x00\x002560\x00\x00\x00\x00\x11\x00\x00\x00rowgroup.capacity\x00\x00\x00\b\x00\x0c\x00\b\x00\x04\x00\b\x00\x00\x00\b\x00\x00\x00\x18\x00\x00\x00\x0e\x00\x00\x00127.0.0.1:7379\x00\x00\x14\x00\x00\x00properties.metastore\x00\x00\x00\x00\x03\x00\x00\x00\x88\x00\x00\x004\x00\x00\x00\x04\x00\x00\x00\x96\xff\xff\xff\x14\x00\x00\x00\x14\x00\x00\x00\x14\x00\x00\x00\x00\x00\x05\x01\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x84\xff\xff\xff\x03\x00\x00\x00job\x00\xc2\xff\xff\xff\x14\x00\x00\x00\x14\x00\x00\x00\x1c\x00\x00\x00\x00\x00\x02\x01 \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\b\x00\x0c\x00\b\x00\a\x00\b\x00\x00\x00\x00\x00\x00\x01 \x00\x00\x00\x03\x00\x00\x00age\x00\x00\x00\x12\x00\x18\x00\x14\x00\x13\x00\x12\x00\x0c\x00\x00\x00\b\x00\x04\x00\x12\x00\x00\x00\x14\x00\x00\x00\x14\x00\x00\x00\x18\x00\x00\x00\x00\x00\x05\x01\x14\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x04\x00\x04\x00\x00\x00\x04\x00\x00\x00name\x00\x00\x00\x00"
+
+
+127.0.0.1:7389> help "TABLE.META.WRITE" "truncateTable"
+
+ TABLE.META.WRITE truncateTable catalog.namespace.table
+ summary: Truncate the table(Remove all data in the table)
+ since: 2.0.0
+ group: table.meta
+
+127.0.0.1:7389>
+127.0.0.1:7389> TABLE.DATA.READ partitions "cat_1.test.table" "*"
+ 1) "21\x1eSales Manager"
+ 2) "22\x1eTutor"
+ 3) "23\x1eBanker"
+ 4) "23\x1eProfessor"
+ 5) "23\x1eSales Manager"
+ 6) "24\x1eStudent"
+ 7) "26\x1eStudent"
+ 8) "27\x1eSales Manager"
+ 9) "29\x1eBanker"
+10) "29\x1eProfessor"
+11) "32\x1eProfessor"
+12) "32\x1eSales Manager"
+13) "33\x1eProfessor"
+14) "36\x1eProfessor"
+15) "41\x1eBanker"
+16) "43\x1eSales Manager"
+17) "45\x1eBanker"
+18) "47\x1eBanker"
+19) "48\x1eCEO"
+127.0.0.1:7389> TABLE.META.WRITE truncateTable "cat_1.test.table"
+"OK"
+127.0.0.1:7389> TABLE.DATA.READ partitions "cat_1.test.table" "*"
+(empty list or set)
+
+127.0.0.1:7389> help "TABLE.META.WRITE" "dropTable"
+
+ TABLE.META.WRITE dropTable catalog.namespace.table
+ summary: Drop the table(Remove all data and the schema)
+ since: 2.0.0
+ group: table.meta
+
+127.0.0.1:7389>
+
+127.0.0.1:7389> TABLE.META.READ showTables
+1) "cat_1.test.table"
+2) "version: 1"
+127.0.0.1:7389> TABLE.META.WRITE dropTable "cat_1.test.table"
+"OK"
+127.0.0.1:7389> TABLE.META.READ showTables
+(empty list or set)
+
+127.0.0.1:7389> help "TABLE.META.WRITE" "dropAllTables"
+
+ TABLE.META.WRITE dropAllTables -
+ summary: Drop all tables
+ since: 2.0.0
+ group: table.meta
+
+127.0.0.1:7389>
+127.0.0.1:7389> TABLE.META.READ showTables
+1) "cat_1.test.table"
+2) "version: 1"
+127.0.0.1:7389> TABLE.META.WRITE dropAllTables
+1 tables are deleted.
+
+127.0.0.1:7389> help "TABLE.META.WRITE" "setTableTtl"
+
+ TABLE.META.WRITE setTableTtl catalog.namespace.table ttl(msec)
+ summary: Set the ttl of the table
+ since: 2.0.0
+ group: table.meta
+
+127.0.0.1:7389> TABLE.META.WRITE setTableTtl "cat_1.test.table" 30000
+OK
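To verify the new value, it can be read back with getTableTtl (described later on this page); the reply shown here is what one would expect rather than a captured transcript:
127.0.0.1:7389> TABLE.META.READ getTableTtl cat_1.test.table
1) "cat_1.test.table"
2) "30000"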
+
+127.0.0.1:7389> help TABLE.META.READ showTables
+
+ TABLE.META.READ showTables -
+ summary: Get the list of tables with their own version
+ since: 2.0.0
+ group: table.meta
+
+127.0.0.1:7389>
+127.0.0.1:7389> TABLE.META.READ showTables
+1) "cat_1.test.table"
+2) "version: 1"
+
+127.0.0.1:7389> help TABLE.META.READ describeTables
+
+ TABLE.META.READ describeTables catalog.namespace.table
+ summary: Get all columns and partitions of the table
+ since: 2.0.0
+ group: table.meta
+
+127.0.0.1:7389>
+
+127.0.0.1:7389> TABLE.META.READ showTables
+1) "cat_1.test.table"
+2) "version: 1"
+
+127.0.0.1:7389> TABLE.META.READ describeTables "cat_1.test.table"
+1) "name: string"
+2) "age: int32"
+3) "job: string"
+4) "[ partitions: age job ]"
+
+127.0.0.1:7389> help TABLE.META.READ getTableTtl
+
+ TABLE.META.READ getTableTtl catalog.namespace.table
+ summary: Get the ttl of the table
+ since: 2.0.0
+ group: table.meta
+
+127.0.0.1:7389> TABLE.META.READ getTableTtl *
+1) "cat_1.test.network_table"
+2) "86400000"
+3) "cat_1.test.table"
+4) "86400000"
+127.0.0.1:7389> TABLE.META.READ getTableTtl cat_1.*
+1) "cat_1.test.network_table"
+2) "86400000"
+3) "cat_1.test.table"
+4) "86400000"
+127.0.0.1:7389> TABLE.META.READ getTableTtl *.network_table
+1) "cat_1.test.network_table"
+2) "86400000"
+127.0.0.1:7389> TABLE.META.READ getTableTtl cat_1.test.network_table
+1) "cat_1.test.network_table"
+2) "86400000"
+127.0.0.1:7389>
+
+127.0.0.1:7389> help TABLE.META.READ getPartitionTtl
+
+ TABLE.META.READ getPartitionTtl partition-string
+ summary: Get the ttl of the partition in the table
+ since: 2.0.0
+ group: table.meta
+
+127.0.0.1:7389> TABLE.META.READ getPartitionTtl "cat_1.test.table" "*"
+ 1) "21\x1eSales Manager"
+ 2) "86350123"
+ 3) "22\x1eTutor"
+ 4) "86350139"
+ 5) "23\x1eBanker"
+ 6) "86350126"
+ 7) "23\x1eProfessor"
+ 8) "86350125"
+ 9) "23\x1eSales Manager"
+10) "86350137"
+11) "24\x1eStudent"
+12) "86350121"
+13) "26\x1eStudent"
+14) "86350124"
+15) "27\x1eSales Manager"
+16) "86350132"
+17) "29\x1eBanker"
+18) "86350124"
+19) "29\x1eProfessor"
+20) "86350125"
+21) "32\x1eProfessor"
+22) "86350127"
+23) "32\x1eSales Manager"
+24) "86350123"
+25) "33\x1eProfessor"
+26) "86350120"
+27) "36\x1eProfessor"
+28) "86350134"
+29) "40\x1eBanker"
+30) "86350119"
+31) "41\x1eBanker"
+32) "86350120"
+33) "43\x1eSales Manager"
+34) "86350133"
+35) "45\x1eBanker"
+36) "86350128"
+37) "47\x1eBanker"
+38) "86350124"
+39) "48\x1eCEO"
+40) "86350138"
+127.0.0.1:7389> TABLE.META.READ getPartitionTtl "cat_1.test.table" "23*"
+1) "23\x1eBanker"
+2) "86343642"
+3) "23\x1eProfessor"
+4) "86343641"
+5) "23\x1eSales Manager"
+6) "86343653"
+127.0.0.1:7389> TABLE.META.READ getPartitionTtl "cat_1.test.table" "*CEO"
+1) "48\x1eCEO"
+2) "86336153"
+127.0.0.1:7389> TABLE.META.READ getPartitionTtl "cat_1.test.table" "45\x1eBanker"
+1) "45\x1eBanker"
+2) "86324848"
+127.0.0.1:7389>
+
+- Command
+    - "TABLE.DATA.WRITE" "Insert" "{catalog name}.{namespace name}.{table name}" "table version" "partition string" "binaries... ..."
+- Examples
+127.0.0.1:7389> help "TABLE.DATA.WRITE" "Insert"
+
+ TABLE.DATA.WRITE insert catalog.namespace.table table-version partition-string data
+ summary: Insert a new data(row)
+ since: 2.0.0
+ group: table.data
+
+1636425657.602951 [0 127.0.0.1:53881] "TABLE.DATA.WRITE" "Insert" "cat_1.test.table" "1" "40\x1eBanker" "Jeannie" "(\x00\x00\x00" "Banker"
+1636425657.604043 [0 127.0.0.1:53881] "TABLE.DATA.WRITE" "Insert" "cat_1.test.table" "1" "33\x1eProfessor" "Ardith" "!\x00\x00\x00" "Professor"
+1636425657.604529 [0 127.0.0.1:53881] "TABLE.DATA.WRITE" "Insert" "cat_1.test.table" "1" "41\x1eBanker" "Elena" ")\x00\x00\x00" "Banker"
+1636425657.605351 [0 127.0.0.1:53881] "TABLE.DATA.WRITE" "Insert" "cat_1.test.table" "1" "24\x1eStudent" "Corliss" "\x18\x00\x00\x00" "Student"
+1636425657.607351 [0 127.0.0.1:53881] "TABLE.DATA.WRITE" "Insert" "cat_1.test.table" "1" "41\x1eBanker" "Kiyoko" ")\x00\x00\x00" "Banker"
+1636425657.608057 [0 127.0.0.1:53881] "TABLE.DATA.WRITE" "Insert" "cat_1.test.table" "1" "21\x1eSales Manager" "Hilton" "\x15\x00\x00\x00" "Sales Manager"
+1636425657.608455 [0 127.0.0.1:53881] "TABLE.DATA.WRITE" "Insert" "cat_1.test.table" "1" "32\x1eSales Manager" "Becky" " \x00\x00\x00" "Sales Manager"
+1636425657.609218 [0 127.0.0.1:53881] "TABLE.DATA.WRITE" "Insert" "cat_1.test.table" "1" "29\x1eBanker" "Wendie" "\x1d\x00\x00\x00" "Banker"
+1636425657.609940 [0 127.0.0.1:53881] "TABLE.DATA.WRITE" "Insert" "cat_1.test.table" "1" "26\x1eStudent" "Carolina" "\x1a\x00\x00\x00" "Student"
+1636425657.610284 [0 127.0.0.1:53881] "TABLE.DATA.WRITE" "Insert" "cat_1.test.table" "1" "47\x1eBanker" "Laquita" "/\x00\x00\x00" "Banker"
+1636425657.610638 [0 127.0.0.1:53881] "TABLE.DATA.WRITE" "Insert" "cat_1.test.table" "1" "23\x1eProfessor" "Stephani" "\x17\x00\x00\x00" "Professor"
+1636425657.610964 [0 127.0.0.1:53881] "TABLE.DATA.WRITE" "Insert" "cat_1.test.table" "1" "29\x1eProfessor" "Emile" "\x1d\x00\x00\x00" "Professor"
+1636425657.612257 [0 127.0.0.1:53881] "TABLE.DATA.WRITE" "Insert" "cat_1.test.table" "1" "23\x1eBanker" "Cherri" "\x17\x00\x00\x00" "Banker"
+1636425657.612630 [0 127.0.0.1:53881] "TABLE.DATA.WRITE" "Insert" "cat_1.test.table" "1" "47\x1eBanker" "Raleigh" "/\x00\x00\x00" "Banker"
+1636425657.612943 [0 127.0.0.1:53881] "TABLE.DATA.WRITE" "Insert" "cat_1.test.table" "1" "32\x1eProfessor" "Hollis" " \x00\x00\x00" "Professor"
+1636425657.614136 [0 127.0.0.1:53881] "TABLE.DATA.WRITE" "Insert" "cat_1.test.table" "1" "45\x1eBanker" "Brigette" "-\x00\x00\x00" "Banker"
+1636425657.615558 [0 127.0.0.1:53881] "TABLE.DATA.WRITE" "Insert" "cat_1.test.table" "1" "21\x1eSales Manager" "Damian" "\x15\x00\x00\x00" "Sales Manager"
+1636425657.617321 [0 127.0.0.1:53881] "TABLE.DATA.WRITE" "Insert" "cat_1.test.table" "1" "27\x1eSales Manager" "Star" "\x1b\x00\x00\x00" "Sales Manager"
+1636425657.618819 [0 127.0.0.1:53881] "TABLE.DATA.WRITE" "Insert" "cat_1.test.table" "1" "43\x1eSales Manager" "Elba" "+\x00\x00\x00" "Sales Manager"
+1636425657.619621 [0 127.0.0.1:53881] "TABLE.DATA.WRITE" "Insert" "cat_1.test.table" "1" "36\x1eProfessor" "Lourie" "$\x00\x00\x00" "Professor"
+1636425657.622977 [0 127.0.0.1:53881] "TABLE.DATA.WRITE" "Insert" "cat_1.test.table" "1" "23\x1eSales Manager" "\xea\xb0\x80\xeb\x82\x98\xeb\x82\x98\xeb\x82\x98\xea\xb0\x80\xeb\x82\x98\xeb\x82\x98" "\x17\x00\x00\x00" "Sales Manager"
+1636425657.623555 [0 127.0.0.1:53881] "TABLE.DATA.WRITE" "Insert" "cat_1.test.table" "1" "48\x1eCEO" "Elon" "0\x00\x00\x00" "CEO"
+1636425657.624359 [0 127.0.0.1:53881] "TABLE.DATA.WRITE" "Insert" "cat_1.test.table" "1" "22\x1eTutor" "Kijung" "\x16\x00\x00\x00" "Tutor"
+
+127.0.0.1:7389> help TABLE.DATA.READ partitions
+
+ TABLE.DATA.READ partitions catalog.namespace.table pattern partition-filter(optional)
+ summary: Get the list of partitions with the pattern and filter
+ since: 2.0.0
+ group: table.data
+
+127.0.0.1:7389>
+127.0.0.1:7389> TABLE.DATA.READ partitions "cat_1.test.table" "*"
+ 1) "21\x1eSales Manager"
+ 2) "22\x1eTutor"
+ 3) "23\x1eBanker"
+ 4) "23\x1eProfessor"
+ 5) "23\x1eSales Manager"
+ 6) "24\x1eStudent"
+ 7) "26\x1eStudent"
+ 8) "27\x1eSales Manager"
+ 9) "29\x1eBanker"
+10) "29\x1eProfessor"
+11) "32\x1eProfessor"
+12) "32\x1eSales Manager"
+13) "33\x1eProfessor"
+14) "36\x1eProfessor"
+15) "40\x1eBanker"
+16) "41\x1eBanker"
+17) "43\x1eSales Manager"
+18) "45\x1eBanker"
+19) "47\x1eBanker"
+20) "48\x1eCEO"
+127.0.0.1:7389> TABLE.DATA.READ partitions "cat_1.test.table" "29*"
+1) "29\x1eBanker"
+2) "29\x1eProfessor"
+127.0.0.1:7389> TABLE.DATA.READ partitions "cat_1.test.table" "*Professor"
+1) "23\x1eProfessor"
+2) "29\x1eProfessor"
+3) "32\x1eProfessor"
+4) "33\x1eProfessor"
+5) "36\x1eProfessor"
+
+127.0.0.1:7389> TABLE.DATA.READ partitions "cat_1.test.table" "*" "age\x1e30\x1eLTE"
+ 1) "21\x1eSales Manager"
+ 2) "22\x1eTutor"
+ 3) "23\x1eBanker"
+ 4) "23\x1eProfessor"
+ 5) "23\x1eSales Manager"
+ 6) "24\x1eStudent"
+ 7) "26\x1eStudent"
+ 8) "27\x1eSales Manager"
+ 9) "29\x1eBanker"
+10) "29\x1eProfessor"
+127.0.0.1:7389> TABLE.DATA.READ partitions "cat_1.test.table" "*" "age\x1e32\x1eEQ"
+1) "32\x1eProfessor"
+2) "32\x1eSales Manager"
+
+127.0.0.1:7389> TABLE.DATA.READ partitions "cat_1.test.table" "*" "age\x1e32\x1eLT\x1ejob\x1eCEO\x1eLTE\x1eAND"
+1) "23\x1eBanker"
+2) "29\x1eBanker"
+127.0.0.1:7389> TABLE.DATA.READ partitions "cat_1.test.table" "*" "age\x1e32\x1eLT\x1ejob\x1eCEO\x1eGTE\x1eAND"
+1) "21\x1eSales Manager"
+2) "22\x1eTutor"
+3) "23\x1eProfessor"
+4) "23\x1eSales Manager"
+5) "24\x1eStudent"
+6) "26\x1eStudent"
+7) "27\x1eSales Manager"
+8) "29\x1eProfessor"
+127.0.0.1:7389> TABLE.DATA.READ partitions "cat_1.test.table" "*" "age\x1e32\x1eGT\x1ejob\x1eCEO\x1eGTE\x1eAND"
+1) "33\x1eProfessor"
+2) "36\x1eProfessor"
+3) "43\x1eSales Manager"
+4) "48\x1eCEO"
+
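The partition-filter strings used above follow a pattern that can be read off the examples; this is an inference from this page, not an official grammar:
# single predicate : {column}\x1e{value}\x1e{OP}                          e.g. "age\x1e30\x1eLTE"
# two predicates   : {col}\x1e{val}\x1e{OP}\x1e{col}\x1e{val}\x1e{OP}\x1eAND
# OP values observed on this page: EQ, LT, LTE, GT, GTE; \x1e is the unit-separator byte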
+127.0.0.1:7389> help TABLE.DATA.READ select
+
+ TABLE.DATA.READ select catalog.namespace.table projection partition-filter data-filter
+ summary: Get the data with the pattern and filter
+ since: 2.0.0
+ group: table.data
+
+127.0.0.1:7389> TABLE.DATA.READ select xxx ....
+
+127.0.0.1:7389> help TABLE.DATA.READ getPartitionRowCount
+
+ TABLE.DATA.READ getPartitionRowCount catalog.namespace.table partition-string
+ summary: Get the count of the rows in the partition
+ since: 2.0.0
+ group: table.data
+
+127.0.0.1:7389> TABLE.DATA.READ getPartitionRowCount "cat_1.test.table" *
+ 1) "21\x1eSales Manager"
+ 2) "2"
+ 3) "22\x1eTutor"
+ 4) "1"
+ 5) "23\x1eBanker"
+ 6) "1"
+ 7) "23\x1eProfessor"
+ 8) "1"
+ 9) "23\x1eSales Manager"
+10) "1"
+11) "24\x1eStudent"
+12) "1"
+13) "26\x1eStudent"
+14) "1"
+15) "27\x1eSales Manager"
+16) "1"
+17) "29\x1eBanker"
+18) "1"
+19) "29\x1eProfessor"
+20) "1"
+21) "32\x1eProfessor"
+22) "1"
+23) "32\x1eSales Manager"
+24) "1"
+25) "33\x1eProfessor"
+26) "1"
+27) "36\x1eProfessor"
+28) "1"
+29) "40\x1eBanker"
+30) "1"
+31) "41\x1eBanker"
+32) "2"
+33) "43\x1eSales Manager"
+34) "1"
+35) "45\x1eBanker"
+36) "1"
+37) "47\x1eBanker"
+38) "2"
+39) "48\x1eCEO"
+40) "1"
+127.0.0.1:7389> TABLE.DATA.READ getPartitionRowCount "cat_1.test.table" "23*"
+1) "23\x1eBanker"
+2) "1"
+3) "23\x1eProfessor"
+4) "1"
+5) "23\x1eSales Manager"
+6) "1"
+127.0.0.1:7389> TABLE.DATA.READ getPartitionRowCount "cat_1.test.table" "*Professor"
+ 1) "23\x1eProfessor"
+ 2) "1"
+ 3) "29\x1eProfessor"
+ 4) "1"
+ 5) "32\x1eProfessor"
+ 6) "1"
+ 7) "33\x1eProfessor"
+ 8) "1"
+ 9) "36\x1eProfessor"
+10) "1"
+127.0.0.1:7389> TABLE.DATA.READ getPartitionRowCount "cat_1.test.table" "45\x1eBanker"
+1) "45\x1eBanker"
+2) "1"
+
+
+127.0.0.1:7389> help TABLE.DATA.READ getPartitionRowGroup
+
+ TABLE.DATA.READ getPartitionRowGroup catalog.namespace.table partition-string
+ summary: Get the count of the rows in the each row-group of the partition
+ since: 2.0.0
+ group: table.data
+
+127.0.0.1:7389> TABLE.DATA.READ getPartitionRowGroup "cat_1.test.table" "21\x1eSales Manager"
+1) "0"
+2) "1"
+3) "1"
+4) "2"
+127.0.0.1:7389>
+
+127.0.0.1:7389> help TABLE.DATA.READ getTableRowCount
+
+ TABLE.DATA.READ getTableRowCount -
+ summary: Get the row count of each table
+ since: 2.0.0
+ group: table.data
+
+127.0.0.1:7389> TABLE.DATA.READ getTableRowCount *
+1) "cat_1.test.network_table"
+2) "33229"
+3) "cat_1.test.table"
+4) "23"
+127.0.0.1:7389>
+
+
+
+
+
+
+
+ Note
+The Command Line Interface (CLI) of LightningDB supports not only the deploy and start commands but also many commands to access and manipulate data in LightningDB.
+If you want to see the list of cluster commands, use the cluster command without any option.
ec2-user@lightningdb:1> cluster
+
+NAME
+ ltcli cluster - This is cluster command
+
+SYNOPSIS
+ ltcli cluster COMMAND
+
+DESCRIPTION
+ This is cluster command
+
+COMMANDS
+ COMMAND is one of the following:
+
+ add_slave
+ Add slaves to cluster additionally
+
+ clean
+ Clean cluster
+
+ configure
+
+ create
+ Create cluster
+
+ ls
+ Check cluster list
+
+ rebalance
+ Rebalance
+
+ restart
+ Restart redist cluster
+
+ rowcount
+ Query and show cluster row count
+
+ start
+ Start cluster
+
+ stop
+ Stop cluster
+
+ use
+ Change selected cluster
+
+(1) Cluster configure
+redis-{port}.conf is generated using the redis-{master/slave}.conf.template and redis.properties files.
matthew@lightningdb:21> cluster configure
+Check status of hosts...
+OK
+sync conf
++----------------+--------+
+| HOST | STATUS |
++----------------+--------+
+| 192.168.111.44 | OK |
+| 192.168.111.41 | OK |
++----------------+--------+
+OK
+
+(2) Cluster start
+Log files of the previous run in ${SR2_HOME}/logs/redis/ are moved to ${SR2_HOME}/logs/redis/backup/.
+Data directories are created under ${SR2_REDIS_DATA}.
+Each redis-server starts with its ${SR2_HOME}/conf/redis/redis-{port}.conf file and writes its logs to ${SR2_HOME}/logs/redis/.
ec2-user@lightningdb:1> cluster start
+Check status of hosts...
+OK
+Check cluster exist...
+ - 127.0.0.1
+OK
+Backup redis master log in each MASTER hosts...
+ - 127.0.0.1
+Generate redis configuration files for master hosts
+sync conf
++-----------+--------+
+| HOST | STATUS |
++-----------+--------+
+| 127.0.0.1 | OK |
++-----------+--------+
+Starting master nodes : 127.0.0.1 : 18100|18101|18102|18103|18104 ...
+Wait until all redis process up...
+cur: 5 / total: 5
+Complete all redis process up
+
+Errors
+A redis-server (master) process with the same port is already running. To resolve this error, use cluster stop or kill {pid of the process}.
$ cluster start
+...
+...
+[ErrorCode 11] Fail to start... Must be checked running MASTER redis processes!
+We estimate that redis process is <alive-redis-count>.
+
+A redis-server (slave) process with the same port is already running. To resolve this error, use cluster stop or kill {pid of the process}.
$ cluster start
+...
+[ErrorCode 12] Fail to start... Must be checked running SLAVE redis processes!
+We estimate that redis process is <alive-redis-count>.
+
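One way to locate a leftover process before killing it (the port 18100 below is just one of the example ports used on this page):
$ ps -ef | grep redis-server | grep 18100    # find the pid of the process holding the port
$ kill {pid}                                 # or stop the whole cluster with 'cluster stop'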
+Conf file is not found. To resolve this error, use cluster configure and then cluster start.
$ cluster start
+...
+FileNotExistError: ${SR2_HOME}/conf/redis/redis-{port}.conf
+
+$ cluster start
+...
+ClusterRedisError: Fail to start redis: max try exceed
+Recommendation Command: 'monitor'
+
+(3) Cluster create
+After checking the information of the cluster, create a cluster of LightningDB.
+Case 1) When redis-server processes are running, create a cluster only.
+ec2-user@lightningdb:1>cluster create
+Check status of hosts...
+OK
+>>> Creating cluster
++-----------+-------+--------+
+| HOST | PORT | TYPE |
++-----------+-------+--------+
+| 127.0.0.1 | 18100 | MASTER |
+| 127.0.0.1 | 18101 | MASTER |
+| 127.0.0.1 | 18102 | MASTER |
+| 127.0.0.1 | 18103 | MASTER |
+| 127.0.0.1 | 18104 | MASTER |
++-----------+-------+--------+
+replicas: 0
+
+Do you want to proceed with the create according to the above information? (y/n)
+y
+Cluster meet...
+ - 127.0.0.1:18100
+ - 127.0.0.1:18103
+ - 127.0.0.1:18104
+ - 127.0.0.1:18101
+ - 127.0.0.1:18102
+Adding slots...
+ - 127.0.0.1:18100, 3280
+ - 127.0.0.1:18103, 3276
+ - 127.0.0.1:18104, 3276
+ - 127.0.0.1:18101, 3276
+ - 127.0.0.1:18102, 3276
+Check cluster state and asign slot...
+Ok
+create cluster complete.
+
+Case 2) When redis-server processes are not running, create a cluster after launching redis-server processes with cluster start
command.
ec2-user@lightningdb:4>cluster create
+Check status of hosts...
+OK
+Backup redis master log in each MASTER hosts...
+ - 127.0.0.1
+create redis data directory in each MASTER hosts
+ - 127.0.0.1
+sync conf
++-----------+--------+
+| HOST | STATUS |
++-----------+--------+
+| 127.0.0.1 | OK |
++-----------+--------+
+OK
+Starting master nodes : 127.0.0.1 : 18100|18101|18102|18103|18104 ...
+Wait until all redis process up...
+cur: 5 / total: 5
+Complete all redis process up
+>>> Creating cluster
++-----------+-------+--------+
+| HOST | PORT | TYPE |
++-----------+-------+--------+
+| 127.0.0.1 | 18100 | MASTER |
+| 127.0.0.1 | 18101 | MASTER |
+| 127.0.0.1 | 18102 | MASTER |
+| 127.0.0.1 | 18103 | MASTER |
+| 127.0.0.1 | 18104 | MASTER |
++-----------+-------+--------+
+replicas: 0
+
+Do you want to proceed with the create according to the above information? (y/n)
+y
+Cluster meet...
+ - 127.0.0.1:18103
+ - 127.0.0.1:18104
+ - 127.0.0.1:18101
+ - 127.0.0.1:18102
+ - 127.0.0.1:18100
+Adding slots...
+ - 127.0.0.1:18103, 3280
+ - 127.0.0.1:18104, 3276
+ - 127.0.0.1:18101, 3276
+ - 127.0.0.1:18102, 3276
+ - 127.0.0.1:18100, 3276
+Check cluster state and asign slot...
+Ok
+create cluster complete.
+
+Errors
+When redis servers are not running, this error (Errno 111) will occur. To resolve it, run the cluster start command first.
ec2-user@lightningdb:1>cluster create
+Check status of hosts...
+OK
+>>> Creating cluster
++-----------+-------+--------+
+| HOST | PORT | TYPE |
++-----------+-------+--------+
+| 127.0.0.1 | 18100 | MASTER |
+| 127.0.0.1 | 18101 | MASTER |
+| 127.0.0.1 | 18102 | MASTER |
+| 127.0.0.1 | 18103 | MASTER |
+| 127.0.0.1 | 18104 | MASTER |
++-----------+-------+--------+
+replicas: 0
+
+Do you want to proceed with the create according to the above information? (y/n)
+y
+127.0.0.1:18100 - [Errno 111] Connection refused
+
+(4) Cluster stop
+Gracefully kill all redis-servers (master/slave) with SIGINT.
+ec2-user@lightningdb:1> cluster stop
+Check status of hosts...
+OK
+Stopping master cluster of redis...
+cur: 5 / total: 5
+cur: 0 / total: 5
+Complete all redis process down
+
+Options
+--force
+
+(5) Cluster clean
+Remove the conf files for redis-server and all data (aof, rdb, RocksDB) of LightningDB.
+ec2-user@lightningdb:1> cluster clean
+Removing redis generated master configuration files
+ - 127.0.0.1
+Removing flash db directory, appendonly and dump.rdb files in master
+ - 127.0.0.1
+Removing master node configuration
+ - 127.0.0.1
+
+(6) Cluster restart
+Runs cluster stop and then cluster start.
Options
+--force-stop
+
+--reset
+
+--cluster
+Run cluster create after the restart; this option should be called together with --reset. See the example after this list.
+
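A hypothetical invocation combining the options above (whether the flags can be combined exactly like this is an assumption, not something this page confirms):
> cluster restart --reset --cluster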
+(7) Update version
+You can update LightningDB by using the 'deploy' command.
+> c 1 // alias of 'cluster use 1'
+> deploy
+(Watch out) Cluster 1 is already deployed. Do you want to deploy again? (y/n) [n]
+y
+
+Select installer
+
+ [ INSTALLER LIST ]
+ (1) lightningdb.release.master.5a6a38.bin
+ (2) lightningdb.trial.master.dbcb9e-dirty.bin
+ (3) lightningdb.trial.master.dbcb9e.bin
+
+Please enter the number, file path or URL of the installer you want to use.
+you can also add a file in list by copy to '$FBPATH/releases/'
+1
+OK, lightningdb.release.master.5a6a38.bin
+
+Do you want to restore conf? (y/n)
+y
+
+If the current settings will be reused, type 'y'.
++-----------------+---------------------------------------------------+
+| NAME | VALUE |
++-----------------+---------------------------------------------------+
+| installer | lightningdb.release.master.5a6a38.bin |
+| nodes | nodeA |
+| | nodeB |
+| | nodeC |
+| | nodeD |
+| master ports | 18100 |
+| slave ports | 18150-18151 |
+| ssd count | 3 |
+| redis data path | ~/sata_ssd/ssd_ |
+| redis db path | ~/sata_ssd/ssd_ |
+| flash db path | ~/sata_ssd/ssd_ |
++-----------------+---------------------------------------------------+
+Do you want to proceed with the deploy accroding to the above information? (y/n)
+y
+Check status of hosts...
++-----------+--------+
+| HOST | STATUS |
++-----------+--------+
+| nodeA | OK |
+| nodeB | OK |
+| nodeC | OK |
+| nodeD | OK |
++-----------+--------+
+Checking for cluster exist...
++------+--------+
+| HOST | STATUS |
++------+--------+
+Backup conf of cluster 1...
+OK, cluster_1_conf_bak_<time-stamp>
+Backup info of cluster 1 at nodeA...
+OK, cluster_1_bak_<time-stamp>
+Backup info of cluster 1 at nodeB...
+OK, cluster_1_bak_<time-stamp>
+Backup info of cluster 1 at nodeC...
+OK, cluster_1_bak_<time-stamp>
+Backup info of cluster 1 at nodeD...
+OK, cluster_1_bak_<time-stamp>
+Transfer installer and execute...
+ - nodeA
+ - nodeB
+ - nodeC
+ - nodeD
+Sync conf...
+Complete to deploy cluster 1.
+Cluster 1 selected.
+
+> cluster restart
+
+After the restart, the new version will be applied.
+(1) Cluster use
+Change the cluster that LTCLI currently operates on. Use the cluster use or c command.
Examples
+ec2-user@lightningdb:2> cluster use 1
+Cluster '1' selected.
+ec2-user@lightningdb:1> c 2
+Cluster '2' selected.
+
+(2) Cluster ls
+List the deployed clusters.
+Examples
+ec2-user@lightningdb:2> cluster ls
+[1, 2]
+
+(3) Cluster rowcount
+Check the count of records that are stored in the cluster.
+Examples
+ec2-user@lightningdb:1> cluster rowcount
+0
+
+(4) Cluster tree
+You can check the status of master and slave nodes and see which master and slave nodes are linked together.
+Examples
+ec2-user@lightningdb:9> cluster tree
+127.0.0.1:18900(connected)
+|__ 127.0.0.1:18950(connected)
+
+127.0.0.1:18901(connected)
+|__ 127.0.0.1:18951(connected)
+
+127.0.0.1:18902(connected)
+|__ 127.0.0.1:18952(connected)
+
+127.0.0.1:18903(connected)
+|__ 127.0.0.1:18953(connected)
+
+127.0.0.1:18904(connected)
+|__ 127.0.0.1:18954(connected)
+
+127.0.0.1:18905(connected)
+|__ 127.0.0.1:18955(connected)
+
+127.0.0.1:18906(connected)
+|__ 127.0.0.1:18956(connected)
+
+(5) Cluster distribution
+The distribution of master/slave nodes is displayed with their hostnames (IP addresses).
+Examples
+matthew@lightningdb:21> cluster distribution
++-----------------------+--------+-------+
+| HOST | MASTER | SLAVE |
++-----------------------+--------+-------+
+| fbg04(192.168.111.41) | 4 | 2 |
+| fbg05(192.168.111.44) | 2 | 4 |
+| TOTAL | 6 | 6 |
++-----------------------+--------+-------+
+
+(1) Cluster failover_list
+Examples
+matthew@lightningdb:21> cluster failover_list
+
+1) failovered masters:
+192.168.111.44:20152
+192.168.111.44:20153
+192.168.111.44:20156
+
+2) no-slave masters:
+192.168.111.44:20100
+192.168.111.41:20101
+
+3) no-slot masters:
+192.168.111.44:20152
+
+4) failbacked slaves:
+192.168.111.41:20102
+192.168.111.41:20105
+
+(2) Cluster do_replicate
+You can add a node as the slave of a master node with cluster do_replicate {slave's IP}:{slave's port} {master's IP}:{master's port}.
The IP addresses of masters or slaves can be replaced with their hostnames.
+Examples
+matthew@lightningdb:21> cluster tree
+192.168.111.44:20101(connected)
+|__ 192.168.111.44:20151(connected)
+
+192.168.111.44:20102(connected)
+|__ 192.168.111.44:20152(connected)
+
+192.168.111.44:20150(connected)
+|__ 192.168.111.44:20100(connected)
+
+matthew@lightningdb:21> cluster do_replicate 192.168.111.44:20100 192.168.111.44:20101
+Start to replicate...
+
+OK
+
+matthew@lightningdb:21> cluster tree
+192.168.111.44:20101(connected)
+|__ 192.168.111.44:20100(connected)
+|__ 192.168.111.44:20151(connected)
+
+192.168.111.44:20102(connected)
+|__ 192.168.111.44:20152(connected)
+
+192.168.111.44:20150(connected)
+
+with hostnames,
+matthew@lightningdb:21> cluster do_replicate fbg05:20100 fbg05:20101
+Start to replicate...
+
+OK
+
+(3) Cluster find_noaddr & cluster forget_noaddr
+You can find and remove 'noaddr' nodes in the current cluster.
+'noaddr' nodes are nodes that are no longer valid.
+Examples
+matthew@lightningdb:21> cluster find_noaddr
+
++------------------------------------------+
+| UUID |
++------------------------------------------+
+| 40675af73cd8fa1272a20fe9536ad19c398b5bca |
++------------------------------------------+
+
+matthew@lightningdb:21> cluster forget_noaddr
+
+"27" nodes have forgot "40675af73cd8fa1272a20fe9536ad19c398b5bca"
+
+matthew@lightningdb:21> cluster find_noaddr
+
++------+
+| UUID |
++------+
+
+(4) Cluster failover
+If a master node is killed, its slave node is promoted automatically after 'cluster-node-timeout'.
+You can promote the slave node immediately by using the 'cluster failover' command.
+Examples
+Step 1) Check the status of the cluster
+In this case, '127.0.0.1:18902' node is killed.
+ec2-user@lightningdb:9> cluster tree
+127.0.0.1:18900(connected)
+|__ 127.0.0.1:18950(connected)
+
+127.0.0.1:18901(connected)
+|__ 127.0.0.1:18951(connected)
+
+127.0.0.1:18902(disconnected) <--- Killed!
+|__ 127.0.0.1:18952(connected)
+
+127.0.0.1:18903(connected)
+|__ 127.0.0.1:18953(connected)
+
+127.0.0.1:18904(connected)
+|__ 127.0.0.1:18954(connected)
+
+127.0.0.1:18905(connected)
+|__ 127.0.0.1:18955(connected)
+
+127.0.0.1:18906(connected)
+|__ 127.0.0.1:18956(connected)
+
+Step 2) Do failover with 'cluster failover' command
+ec2-user@lightningdb:9> cluster failover
+failover 127.0.0.1:18952 for 127.0.0.1:18902
+OK
+ec2-user@lightningdb:9> cluster tree
+127.0.0.1:18900(connected)
+|__ 127.0.0.1:18950(connected)
+
+127.0.0.1:18901(connected)
+|__ 127.0.0.1:18951(connected)
+
+127.0.0.1:18902(disconnected) <--- Killed!
+
+127.0.0.1:18903(connected)
+|__ 127.0.0.1:18953(connected)
+
+127.0.0.1:18904(connected)
+|__ 127.0.0.1:18954(connected)
+
+127.0.0.1:18905(connected)
+|__ 127.0.0.1:18955(connected)
+
+127.0.0.1:18906(connected)
+|__ 127.0.0.1:18956(connected)
+
+127.0.0.1:18952(connected) <--- Promoted to master!
+
+(5) Cluster failback
+With the 'cluster failback' command, the killed node is restarted and added back to the cluster as a slave node.
+Examples
+ec2-user@lightningdb:9> cluster failback
+run 127.0.0.1:18902
+ec2-user@lightningdb:9> cluster tree
+127.0.0.1:18900(connected)
+|__ 127.0.0.1:18950(connected)
+
+127.0.0.1:18901(connected)
+|__ 127.0.0.1:18951(connected)
+
+127.0.0.1:18903(connected)
+|__ 127.0.0.1:18953(connected)
+
+127.0.0.1:18904(connected)
+|__ 127.0.0.1:18954(connected)
+
+127.0.0.1:18905(connected)
+|__ 127.0.0.1:18955(connected)
+
+127.0.0.1:18906(connected)
+|__ 127.0.0.1:18956(connected)
+
+127.0.0.1:18952(connected) <--- Promoted to master!
+|__ 127.0.0.1:18902(connected) <--- Failbacked. Now this node is slave!
+
+(6) Cluster reset_distribution
+To initialize the node distribution, use 'cluster reset_distribution'.
+Examples
+matthew@lightningdb:21> cluster failover_list
+1) failovered masters:
+192.168.111.44:20152
+
+2) no-slave masters:
+
+3) no-slot masters:
+
+4) failbacked slaves:
+192.168.111.41:20101
+
+matthew@lightningdb:21> cluster reset_distribution
+'192.168.111.41:20101' will be master...
+
+OK
+
+matthew@lightningdb:21> cluster failover_list
+1) failovered masters:
+
+2) no-slave masters:
+
+3) no-slot masters:
+
+4) failbacked slaves:
+
+(7) Cluster nodes_with_dir & Cluster masters_with_dir
+Examples
+
+matthew@lightningdb:21> cluster nodes_with_dir 192.168.111.44 matthew03
++----------------+-------+------------------------------------------+
+| HOST | PORT | PATH |
++----------------+-------+------------------------------------------+
+| 192.168.111.44 | 20102 | /sata_ssd/ssd_02/matthew03/nvkvs/matthew |
+| 192.168.111.44 | 20105 | /sata_ssd/ssd_02/matthew03/nvkvs/matthew |
+| 192.168.111.44 | 20150 | /sata_ssd/ssd_02/matthew03/nvkvs/matthew |
+| 192.168.111.44 | 20153 | /sata_ssd/ssd_02/matthew03/nvkvs/matthew |
+| 192.168.111.44 | 20156 | /sata_ssd/ssd_02/matthew03/nvkvs/matthew |
++----------------+-------+------------------------------------------+
+
+matthew@lightningdb:21> cluster masters_with_dir 192.168.111.44 matthew03
++----------------+-------+------------------------------------------+
+| HOST | PORT | PATH |
++----------------+-------+------------------------------------------+
+| 192.168.111.44 | 20102 | /sata_ssd/ssd_02/matthew03/nvkvs/matthew |
+| 192.168.111.44 | 20105 | /sata_ssd/ssd_02/matthew03/nvkvs/matthew |
++----------------+-------+------------------------------------------+
+
+with hostnames,
+matthew@lightningdb:21> cluster nodes_with_dir fbg05 matthew02
++-------+-------+------------------------------------------+
+| HOST | PORT | PATH |
++-------+-------+------------------------------------------+
+| fbg05 | 20101 | /sata_ssd/ssd_02/matthew02/nvkvs/matthew |
+| fbg05 | 20152 | /sata_ssd/ssd_02/matthew02/nvkvs/matthew |
++-------+-------+------------------------------------------+
+matthew@lightningdb:21> cluster masters_with_dir fbg05 matthew02
++-------+-------+------------------------------------------+
+| HOST | PORT | PATH |
++-------+-------+------------------------------------------+
+| fbg05 | 20101 | /sata_ssd/ssd_02/matthew02/nvkvs/matthew |
++-------+-------+------------------------------------------+
+
+(8) Cluster failover_with_dir
+Do a failover so that each master using the given disk path is demoted to a slave (its slave is promoted to master).
+Examples
+matthew@lightningdb:21> cluster masters_with_dir 192.168.111.44 matthew03
++----------------+-------+------------------------------------------+
+| HOST | PORT | PATH |
++----------------+-------+------------------------------------------+
+| 192.168.111.44 | 20102 | /sata_ssd/ssd_02/matthew03/nvkvs/matthew |
+| 192.168.111.44 | 20105 | /sata_ssd/ssd_02/matthew03/nvkvs/matthew |
++----------------+-------+------------------------------------------+
+
+matthew@lightningdb:21> cluster failover_list
+1) failovered masters:
+
+2) no-slave masters:
+
+3) no-slot masters:
+
+4) failbacked slaves:
+
+matthew@lightningdb:21> cluster failover_with_dir 192.168.111.44 matthew03
+'192.168.111.41:20152' will be master...
+OK
+
+'192.168.111.41:20155' will be master...
+OK
+
+matthew@lightningdb:21> cluster failover_list
+1) failovered masters:
+192.168.111.41:20152
+192.168.111.41:20155
+
+2) no-slave masters:
+
+3) no-slot masters:
+
+4) failbacked slaves:
+192.168.111.44:20102
+192.168.111.44:20105
+
+matthew@lightningdb:21> cluster masters_with_dir 192.168.111.44 matthew03
++------+------+------+
+| HOST | PORT | PATH |
++------+------+------+
+
+with hostnames,
+matthew@lightningdb:21> cluster masters_with_dir fbg05 matthew01
++-------+-------+------------------------------------------+
+| HOST | PORT | PATH |
++-------+-------+------------------------------------------+
+| fbg05 | 20151 | /sata_ssd/ssd_02/matthew01/nvkvs/matthew |
++-------+-------+------------------------------------------+
+matthew@lightningdb:21> cluster tree
+192.168.111.44:20102(connected)
+|__ 192.168.111.44:20152(connected)
+
+192.168.111.44:20150(connected)
+|__ 192.168.111.44:20100(connected)
+
+192.168.111.44:20151(connected)
+|__ 192.168.111.44:20101(connected)
+
+matthew@lightningdb:21> cluster failover_with_dir fbg05 matthew01
+'192.168.111.44:20101' will be master...
+OK
+
+
+matthew@lightningdb:21> cluster tree
+192.168.111.44:20101(connected)
+|__ 192.168.111.44:20151(connected)
+
+192.168.111.44:20102(connected)
+|__ 192.168.111.44:20152(connected)
+
+192.168.111.44:20150(connected)
+|__ 192.168.111.44:20100(connected)
+
+(9) Cluster force_failover
+When a server needs to be shut down because of a hardware fault or for maintenance, change all masters on that server to slaves by failing over to their slaves.
+Examples
+matthew@lightningdb:21> cluster distribution
++----------------+--------+-------+
+| HOST | MASTER | SLAVE |
++----------------+--------+-------+
+| 192.168.111.44 | 7 | 7 |
+| 192.168.111.41 | 7 | 7 |
+| TOTAL | 14 | 14 |
++----------------+--------+-------+
+
+
+matthew@lightningdb:21> cluster force_failover 192.168.111.41
+'192.168.111.44:20150' will be master...
+OK
+
+'192.168.111.44:20151' will be master...
+OK
+
+'192.168.111.44:20152' will be master...
+OK
+
+'192.168.111.44:20153' will be master...
+OK
+
+'192.168.111.44:20154' will be master...
+OK
+
+'192.168.111.44:20155' will be master...
+OK
+
+'192.168.111.44:20156' will be master...
+OK
+
+matthew@lightningdb:21> cluster distribution
++----------------+--------+-------+
+| HOST | MASTER | SLAVE |
++----------------+--------+-------+
+| 192.168.111.44 | 14 | 0 |
+| 192.168.111.41 | 0 | 14 |
+| TOTAL | 14 | 14 |
++----------------+--------+-------+
+matthew@lightningdb:21>
+
+(1) Cluster add_slave
+Warning
+Before using the add-slave command, ingestion to the master nodes should be stopped. After replication and sync between master and slave are completed, ingestion can resume.
You can add slaves to a cluster that was configured with masters only, without redundancy.
+Create cluster only with masters
+Proceed with the deploy.
+ec2-user@lightningdb:2> deploy 3
+Select installer
+
+ [ INSTALLER LIST ]
+ (1) lightningdb.dev.master.5a6a38.bin
+
+Please enter the number, file path or url of the installer you want to use.
+you can also add file in list by copy to '$FBPATH/releases/'
+https://flashbase.s3.ap-northeast-2.amazonaws.com/lightningdb.release.master.5a6a38.bin
+Downloading lightningdb.release.master.5a6a38.bin
+[==================================================] 100%
+OK, lightningdb.release.master.5a6a38.bin
+Please type host list separated by comma(,) [127.0.0.1]
+
+OK, ['127.0.0.1']
+How many masters would you like to create on each host? [5]
+
+OK, 5
+Please type ports separate with comma(,) and use hyphen(-) for range. [18300-18304]
+
+OK, ['18300-18304']
+How many replicas would you like to create on each master? [0]
+
+OK, 0
+How many ssd would you like to use? [3]
+
+OK, 3
+Type prefix of db path [~/sata_ssd/ssd_]
+
+OK, ~/sata_ssd/ssd_
++--------------+---------------------------------+
+| NAME | VALUE |
++--------------+---------------------------------+
+| installer | lightningdb.dev.master.5a6a38.bin |
+| hosts | 127.0.0.1 |
+| master ports | 18300-18304 |
+| ssd count | 3 |
+| db path | ~/sata_ssd/ssd_ |
++--------------+---------------------------------+
+Do you want to proceed with the deploy accroding to the above information? (y/n)
+y
+Check status of hosts...
++-----------+--------+
+| HOST | STATUS |
++-----------+--------+
+| 127.0.0.1 | OK |
++-----------+--------+
+OK
+Checking for cluster exist...
++-----------+--------+
+| HOST | STATUS |
++-----------+--------+
+| 127.0.0.1 | CLEAN |
++-----------+--------+
+OK
+Transfer installer and execute...
+ - 127.0.0.1
+Sync conf...
+Complete to deploy cluster 3.
+Cluster '3' selected.
+
+ec2-user@lightningdb:3> cluster start
+Check status of hosts...
+OK
+Check cluster exist...
+ - 127.0.0.1
+OK
+Backup redis master log in each MASTER hosts...
+ - 127.0.0.1
+create redis data directory in each MASTER hosts
+ - 127.0.0.1
+sync conf
++-----------+--------+
+| HOST | STATUS |
++-----------+--------+
+| 127.0.0.1 | OK |
++-----------+--------+
+OK
+Starting master nodes : 127.0.0.1 : 18300|18301|18302|18303|18304 ...
+Wait until all redis process up...
+cur: 5 / total: 5
+Complete all redis process up
+ec2-user@lightningdb:3> cluster create
+Check status of hosts...
+OK
+>>> Creating cluster
++-----------+-------+--------+
+| HOST | PORT | TYPE |
++-----------+-------+--------+
+| 127.0.0.1 | 18300 | MASTER |
+| 127.0.0.1 | 18301 | MASTER |
+| 127.0.0.1 | 18302 | MASTER |
+| 127.0.0.1 | 18303 | MASTER |
+| 127.0.0.1 | 18304 | MASTER |
++-----------+-------+--------+
+replicas: 0
+
+Do you want to proceed with the create according to the above information? (y/n)
+y
+Cluster meet...
+ - 127.0.0.1:18300
+ - 127.0.0.1:18303
+ - 127.0.0.1:18304
+ - 127.0.0.1:18301
+ - 127.0.0.1:18302
+Adding slots...
+ - 127.0.0.1:18300, 3280
+ - 127.0.0.1:18303, 3276
+ - 127.0.0.1:18304, 3276
+ - 127.0.0.1:18301, 3276
+ - 127.0.0.1:18302, 3276
+Check cluster state and asign slot...
+Ok
+create cluster complete.
+ec2-user@lightningdb:3>
+
+Open the conf file.
+ec2-user@lightningdb:3> conf cluster
+
+You can modify redis.properties by entering the command as shown above.
+#!/bin/bash
+
+## Master hosts and ports
+export SR2_REDIS_MASTER_HOSTS=( "127.0.0.1" )
+export SR2_REDIS_MASTER_PORTS=( $(seq 18300 18304) )
+
+## Slave hosts and ports (optional)
+[[export]] SR2_REDIS_SLAVE_HOSTS=( "127.0.0.1" )
+[[export]] SR2_REDIS_SLAVE_PORTS=( $(seq 18600 18609) )
+
+## only single data directory in redis db and flash db
+## Must exist below variables; 'SR2_REDIS_DATA', 'SR2_REDIS_DB_PATH' and 'SR2_FLASH_DB_PATH'
+[[export]] SR2_REDIS_DATA="/nvdrive0/nvkvs/redis"
+[[export]] SR2_REDIS_DB_PATH="/nvdrive0/nvkvs/redis"
+[[export]] SR2_FLASH_DB_PATH="/nvdrive0/nvkvs/flash"
+
+## multiple data directory in redis db and flash db
+export SSD_COUNT=3
+[[export]] HDD_COUNT=3
+export SR2_REDIS_DATA="~/sata_ssd/ssd_"
+export SR2_REDIS_DB_PATH="~/sata_ssd/ssd_"
+export SR2_FLASH_DB_PATH="~/sata_ssd/ssd_"
+
+#######################################################
+# Example : only SSD data directory
+[[export]] SSD_COUNT=3
+[[export]] SR2_REDIS_DATA="/ssd_"
+[[export]] SR2_REDIS_DB_PATH="/ssd_"
+[[export]] SR2_FLASH_DB_PATH="/ssd_"
+#######################################################
+
+Modify SR2_REDIS_SLAVE_HOSTS and SR2_REDIS_SLAVE_PORTS as shown below.
#!/bin/bash
+
+## Master hosts and ports
+export SR2_REDIS_MASTER_HOSTS=( "127.0.0.1" )
+export SR2_REDIS_MASTER_PORTS=( $(seq 18300 18304) )
+
+## Slave hosts and ports (optional)
+export SR2_REDIS_SLAVE_HOSTS=( "127.0.0.1" )
+export SR2_REDIS_SLAVE_PORTS=( $(seq 18350 18354) )
+
+## only single data directory in redis db and flash db
+## Must exist below variables; 'SR2_REDIS_DATA', 'SR2_REDIS_DB_PATH' and 'SR2_FLASH_DB_PATH'
+[[export]] SR2_REDIS_DATA="/nvdrive0/nvkvs/redis"
+[[export]] SR2_REDIS_DB_PATH="/nvdrive0/nvkvs/redis"
+[[export]] SR2_FLASH_DB_PATH="/nvdrive0/nvkvs/flash"
+
+## multiple data directory in redis db and flash db
+export SSD_COUNT=3
+[[export]] HDD_COUNT=3
+export SR2_REDIS_DATA="~/sata_ssd/ssd_"
+export SR2_REDIS_DB_PATH="~/sata_ssd/ssd_"
+export SR2_FLASH_DB_PATH="~/sata_ssd/ssd_"
+
+#######################################################
+# Example : only SSD data directory
+[[export]] SSD_COUNT=3
+[[export]] SR2_REDIS_DATA="/ssd_"
+[[export]] SR2_REDIS_DB_PATH="/ssd_"
+[[export]] SR2_FLASH_DB_PATH="/ssd_"
+#######################################################
+
+Save the modification and exit.
+ec2-user@lightningdb:3> conf cluster
+Check status of hosts...
+OK
+sync conf
+OK
+Complete edit
+
+Run the cluster add-slave command.
+ec2-user@lightningdb:3> cluster add-slave
+Check status of hosts...
+OK
+Check cluster exist...
+ - 127.0.0.1
+OK
+clean redis conf, node conf, db data of master
+clean redis conf, node conf, db data of slave
+ - 127.0.0.1
+Backup redis slave log in each SLAVE hosts...
+ - 127.0.0.1
+create redis data directory in each SLAVE hosts
+ - 127.0.0.1
+sync conf
+OK
+Starting slave nodes : 127.0.0.1 : 18350|18351|18352|18353|18354 ...
+Wait until all redis process up...
+cur: 10 / total: 10
+Complete all redis process up
+replicate [M] 127.0.0.1 18300 - [S] 127.0.0.1 18350
+replicate [M] 127.0.0.1 18301 - [S] 127.0.0.1 18351
+replicate [M] 127.0.0.1 18302 - [S] 127.0.0.1 18352
+replicate [M] 127.0.0.1 18303 - [S] 127.0.0.1 18353
+replicate [M] 127.0.0.1 18304 - [S] 127.0.0.1 18354
+5 / 5 meet complete.
+
+ec2-user@lightningdb:3> cli cluster nodes
+0549ec03031213f95121ceff6c9c13800aef848c 127.0.0.1:18303 master - 0 1574132251126 3 connected 3280-6555
+1b09519d37ebb1c09095158b4f1c9f318ddfc747 127.0.0.1:18352 slave a6a8013cf0032f0f36baec3162122b3d993dd2c8 0 1574132251025 6 connected
+c7dc4815e24054104dff61cac6b13256a84ac4ae 127.0.0.1:18353 slave 0549ec03031213f95121ceff6c9c13800aef848c 0 1574132251126 3 connected
+0ab96cb79165ddca7d7134f80aea844bd49ae2e1 127.0.0.1:18351 slave 7e97f8a8799e1e28feee630b47319e6f5e1cfaa7 0 1574132250724 4 connected
+7e97f8a8799e1e28feee630b47319e6f5e1cfaa7 127.0.0.1:18301 master - 0 1574132250524 4 connected 9832-13107
+e67005a46984445e559a1408dd0a4b24a8c92259 127.0.0.1:18304 master - 0 1574132251126 5 connected 6556-9831
+a6a8013cf0032f0f36baec3162122b3d993dd2c8 127.0.0.1:18302 master - 0 1574132251126 2 connected 13108-16383
+492cdf4b1dedab5fb94e7129da2a0e05f6c46c4f 127.0.0.1:18350 slave 83b7ef98b80a05a4ee795ae6b399c8cde54ad04e 0 1574132251126 6 connected
+f9f7fcee9009f25618e63d2771ee2529f814c131 127.0.0.1:18354 slave e67005a46984445e559a1408dd0a4b24a8c92259 0 1574132250724 5 connected
+83b7ef98b80a05a4ee795ae6b399c8cde54ad04e 127.0.0.1:18300 myself,master - 0 1574132250000 1 connected 0-3279
+
+
+(2) Scale out
+You can scale out the current cluster with a new server.
+Examples
+
+matthew@lightningdb:21> cluster distribution
++-----------------------+--------+-------+
+| HOST | MASTER | SLAVE |
++-----------------------+--------+-------+
+| fbg04(192.168.111.41) | 3 | 3 |
+| TOTAL | 3 | 3 |
++-----------------------+--------+-------+
+
+matthew@lightningdb:21> cluster scaleout
+Please type hosts to scaleout separated by comma(,) [127.0.0.1]
+fbg05
+OK, ['fbg05']
+Check status of hosts...
+OK
+Checking cluster exist...
+ - fbg04
+ - fbg05
+OK
++-------+-------+--------+
+| HOST | PORT | TYPE |
++-------+-------+--------+
+| fbg04 | 20100 | MASTER |
+| fbg04 | 20101 | MASTER |
+| fbg04 | 20102 | MASTER |
+| fbg05 | 20100 | MASTER |
+| fbg05 | 20101 | MASTER |
+| fbg05 | 20102 | MASTER |
+| fbg04 | 20150 | SLAVE |
+| fbg04 | 20151 | SLAVE |
+| fbg04 | 20152 | SLAVE |
+| fbg05 | 20150 | SLAVE |
+| fbg05 | 20151 | SLAVE |
+| fbg05 | 20152 | SLAVE |
++-------+-------+--------+
+replicas: 1
+Do you want to proceed with replicate according to the above information? (y/n)
+y
+Backup redis master log in each MASTER hosts...
+ - fbg04
+ - fbg05
+Backup redis slave log in each SLAVE hosts...
+ - fbg04
+ - fbg05
+create redis data directory in each MASTER
+ - fbg04
+ - fbg05
+create redis data directory in each SLAVE
+ - fbg04
+ - fbg05
+sync conf
+OK
+Starting master nodes : fbg04 : 20100|20101|20102 ...
+Starting master nodes : fbg05 : 20100|20101|20102 ...
+Starting slave nodes : fbg04 : 20150|20151|20152 ...
+Starting slave nodes : fbg05 : 20150|20151|20152 ...
+Wait until all redis process up...
+alive redis 12/12
+Complete all redis process up.
+Replicate [M] fbg04:20100 - [S] fbg05:20150
+Replicate [M] fbg04:20101 - [S] fbg05:20151
+Replicate [M] fbg04:20102 - [S] fbg05:20152
+Replicate [M] fbg05:20100 - [S] fbg04:20150
+Replicate [M] fbg05:20101 - [S] fbg04:20151
+Replicate [M] fbg05:20102 - [S] fbg04:20152
+6 / 6 replicate completion.
+M: 47f7f65f36fbf1eb89e29ce1fd2facd8bb646f15 192.168.111.41 20100 slots:5462-10922 (5461 slots)
+M: 2ee3d14c92321132e12cddb90dde8240ea6b8768 192.168.111.44 20101 slots: (0 slots)
+S: 0516e827969880b2322ae112e70e809b395c6d46 192.168.111.44 20151 slots: (0 slots)
+S: fd1466ec198951cbe7e172ae34bd5b3db66aa309 192.168.111.44 20150 slots: (0 slots)
+S: 28e4d04419c90c7b1bb4b067f9e15d4012d313b1 192.168.111.44 20152 slots: (0 slots)
+S: 56e1d3ab563b23bbf857a8f502d1c4b24ce74a3c 192.168.111.41 20151 slots: (0 slots)
+M: 00d9cea97499097645eecd0bddf0f4679a6f1be1 192.168.111.44 20100 slots: (0 slots)
+S: 9a21e798fc8d69a4b04910b9e4b87a69417d33fe 192.168.111.41 20150 slots: (0 slots)
+M: 6afbfe0ed8d701d269d8b2837253678d3452fb70 192.168.111.41 20102 slots:0-5461 (5462 slots)
+M: 7e2e3de6daebd6e144365d58db19629cfb1b87d1 192.168.111.41 20101 slots:10923-16383 (5461 slots)
+S: 1df738824e9d41622158a4102ba4aab355225747 192.168.111.41 20152 slots: (0 slots)
+M: 71334ecc4e6e1a707b0f7f6c85f0a75ece45f891 192.168.111.44 20102 slots: (0 slots)
+>>> Performing Cluster Check (using node 192.168.111.41:20100)
+[OK] All nodes agree about slots configuration.
+>>> Check for open slots...
+>>> Check slots coverage...
+[OK] All 16384 slots covered
+err_perc: 50.009156
+err_perc: 50.018308
+err_perc: 50.009156
+>>> Rebalancing across 6 nodes. Total weight = 6
+2ee3d14c92321132e12cddb90dde8240ea6b8768 balance is -2732
+00d9cea97499097645eecd0bddf0f4679a6f1be1 balance is -2731
+71334ecc4e6e1a707b0f7f6c85f0a75ece45f891 balance is -2731
+47f7f65f36fbf1eb89e29ce1fd2facd8bb646f15 balance is 2731
+7e2e3de6daebd6e144365d58db19629cfb1b87d1 balance is 2731
+6afbfe0ed8d701d269d8b2837253678d3452fb70 balance is 2732
+Moving 2732 slots from 6afbfe0ed8d701d269d8b2837253678d3452fb70 to 2ee3d14c92321132e12cddb90dde8240ea6b8768
+########################################
+
+Moving 2731 slots from 7e2e3de6daebd6e144365d58db19629cfb1b87d1 to 00d9cea97499097645eecd0bddf0f4679a6f1be1
+########################################
+
+Moving 2731 slots from 47f7f65f36fbf1eb89e29ce1fd2facd8bb646f15 to 71334ecc4e6e1a707b0f7f6c85f0a75ece45f891
+########################################
+
+OK
+
+matthew@lightningdb:21> cluster distribution
++-----------------------+--------+-------+
+| HOST | MASTER | SLAVE |
++-----------------------+--------+-------+
+| fbg04(192.168.111.41) | 3 | 3 |
+| fbg05(192.168.111.44) | 3 | 3 |
+| TOTAL | 6 | 6 |
++-----------------------+--------+-------+
+
+
+
+
+
+
+
+
+With conf commands, you can configure the cluster.
+You can open the template file with the below options (cluster/master/thriftserver).
+After saving the template file, the configuration will be synchronized with all nodes in the current cluster.
+conf cluster will open the redis.properties file of the current cluster.
matthew@lightningdb:21> conf cluster
+Check status of hosts...
+OK
+Sync conf...
+OK
+Complete edit.
+
+redis.properties
#!/bin/bash
+
+## Master hosts and ports
+export SR2_REDIS_MASTER_HOSTS=( "192.168.111.41" "192.168.111.44" )
+export SR2_REDIS_MASTER_PORTS=( $(seq 20100 20102) )
+
+## Slave hosts and ports (optional)
+export SR2_REDIS_SLAVE_HOSTS=( "192.168.111.41" "192.168.111.44" )
+export SR2_REDIS_SLAVE_PORTS=( $(seq 20150 20152) )
+
+
+## multiple data directory in redis db and flash db
+export SSD_COUNT=3
+export SR2_REDIS_DATA="/sata_ssd/ssd_02/matthew"
+export SR2_REDIS_DB_PATH="/sata_ssd/ssd_02/matthew"
+export SR2_FLASH_DB_PATH="/sata_ssd/ssd_02/matthew"
+
+conf master will open the redis-master.conf.template file of the current cluster. This file will configure all redis-servers in the current cluster.
matthew@lightningdb:21> conf master
+Check status of hosts...
+OK
+Sync conf...
+OK
+Complete edit.
+
+redis-master.conf.template
# In short... if you have slaves attached it is suggested that you set a lower
+# limit for maxmemory so that there is some free RAM on the system for slave
+# output buffers (but this is not needed if the policy is 'noeviction').
+#
+# maxmemory <bytes>
+# maxmemory should be greater than 51mb in TSR2
+maxmemory 300mb
+
+conf thriftserver will open the thriftserver.properties file of the current thriftserver.
matthew@lightningdb:21> conf thriftserver
+Check status of hosts...
+OK
+Sync conf...
+OK
+Complete edit.
+
+thriftserver.properties
#!/bin/bash
+###############################################################################
+# Common variables
+SPARK_CONF=${SPARK_CONF:-$SPARK_HOME/conf}
+SPARK_BIN=${SPARK_BIN:-$SPARK_HOME/bin}
+SPARK_SBIN=${SPARK_SBIN:-$SPARK_HOME/sbin}
+SPARK_LOG=${SPARK_LOG:-$SPARK_HOME/logs}
+
+SPARK_METRICS=${SPARK_CONF}/metrics.properties
+SPARK_UI_PORT=${SPARK_UI_PORT:-14050}
+EXECUTERS=12
+EXECUTER_CORES=32
+
+HIVE_METASTORE_URL=''
+HIVE_HOST=${HIVE_HOST:-localhost}
+HIVE_PORT=${HIVE_PORT:-13000}
+
+COMMON_CLASSPATH=$(find $SR2_LIB -name 'tsr2*' -o -name 'spark-r2*' -o -name '*jedis*' -o -name 'commons*' -o -name 'jdeferred*' \
+-o -name 'geospark*' -o -name 'gt-*' | tr '\n' ':')
+
+###############################################################################
+# Driver
+DRIVER_MEMORY=6g
+DRIVER_CLASSPATH=$COMMON_CLASSPATH
+
+###############################################################################
+# Execute
+EXECUTOR_MEMORY=2g
+EXECUTOR_CLASSPATH=$COMMON_CLASSPATH
+
+###############################################################################
+# Thrift Server logs
+EVENT_LOG_ENABLED=false
+EVENT_LOG_DIR=/nvdrive0/thriftserver-event-logs
+EVENT_LOG_ROLLING_DIR=/nvdrive0/thriftserver-event-logs-rolling
+EVENT_LOG_SAVE_MIN=60
+EXTRACTED_EVENT_LOG_SAVE_DAY=5
+SPARK_LOG_SAVE_MIN=2000
+##############
+
+########################
+# Thrift Name
+cluster_id=$(echo $SR2_HOME | awk -F "cluster_" '{print $2}' | awk -F '/' '{print $1}')
+host=$(hostname)
+THRIFT_NAME="ThriftServer_${host}_${cluster_id}"
+########################
+
+###############################################################################
+# AGGREGATION PUSHDOWN
+AGG_PUSHDOWN=true
+###############################################################################
+
+With the sync {IP address} or sync {hostname} command, you can load the configurations of all clusters from the remote server to localhost.
matthew@lightningdb:21> sync fbg04
+Localhost already has the information on the cluster 21. Do you want to overwrite? (y/n) [n]
+y
+Localhost already has the information on the cluster 20. Do you want to overwrite? (y/n) [n]
+n
+Importing cluster complete...
+
+
+
+
+
+
+
+If you want to see the list of Thrift Server commands, use the thriftserver command without any option.
NAME
+ ltcli thriftserver
+
+SYNOPSIS
+ ltcli thriftserver COMMAND
+
+COMMANDS
+ COMMAND is one of the following:
+
+ beeline
+ Connect to thriftserver command line
+
+ monitor
+ Show thriftserver log
+
+ restart
+ Thriftserver restart
+
+ start
+ Start thriftserver
+
+ stop
+ Stop thriftserver
+
+Connect to the thrift server
+ec2-user@lightningdb:1> thriftserver beeline
+Connecting...
+Connecting to jdbc:hive2://localhost:13000
+19/11/19 04:45:18 INFO jdbc.Utils: Supplied authorities: localhost:13000
+19/11/19 04:45:18 INFO jdbc.Utils: Resolved authority: localhost:13000
+19/11/19 04:45:18 INFO jdbc.HiveConnection: Will try to open client transport with JDBC Uri: jdbc:hive2://localhost:13000
+Connected to: Spark SQL (version 2.3.1)
+Driver: Hive JDBC (version 1.2.1.spark2)
+Transaction isolation: TRANSACTION_REPEATABLE_READ
+Beeline version 1.2.1.spark2 by Apache Hive
+0: jdbc:hive2://localhost:13000> show tables;
++-----------+------------+--------------+--+
+| database | tableName | isTemporary |
++-----------+------------+--------------+--+
++-----------+------------+--------------+--+
+No rows selected (0.55 seconds)
+
+The default db url to connect to is jdbc:hive2://$HIVE_HOST:$HIVE_PORT. You can modify $HIVE_HOST and $HIVE_PORT with the conf thriftserver command.
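+For example, to point beeline at a different thrift server, you could change the defaults in thriftserver.properties via conf thriftserver. A minimal sketch (the host below is illustrative, not from this deployment):
+HIVE_HOST=${HIVE_HOST:-192.168.111.41}
+HIVE_PORT=${HIVE_PORT:-13000}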
You can view the logs of the thrift server in real-time.
+ec2-user@lightningdb:1> thriftserver monitor
+Press Ctrl-C for exit.
+19/11/19 04:43:33 INFO storage.BlockManagerMasterEndpoint: Registering block manager ip-172-31-39-147.ap-northeast-2.compute.internal:35909 with 912.3 MB RAM, BlockManagerId(4, ip-172-31-39-147.ap-northeast-2.compute.internal, 35909, None)
+19/11/19 04:43:33 INFO cluster.YarnSchedulerBackend$YarnDriverEndpoint: Registered executor NettyRpcEndpointRef(spark-client://Executor) (172.31.39.147:53604) with ID 5
+19/11/19 04:43:33 INFO storage.BlockManagerMasterEndpoint: Registering block manager
+...
+
+Restart the thrift server.
+ec2-user@lightningdb:1> thriftserver restart
+no org.apache.spark.sql.hive.thriftserver.HiveThriftServer2 to stop
+starting org.apache.spark.sql.hive.thriftserver.HiveThriftServer2, logging to /opt/spark/logs/spark-ec2-user-org.apache.spark.sql.hive.thriftserver.HiveThriftServer2-1-ip-172-31-39-147.ap-northeast-2.compute.internal.out
+
+Run the thrift server.
+ec2-user@lightningdb:1> thriftserver start
+starting org.apache.spark.sql.hive.thriftserver.HiveThriftServer2, logging to /opt/spark/logs/spark-ec2-user-org.apache.spark.sql.hive.thriftserver.HiveThriftServer2-1-ip-172-31-39-147.ap-northeast-2.compute.internal.out
+
+You can view the logs through the monitor command.
+Shut down the thrift server.
+ec2-user@lightningdb:1> thriftserver stop
+stopping org.apache.spark.sql.hive.thriftserver.HiveThriftServer2
+
+
+
+
+
+
+
+ You can check the version of LTCLI tool.
+$ ltcli --version
+ltcli version 1.1.5
+
+You can check the version of Lightning DB that is deployed in each cluster.
+$ ltcli
+Cluster '21' selected.
+matthew@lightningdb:21> cluster version
+- build date : 20200820-173819
+- branch: release.flashbase_v1.2.3
+- last commit-id: 45814d
+- output binary: lightningdb.release.release.flashbase_v1.2.3.45814d.bin
+matthew@lightningdb:21>
+
+
+
+
+
+
+
+Note
+The Command Line Interface (CLI) of LightningDB supports not only deploy and start commands but also many commands to access and manipulate data in LightningDB.
+If you want to see the list of cluster commands, use the cluster command without any option.
+ec2-user@lightningdb:1> cluster
+
+NAME
+ ltcli cluster - This is cluster command
+
+SYNOPSIS
+ ltcli cluster COMMAND
+
+DESCRIPTION
+ This is cluster command
+
+COMMANDS
+ COMMAND is one of the following:
+
+ add_slave
+ Add slaves to cluster additionally
+
+ clean
+ Clean cluster
+
+ configure
+
+ create
+ Create cluster
+
+ ls
+ Check cluster list
+
+ rebalance
+ Rebalance
+
+ restart
+ Restart redist cluster
+
+ rowcount
+ Query and show cluster row count
+
+ start
+ Start cluster
+
+ stop
+ Stop cluster
+
+ use
+ Change selected cluster
+
+(1) Cluster configure
+redis-{port}.conf files are generated using the redis-{master/slave}.conf.template and redis.properties files.
+> cluster configure
+
+(2) Cluster start
+- Backup: existing log files in ${SR2_HOME}/logs/redis/ will be moved to ${SR2_HOME}/logs/redis/backup/.
+- Create the redis data directories under ${SR2_REDIS_DATA}.
+- Start each redis-server process with its ${SR2_HOME}/conf/redis/redis-{port}.conf file; logs are written to ${SR2_HOME}/logs/redis/.
ec2-user@lightningdb:1> cluster start
+Check status of hosts...
+OK
+Check cluster exist...
+ - 127.0.0.1
+OK
+Backup redis master log in each MASTER hosts...
+ - 127.0.0.1
+Generate redis configuration files for master hosts
+sync conf
++-----------+--------+
+| HOST | STATUS |
++-----------+--------+
+| 127.0.0.1 | OK |
++-----------+--------+
+Starting master nodes : 127.0.0.1 : 18100|18101|18102|18103|18104 ...
+Wait until all redis process up...
+cur: 5 / total: 5
+Complete all redis process up
+
+Errors
+Redis-server (master) process with the same port is already running. To resolve this error, use cluster stop or kill {pid of the process}.
$ cluster start
+...
+...
+[ErrorCode 11] Fail to start... Must be checked running MASTER redis processes!
+We estimate that redis process is <alive-redis-count>.
+
+Redis-server (slave) process with the same port is already running. To resolve this error, use cluster stop or kill {pid of the process}.
$ cluster start
+...
+[ErrorCode 12] Fail to start... Must be checked running SLAVE redis processes!
+We estimate that redis process is <alive-redis-count>.
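+If you need to clear a leftover redis-server process manually, a minimal sketch (assuming the example ports 18100-18104 above) is to look up its pid and terminate it:
+$ ps -ef | grep redis-server | grep 18100
+$ kill {pid of the process}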
+
+The conf file is not found. To resolve this error, use cluster configure and then cluster start.
$ cluster start
+...
+FileNotExistError: ${SR2_HOME}/conf/redis/redis-{port}.conf
+
+$ cluster start
+...
+ClusterRedisError: Fail to start redis: max try exceed
+Recommendation Command: 'monitor'
+
+(3) Cluster create
+After checking the information of the cluster, create a cluster of LightningDB.
+Case 1) When redis-server processes are running, create a cluster only.
+ec2-user@lightningdb:1>cluster create
+Check status of hosts...
+OK
+>>> Creating cluster
++-----------+-------+--------+
+| HOST | PORT | TYPE |
++-----------+-------+--------+
+| 127.0.0.1 | 18100 | MASTER |
+| 127.0.0.1 | 18101 | MASTER |
+| 127.0.0.1 | 18102 | MASTER |
+| 127.0.0.1 | 18103 | MASTER |
+| 127.0.0.1 | 18104 | MASTER |
++-----------+-------+--------+
+replicas: 0
+
+Do you want to proceed with the create according to the above information? (y/n)
+y
+Cluster meet...
+ - 127.0.0.1:18100
+ - 127.0.0.1:18103
+ - 127.0.0.1:18104
+ - 127.0.0.1:18101
+ - 127.0.0.1:18102
+Adding slots...
+ - 127.0.0.1:18100, 3280
+ - 127.0.0.1:18103, 3276
+ - 127.0.0.1:18104, 3276
+ - 127.0.0.1:18101, 3276
+ - 127.0.0.1:18102, 3276
+Check cluster state and asign slot...
+Ok
+create cluster complete.
+
+Case 2) When redis-server processes are not running, create a cluster after launching the redis-server processes with the cluster start command.
+ec2-user@lightningdb:4>cluster create
+Check status of hosts...
+OK
+Backup redis master log in each MASTER hosts...
+ - 127.0.0.1
+create redis data directory in each MASTER hosts
+ - 127.0.0.1
+sync conf
++-----------+--------+
+| HOST | STATUS |
++-----------+--------+
+| 127.0.0.1 | OK |
++-----------+--------+
+OK
+Starting master nodes : 127.0.0.1 : 18100|18101|18102|18103|18104 ...
+Wait until all redis process up...
+cur: 5 / total: 5
+Complete all redis process up
+>>> Creating cluster
++-----------+-------+--------+
+| HOST | PORT | TYPE |
++-----------+-------+--------+
+| 127.0.0.1 | 18100 | MASTER |
+| 127.0.0.1 | 18101 | MASTER |
+| 127.0.0.1 | 18102 | MASTER |
+| 127.0.0.1 | 18103 | MASTER |
+| 127.0.0.1 | 18104 | MASTER |
++-----------+-------+--------+
+replicas: 0
+
+Do you want to proceed with the create according to the above information? (y/n)
+y
+Cluster meet...
+ - 127.0.0.1:18103
+ - 127.0.0.1:18104
+ - 127.0.0.1:18101
+ - 127.0.0.1:18102
+ - 127.0.0.1:18100
+Adding slots...
+ - 127.0.0.1:18103, 3280
+ - 127.0.0.1:18104, 3276
+ - 127.0.0.1:18101, 3276
+ - 127.0.0.1:18102, 3276
+ - 127.0.0.1:18100, 3276
+Check cluster state and asign slot...
+Ok
+create cluster complete.
+
+Errors
+When redis servers are not running, this error (Errno 111) will occur. To solve this error, run the cluster start command first.
+ec2-user@lightningdb:1>cluster create
+Check status of hosts...
+OK
+>>> Creating cluster
++-----------+-------+--------+
+| HOST | PORT | TYPE |
++-----------+-------+--------+
+| 127.0.0.1 | 18100 | MASTER |
+| 127.0.0.1 | 18101 | MASTER |
+| 127.0.0.1 | 18102 | MASTER |
+| 127.0.0.1 | 18103 | MASTER |
+| 127.0.0.1 | 18104 | MASTER |
++-----------+-------+--------+
+replicas: 0
+
+Do you want to proceed with the create according to the above information? (y/n)
+y
+127.0.0.1:18100 - [Errno 111] Connection refused
+
+(4) Cluster stop
+Gracefully kill all redis-servers (master/slave) with SIGINT.
+ec2-user@lightningdb:1> cluster stop
+Check status of hosts...
+OK
+Stopping master cluster of redis...
+cur: 5 / total: 5
+cur: 0 / total: 5
+Complete all redis process down
+
+Options
+--force
+
+(5) Cluster clean
+Remove conf files for redis-server and all data(aof, rdb, RocksDB) of LightningDB
+ec2-user@lightningdb:1> cluster clean
+Removing redis generated master configuration files
+ - 127.0.0.1
+Removing flash db directory, appendonly and dump.rdb files in master
+ - 127.0.0.1
+Removing master node configuration
+ - 127.0.0.1
+
+(6) Cluster restart
+Process cluster stop and then cluster start.
+Options
+--force-stop
+
+--reset
+
+--cluster
+Process cluster create as well. This option should be called with --reset (see the example below).
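+For example, to stop the cluster, clean it, and re-create it in one pass, the options can be combined as below (a sketch; verify the exact flag behavior against your LTCLI version):
+ec2-user@lightningdb:1> cluster restart --force-stop --reset --cluster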
+(7) Cluster ls
+List the deployed clusters.
+ec2-user@lightningdb:2> cluster ls
+[1, 2]
+
+(8) Cluster use
+Change the cluster that LTCLI uses, with the cluster use or c command.
ec2-user@lightningdb:2> cluster use 1
+Cluster '1' selected.
+ec2-user@lightningdb:1> c 2
+Cluster '2' selected.
+
+(9) Cluster add_slave
+Warning
+Before using the add-slave command, ingestion to master nodes should be stopped. After replication and sync between master and slave are completed, ingestion will be available again.
+You can add slaves to a cluster that was configured with masters only, without redundancy.
+Create cluster only with masters
+Proceed with the deploy.
+ec2-user@lightningdb:2> deploy 3
+Select installer
+
+ [ INSTALLER LIST ]
+ (1) lightningdb.dev.master.5a6a38.bin
+
+Please enter the number, file path or url of the installer you want to use.
+you can also add file in list by copy to '$FBPATH/releases/'
+https://flashbase.s3.ap-northeast-2.amazonaws.com/lightningdb.release.master.5a6a38.bin
+Downloading lightningdb.release.master.5a6a38.bin
+[==================================================] 100%
+OK, lightningdb.release.master.5a6a38.bin
+Please type host list separated by comma(,) [127.0.0.1]
+
+OK, ['127.0.0.1']
+How many masters would you like to create on each host? [5]
+
+OK, 5
+Please type ports separate with comma(,) and use hyphen(-) for range. [18300-18304]
+
+OK, ['18300-18304']
+How many replicas would you like to create on each master? [0]
+
+OK, 0
+How many ssd would you like to use? [3]
+
+OK, 3
+Type prefix of db path [~/sata_ssd/ssd_]
+
+OK, ~/sata_ssd/ssd_
++--------------+---------------------------------+
+| NAME | VALUE |
++--------------+---------------------------------+
+| installer | lightningdb.dev.master.5a6a38.bin |
+| hosts | 127.0.0.1 |
+| master ports | 18300-18304 |
+| ssd count | 3 |
+| db path | ~/sata_ssd/ssd_ |
++--------------+---------------------------------+
+Do you want to proceed with the deploy accroding to the above information? (y/n)
+y
+Check status of hosts...
++-----------+--------+
+| HOST | STATUS |
++-----------+--------+
+| 127.0.0.1 | OK |
++-----------+--------+
+OK
+Checking for cluster exist...
++-----------+--------+
+| HOST | STATUS |
++-----------+--------+
+| 127.0.0.1 | CLEAN |
++-----------+--------+
+OK
+Transfer installer and execute...
+ - 127.0.0.1
+Sync conf...
+Complete to deploy cluster 3.
+Cluster '3' selected.
+
+ec2-user@lightningdb:3> cluster start
+Check status of hosts...
+OK
+Check cluster exist...
+ - 127.0.0.1
+OK
+Backup redis master log in each MASTER hosts...
+ - 127.0.0.1
+create redis data directory in each MASTER hosts
+ - 127.0.0.1
+sync conf
++-----------+--------+
+| HOST | STATUS |
++-----------+--------+
+| 127.0.0.1 | OK |
++-----------+--------+
+OK
+Starting master nodes : 127.0.0.1 : 18300|18301|18302|18303|18304 ...
+Wait until all redis process up...
+cur: 5 / total: 5
+Complete all redis process up
+ec2-user@lightningdb:3> cluster create
+Check status of hosts...
+OK
+>>> Creating cluster
++-----------+-------+--------+
+| HOST | PORT | TYPE |
++-----------+-------+--------+
+| 127.0.0.1 | 18300 | MASTER |
+| 127.0.0.1 | 18301 | MASTER |
+| 127.0.0.1 | 18302 | MASTER |
+| 127.0.0.1 | 18303 | MASTER |
+| 127.0.0.1 | 18304 | MASTER |
++-----------+-------+--------+
+replicas: 0
+
+Do you want to proceed with the create according to the above information? (y/n)
+y
+Cluster meet...
+ - 127.0.0.1:18300
+ - 127.0.0.1:18303
+ - 127.0.0.1:18304
+ - 127.0.0.1:18301
+ - 127.0.0.1:18302
+Adding slots...
+ - 127.0.0.1:18300, 3280
+ - 127.0.0.1:18303, 3276
+ - 127.0.0.1:18304, 3276
+ - 127.0.0.1:18301, 3276
+ - 127.0.0.1:18302, 3276
+Check cluster state and asign slot...
+Ok
+create cluster complete.
+ec2-user@lightningdb:3>
+
+Open the conf file.
+ec2-user@lightningdb:3> conf cluster
+
+You can modify redis.properties by entering the command as shown above.
+#!/bin/bash
+
+## Master hosts and ports
+export SR2_REDIS_MASTER_HOSTS=( "127.0.0.1" )
+export SR2_REDIS_MASTER_PORTS=( $(seq 18300 18304) )
+
+## Slave hosts and ports (optional)
+#export SR2_REDIS_SLAVE_HOSTS=( "127.0.0.1" )
+#export SR2_REDIS_SLAVE_PORTS=( $(seq 18600 18609) )
+
+## only single data directory in redis db and flash db
+## Must exist below variables; 'SR2_REDIS_DATA', 'SR2_REDIS_DB_PATH' and 'SR2_FLASH_DB_PATH'
+#export SR2_REDIS_DATA="/nvdrive0/nvkvs/redis"
+#export SR2_REDIS_DB_PATH="/nvdrive0/nvkvs/redis"
+#export SR2_FLASH_DB_PATH="/nvdrive0/nvkvs/flash"
+
+## multiple data directory in redis db and flash db
+export SSD_COUNT=3
+#export HDD_COUNT=3
+export SR2_REDIS_DATA="~/sata_ssd/ssd_"
+export SR2_REDIS_DB_PATH="~/sata_ssd/ssd_"
+export SR2_FLASH_DB_PATH="~/sata_ssd/ssd_"
+
+#######################################################
+# Example : only SSD data directory
+#export SSD_COUNT=3
+#export SR2_REDIS_DATA="/ssd_"
+#export SR2_REDIS_DB_PATH="/ssd_"
+#export SR2_FLASH_DB_PATH="/ssd_"
+#######################################################
+
+Modify SR2_REDIS_SLAVE_HOSTS and SR2_REDIS_SLAVE_PORTS as shown below.
#!/bin/bash
+
+## Master hosts and ports
+export SR2_REDIS_MASTER_HOSTS=( "127.0.0.1" )
+export SR2_REDIS_MASTER_PORTS=( $(seq 18300 18304) )
+
+## Slave hosts and ports (optional)
+export SR2_REDIS_SLAVE_HOSTS=( "127.0.0.1" )
+export SR2_REDIS_SLAVE_PORTS=( $(seq 18350 18354) )
+
+## only single data directory in redis db and flash db
+## Must exist below variables; 'SR2_REDIS_DATA', 'SR2_REDIS_DB_PATH' and 'SR2_FLASH_DB_PATH'
+#export SR2_REDIS_DATA="/nvdrive0/nvkvs/redis"
+#export SR2_REDIS_DB_PATH="/nvdrive0/nvkvs/redis"
+#export SR2_FLASH_DB_PATH="/nvdrive0/nvkvs/flash"
+
+## multiple data directory in redis db and flash db
+export SSD_COUNT=3
+#export HDD_COUNT=3
+export SR2_REDIS_DATA="~/sata_ssd/ssd_"
+export SR2_REDIS_DB_PATH="~/sata_ssd/ssd_"
+export SR2_FLASH_DB_PATH="~/sata_ssd/ssd_"
+
+#######################################################
+# Example : only SSD data directory
+#export SSD_COUNT=3
+#export SR2_REDIS_DATA="/ssd_"
+#export SR2_REDIS_DB_PATH="/ssd_"
+#export SR2_FLASH_DB_PATH="/ssd_"
+#######################################################
+
+Save the modification and exit.
+ec2-user@lightningdb:3> conf cluster
+Check status of hosts...
+OK
+sync conf
+OK
+Complete edit
+
+Execute the cluster add-slave command.
+ec2-user@lightningdb:3> cluster add-slave
+Check status of hosts...
+OK
+Check cluster exist...
+ - 127.0.0.1
+OK
+clean redis conf, node conf, db data of master
+clean redis conf, node conf, db data of slave
+ - 127.0.0.1
+Backup redis slave log in each SLAVE hosts...
+ - 127.0.0.1
+create redis data directory in each SLAVE hosts
+ - 127.0.0.1
+sync conf
+OK
+Starting slave nodes : 127.0.0.1 : 18350|18351|18352|18353|18354 ...
+Wait until all redis process up...
+cur: 10 / total: 10
+Complete all redis process up
+replicate [M] 127.0.0.1 18300 - [S] 127.0.0.1 18350
+replicate [M] 127.0.0.1 18301 - [S] 127.0.0.1 18351
+replicate [M] 127.0.0.1 18302 - [S] 127.0.0.1 18352
+replicate [M] 127.0.0.1 18303 - [S] 127.0.0.1 18353
+replicate [M] 127.0.0.1 18304 - [S] 127.0.0.1 18354
+5 / 5 meet complete.
+
+ec2-user@lightningdb:3> cli cluster nodes
+0549ec03031213f95121ceff6c9c13800aef848c 127.0.0.1:18303 master - 0 1574132251126 3 connected 3280-6555
+1b09519d37ebb1c09095158b4f1c9f318ddfc747 127.0.0.1:18352 slave a6a8013cf0032f0f36baec3162122b3d993dd2c8 0 1574132251025 6 connected
+c7dc4815e24054104dff61cac6b13256a84ac4ae 127.0.0.1:18353 slave 0549ec03031213f95121ceff6c9c13800aef848c 0 1574132251126 3 connected
+0ab96cb79165ddca7d7134f80aea844bd49ae2e1 127.0.0.1:18351 slave 7e97f8a8799e1e28feee630b47319e6f5e1cfaa7 0 1574132250724 4 connected
+7e97f8a8799e1e28feee630b47319e6f5e1cfaa7 127.0.0.1:18301 master - 0 1574132250524 4 connected 9832-13107
+e67005a46984445e559a1408dd0a4b24a8c92259 127.0.0.1:18304 master - 0 1574132251126 5 connected 6556-9831
+a6a8013cf0032f0f36baec3162122b3d993dd2c8 127.0.0.1:18302 master - 0 1574132251126 2 connected 13108-16383
+492cdf4b1dedab5fb94e7129da2a0e05f6c46c4f 127.0.0.1:18350 slave 83b7ef98b80a05a4ee795ae6b399c8cde54ad04e 0 1574132251126 6 connected
+f9f7fcee9009f25618e63d2771ee2529f814c131 127.0.0.1:18354 slave e67005a46984445e559a1408dd0a4b24a8c92259 0 1574132250724 5 connected
+83b7ef98b80a05a4ee795ae6b399c8cde54ad04e 127.0.0.1:18300 myself,master - 0 1574132250000 1 connected 0-3279
+
+
+(10) Cluster rowcount
+Check the count of records that are stored in the cluster.
+ec2-user@lightningdb:1> cluster rowcount
+0
+
+(11) Check the status of cluster
+With the following commands, you can check the status of the cluster.
+ec2-user@lightningdb:1> cli ping --all
+alive redis 10/10
+
+If a node does not reply, the failed node will be displayed as below.
++-------+-----------------+--------+
+| TYPE | ADDR | RESULT |
++-------+-----------------+--------+
+| Slave | 127.0.0.1:18352 | FAIL |
++-------+-----------------+--------+
+alive redis 9/10
+
+ec2-user@lightningdb:1> cli cluster info
+cluster_state:ok
+cluster_slots_assigned:16384
+cluster_slots_ok:16384
+cluster_slots_pfail:0
+cluster_slots_fail:0
+cluster_known_nodes:5
+cluster_size:5
+cluster_current_epoch:4
+cluster_my_epoch:2
+cluster_stats_messages_ping_sent:12
+cluster_stats_messages_pong_sent:14
+cluster_stats_messages_sent:26
+cluster_stats_messages_ping_received:10
+cluster_stats_messages_pong_received:12
+cluster_stats_messages_meet_received:4
+cluster_stats_messages_received:26
+
+ec2-user@lightningdb:1> cli cluster nodes
+559af5e90c3f2c92f19c927c29166c268d938e8f 127.0.0.1:18104 master - 0 1574127926000 4 connected 6556-9831
+174e2a62722273fb83814c2f12e2769086c3d185 127.0.0.1:18101 myself,master - 0 1574127925000 3 connected 9832-13107
+35ab4d3f7f487c5332d7943dbf4b20d5840053ea 127.0.0.1:18100 master - 0 1574127926000 1 connected 0-3279
+f39ed05ace18e97f74c745636ea1d171ac1d456f 127.0.0.1:18103 master - 0 1574127927172 0 connected 3280-6555
+9fd612b86a9ce1b647ba9170b8f4a8bfa5c875fc 127.0.0.1:18102 master - 0 1574127926171 2 connected 13108-16383
+
+(12) Cluster tree
+Users can check the status of the master and slave nodes and see which master and slave nodes are linked.
+ec2-user@lightningdb:9> cluster tree
+127.0.0.1:18900(connected)
+|__ 127.0.0.1:18950(connected)
+
+127.0.0.1:18901(connected)
+|__ 127.0.0.1:18951(connected)
+
+127.0.0.1:18902(connected)
+|__ 127.0.0.1:18952(connected)
+
+127.0.0.1:18903(connected)
+|__ 127.0.0.1:18953(connected)
+
+127.0.0.1:18904(connected)
+|__ 127.0.0.1:18954(connected)
+
+127.0.0.1:18905(connected)
+|__ 127.0.0.1:18955(connected)
+
+127.0.0.1:18906(connected)
+|__ 127.0.0.1:18956(connected)
+
+(13) Cluster failover
+If a master node is killed, its slave node will automatically be promoted after 'cluster-node-timeout'.
+Users can promote the slave node immediately by using the 'cluster failover' command.
+Step 1) Check the status of the cluster
+In this case, '127.0.0.1:18902' node is killed.
+ec2-user@lightningdb:9> cluster tree
+127.0.0.1:18900(connected)
+|__ 127.0.0.1:18950(connected)
+
+127.0.0.1:18901(connected)
+|__ 127.0.0.1:18951(connected)
+
+127.0.0.1:18902(disconnected) <--- Killed!
+|__ 127.0.0.1:18952(connected)
+
+127.0.0.1:18903(connected)
+|__ 127.0.0.1:18953(connected)
+
+127.0.0.1:18904(connected)
+|__ 127.0.0.1:18954(connected)
+
+127.0.0.1:18905(connected)
+|__ 127.0.0.1:18955(connected)
+
+127.0.0.1:18906(connected)
+|__ 127.0.0.1:18956(connected)
+
+Step 2) Do failover with 'cluster failover' command
+ec2-user@lightningdb:9> cluster failover
+failover 127.0.0.1:18952 for 127.0.0.1:18902
+OK
+ec2-user@lightningdb:9> cluster tree
+127.0.0.1:18900(connected)
+|__ 127.0.0.1:18950(connected)
+
+127.0.0.1:18901(connected)
+|__ 127.0.0.1:18951(connected)
+
+127.0.0.1:18902(disconnected) <--- Killed!
+
+127.0.0.1:18903(connected)
+|__ 127.0.0.1:18953(connected)
+
+127.0.0.1:18904(connected)
+|__ 127.0.0.1:18954(connected)
+
+127.0.0.1:18905(connected)
+|__ 127.0.0.1:18955(connected)
+
+127.0.0.1:18906(connected)
+|__ 127.0.0.1:18956(connected)
+
+127.0.0.1:18952(connected) <--- Promoted to master!
+
+(14) Cluster failback
+With the 'cluster failback' command, the killed node is restarted and added to the cluster as a slave node.
+ec2-user@lightningdb:9> cluster failback
+run 127.0.0.1:18902
+ec2-user@lightningdb:9> cluster tree
+127.0.0.1:18900(connected)
+|__ 127.0.0.1:18950(connected)
+
+127.0.0.1:18901(connected)
+|__ 127.0.0.1:18951(connected)
+
+127.0.0.1:18903(connected)
+|__ 127.0.0.1:18953(connected)
+
+127.0.0.1:18904(connected)
+|__ 127.0.0.1:18954(connected)
+
+127.0.0.1:18905(connected)
+|__ 127.0.0.1:18955(connected)
+
+127.0.0.1:18906(connected)
+|__ 127.0.0.1:18956(connected)
+
+127.0.0.1:18952(connected) <--- Promoted to master!
+|__ 127.0.0.1:18902(connected) <--- Failbacked. Now this node is slave!
+
+If you want to see the list of Thrift Server commands, use the thriftserver command without any option.
NAME
+ ltcli thriftserver
+
+SYNOPSIS
+ ltcli thriftserver COMMAND
+
+COMMANDS
+ COMMAND is one of the following:
+
+ beeline
+ Connect to thriftserver command line
+
+ monitor
+ Show thriftserver log
+
+ restart
+ Thriftserver restart
+
+ start
+ Start thriftserver
+
+ stop
+ Stop thriftserver
+
+(1) Thriftserver beeline
+Connect to the thrift server
+ec2-user@lightningdb:1> thriftserver beeline
+Connecting...
+Connecting to jdbc:hive2://localhost:13000
+19/11/19 04:45:18 INFO jdbc.Utils: Supplied authorities: localhost:13000
+19/11/19 04:45:18 INFO jdbc.Utils: Resolved authority: localhost:13000
+19/11/19 04:45:18 INFO jdbc.HiveConnection: Will try to open client transport with JDBC Uri: jdbc:hive2://localhost:13000
+Connected to: Spark SQL (version 2.3.1)
+Driver: Hive JDBC (version 1.2.1.spark2)
+Transaction isolation: TRANSACTION_REPEATABLE_READ
+Beeline version 1.2.1.spark2 by Apache Hive
+0: jdbc:hive2://localhost:13000> show tables;
++-----------+------------+--------------+--+
+| database | tableName | isTemporary |
++-----------+------------+--------------+--+
++-----------+------------+--------------+--+
+No rows selected (0.55 seconds)
+
+The default db url to connect to is jdbc:hive2://$HIVE_HOST:$HIVE_PORT. You can modify $HIVE_HOST and $HIVE_PORT with the conf thriftserver command.
(2) Thriftserver monitor
+You can view the logs of the thrift server in real-time.
+ec2-user@lightningdb:1> thriftserver monitor
+Press Ctrl-C for exit.
+19/11/19 04:43:33 INFO storage.BlockManagerMasterEndpoint: Registering block manager ip-172-31-39-147.ap-northeast-2.compute.internal:35909 with 912.3 MB RAM, BlockManagerId(4, ip-172-31-39-147.ap-northeast-2.compute.internal, 35909, None)
+19/11/19 04:43:33 INFO cluster.YarnSchedulerBackend$YarnDriverEndpoint: Registered executor NettyRpcEndpointRef(spark-client://Executor) (172.31.39.147:53604) with ID 5
+19/11/19 04:43:33 INFO storage.BlockManagerMasterEndpoint: Registering block manager
+...
+
+(3) Thriftserver restart
+Restart the thrift server.
+ec2-user@lightningdb:1> thriftserver restart
+no org.apache.spark.sql.hive.thriftserver.HiveThriftServer2 to stop
+starting org.apache.spark.sql.hive.thriftserver.HiveThriftServer2, logging to /opt/spark/logs/spark-ec2-user-org.apache.spark.sql.hive.thriftserver.HiveThriftServer2-1-ip-172-31-39-147.ap-northeast-2.compute.internal.out
+
+(4) Start thriftserver
+Run the thrift server.
+ec2-user@lightningdb:1> thriftserver start
+starting org.apache.spark.sql.hive.thriftserver.HiveThriftServer2, logging to /opt/spark/logs/spark-ec2-user-org.apache.spark.sql.hive.thriftserver.HiveThriftServer2-1-ip-172-31-39-147.ap-northeast-2.compute.internal.out
+
+You can view the logs through the command monitor
.
(5) Stop thriftserver
+Shut down the thrift server.
+ec2-user@lightningdb:1> thriftserver stop
+stopping org.apache.spark.sql.hive.thriftserver.HiveThriftServer2
+
+(6) Conf thriftserver
+ec2-user@lightningdb:1> conf thriftserver
+
+#!/bin/bash
+###############################################################################
+# Common variables
+SPARK_CONF=${SPARK_CONF:-$SPARK_HOME/conf}
+SPARK_BIN=${SPARK_BIN:-$SPARK_HOME/bin}
+SPARK_SBIN=${SPARK_SBIN:-$SPARK_HOME/sbin}
+SPARK_LOG=${SPARK_LOG:-$SPARK_HOME/logs}
+
+SPARK_METRICS=${SPARK_CONF}/metrics.properties
+SPARK_UI_PORT=${SPARK_UI_PORT:-14050}
+EXECUTERS=12
+EXECUTER_CORES=32
+
+HIVE_METASTORE_URL=''
+HIVE_HOST=${HIVE_HOST:-localhost}
+HIVE_PORT=${HIVE_PORT:-13000}
+
+COMMON_CLASSPATH=$(find $SR2_LIB -name 'tsr2*' -o -name 'spark-r2*' -o -name '*jedis*' -o -name 'commons*' -o -name 'jdeferred*' \
+-o -name 'geospark*' -o -name 'gt-*' | tr '\n' ':')
+
+###############################################################################
+# Driver
+DRIVER_MEMORY=6g
+DRIVER_CLASSPATH=$COMMON_CLASSPATH
+
+###############################################################################
+# Execute
+EXECUTOR_MEMORY=2g
+EXECUTOR_CLASSPATH=$COMMON_CLASSPATH
+
+###############################################################################
+# Thrift Server logs
+EVENT_LOG_ENABLED=false
+EVENT_LOG_DIR=/nvdrive0/thriftserver-event-logs
+EVENT_LOG_ROLLING_DIR=/nvdrive0/thriftserver-event-logs-rolling
+EVENT_LOG_SAVE_MIN=60
+EXTRACTED_EVENT_LOG_SAVE_DAY=5
+SPARK_LOG_SAVE_MIN=2000
+##############
+
+
+
+
+
+
+
+
+ You can create tables in the metastore using standard DDL.
+CREATE TABLE `pcell` (
+ `event_time` STRING,
+ `m_10_under` DOUBLE,
+ `m_10_19` DOUBLE,
+ `m_20_29` DOUBLE,
+ `m_30_39` DOUBLE,
+ `m_40_49` DOUBLE,
+ `m_50_59` DOUBLE,
+ `m_60_over` DOUBLE,
+ `longitude` DOUBLE,
+ `lattitude` DOUBLE,
+ `geohash` STRING)
+USING r2
+OPTIONS (
+ `table` '100',
+ `host` 'localhost',
+ `port` '18100',
+ `partitions` 'event_time geohash',
+ `mode` 'nvkvs',
+ `at_least_one_partition_enabled` 'no',
+ `rowstore` 'true'
+ )
+
+There are various options used to describe storage properties.
+table : Positive Integer. The identification of the table. Redis identifies a table with this value.
+host/port : The host/port of representative Redis Node. Using this host and port, Spark builds a Redis cluster client that retrieves and inserts data to the Redis cluster.
+partitions : The partition columns. The partition column values are used to distribute data across the Redis cluster. That is, the partition column values are concatenated with a colon (:) and used as the Redis KEY, which is the criterion for distributing data. For more information, refer to the Keys distribution model page in the Redis documentation.
+Tip
+Deciding a partition column properly is a crucial factor for performance because it is related to sharding data to multiple Redis nodes. It is important to try to distribute KEYs to 16384 slots of REDIS evenly and to try to map at least 200 rows for each KEY.
+mode : 'nvkvs' for this field
+at_least_one_partition_enabled : yes or no. If yes, queries that do not have a partition filter are not permitted (see the example query below).
+rowstore : true or false. If true, all columns are merged and stored in RocksDB as one column. This enhances ingestion performance. However, query performance can drop because of the overhead of parsing columns in the Redis layer when retrieving data from RocksDB.
+Tip
+The metastore of LightningDB only contains the metadata/schema of tables. The actual data are stored in Lightning DB, which consists of Redis & RocksDB (abbreviation: r2).
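+For example, with at_least_one_partition_enabled set to 'yes' on the pcell table above, a query must filter on at least one partition column (event_time, geohash). A minimal sketch of a permitted query via beeline, with an illustrative filter value:
+0: jdbc:hive2://localhost:13000> SELECT count(*) FROM pcell WHERE event_time = '2020-08-11';
+A query with no filter on event_time or geohash would be rejected in that mode.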
+(1) Insert data with DataFrameWriter
+You can use DataFrameWriter to write data into LightningDB.
+Currently, LightningDB only supports "Append mode".
+// Create source DataFrame.
+val df = spark.sqlContext.read.format("csv")
+ .option("header", "false")
+ .option("inferSchema", "true")
+ .load("/nvme/data_01/csv/")
+
+// "pcell" is a name of table which has R2 options.
+df.write.insertInto("pcell")
+
+(2) Insert data with INSERT INTO SELECT query
+-- pcell : table with R2 option
+-- csv_table : table with csv option
+-- udf : UDF can be used to transform original data.
+INSERT INTO pcell SELECT *, udf(event_time) FROM csv_table
+
+You can query data with Spark SQL interfaces such as DataFrames and Spark ThriftServer. Please refer to the Spark SQL guide page.
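+For instance, a minimal sketch of querying the pcell table from a Spark session (assuming the table was created as above; the filter value is illustrative):
+// Run a Spark SQL query against the r2-backed table; the result is an ordinary DataFrame.
+val result = spark.sql("SELECT geohash, count(*) AS cnt FROM pcell WHERE event_time = '2020-08-11' GROUP BY geohash")
+result.show()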
+$ git clone https://github.com/mnms/metavision2_k8s_manifests
+
+$ cd ltdb-operator
+$ kubectl create -f ltdb-operator-controller-manager.yaml
+
+$ cd ltdb
+$ kubectl create -f ltdb.yaml -n {namespace}
+
+$ kubectl delete -f ltdb.yaml
+or
+$ kubectl delete ltdb ltdb -n metavision
+$ for i in {0..39}; do kubectl delete pvc "ltdb-data-logging-ltdb-$i" -n metavision; done
+$ for i in {0..39}; do kubectl delete pvc "ltdb-data-ltdb-$i" -n metavision; done
+
+$ cd ltdbv2
+$ kubectl create -f ltdbv2-all-in-one.yaml
+$ kubectl -n metavision exec -it ltdbv2-0 -- redis-cli --cluster-yes --cluster create `kubectl -n metavision get po -o wide -l app=ltdbv2 | grep ltdbv2 | awk '{print $6":6379"}' | tr '\n' ' '`
+
+$ kubectl delete -f ltdbv2-all-in-one.yaml
+$ for i in {0..99}; do kubectl delete pvc "ltdbv2-pvc-ltdbv2-$i" -n metavision; done
+
+$ cd ltdb-http
+$ ls -alh
+total 32
+drwxr-xr-x 6 1111462 1437349805 192B 8 31 17:53 .
+drwxr-xr-x 11 1111462 1437349805 352B 8 31 17:54 ..
+-rw-r--r-- 1 1111462 1437349805 1.3K 8 31 17:53 ltdb-http-configmap.yaml
+-rw-r--r-- 1 1111462 1437349805 1.5K 8 31 17:53 ltdb-http.yaml
+-rw-r--r-- 1 1111462 1437349805 259B 8 31 17:53 pvc.yaml
+-rw-r--r-- 1 1111462 1437349805 342B 8 31 17:53 spark-rbac.yaml
+
+kubectl -n metavision apply -f ltdb-http-configmap.yaml
+kubectl -n metavision apply -f spark-rbac.yaml
+kubectl -n metavision apply -f pvc.yaml
+
+kubectl -n metavision apply -f ltdb-http.yaml // apply this one last
+
+$ cd ltdbv2-http
+$ kubectl create -f ltdb-http-configmap.yaml
+$ kubectl create -f ltdb-http.yaml
+$ kubectl create -f ltdbv2-http-vs.yaml
+
+$ kubectl delete -f ltdbv2-http-vs.yaml
+$ kubectl delete -f ltdb-http.yaml
+$ kubectl delete -f ltdb-http-configmap.yaml
+
+$ cd hynix
+$ kubectl create -f ltdbv2.yaml
+$ kubectl -n hynix exec -it ltdbv2-0 -- redis-cli --cluster-yes --cluster create `kubectl -n hynix get po -o wide -l app=ltdbv2 | grep ltdbv2 | awk '{print $6":6379"}' | tr '\n' ' '`
+$ kubectl create -f thunderquery.yaml
+$ kubectl create -f ltdbv2-http.yaml
+$ kubectl create -f istio-ingress.yaml
+
+$ vi ltdbv2.yaml
+...
+cms-enabled no
+dax-device-name no
+cms-device-name no
+
+$ cd hynix
+$ kubectl delete -f ltdbv2-http.yaml
+$ kubectl delete -f thunderquery.yaml
+$ kubectl delete -f ltdbv2.yaml
+for i in {0..9}; do kubectl delete pvc "ltdbv2-pvc-ltdbv2-$i" -n hynix; done
+$ kubectl delete -f istio-ingress.yaml
+
+
+
+
+
+
+
+Note
+This page explains how to start LightningDB on CentOS manually. If you are using an AWS EC2 instance, please use the Installation guide instead.
+(1) Edit /etc/sysctl.conf as follows
...
+vm.swappiness = 0
+vm.overcommit_memory = 1
+vm.overcommit_ratio = 50
+fs.file-max = 6815744
+net.ipv4.ip_local_port_range = 32768 65535
+net.core.rmem_default = 262144
+net.core.wmem_default = 262144
+net.core.rmem_max = 16777216
+net.core.wmem_max = 16777216
+net.ipv4.tcp_max_syn_backlog = 4096
+net.core.somaxconn = 65535
+...
+
+Tip
+To apply the changes at runtime, use sudo sysctl -p
+(2) Edit /etc/security/limits.conf
...
+* soft core -1
+* soft nofile 262144
+* hard nofile 262144
+* soft nproc 131072
+* hard nproc 131072
+[account name] * soft nofile 262144
+[account name] * hard nofile 262144
+[account name] * soft nproc 131072
+[account name] * hard nproc 131072
+...
+
+Tip
+To apply the changes at runtime, use ulimit -n 65535 and ulimit -u 131072
+(3) Edit /etc/fstab
+Remove the SWAP partition (comment out the swap partition using # and reboot)
...
+#/dev/mapper/centos-swap swap swap defaults 0 0
+...
+
+Tip
+To apply the change at runtime, use swapoff -a
+(4) Create /etc/init.d/disable-transparent-hugepages
+[root@fbg01 ~] cat /etc/init.d/disable-transparent-hugepages
+#!/bin/bash
+### BEGIN INIT INFO
+# Provides: disable-transparent-hugepages
+# Required-Start: $local_fs
+# Required-Stop:
+# X-Start-Before: mongod mongodb-mms-automation-agent
+# Default-Start: 2 3 4 5
+# Default-Stop: 0 1 6
+# Short-Description: Disable Linux transparent huge pages
+# Description: Disable Linux transparent huge pages, to improve
+# database performance.
+### END INIT INFO
+
+case $1 in
+start)
+ if [ -d /sys/kernel/mm/transparent_hugepage ]; then
+ thp_path=/sys/kernel/mm/transparent_hugepage
+ elif [ -d /sys/kernel/mm/redhat_transparent_hugepage ]; then
+ thp_path=/sys/kernel/mm/redhat_transparent_hugepage
+ else
+ return 0
+ fi
+
+ echo 'never' > ${thp_path}/enabled
+ echo 'never' > ${thp_path}/defrag
+
+ re='^[0-1]+$'
+ if [[ $(cat ${thp_path}/khugepaged/defrag) =~ $re ]]
+ then
+ # RHEL 7
+ echo 0 > ${thp_path}/khugepaged/defrag
+ else
+ # RHEL 6
+ echo 'no' > ${thp_path}/khugepaged/defrag
+ fi
+
+ unset re
+ unset thp_path
+ ;;
+esac
+[root@fbg01 ~]
+[root@fbg01 ~]
+[root@fbg01 ~] chmod 755 /etc/init.d/disable-transparent-hugepages
+[root@fbg01 ~] chkconfig --add disable-transparent-hugepages
+
+- bash, unzip, ssh
+- JDK 1.8 or higher
+- gcc 4.8.5 or higher
+- glibc 2.17 or higher
+- epel-release
+sudo yum install epel-release
+
+- boost, boost-thread, boost-devel
+sudo yum install boost boost-thread boost-devel
+
+- Exchange SSH keys
+For all servers on which LightningDB will be deployed, SSH keys should be exchanged.
+ssh-keygen -t rsa
+chmod 0600 ~/.ssh/authorized_keys
+cat .ssh/id_rsa.pub | ssh {server name} "cat >> .ssh/authorized_keys"
+
+- Intel MKL library
+(1) Intel MKL 2019 library install
+ sudo ./install.sh
+
+matthew@fbg05 /opt/intel $ pwd
+/opt/intel
+
+matthew@fbg05 /opt/intel $ ls -alh
+total 0
+drwxr-xr-x 10 root root 307 Mar 22 01:34 .
+drwxr-xr-x. 5 root root 83 Mar 22 01:34 ..
+drwxr-xr-x 6 root root 72 Mar 22 01:35 .pset
+drwxr-xr-x 2 root root 53 Mar 22 01:34 bin
+lrwxrwxrwx 1 root root 28 Mar 22 01:34 compilers_and_libraries -> compilers_and_libraries_2019
+drwxr-xr-x 3 root root 19 Mar 22 01:34 compilers_and_libraries_2019
+drwxr-xr-x 4 root root 36 Jan 24 23:04 compilers_and_libraries_2019.2.187
+drwxr-xr-x 6 root root 63 Jan 24 22:50 conda_channel
+drwxr-xr-x 4 root root 26 Jan 24 23:01 documentation_2019
+lrwxrwxrwx 1 root root 33 Mar 22 01:34 lib -> compilers_and_libraries/linux/lib
+lrwxrwxrwx 1 root root 33 Mar 22 01:34 mkl -> compilers_and_libraries/linux/mkl
+lrwxrwxrwx 1 root root 29 Mar 22 01:34 parallel_studio_xe_2019 -> parallel_studio_xe_2019.2.057
+drwxr-xr-x 5 root root 216 Mar 22 01:34 parallel_studio_xe_2019.2.057
+drwxr-xr-x 3 root root 16 Mar 22 01:34 samples_2019
+lrwxrwxrwx 1 root root 33 Mar 22 01:34 tbb -> compilers_and_libraries/linux/tbb
+
+(2) Intel MKL 2019 library environment settings
+# Intel MKL environment variables (sets $MKLROOT; can be checked with: export | grep MKL)
+source /opt/intel/mkl/bin/mklvars.sh intel64
+
+- Apache Hadoop 2.6.0 (or higher)
+- Apache Spark 2.3 on Hadoop 2.6
+- ntp: for clock synchronization between servers over packet-switched, variable-latency data networks.
+- Settings for core dump(Optional)
+(1) INSTALLING ABRT AND STARTING ITS SERVICES
+(2) Set core dump file size
+ulimit -c unlimited
+
+(3) Change the path of core dump files
+echo /tmp/core.%p > /proc/sys/kernel/core_pattern
+
+Add the following to '~/.bashrc'
+# .bashrc
+
+if [ -f /etc/bashrc ]; then
+. /etc/bashrc
+fi
+
+# User specific environment and startup programs
+
+PATH=$PATH:$HOME/.local/bin:$HOME/bin
+
+HADOOP_HOME=/home/nvkvs/hadoop
+HADOOP_CONF_DIR=$HADOOP_HOME/etc/hadoop
+YARN_CONF_DIR=$HADOOP_HOME/etc/hadoop
+SPARK_HOME=/home/nvkvs/spark
+
+PATH=$PATH:$HADOOP_HOME/bin:$HADOOP_HOME/sbin:$SPARK_HOME/bin:$SPARK_HOME/sbin:$HOME/sbin
+
+export PATH SPARK_HOME HADOOP_HOME HADOOP_CONF_DIR YARN_CONF_DIR
+alias cfc='source ~/.use_cluster'
+
+With LTCLI provided by LightningDB, users can deploy and use LightningDB.
+Install LTCLI with the following command.
+$ pip install ltcli --upgrade --user
+
+After installation is completed, start LTCLI with Commands
+Note
+This document explains how to use the 'flashbase' script for scale-out. If you use LTCLI, you can operate Lightning DB more easily and powerfully. Therefore, if possible, we recommend LTCLI rather than the 'flashbase' script.
+You can use 'redis-trib.rb check {master's IP}:{master's Port} | grep slots | grep master' command to check slots assigned to each master. Any master can be used for '{master's IP}:{master's Port}'.
+$ redis-trib.rb check 192.168.111.201:18800 | grep slots | grep master
+
+ slots:0-818 (819 slots) master
+
+ slots:3277-4095 (819 slots) master
+
+ slots:5734-6553 (820 slots) master
+
+ slots:7373-8191 (819 slots) master
+
+ slots:13926-14745 (820 slots) master
+
+ slots:4096-4914 (819 slots) master
+
+ slots:8192-9010 (819 slots) master
+
+ slots:2458-3276 (819 slots) master
+
+ slots:9011-9829 (819 slots) master
+
+ slots:10650-11468 (819 slots) master
+
+ slots:11469-12287 (819 slots) master
+
+ slots:1638-2457 (820 slots) master
+
+ slots:12288-13106 (819 slots) master
+
+ slots:15565-16383 (819 slots) master
+
+ slots:9830-10649 (820 slots) master
+
+ slots:819-1637 (819 slots) master
+
+ slots:6554-7372 (819 slots) master
+
+ slots:4915-5733 (819 slots) master
+
+ slots:13107-13925 (819 slots) master
+
+ slots:14746-15564 (819 slots) master
+
+$ flashbase check-distribution
+
+check distribution of masters/slaves...
+
+SERVER NAME | M | S
+
+--------------------------------
+
+192.168.111.201 | 10 | 10
+
+192.168.111.202 | 10 | 10
+
+--------------------------------
+
+Total nodes | 20 | 20
+
+
+Open 'redis.properties' with 'flashbase edit' command.
+$ flashbase edit
+
+Add a new node("192.168.111.203").
+As-is
+#!/bin/bash
+
+## Master hosts and ports
+export SR2_REDIS_MASTER_HOSTS=( "192.168.111.201" "192.168.111.202" )
+export SR2_REDIS_MASTER_PORTS=( $(seq 18800 18809) )
+
+## Slave hosts and ports (optional)
+export SR2_REDIS_SLAVE_HOSTS=( "192.168.111.201" "192.168.111.202" )
+export SR2_REDIS_SLAVE_PORTS=( $(seq 18850 18859) )
+
+To-be
+#!/bin/bash
+
+## Master hosts and ports
+
+export SR2_REDIS_MASTER_HOSTS=( "192.168.111.201" "192.168.111.202" "192.168.111.203" )
+export SR2_REDIS_MASTER_PORTS=( $(seq 18800 18809) )
+
+## Slave hosts and ports (optional)
+export SR2_REDIS_SLAVE_HOSTS=( "192.168.111.201" "192.168.111.202" "192.168.111.203" )
+export SR2_REDIS_SLAVE_PORTS=( $(seq 18850 18859) )
+
+Scale out the cluster with a 'flashbase scale-out {new node's IP}' command. If you add more than one node, you can use like 'flashbase scale-out 192.168.111.203 192.168.111.204 192.168.111.205'.
+$ flashbase scale-out 192.168.111.203
+
+$ redis-trib.rb check 192.168.111.201:18800 | grep master | grep slot
+ slots:273-818 (546 slots) master
+ slots:11742-12287 (546 slots) master
+ slots:0-272,10650-10921,14198-14199 (547 slots) master
+ slots:10922,11469-11741,14746-15018 (547 slots) master
+ slots:6827-7372 (546 slots) master
+ slots:1912-2457 (546 slots) master
+ slots:6008-6553 (546 slots) master
+ slots:7646-8191 (546 slots) master
+ slots:1911,5734-6007,13926-14197 (547 slots) master
+ slots:5188-5733 (546 slots) master
+ slots:13380-13925 (546 slots) master
+ slots:1092-1637 (546 slots) master
+ slots:1638-1910,9830-10103 (547 slots) master
+ slots:3550-4095 (546 slots) master
+ slots:7373-7645,8192-8464 (546 slots) master
+ slots:14200-14745 (546 slots) master
+ slots:2458-2730,4096-4368 (546 slots) master
+ slots:4369-4914 (546 slots) master
+ slots:9284-9829 (546 slots) master
+ slots:12561-13106 (546 slots) master
+ slots:6554-6826,15565-15837 (546 slots) master
+ slots:9011-9283,12288-12560 (546 slots) master
+ slots:4915-5187,13107-13379 (546 slots) master
+ slots:15019-15564 (546 slots) master
+ slots:10923-11468 (546 slots) master
+ slots:819-1091,3277-3549 (546 slots) master
+ slots:8465-9010 (546 slots) master
+ slots:2731-3276 (546 slots) master
+ slots:15838-16383 (546 slots) master
+ slots:10104-10649 (546 slots) master
+
+$ fb check-distribution
+check distribution of masters/slaves...
+SERVER NAME | M | S
+--------------------------------
+192.168.111.201 | 10 | 10
+192.168.111.202 | 10 | 10
+192.168.111.203 | 10 | 10
+--------------------------------
+Total nodes | 30 | 30
+
+
+
+
+
+
+
+ Note
+This document explains how to use the 'flashbase' script for installation and operation. If you use LTCLI, you can deploy and operate Lightning DB more easily and powerfully. Therefore, if possible, we recommend LTCLI rather than the 'flashbase' script.
+You can download the recommended version of Lightning DB from the Release Notes.
+Deploy the Lightning DB binary using deploy-flashbase.sh.
+Type ./deploy-flashbase.sh {binary path} {cluster list} to deploy.
> ./deploy-flashbase.sh ./lightningdb.release.release.flashbase_v1.2.3.95bfc6.bin 1 2 // deploy cluster 1 and cluster 2 with lightningdb.release.release.flashbase_v1.2.3.95bfc6.bin
+
+DATEMIN: 20200811113038
+INSTALLER PATH: ./lightningdb.release.release.flashbase_v1.2.3.95bfc6.bin
+INSTALLER NAME: lightningdb.release.release.flashbase_v1.2.3.95bfc6.bin
+======================================================
+DEPLOY CLUSTER 1
+
+CLUSTER_DIR: /Users/myaccount/tsr2/cluster_1
+SR2_HOME: /Users/myaccount/tsr2/cluster_1/tsr2-assembly-1.0.0-SNAPSHOT
+SR2_CONF: /Users/myaccount/tsr2/cluster_1/tsr2-assembly-1.0.0-SNAPSHOT/conf
+BACKUP_DIR: /Users/myaccount/tsr2/cluster_1_bak_20200811113038
+CONF_BACKUP_DIR: /Users/myaccount/tsr2/cluster_1_conf_bak_20200811113038
+======================================================
+backup...
+
+DEPLOY NODE localhost
+lightningdb.release.release.flashbase_v1.2.3.95bfc6.bin 100% 126MB 256.8MB/s 00:00
+\e[01;32mInstalling tsr2 as full...\e[00m
+Skip to create \e[01:31m/Users/myaccount/tsr2/cluster_1\e[00m
+\e[01;32mUnarchieving to /Users/myaccount/tsr2/cluster_1...\e[00m
+\e[01;32mMaking required directories...\e[00m
+\e[01;32mProcessing a native library linkage...\e[00m
+\e[01;31mNo ldconfig in $PATH. Fix the problem and try again\e[00m
+building file list ... done
+logback-kaetlyn.xml.template
+logback.xml
+redis-master.conf.template
+redis-slave.conf.template
+redis.conf.sample
+redis.properties
+sentinel.conf.template
+thriftserver.properties
+tsr2-kaetlyn.properties
+redis/
+redis/redis-18500.conf
+redis/redis-18501.conf
+redis/redis-18502.conf
+redis/redis-18503.conf
+redis/redis-18504.conf
+redis/redis-18505.conf
+redis/redis-18506.conf
+redis/redis-18507.conf
+redis/redis-18508.conf
+redis/redis-18509.conf
+redis/redis-18600.conf
+redis/redis-18601.conf
+redis/redis-18602.conf
+redis/redis-18603.conf
+redis/redis-18604.conf
+redis/redis-18605.conf
+redis/redis-18606.conf
+redis/redis-18607.conf
+redis/redis-18608.conf
+redis/redis-18609.conf
+sample-configure/
+sample-configure/etc/
+sample-configure/etc/sysctl.conf.sample
+sample-configure/etc/profile.d/
+sample-configure/etc/profile.d/jdk.sh.sample
+sample-configure/hadoop/
+sample-configure/hadoop/core-site.xml.sample
+sample-configure/hadoop/hdfs-site.xml.sample
+sample-configure/hadoop/mapred-site.xml.sample
+sample-configure/hadoop/slaves.sample
+sample-configure/hadoop/yarn-site.xml.sample
+sample-configure/spark/
+sample-configure/spark/log4j.properties.sample
+sample-configure/spark/metrics.properties.sample
+sample-configure/spark/scheduler-site.xml.sample
+sample-configure/spark/spark-defaults.conf.sample
+
+sent 995838 bytes received 2532 bytes 1996740.00 bytes/sec
+total size is 1161578 speedup is 1.16
+
+======================================================
+DEPLOY CLUSTER 2
+
+CLUSTER_DIR: /Users/myaccount/tsr2/cluster_2
+SR2_HOME: /Users/myaccount/tsr2/cluster_2/tsr2-assembly-1.0.0-SNAPSHOT
+SR2_CONF: /Users/myaccount/tsr2/cluster_2/tsr2-assembly-1.0.0-SNAPSHOT/conf
+BACKUP_DIR: /Users/myaccount/tsr2/cluster_2_bak_20200811113038
+CONF_BACKUP_DIR: /Users/myaccount/tsr2/cluster_2_conf_bak_20200811113038
+======================================================
+backup...
+
+DEPLOY NODE localhost
+lightningdb.release.release.flashbase_v1.2.3.95bfc6.bin 100% 126MB 232.7MB/s 00:00
+Installing tsr2 as full...
+Skip to create /Users/myaccount/tsr2/cluster_2
+Unarchieving to /Users/myaccount/tsr2/cluster_2...
+Making required directories...
+Processing a native library linkage...
+No ldconfig in $PATH. Fix the problem and try again
+building file list ... done
+logback-kaetlyn.xml.template
+logback.xml
+redis-master.conf.template
+redis-slave.conf.template
+redis.conf.sample
+redis.properties
+sentinel.conf.template
+thriftserver.properties
+tsr2-kaetlyn.properties
+redis/
+redis/redis-18200.conf
+redis/redis-18201.conf
+redis/redis-18202.conf
+redis/redis-18203.conf
+redis/redis-18204.conf
+redis/redis-18205.conf
+redis/redis-18206.conf
+redis/redis-18207.conf
+redis/redis-18208.conf
+redis/redis-18209.conf
+redis/redis-18250.conf
+redis/redis-18251.conf
+redis/redis-18252.conf
+redis/redis-18253.conf
+redis/redis-18254.conf
+redis/redis-18255.conf
+redis/redis-18256.conf
+redis/redis-18257.conf
+redis/redis-18258.conf
+redis/redis-18259.conf
+sample-configure/
+sample-configure/etc/
+sample-configure/etc/sysctl.conf.sample
+sample-configure/etc/profile.d/
+sample-configure/etc/profile.d/jdk.sh.sample
+sample-configure/hadoop/
+sample-configure/hadoop/core-site.xml.sample
+sample-configure/hadoop/hdfs-site.xml.sample
+sample-configure/hadoop/mapred-site.xml.sample
+sample-configure/hadoop/slaves.sample
+sample-configure/hadoop/yarn-site.xml.sample
+sample-configure/spark/
+sample-configure/spark/log4j.properties.sample
+sample-configure/spark/metrics.properties.sample
+sample-configure/spark/scheduler-site.xml.sample
+sample-configure/spark/spark-defaults.conf.sample
+
+sent 992400 bytes received 2532 bytes 663288.00 bytes/sec
+total size is 1165442 speedup is 1.17
+
+If you've deployed Lightning DB successfully, you can create and start the clusters.
+To choose a cluster, use .use_cluster.
+source ~/.use_cluster.sh 1 // 'source ~/.use_cluster.sh {cluster number}'
+
+If you add an alias in .bashrc.sh like below, you can change the cluster easily.
+alias cfc="source ~/.use_cluster"
+
+Then type cfc {cluster number} to use the specified cluster.
+cfc 1
+
+Open and modify the redis.properties file of the cluster by typing 'flashbase edit'.
+#!/bin/bash
+
+## Master hosts and ports
+export SR2_REDIS_MASTER_HOSTS=( "127.0.0.1" ) // need to configure
+export SR2_REDIS_MASTER_PORTS=( $(seq 18100 18109) ) // need to configure
+
+## Slave hosts and ports (optional)
+export SR2_REDIS_SLAVE_HOSTS=( "127.0.0.1" ) // need to configure in case of replication
+export SR2_REDIS_SLAVE_PORTS=( $(seq 18150 18159) ) // need to configure in case of replication
+
+## only single data directory in redis db and flash db
+## Must exist below variables; 'SR2_REDIS_DATA', 'SR2_REDIS_DB_PATH' and 'SR2_FLASH_DB_PATH'
+#export SR2_REDIS_DATA="/nvdrive0/nvkvs/redis"
+#export SR2_REDIS_DB_PATH="/nvdrive0/nvkvs/redis"
+#export SR2_FLASH_DB_PATH="/nvdrive0/nvkvs/flash"
+
+## multiple data directory in redis db and flash db
+export SSD_COUNT=3 // need to configure
+#export HDD_COUNT=3
+export SR2_REDIS_DATA="/sata_ssd/ssd_" // need to configure. With this setting, '/sata_ssd/ssd_01', '/sata_ssd/ssd_02' and '/sata_ssd/ssd_03' are used.
+export SR2_REDIS_DB_PATH="/sata_ssd/ssd_" // need to configure
+export SR2_FLASH_DB_PATH="/sata_ssd/ssd_" // need to configure
+
+#######################################################
+# Example : only SSD data directory
+#export SSD_COUNT=3
+#export SR2_REDIS_DATA="/ssd_"
+#export SR2_REDIS_DB_PATH="/ssd_"
+#export SR2_FLASH_DB_PATH="/ssd_"
+#######################################################
+
+Type flashbase restart --reset --cluster --yes.
> flashbase restart --reset --cluster --yes
+Stopping master cluster of redis...
+ - Stopping 127.0.0.1[*]...
+Stopping slave cluster of redis...
+ - Stopping 127.0.0.1[*]...
+Removing master node configuration in
+ - 127.0.0.1
+Removing slave node configuration in
+ - 127.0.0.1
+Removing redis generated MASTER configuration files...
+ - 127.0.0.1
+Removing redis generated SLAVE configuration files...
+ - 127.0.0.1
+
+Redis flashdb path is "/sata_ssd/ssd_#{SSD_NUMBER}/nvkvs/myaccount/db/db-#{PORT}-#{DB_NUMBER}".
+Redis dump.rdb path is "/sata_ssd/ssd_#{SSD_NUMBER}/nvkvs/myaccount/dump/dump-#{PORT}.*".
+Redis aof path is "/sata_ssd/ssd_#{SSD_NUMBER}/nvkvs/myaccount/appendonly-#{PORT}.aof".
+
+Removing flash db directory, appendonly and dump.rdb files in MASTER NODE;
+ - 127.0.0.1
+Removing flash db directory, appendonly and dump.rdb files in SLAVE NODE;
+ - 127.0.0.1
+Generate redis configuration files for MASTER hosts
+Generate redis configuration files for SLAVE hosts
+- Master nodes
+ -- Copying to 127.0.0.1...
+- Slave nodes
+ -- Copying to 127.0.0.1...
+Success to configure redis.
+netstat: t4: unknown or uninstrumented protocol
+netstat: t4: unknown or uninstrumented protocol
+Backup redis master log in each MASTER hosts...
+ - 127.0.0.1
+ - 127.0.0.1
+Starting master nodes : 127.0.0.1 : [18100, 18101, 18102, 18103, 18104, 18105, 18106, 18107, 18108, 18109]...
+Starting slave nodes : 127.0.0.1 : [18150, 18151, 18152, 18153, 18154, 18155, 18156, 18157, 18158, 18159]...
+total_master_num: 10
+total_slave_num: 10
+num_replica: 1
+>>> Creating cluster
+>>> Performing hash slots allocation on 20 nodes...
+Using 10 masters:
+127.0.0.1:18100
+127.0.0.1:18101
+127.0.0.1:18102
+127.0.0.1:18103
+127.0.0.1:18104
+127.0.0.1:18105
+127.0.0.1:18106
+127.0.0.1:18107
+127.0.0.1:18108
+127.0.0.1:18109
+Adding replica 127.0.0.1:18150 to 127.0.0.1:18100
+Adding replica 127.0.0.1:18151 to 127.0.0.1:18101
+Adding replica 127.0.0.1:18152 to 127.0.0.1:18102
+Adding replica 127.0.0.1:18153 to 127.0.0.1:18103
+Adding replica 127.0.0.1:18154 to 127.0.0.1:18104
+Adding replica 127.0.0.1:18155 to 127.0.0.1:18105
+Adding replica 127.0.0.1:18156 to 127.0.0.1:18106
+Adding replica 127.0.0.1:18157 to 127.0.0.1:18107
+Adding replica 127.0.0.1:18158 to 127.0.0.1:18108
+Adding replica 127.0.0.1:18159 to 127.0.0.1:18109
+M: 7e72dff98fdda09cf97e02420727fd8b6564b6ae 127.0.0.1:18100
+ slots:0-1637 (1638 slots) master
+M: c3b5e673033758d77680e4534855686649fe5daa 127.0.0.1:18101
+ slots:1638-3276 (1639 slots) master
+M: ba39bada8a2e393f76d265ea02d3e078c9406a93 127.0.0.1:18102
+ slots:3277-4914 (1638 slots) master
+M: 16da3917eff32cde8942660324c7374117902b01 127.0.0.1:18103
+ slots:4915-6553 (1639 slots) master
+M: 5ed447baf1f1c6c454459c24809ffc197809cb6b 127.0.0.1:18104
+ slots:6554-8191 (1638 slots) master
+M: d4cdcfdfdfb966a74a1bafce8969f956b5312094 127.0.0.1:18105
+ slots:8192-9829 (1638 slots) master
+M: 6f89f0b44f0a515865173984b95fc3f6fe4e7d72 127.0.0.1:18106
+ slots:9830-11468 (1639 slots) master
+M: d531628bf7b2afdc095e445d21dedc2549cc4590 127.0.0.1:18107
+ slots:11469-13106 (1638 slots) master
+M: ae71f4430fba6a019e4111c3d26e27e225764200 127.0.0.1:18108
+ slots:13107-14745 (1639 slots) master
+M: b3734a60336856f8c4ef08efe763ae3ac32bb94a 127.0.0.1:18109
+ slots:14746-16383 (1638 slots) master
+S: 128a527bba2823e547e8138a77aebcfec7e55342 127.0.0.1:18150
+ replicates 7e72dff98fdda09cf97e02420727fd8b6564b6ae
+S: ab72ae8dafc8a3f3229157cf5965bbfa1db6c726 127.0.0.1:18151
+ replicates c3b5e673033758d77680e4534855686649fe5daa
+S: f6670f4b8570758d509b5a0341a5151abea599ea 127.0.0.1:18152
+ replicates ba39bada8a2e393f76d265ea02d3e078c9406a93
+S: f004736cb50724f089289af34bd8da2e98b07a0b 127.0.0.1:18153
+ replicates 16da3917eff32cde8942660324c7374117902b01
+S: 8d0061ff0bc8fcc0e8a9fa5db8d6ab0b7b7ba9d0 127.0.0.1:18154
+ replicates 5ed447baf1f1c6c454459c24809ffc197809cb6b
+S: 208496ceb24eba1e26611071e185007b1ad552c5 127.0.0.1:18155
+ replicates d4cdcfdfdfb966a74a1bafce8969f956b5312094
+S: 3d3af1bf3dec40fe0d5dbe1314638733dadb686e 127.0.0.1:18156
+ replicates 6f89f0b44f0a515865173984b95fc3f6fe4e7d72
+S: bbcba7c269fb8162e0f7ef5807e079ba06fc032b 127.0.0.1:18157
+ replicates d531628bf7b2afdc095e445d21dedc2549cc4590
+S: 6b3a7f40f36cbe7aaad8ffffa58aefbf591d4967 127.0.0.1:18158
+ replicates ae71f4430fba6a019e4111c3d26e27e225764200
+S: 11f3c47b736e37b274bbdef95a580a0c89bc9d9b 127.0.0.1:18159
+ replicates b3734a60336856f8c4ef08efe763ae3ac32bb94a
+Can I set the above configuration? (type 'yes' to accept): >>> Nodes configuration updated
+>>> Assign a different config epoch to each node
+>>> Sending CLUSTER MEET messages to join the cluster
+Waiting for the cluster to join..................................................................................
+>>> Performing Cluster Check (using node 127.0.0.1:18100)
+M: 7e72dff98fdda09cf97e02420727fd8b6564b6ae 127.0.0.1:18100
+ slots:0-1637 (1638 slots) master
+M: c3b5e673033758d77680e4534855686649fe5daa 127.0.0.1:18101
+ slots:1638-3276 (1639 slots) master
+M: ba39bada8a2e393f76d265ea02d3e078c9406a93 127.0.0.1:18102
+ slots:3277-4914 (1638 slots) master
+M: 16da3917eff32cde8942660324c7374117902b01 127.0.0.1:18103
+ slots:4915-6553 (1639 slots) master
+M: 5ed447baf1f1c6c454459c24809ffc197809cb6b 127.0.0.1:18104
+ slots:6554-8191 (1638 slots) master
+M: d4cdcfdfdfb966a74a1bafce8969f956b5312094 127.0.0.1:18105
+ slots:8192-9829 (1638 slots) master
+M: 6f89f0b44f0a515865173984b95fc3f6fe4e7d72 127.0.0.1:18106
+ slots:9830-11468 (1639 slots) master
+M: d531628bf7b2afdc095e445d21dedc2549cc4590 127.0.0.1:18107
+ slots:11469-13106 (1638 slots) master
+M: ae71f4430fba6a019e4111c3d26e27e225764200 127.0.0.1:18108
+ slots:13107-14745 (1639 slots) master
+M: b3734a60336856f8c4ef08efe763ae3ac32bb94a 127.0.0.1:18109
+ slots:14746-16383 (1638 slots) master
+M: 128a527bba2823e547e8138a77aebcfec7e55342 127.0.0.1:18150
+ slots: (0 slots) master
+ replicates 7e72dff98fdda09cf97e02420727fd8b6564b6ae
+M: ab72ae8dafc8a3f3229157cf5965bbfa1db6c726 127.0.0.1:18151
+ slots: (0 slots) master
+ replicates c3b5e673033758d77680e4534855686649fe5daa
+M: f6670f4b8570758d509b5a0341a5151abea599ea 127.0.0.1:18152
+ slots: (0 slots) master
+ replicates ba39bada8a2e393f76d265ea02d3e078c9406a93
+M: f004736cb50724f089289af34bd8da2e98b07a0b 127.0.0.1:18153
+ slots: (0 slots) master
+ replicates 16da3917eff32cde8942660324c7374117902b01
+M: 8d0061ff0bc8fcc0e8a9fa5db8d6ab0b7b7ba9d0 127.0.0.1:18154
+ slots: (0 slots) master
+ replicates 5ed447baf1f1c6c454459c24809ffc197809cb6b
+M: 208496ceb24eba1e26611071e185007b1ad552c5 127.0.0.1:18155
+ slots: (0 slots) master
+ replicates d4cdcfdfdfb966a74a1bafce8969f956b5312094
+M: 3d3af1bf3dec40fe0d5dbe1314638733dadb686e 127.0.0.1:18156
+ slots: (0 slots) master
+ replicates 6f89f0b44f0a515865173984b95fc3f6fe4e7d72
+M: bbcba7c269fb8162e0f7ef5807e079ba06fc032b 127.0.0.1:18157
+ slots: (0 slots) master
+ replicates d531628bf7b2afdc095e445d21dedc2549cc4590
+M: 6b3a7f40f36cbe7aaad8ffffa58aefbf591d4967 127.0.0.1:18158
+ slots: (0 slots) master
+ replicates ae71f4430fba6a019e4111c3d26e27e225764200
+M: 11f3c47b736e37b274bbdef95a580a0c89bc9d9b 127.0.0.1:18159
+ slots: (0 slots) master
+ replicates b3734a60336856f8c4ef08efe763ae3ac32bb94a
+[OK] All nodes agree about slots configuration.
+>>> Check for open slots...
+>>> Check slots coverage...
+[OK] All 16384 slots covered.
+
+You can simply check the status of each node with the PING command.
> flashbase cli -h localhost -p 18101
+localhost:18101> ping
+PONG
+localhost:18101>
+
+Using flashbase cli-all, you can check the status of all nodes.
> flashbase cli-all ping
+redis client for 127.0.0.1:18100
+PONG
+redis client for 127.0.0.1:18101
+PONG
+redis client for 127.0.0.1:18102
+PONG
+redis client for 127.0.0.1:18103
+PONG
+redis client for 127.0.0.1:18104
+PONG
+redis client for 127.0.0.1:18105
+PONG
+redis client for 127.0.0.1:18106
+PONG
+redis client for 127.0.0.1:18107
+PONG
+redis client for 127.0.0.1:18108
+PONG
+redis client for 127.0.0.1:18109
+PONG
+redis client for 127.0.0.1:18150
+PONG
+redis client for 127.0.0.1:18151
+PONG
+redis client for 127.0.0.1:18152
+PONG
+redis client for 127.0.0.1:18153
+PONG
+redis client for 127.0.0.1:18154
+PONG
+redis client for 127.0.0.1:18155
+PONG
+redis client for 127.0.0.1:18156
+PONG
+redis client for 127.0.0.1:18157
+PONG
+redis client for 127.0.0.1:18158
+PONG
+redis client for 127.0.0.1:18159
+PONG
+
+With the INFO command, you can get all the information about each node.
> flashbase cli -h localhost -p 18101
+localhost:18101> info all
+# Server
+redis_version:3.0.7
+redis_git_sha1:29d44e4d
+redis_git_dirty:0
+redis_build_id:e5a4dd48086abff2
+redis_mode:cluster
+os:Darwin 18.7.0 x86_64
+arch_bits:64
+multiplexing_api:kqueue
+gcc_version:4.2.1
+process_id:42593
+run_id:ea34cce757c61d65e344b6c1094b940c3ab46110
+tcp_port:18101
+uptime_in_seconds:516
+uptime_in_days:0
+hz:10
+lru_clock:3282808
+config_file:/Users/myaccount/tsr2/cluster_1/tsr2-assembly-1.0.0-SNAPSHOT/conf/redis/redis-18101.conf
+
+# Clients
+connected_clients:1
+client_longest_output_list:0
+client_biggest_input_buf:0
+blocked_clients:0
+
+
+# Memory
+isOOM:false
+used_memory:20752816
+used_memory_human:19.79M
+used_memory_rss:23941120
+used_memory_peak:20752816
+used_memory_peak_human:19.79M
+used_memory_lua:36864
+used_memory_rocksdb_total:100663872
+used_memory_rocksdb_block_cache:100663296
+used_memory_rocksdb_mem_table:576
+used_memory_rocksdb_table_readers:0
+used_memory_rocksdb_pinned_block:0
+meta_data_memory:64
+percent_of_meta_data_memory:0
+used_memory_client_buffer_peak:0
+mem_fragmentation_ratio:1.15
+mem_allocator:libc
+
+# Persistence
+loading:0
+rdb_changes_since_last_save:0
+rdb_bgsave_in_progress:0
+rdb_last_save_time:1597117812
+rdb_last_bgsave_status:ok
+rdb_last_bgsave_time_sec:-1
+rdb_current_bgsave_time_sec:-1
+aof_enabled:1
+aof_rewrite_in_progress:0
+aof_rewrite_scheduled:0
+aof_last_rewrite_time_sec:-1
+aof_current_rewrite_time_sec:-1
+aof_last_bgrewrite_status:ok
+aof_last_write_status:ok
+aof_current_size:0
+aof_base_size:0
+aof_pending_rewrite:0
+aof_buffer_length:0
+aof_rewrite_buffer_length:0
+aof_pending_bio_fsync:0
+aof_delayed_fsync:0
+
+# Stats
+total_connections_received:5
+total_commands_processed:513
+instantaneous_ops_per_sec:0
+total_net_input_bytes:33954
+total_net_output_bytes:173640
+instantaneous_input_kbps:0.02
+instantaneous_output_kbps:0.00
+rejected_connections:0
+sync_full:1
+sync_partial_ok:0
+sync_partial_err:0
+expired_keys:0
+evicted_keys:0
+keyspace_hits:0
+keyspace_misses:0
+pubsub_channels:0
+pubsub_patterns:0
+latest_fork_usec:1159
+migrate_cached_sockets:0
+
+# Replication
+role:master
+connected_slaves:1
+slave0:ip=127.0.0.1,port=18151,state=online,offset=589,lag=1
+master_repl_offset:589
+repl_backlog_active:1
+repl_backlog_size:1048576
+repl_backlog_first_byte_offset:2
+repl_backlog_histlen:588
+
+# CPU
+used_cpu_sys:0.42
+used_cpu_user:0.56
+used_cpu_sys_children:0.00
+used_cpu_user_children:0.00
+
+# Commandstats
+cmdstat_ping:calls=4,usec=19,usec_per_call=4.75,usec_std=1.00,usec_max=10
+cmdstat_psync:calls=1,usec=17,usec_per_call=17.00,usec_std=0.00,usec_max=17
+cmdstat_replconf:calls=416,usec=644,usec_per_call=1.55,usec_std=1.00,usec_max=11
+cmdstat_info:calls=2,usec=312,usec_per_call=156.00,usec_std=5.00,usec_max=183
+cmdstat_cluster:calls=90,usec=122372,usec_per_call=1359.69,usec_std=19.00,usec_max=1802
+
+# Cluster
+cluster_enabled:1
+
+# Keyspace
+
+# Tablespace
+
+# Eviction
+evictStat:sleeps=0,fullRowgroup=0,80Rowgroup=0,60Rowgroup=0,40Rowgroup=0,20Rowgroup=0,00Rowgroup=0
+recentEvictStat:recent 200 rowgroups' avg full percent:0
+
+# Storage(Disk Usage)
+DB0_TTL(sec):2592000
+DB0_size(KByte):200
+DB0_numFiles:0
+
+# CompressionRatios
+CVA_compress_algorithm:zstd
+CVA_comp_avg_ratio cannot be calculated because of not enough # of samples
+localhost:18101>
+
+You can also check a specific section of information for each node.
+localhost:18101> info memory
+# Memory
+isOOM:false
+used_memory:20751904
+used_memory_human:19.79M
+used_memory_rss:23949312
+used_memory_peak:20752816
+used_memory_peak_human:19.79M
+used_memory_lua:36864
+used_memory_rocksdb_total:100663872
+used_memory_rocksdb_block_cache:100663296
+used_memory_rocksdb_mem_table:576
+used_memory_rocksdb_table_readers:0
+used_memory_rocksdb_pinned_block:0
+meta_data_memory:64
+percent_of_meta_data_memory:0
+used_memory_client_buffer_peak:0
+mem_fragmentation_ratio:1.15
+mem_allocator:libc
+localhost:18101>
+localhost:18101> info storage
+# Storage(Disk Usage)
+DB0_TTL(sec):2592000
+DB0_size(KByte):200
+DB0_numFiles:0
+localhost:18101>
+
+You can check the status of the cluster with the CLUSTER command.
localhost:18101> cluster info
+cluster_state:ok
+cluster_slots_assigned:16384
+cluster_slots_ok:16384
+cluster_slots_pfail:0
+cluster_slots_fail:0
+cluster_known_nodes:20
+cluster_size:10
+cluster_current_epoch:20
+cluster_my_epoch:2
+cluster_stats_messages_ping_sent:665
+cluster_stats_messages_pong_sent:679
+cluster_stats_messages_meet_sent:15
+cluster_stats_messages_sent:1359
+cluster_stats_messages_ping_received:675
+cluster_stats_messages_pong_received:680
+cluster_stats_messages_meet_received:4
+cluster_stats_messages_received:1359
+localhost:18101>
+localhost:18101> cluster nodes
+d531628bf7b2afdc095e445d21dedc2549cc4590 127.0.0.1:18107 master - 0 1597118527011 8 connected 11469-13106
+16da3917eff32cde8942660324c7374117902b01 127.0.0.1:18103 master - 0 1597118524000 4 connected 4915-6553
+7e72dff98fdda09cf97e02420727fd8b6564b6ae 127.0.0.1:18100 master - 0 1597118521882 1 connected 0-1637
+6b3a7f40f36cbe7aaad8ffffa58aefbf591d4967 127.0.0.1:18158 slave ae71f4430fba6a019e4111c3d26e27e225764200 0 1597118520862 19 connected
+d4cdcfdfdfb966a74a1bafce8969f956b5312094 127.0.0.1:18105 master - 0 1597118526000 6 connected 8192-9829
+11f3c47b736e37b274bbdef95a580a0c89bc9d9b 127.0.0.1:18159 slave b3734a60336856f8c4ef08efe763ae3ac32bb94a 0 1597118520000 20 connected
+5ed447baf1f1c6c454459c24809ffc197809cb6b 127.0.0.1:18104 master - 0 1597118523932 5 connected 6554-8191
+8d0061ff0bc8fcc0e8a9fa5db8d6ab0b7b7ba9d0 127.0.0.1:18154 slave 5ed447baf1f1c6c454459c24809ffc197809cb6b 0 1597118521000 15 connected
+b3734a60336856f8c4ef08efe763ae3ac32bb94a 127.0.0.1:18109 master - 0 1597118528026 10 connected 14746-16383
+f6670f4b8570758d509b5a0341a5151abea599ea 127.0.0.1:18152 slave ba39bada8a2e393f76d265ea02d3e078c9406a93 0 1597118524959 13 connected
+128a527bba2823e547e8138a77aebcfec7e55342 127.0.0.1:18150 slave 7e72dff98fdda09cf97e02420727fd8b6564b6ae 0 1597118524000 11 connected
+c3b5e673033758d77680e4534855686649fe5daa 127.0.0.1:18101 myself,master - 0 1597118523000 2 connected 1638-3276
+6f89f0b44f0a515865173984b95fc3f6fe4e7d72 127.0.0.1:18106 master - 0 1597118522000 7 connected 9830-11468
+ba39bada8a2e393f76d265ea02d3e078c9406a93 127.0.0.1:18102 master - 0 1597118520000 3 connected 3277-4914
+f004736cb50724f089289af34bd8da2e98b07a0b 127.0.0.1:18153 slave 16da3917eff32cde8942660324c7374117902b01 0 1597118524000 14 connected
+ae71f4430fba6a019e4111c3d26e27e225764200 127.0.0.1:18108 master - 0 1597118525985 9 connected 13107-14745
+ab72ae8dafc8a3f3229157cf5965bbfa1db6c726 127.0.0.1:18151 slave c3b5e673033758d77680e4534855686649fe5daa 0 1597118523000 12 connected
+208496ceb24eba1e26611071e185007b1ad552c5 127.0.0.1:18155 slave d4cdcfdfdfb966a74a1bafce8969f956b5312094 0 1597118520000 16 connected
+bbcba7c269fb8162e0f7ef5807e079ba06fc032b 127.0.0.1:18157 slave d531628bf7b2afdc095e445d21dedc2549cc4590 0 1597118513713 18 connected
+3d3af1bf3dec40fe0d5dbe1314638733dadb686e 127.0.0.1:18156 slave 6f89f0b44f0a515865173984b95fc3f6fe4e7d72 0 1597118523000 17 connected
+localhost:18101>
+localhost:18101> cluster slots
+ 1) 1) (integer) 11469
+ 2) (integer) 13106
+ 3) 1) "127.0.0.1"
+ 2) (integer) 18107
+ 4) 1) "127.0.0.1"
+ 2) (integer) 18157
+ 2) 1) (integer) 4915
+ 2) (integer) 6553
+ 3) 1) "127.0.0.1"
+ 2) (integer) 18103
+ 4) 1) "127.0.0.1"
+ 2) (integer) 18153
+ 3) 1) (integer) 0
+ 2) (integer) 1637
+ 3) 1) "127.0.0.1"
+ 2) (integer) 18100
+ 4) 1) "127.0.0.1"
+ 2) (integer) 18150
+ 4) 1) (integer) 8192
+ 2) (integer) 9829
+ 3) 1) "127.0.0.1"
+ 2) (integer) 18105
+ 4) 1) "127.0.0.1"
+ 2) (integer) 18155
+ 5) 1) (integer) 6554
+ 2) (integer) 8191
+ 3) 1) "127.0.0.1"
+ 2) (integer) 18104
+ 4) 1) "127.0.0.1"
+ 2) (integer) 18154
+ 6) 1) (integer) 14746
+ 2) (integer) 16383
+ 3) 1) "127.0.0.1"
+ 2) (integer) 18109
+ 4) 1) "127.0.0.1"
+ 2) (integer) 18159
+ 7) 1) (integer) 1638
+ 2) (integer) 3276
+ 3) 1) "127.0.0.1"
+ 2) (integer) 18101
+ 4) 1) "127.0.0.1"
+ 2) (integer) 18151
+ 8) 1) (integer) 9830
+ 2) (integer) 11468
+ 3) 1) "127.0.0.1"
+ 2) (integer) 18106
+ 4) 1) "127.0.0.1"
+ 2) (integer) 18156
+ 9) 1) (integer) 3277
+ 2) (integer) 4914
+ 3) 1) "127.0.0.1"
+ 2) (integer) 18102
+ 4) 1) "127.0.0.1"
+ 2) (integer) 18152
+10) 1) (integer) 13107
+ 2) (integer) 14745
+ 3) 1) "127.0.0.1"
+ 2) (integer) 18108
+ 4) 1) "127.0.0.1"
+ 2) (integer) 18158
+localhost:18101>
+
+With the CONFIG command, you can set or get the configuration of each feature.
+1) Get
+localhost:18101> config get maxmemory
+1) "maxmemory"
+2) "300mb"
+localhost:18101> config set maxmemory 310mb
+OK
+
+2) Set
+localhost:18101> config set maxmemory 310mb
+OK
+localhost:18101> config get maxmemory
+1) "maxmemory"
+2) "310mb"
+
+3) Rewrite
+With the config set command, you can change the configuration only in memory.
+To save the modification to disk, use config rewrite after setting.
localhost:18101> config rewrite
+OK
+localhost:18101>
+
+4) DIR
+With the dir parameter (config get dir), you can check the path of the directory that each node uses to save .rdb, .aof, db and *.conf files.
localhost:18101> config get dir
+1) "dir"
+2) "/sata_ssd/ssd_03/nvkvs/myaccount"
+
+
+
+
+
+
+
+ kubectl -n metavision exec -it pod/ltdbv2-beeline-857f578cd9-d7kc4 -- beeline.sh
+
+0: jdbc:hive2://ltdbv2-http-svc:13000> select * from files limit 3;
+
+
+CREATE TABLE IF NOT EXISTS ltdb.metavision.img_feats_noann(
+id BIGINT,
+is_s3 BOOLEAN,
+bucket STRING,
+obj_key STRING,
+features ARRAY<FLOAT>,
+meta STRING
+) USING lightning
+LOCATION '127.0.0.1:18500'
+TBLPROPERTIES ('partition.size'=2, 'partition.0'='bucket','partition.1'='id');
+
+CREATE TABLE IF NOT EXISTS ltdb.metavision.img_feats(
+id BIGINT,
+is_s3 BOOLEAN,
+bucket STRING,
+obj_key STRING,
+features ARRAY<FLOAT>,
+meta STRING
+) USING lightning
+LOCATION 'ltdbv2:6379'
+TBLPROPERTIES ('partition.size'=2, 'partition.0'='bucket','partition.1'='id', 'feature_idx'='4', 'ann_type'='1', 'feature_dim'='1024', 'ef_construction'='500', 'ann_max_elem'='10000', 'ann_m'='20');
+
+CREATE TABLE IF NOT EXISTS ltdb.parquet.temptable(
+id BIGINT,
+is_s3 BOOLEAN,
+bucket STRING,
+obj_key STRING,
+features ARRAY<FLOAT>,
+meta STRING
+) USING parquet LOCATION 's3a://upload-data/real/vision-ai-private-data_6.csv.ViT-H-14.laion2b_s32b_b79k.975.parquet';
+
+INSERT INTO ltdb.metavision.img_feats
+SELECT
+(CAST(RANDOM() * 1000000 AS INTEGER) % 400) AS id,
+is_s3,
+CONCAT('metavision-', bucket) AS bucket,
+obj_key,
+features,
+meta
+FROM
+ltdb.parquet.temptable
+LIMIT 100;
+
+SELECT * FROM ltdb.metavision.img_feats;
+SELECT count(obj_key) FROM ltdb.metavision.img_feats;
+
+DESCRIBE formatted ltdb.metavision.img_feats;
+
+DROP TABLE IF EXISTS ltdb.parquet.temptable;
+DROP TABLE IF EXISTS ltdb.metavision.img_feats;
+
+kubectl -n metavision exec -it thunderquery-68544ff5f7-9shjv -- thunderquery-cli ltdbv2-0.ltdbv2
+
+select bucket, obj_key, ann(features, [-0.009953999, -0.0006904541, -0.006250763, -0.009839512, 0.012631393, 0.024262842, -0.029540457, -0.01707404, 0.0061618676, 0.029112583, ... , -0.011023628]) as ann_result from ltdb.metavision.img_feats limit 2;
+
+select bucket, obj_key, euclideandistance(features, [-0.009953999, -0.0006904541, -0.006250763, -0.009839512, 0.012631393, 0.024262842, -0.029540457, -0.01707404, 0.0061618676, 0.029112583, ... , -0.011023628]) as knn_result from ltdb.metavision.img_feats limit 2;
+
+$ curl --location --request POST http://metavision.k8s.lightningdb/ltdbv2-http/ingest/table \
+--header "Content-Type: text/plain" \
+--data "{
+'table': 'ltdb.metavision.img_feats',
+'schema': [{'name': 'id', 'typ': 'BIGINT'},
+{'name': 'is_s3', 'typ': 'BOOLEAN'},
+{'name': 'bucket', 'typ': 'STRING'},
+{'name': 'obj_key', 'typ': 'STRING'},
+{'name': 'features', 'typ': 'ARRAY<FLOAT>'},
+{'name': 'meta', 'typ': 'STRING'}],
+'loc': 'ltdbv2:6379',
+'props': [{'key': 'partition.size', 'val': '2'},
+{'key': 'partition.0', 'val': 'bucket'},
+{'key': 'partition.1', 'val': 'id'},
+{'key': 'feature_idx', 'val': '4'},
+{'key': 'ann_type', 'val': '1'},
+{'key': 'feature_dim', 'val': '1024'},
+{'key': 'ef_construction', 'val': '500'},
+{'key': 'ann_max_elem', 'val': '10000'},
+{'key': 'ann_m', 'val': '20'}]
+}"
+
+$ curl --location --request POST http://metavision.k8s.lightningdb/ltdbv2-http/ingest/data \
+--header "Content-Type: text/plain" \
+--data "{
+'src_format': 'parquet',
+'src_loc': 's3a://upload-data/real/vision-ai-private-data_6.csv.ViT-H-14.laion2b_s32b_b79k.975.parquet',
+'dest_table': 'ltdb.metavision.img_feats',
+'limit': 100,
+'src_cols_with_random': [{'name': 'id', 'range': 400}],
+'src_cols_to_modify': [{'name': 'bucket', 'prefix': 'metavision-'}]
+}"
+
+$ curl --location --request POST http://metavision.k8s.lightningdb/ltdbv2-http/query \
+--header "Content-Type: text/plain" \
+--data "SELECT count(obj_key) FROM ltdb.metavision.img_feats"
+
+$ curl --location --request GET http://metavision.k8s.lightningdb/ltdbv2-http/ingest/table/ltdb.metavision.img_feats
+
+$ curl --location --request DELETE http://metavision.k8s.lightningdb/ltdbv2-http/ingest/table/ltdb.metavision.img_feats
+
+$ curl -d 'select bucket, obj_key, ann(features, [-0.009953999, -0.0006904541, -0.006250763, -0.009839512, 0.012631393, 0.024262842, -0.029540457, -0.01707404, 0.0061618676, 0.029112583, ... , -0.011023628]) as ann_result from ltdb.metavision.img_feats limit 2;' http://metavision.k8s.lightningdb/thunderquery/sql
+
+$ curl -d 'select bucket, obj_key, euclideandistance(features, [-0.009953999, -0.0006904541, -0.006250763, -0.009839512, 0.012631393, 0.024262842, -0.029540457, -0.01707404, 0.0061618676, ... , -0.011023628]) as ann_result from ltdb.metavision.img_feats limit 2;' http://metavision.k8s.lightningdb/thunderquery/sql
+
+
+
+
+
+
+
+If you are using LTCLI for the first time after the EC2 instance was created, please update LTCLI as shown below.
+pip install ltcli --upgrade --user
+
+(1) Run
+To run LTCLI, ${FBPATH} should be set. If not, the following error messages will be shown.
+To start using LTCLI, you should set env FBPATH
+ex)
+export FBPATH=$HOME/.flashbase
+
+Tip
+In the case of EC2 Instance, this path is set automatically.
+Run LTCLI by typing 'ltcli'
+$ ltcli
+
+When LTCLI starts for the first time, you need to confirm 'base_directory'.
+[~/tsr2]1 is the default value.
+Type base directory of LightningDB [~/tsr2]
+~/tsr2
+OK, ~/tsr2
+
+In '${FBPATH}/.flashbase/config', you can modify 'base_directory'.
+If you log in to LTCLI normally, LTCLI starts on the last visited cluster. In the case of the first login, '-' is shown instead of a cluster number.
+root@lightningdb:->
+
+...
+...
+
+root@lightningdb:1>
+
+Tip
+In this page, '$' means that you are in CentOS and '>' means that you are in LTCLI.
+(2) Log messages
+Log messages of LTCLI are saved in '$FBPATH/logs/fb-roate.log'.
+Its maximum file size is 1 GiB, and the log is rolled over when the size limit is exceeded.
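+For example, you can watch this log while running LTCLI commands (assuming FBPATH is set as above):
+$ tail -f $FBPATH/logs/fb-roate.log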
+Deploy is the procedure that installs LightningDB with the specified cluster number.
+You can create a LightningDB cluster with the following command.
+> deploy 1
+
+After the deploy command, you will be prompted for the following information; each prompt provides its last used value.
+Use the option below if you do not want to save the last used values.
+> deploy --history-save=False
+
+(1) Select installer
+Select installer
+
+ [ INSTALLER LIST ]
+ (1) [DOWNLOAD] lightningdb.release.master.5a6a38.bin
+ (2) [LOCAL] lightningdb.release.master.dbcb9e.bin
+ (3) [LOCAL] lightningdb.trial.master.dbcb9e-dirty.bin
+ (4) [LOCAL] lightningdb.trial.master.dbcb9e.bin
+
+Please enter the number, file path or url of the installer you want to use.
+you can also add file in list by copy to '$FBPATH/releases/'
+1
+OK, lightningdb.release.master.5a6a38.bin
+
+Tip
+LOCAL means an installer file under the path '$FBPATH/releases/' on your local machine. DOWNLOAD refers to a file that can be downloaded; up to 5 files are displayed, newest first. To confirm the recommended Lightning DB version, see the Release Notes.
+Select a number to use that file. A DOWNLOAD file will be used after downloading. The downloaded file is saved under '$FBPATH/releases'.
+Select installer
+
+ [ INSTALLER LIST ]
+ (empty)
+
+Please enter file path or url of the installer you want to use
+you can also add file in list by copy to '$FBPATH/releases/'
+https://flashbase.s3.ap-northeast-2.amazonaws.com/latest/lightningdb.release.master.5a6a38.bin
+Downloading lightningdb.release.master.5a6a38.bin
+[==================================================] 100%
+OK, lightningdb.release.master.5a6a38.bin
+
+If the installer list is empty as above, you can also enter a file path or URL. If you enter a URL, the file is downloaded and used. The downloaded file is saved under '$FBPATH/releases'.
+(2) Type Hosts
+An IP address or hostname can be used. In the case of several hosts, separate them with commas(',').
+Please type host list separated by comma(,) [127.0.0.1]
+
+OK, ['127.0.0.1']
+
+(3) Type Masters
+How many masters would you like to create on each host? [10]
+
+OK, 10
+Please type ports separate with a comma(,) and use a hyphen(-) for range. [18100-18109]
+
+OK, ['18100-18109']
+
+Define how many master processes will be created per server in the cluster.
+Tip
+To create a cluster, at least 3 master processes are required.
+(4) Type information of slave
+How many replicas would you like to create on each master? [0]
+
+OK, 0
+
+Define how many slave processes will be created for each master process.
+(5) Type the count of SSD(disk) and the path of DB files
+How many ssd would you like to use? [4]
+
+OK, 4
+Type prefix of db path [/nvme/data_]
+
+OK, /nvme/data_
+
+With this setting, LightningDB will use 4 disk paths('/nvme/data_01', '/nvme/data_02', '/nvme/data_03', '/nvme/data_04').
+Tip
+To use this setting, an 'nvkvs' directory must be created under each disk path, and its ownership and permissions (chown/chmod) must be configured as follows. A loop covering all four paths is sketched after the example below.
+$ pwd
+/nvme/data_01
+$ mkdir nvkvs
+$ chown ltdb nvkvs // The current user is 'ltdb'
+$ chmod 755 nvkvs
+$ ls -alh
+drwxr-xr-x 10 ltdb ltdb 4.0K Apr 27 14:34 .
+drwxr-xr-x 33 ltdb ltdb 4.0K Feb  4 10:19 ..
+drwxrwxr-x 3 ltdb ltdb 4.0K Jun  5 18:36 nvkvs // The current user is 'ltdb'
+...
+
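+The same setup can be applied to every disk path in one shot. A minimal sketch, assuming the four paths '/nvme/data_01' ~ '/nvme/data_04' and the user 'ltdb' from the example above:
+$ for i in 01 02 03 04; do
+    mkdir -p /nvme/data_${i}/nvkvs       # create the required directory under each disk path
+    chown ltdb /nvme/data_${i}/nvkvs     # owner must be the OS user that runs LightningDB
+    chmod 755 /nvme/data_${i}/nvkvs
+  done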
+(6) Check all settings finally
+Finally, all settings will be shown and confirmation will be requested like below.
++--------------+---------------------------------------+
+| NAME | VALUE |
++--------------+---------------------------------------+
+| installer | lightningdb.release.master.5a6a38.bin |
+| hosts | 127.0.0.1 |
+| master ports | 18100-18109 |
+| ssd count | 4 |
+| db path | /nvme/data_ |
++--------------+---------------------------------------+
+Do you want to proceed with the deploy accroding to the above information? (y/n)
+y
+
+(7) Deploy cluster
+After deployment is completed, the following messages are shown and LTCLI for the cluster is activated.
+Check status of hosts...
++-----------+--------+
+| HOST | STATUS |
++-----------+--------+
+| 127.0.0.1 | OK |
++-----------+--------+
+OK
+Checking for cluster exist...
++-----------+--------+
+| HOST | STATUS |
++-----------+--------+
+| 127.0.0.1 | CLEAN |
++-----------+--------+
+OK
+Transfer installer and execute...
+ - 127.0.0.1
+Sync conf...
+Complete to deploy cluster 1.
+Cluster '1' selected.
+
+When an error occurs during deployment, error messages will be shown like below.
+(8) Errors
+Host connection error
+Check status of hosts...
++-------+------------------+
+| HOST | STATUS |
++-------+------------------+
+| nodeA | OK |
+| nodeB | SSH ERROR |
+| nodeC | UNKNOWN HOST |
+| nodeD | CONNECTION ERROR |
++-------+------------------+
+There are unavailable host.
+
+Cluster already exist
+Checking for cluster exist...
++-------+---------------+
+| HOST | STATUS |
++-------+---------------+
+| nodeA | CLEAN |
+| nodeB | CLEAN |
+| nodeC | CLUSTER EXIST |
+| nodeD | CLUSTER EXIST |
++-------+---------------+
+Cluster information exist on some hosts.
+
+Not include localhost
+ Check status of hosts...
+ +-------+------------------+
+ | HOST | STATUS |
+ +-------+------------------+
+ | nodeB | OK |
+ | nodeC | OK |
+ | nodeD | OK |
+ +-------+------------------+
+ Must include localhost.
+
+If localhost(127.0.0.1) is not included in the host information, this error occurs. In this case, please add localhost to the host list.
+Create a LightningDB cluster using the 'cluster create' command.
+ec2-user@lightningdb:1> cluster create
+Check status of hosts...
+OK
+Backup redis master log in each MASTER hosts...
+ - 127.0.0.1
+create redis data directory in each MASTER hosts
+ - 127.0.0.1
+sync conf
++-----------+--------+
+| HOST | STATUS |
++-----------+--------+
+| 127.0.0.1 | OK |
++-----------+--------+
+OK
+Starting master nodes : 127.0.0.1 : 18100|18101|18102|18103|18104|18105|18106|18107|18108|18109 ...
+Wait until all redis process up...
+cur: 10 / total: 10
+Complete all redis process up
+>>> Creating cluster
++-----------+-------+--------+
+| HOST | PORT | TYPE |
++-----------+-------+--------+
+| 127.0.0.1 | 18100 | MASTER |
+| 127.0.0.1 | 18101 | MASTER |
+| 127.0.0.1 | 18102 | MASTER |
+| 127.0.0.1 | 18103 | MASTER |
+| 127.0.0.1 | 18104 | MASTER |
+| 127.0.0.1 | 18105 | MASTER |
+| 127.0.0.1 | 18106 | MASTER |
+| 127.0.0.1 | 18107 | MASTER |
+| 127.0.0.1 | 18108 | MASTER |
+| 127.0.0.1 | 18109 | MASTER |
++-----------+-------+--------+
+replicas: 0
+
+Do you want to proceed with the create according to the above information? (y/n)
+y
+Cluster meet...
+ - 127.0.0.1:18107
+ - 127.0.0.1:18106
+ - 127.0.0.1:18101
+ - 127.0.0.1:18100
+ - 127.0.0.1:18103
+ - 127.0.0.1:18109
+ - 127.0.0.1:18102
+ - 127.0.0.1:18108
+ - 127.0.0.1:18105
+ - 127.0.0.1:18104
+Adding slots...
+ - 127.0.0.1:18107, 1642
+ - 127.0.0.1:18106, 1638
+ - 127.0.0.1:18101, 1638
+ - 127.0.0.1:18100, 1638
+ - 127.0.0.1:18103, 1638
+ - 127.0.0.1:18109, 1638
+ - 127.0.0.1:18102, 1638
+ - 127.0.0.1:18108, 1638
+ - 127.0.0.1:18105, 1638
+ - 127.0.0.1:18104, 1638
+Check cluster state and asign slot...
+Ok
+create cluster complete.
+ec2-user@lightningdb:1> cli ping --all
+alive redis 10/10
+
+ec2-user@lightningdb:1>
+
+From now on, you can try ingestion and queries in LightningDB with Zeppelin. For further information about LTCLI commands, please see the Command Line page.
+If you type 'enter' without any text, the default value is applied. In some cases, the default value will not be provided. ↩
+1. Kafka Cluster
+(1) Install kafka
+Add the $KAFKA_HOME path to ~/.bash_profile.
+(2) Install zookeeper
+Set the $dataDir and $server.1 ~ $server.n properties in $KAFKA_HOME/config/zookeeper.properties.
+For example, with two servers(my-server1, my-server2), set the server.1 and server.2 fields as below.
+dataDir=/hdd_01/zookeeper
+# the port at which the clients will connect
+clientPort=2181
+# disable the per-ip limit on the number of connections since this is a non-production config
+maxClientCnxns=0
+
+initLimit=5
+syncLimit=2
+
+# Zookeeper will use these ports (2891, etc.) to connect the individual follower nodes to the leader nodes.
+# The other ports (3881, etc.) are used for leader election in the ensemble.
+server.1=my-server1:2888:3888
+server.2=my-server2:2888:3888
+
+Create ${dataDir}/myid in each server with its own id: echo "1" > ${dataDir}/myid in my-server1 and echo "2" > ${dataDir}/myid in my-server2.
+Then start zookeeper in each server.
+ > $KAFKA_HOME/bin/zookeeper-server-start.sh config/zookeeper.properties &
+
+(3) Start kafka broker
+Modify $KAFKA_HOME/config/server.properties in each server.
+Set a unique Broker ID in each server. For example, in my-server1:
+ broker.id=1 // '2' in case of my-server2
+
+Set the zookeeper connection string, using ',' as the separator.
+ zookeeper.connect=my-server1:2181,my-server2:2181
+
+ log.dirs=/hdd_01/kafka,/hdd_02/kafka,/hdd_03/kafka,/hdd_04/kafka
+
+ # default value: 168
+ log.retention.hours=168
+
+ # '-1' means 'unlimited'.
+ log.retention.bytes=-1
+
+ # If the size of a produced message exceeds this limit, an exception is thrown.
+ # If you want to produce a message with many rows, increase this value and restart the broker.
+ # default value: 1000012 byte
+ message.max.bytes=1000012
+
+ > $KAFKA_HOME/bin/kafka-server-start.sh config/server.properties &
+
+ # --zookeeper localhost:2181 : Need zookeeper host & clientPort, because topics and partition information are stored in zookeeper.
+ # --topic nvkvs : For example, set 'nvkvs' as topic name.
+ # --partitions 16 : For example, set 2 partitions in each disk and use 16 partitions((# of cluster nodes) X (# of disks in each node) X 2 = 2 X 4 X 2 = 16).
+ # --replication-factor 2 : Create 1 follower for each partition.
+ > $KAFKA_HOME/bin/kafka-topics.sh --create --zookeeper localhost:2181 --replication-factor 2 --partitions 16 --topic nvkvs
+ # Check the generated topic: the broker.id of Replicas is different from the broker.id of the Leader.
+ > $KAFKA_HOME/bin/kafka-topics.sh --zookeeper localhost:2181 --describe --topic nvkvs
+
+ Topic:nvkvs PartitionCount:16 ReplicationFactor:2 Configs:
+ Topic: nvkvs Partition: 0 Leader: 0 Replicas: 0,1 Isr: 1,0
+ Topic: nvkvs Partition: 1 Leader: 1 Replicas: 1,0 Isr: 1,0
+ Topic: nvkvs Partition: 2 Leader: 0 Replicas: 0,1 Isr: 1,0
+ Topic: nvkvs Partition: 3 Leader: 1 Replicas: 1,0 Isr: 1,0
+ Topic: nvkvs Partition: 4 Leader: 0 Replicas: 0,1 Isr: 1,0
+ Topic: nvkvs Partition: 5 Leader: 1 Replicas: 1,0 Isr: 1,0
+ Topic: nvkvs Partition: 6 Leader: 0 Replicas: 0,1 Isr: 1,0
+ Topic: nvkvs Partition: 7 Leader: 1 Replicas: 1,0 Isr: 1,0
+ Topic: nvkvs Partition: 8 Leader: 0 Replicas: 0,1 Isr: 1,0
+ Topic: nvkvs Partition: 9 Leader: 1 Replicas: 1,0 Isr: 1,0
+ Topic: nvkvs Partition: 10 Leader: 0 Replicas: 0,1 Isr: 1,0
+ Topic: nvkvs Partition: 11 Leader: 1 Replicas: 1,0 Isr: 1,0
+ Topic: nvkvs Partition: 12 Leader: 0 Replicas: 0,1 Isr: 1,0
+ Topic: nvkvs Partition: 13 Leader: 1 Replicas: 1,0 Isr: 1,0
+ Topic: nvkvs Partition: 14 Leader: 0 Replicas: 0,1 Isr: 1,0
+ Topic: nvkvs Partition: 15 Leader: 1 Replicas: 1,0 Isr: 1,0
+
+ # Topic delete Command
+ > $KAFKA_HOME/bin/kafka-topics.sh --zookeeper localhost:2181 --delete --topic nvkvs
+
+ # Topic partition modification
+ > $KAFKA_HOME/bin/kafka-topics.sh --zookeeper localhost:2181/chroot --alter --topic nvkvs --partitions 6
+
+2. Kafka Topic Information
+ > $KAFKA_HOME/bin/kafka-consumer-groups.sh --list --bootstrap-server localhost:9092
+
+ > $KAFKA_HOME/bin/kafka-console-consumer.sh --bootstrap-server localhost:9092 --topic nvkvs --from-beginning
+
+ # Add '--group {consumer group name}'
+ > $KAFKA_HOME/bin/kafka-consumer-groups.sh --bootstrap-server localhost:9092 --describe --group (Consumer group name)
+
+ TOPIC PARTITION CURRENT-OFFSET LOG-END-OFFSET LAG CONSUMER-ID HOST CLIENT-ID
+ nvkvs 4 272904 272904 0 - - -
+ nvkvs 12 272904 272904 0 - - -
+ nvkvs 15 273113 273113 0 - - -
+ nvkvs 6 272906 272906 0 - - -
+ nvkvs 0 272907 272907 0 - - -
+ nvkvs 8 272905 272905 0 - - -
+ nvkvs 3 273111 273111 0 - - -
+ nvkvs 9 273111 273111 0 - - -
+ nvkvs 13 273111 273111 0 - - -
+ nvkvs 10 272912 272912 0 - - -
+ nvkvs 1 273111 273111 0 - - -
+ nvkvs 11 273112 273112 0 - - -
+ nvkvs 14 272904 272904 0 - - -
+ nvkvs 7 273110 273110 0 - - -
+ nvkvs 5 273111 273111 0 - - -
+ nvkvs 2 272905 272905 0 - - -
+
+ # --shift-by <positive_or_negative_integer>
+ # --group < name of group to shift>
+ > $KAFKA_HOME/bin/kafka-consumer-groups.sh --bootstrap-server localhost:9092 --reset-offsets --shift-by -10000 --execute --group (consumer group name) --topic nvkvs
+
+1. Kaetlyn Consumer
+ KAFKA_SERVER : host:port of the Kafka broker
+ DRIVER_MEMORY, EXECUTOR_MEMORY : Memory of the Spark driver/executor in YARN. After start, check the 'FGC' count with 'jstat -gc' and tune these values (see the sketch after this list).
+ EXECUTERS, EXECUTER_CORES : Basically, as many consumers as the number of kafka partitions are generated. Based on this, tune the number of EXECUTERS and EXECUTER_CORES.
+ JSON_PATH : The path of the TABLE json. HDFS paths are not supported. This is a relative path from tsr2-kaetlyn.
+ KAFKA_CONSUMER_GROUP_ID : consumer group id
+ KAFKA_CONSUMING_TOPIC_LIST : Topic list with ',' as the separator.
+ JOB_GENERATION_PERIOD : With this period, the latest offset is checked and a consuming job is executed.
+ MAX_RATE_PER_PARTITION : The maximum number of offsets that a consumer processes per partition within a job period.
+
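+A quick way to sanity-check these settings (a sketch; the 16-partition figure comes from the topic example above, and the application name and executor pid are assumptions, not fixed values):
+# Upper bound implied by the settings above:
+#   16 partitions x MAX_RATE_PER_PARTITION(100) = 1,600 records per JOB_GENERATION_PERIOD.
+$ yarn application -list | grep kaetlyn          # find the consumer's YARN application and its worker hosts
+$ jps | grep CoarseGrainedExecutorBackend        # on a worker host, find an executor pid
+$ jstat -gc {executor pid} 5s                    # watch the FGC column; if it grows quickly, increase EXECUTOR_MEMORY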
+> cfc 1 (or c01)
+> tsr2-kaetlyn edit
+
+#!/bin/bash
+###############################################################################
+# Common variables
+SPARK_CONF=${SPARK_CONF:-$SPARK_HOME/conf}
+SPARK_BIN=${SPARK_BIN:-$SPARK_HOME/bin}
+SPARK_SBIN=${SPARK_SBIN:-$SPARK_HOME/sbin}
+SPARK_LOG=${SPARK_LOG:-$SPARK_HOME/logs}
+
+SPARK_METRICS=${SPARK_CONF}/metrics.properties
+SPARK_UI_PORT=${SPARK_UI_PORT:-14040}
+
+KAFKA_SERVER=my-server1:9092
+
+###############################################################################
+# Properties for Consumer
+DRIVER_MEMORY=2g
+
+EXECUTOR_MEMORY=2g
+EXECUTERS=16
+EXECUTER_CORES=4
+
+JSON_PATH=~/Flashbase/flashbase-benchmark/json/load_no_skew
+KAFKA_CONSUMER_GROUP_ID=nvkvs_redis_connector
+KAFKA_CONSUMING_TOPIC_LIST=nvkvs
+JOB_GENERATION_PERIOD=1
+MAX_RATE_PER_PARTITION=100
+...
+
+2. Kaetlyn Consumer start/stop
+> tsr2-kaetlyn consumer start
+> tsr2-kaetlyn consumer monitor
+
+> yarn application -list
+
+> tsr2-kaetlyn consumer stop
+
+3. Kaetlyn Log level modification
+ > vi $SPARK_HOME/conf/logback-kaetlyn.xml
+
+Start kafka producer.
+kafka-console-producer.sh --broker-list localhost:9092 --topic {topic name} < {filename to ingest}
+
+To produce messages for a kaetlyn consumer, 2 header fields should be included (see the kafkacat examples below).
+TABLE_ID
+SEPARATOR
+
+If you use 'kafkacat', you can produce messages with these additional header fields. (https://docs.confluent.io/3.3.0/app-development/kafkacat-usage.html# )
+1. How to install kafkacat
+$ yum install gcc-c++
+
+$ git clone https://github.com/edenhill/librdkafka
+
+$ cd librdkafka
+$ ./configure
+$ make
+$ sudo make install
+
+$ git clone https://github.com/edenhill/kafkacat
+$ cd kafkacat
+$ ./configure
+$ make
+$ sudo make install
+
+$ ldd kafkacat
+
+Add the library path below to the linker configuration so that the libraries installed above can be found, then refresh the cache.
+/usr/local/lib
+
+$ ldconfig -v
+
+$ kafkacat
+
+2. Producing with kafkacat
+1) Produce a single file
+kafkacat -b localhost:9092 -t {topic name} -T -P -H TABLE_ID='{table id}' -H SEPARATOR='|' -l {filename}
+
+2) Produce all files in a directory
+After moving to the directory path,
+ls | xargs -n 1 kafkacat -q -b localhost:9092 -t {topic name} -P -H TABLE_ID='{table id}' -H SEPARATOR='|' -l
+
+3. kafka-utils.sh
+With kafka-utils.sh, you can check the status of the kafka broker.
+Because 'kafka-utils.sh' exists under the sbin path of each cluster, use it after selecting the cluster with 'cfc {cluster number}'.
+[C:6][ltdb@d205 ~]$ which kafka-utils.sh
+~/tsr2/cluster_6/tsr2-assembly-1.0.0-SNAPSHOT/sbin/kafka-utils.sh
+
+After 'CONSUMER_GROUP_ID' is set, kafka-utils.sh is enabled.
+[C:6][ltdb@d205 ~]$ kafka-utils.sh help
+Please, set $CONSUMER_GROUP_ID first.
+
+You need to set the variables in 'kafka-utils.sh' first, like below.
+#!/bin/bash
+
+CONSUMER_GROUP_ID='nvkvs_redis_connector' // Need to modify
+KAFKA_SERVER=localhost:9092
+ZOOKEEPER_SERVER=localhost:2181...
+
+[C:6][ltdb@d205 ~/kafka/config]$ kafka-utils.sh help
+kafka-utils.sh offset-check
+kafka-utils.sh offset-monitor
+kafka-utils.sh offset-earliest topic_name
+kafka-utils.sh offset-latest topic_name
+kafka-utils.sh offset-move topic_name 10000
+kafka-utils.sh error-monitor error_topic_name
+kafka-utils.sh consumer-list
+kafka-utils.sh topic-check topic_name
+kafka-utils.sh topic-create topic_name 10
+kafka-utils.sh topic-delete topic_name
+kafka-utils.sh topic-config-check topic_name
+kafka-utils.sh topic-config-set topic_name config_name config_value
+kafka-utils.sh topic-config-remove topic_name config_name
+kafka-utils.sh topic-list
+kafka-utils.sh message-earliest topic_name
+kafka-utils.sh message-latest topic_name
+
+If a command needs arguments, error messages like below are shown.
+[C:6][ltdb@d205 ~/kafka/config]$ kafka-utils.sh offset-move
+Please, specify topic name & the size of moving offset (ex) kafka-utils.sh offset-move my-topic 100
+[C:6][ltdb@d205 ~/kafka/config]$ kafka-utils.sh topic-create
+Please, specify topic name and its partition count. (ex) kafka-utils.sh topic-create topic-new 10
+[C:6][ltdb@d205 ~/kafka/config]$
+
+For example,
+[C:6][ltdb@d205 ~]$ kafka-utils.sh message-earliest nvkvs3
+20160711055950|ELG|2635055200|34317|5|6091|1|25|0|11|0|100.0|0.0|0|2846|3|33|0|5|0|-1000|0.0|0.0|94932|1027|0|176|35.2|40|0|7818000000|109816071|10|0|6000000.0|164843|2.75|0|2592|6000000|0.04|1288488|1303|1338|0|530|1|88.33|0|721|67948|428|0|1|108|108.0|108|0|0.0|0|0|0|-1000|1|1|100.0|62|39.0|62.9|23.0|37.1|0|0|0|0|29|10|-7022851.0|59998.0|-117.05|-6865443.5|59998.0|-114.43|4|198060.0|59998.0|22.5|3.3|0|1|5.82|3|1.94||0|0|0|0|0|0|0|0|4|0|0|0|15|14|231|140|0|0|0|0|0|0|0|0|4|0|0|0|15|13|174|110|1|0|0|0|0|0|0|0|0|0|0|0|0|0|1|0|0|0|0|0|0|0|1|0|0|0|0|0|0|0|0|0|0.0|0.0|0.0|0.0|0.0|0.0|570.0|0.0|3.0|0.0|0.0|0.0|0.0|2.0|3.0|3.0|0.0|15.73|0.0|0.0|0.0|0.0|0.0|12.0|22.0|68.0|83.0|339.0|205.0|144.0|54.0|38.0|12.0|0.0|0.0|0.0|0.0|0.0|0.0|100.0|50.55|1:22,2:7|1.0|||||1:1,17:1,23:1|13.67|0|0|0.0|0.0|-1000||-1000||-1000|11|2|05
+Processed a total of 1 messages
+
+
+[C:6][ltdb@d205 ~]$ kafka-utils.sh topic-list
+__consumer_offsets
+nvkvs3
+topic-error
+topic_name
+
+
+[C:6][ltdb@d205 ~]$ kafka-utils.sh topic-create ksh 18
+Created topic ksh.
+
+
+[C:6][ltdb@d205 ~]$ kafka-utils.sh topic-check ksh
+Topic:ksh PartitionCount:18 ReplicationFactor:2 Configs:
+ Topic: ksh Partition: 0 Leader: 1 Replicas: 1,3 Isr: 1,3
+ Topic: ksh Partition: 1 Leader: 2 Replicas: 2,1 Isr: 2,1
+ Topic: ksh Partition: 2 Leader: 3 Replicas: 3,2 Isr: 3,2
+ Topic: ksh Partition: 3 Leader: 1 Replicas: 1,2 Isr: 1,2
+ Topic: ksh Partition: 4 Leader: 2 Replicas: 2,3 Isr: 2,3
+ Topic: ksh Partition: 5 Leader: 3 Replicas: 3,1 Isr: 3,1
+ Topic: ksh Partition: 6 Leader: 1 Replicas: 1,3 Isr: 1,3
+ Topic: ksh Partition: 7 Leader: 2 Replicas: 2,1 Isr: 2,1
+ Topic: ksh Partition: 8 Leader: 3 Replicas: 3,2 Isr: 3,2
+ Topic: ksh Partition: 9 Leader: 1 Replicas: 1,2 Isr: 1,2
+ Topic: ksh Partition: 10 Leader: 2 Replicas: 2,3 Isr: 2,3
+ Topic: ksh Partition: 11 Leader: 3 Replicas: 3,1 Isr: 3,1
+ Topic: ksh Partition: 12 Leader: 1 Replicas: 1,3 Isr: 1,3
+ Topic: ksh Partition: 13 Leader: 2 Replicas: 2,1 Isr: 2,1
+ Topic: ksh Partition: 14 Leader: 3 Replicas: 3,2 Isr: 3,2
+ Topic: ksh Partition: 15 Leader: 1 Replicas: 1,2 Isr: 1,2
+ Topic: ksh Partition: 16 Leader: 2 Replicas: 2,3 Isr: 2,3
+ Topic: ksh Partition: 17 Leader: 3 Replicas: 3,1 Isr: 3,1
+
+
+
+
+
+
+
+ Note
+This document explains how to use the 'flashbase' script for failover. If you use LTCLI, you can check the status of a failure and operate Lightning DB more easily and powerfully. Therefore, if possible, we recommend LTCLI rather than the 'flashbase' script.
+1) Redis
+2) Thriftserver
+select * from {table name} where ... limit 1;
+
+3) System resources
+1) Background
+When a redis-server is disconnected, the cluster first marks it as pFail and then as Fail, as shown below.
+543f81b6c5d6e29b9871ddbbd07a4524508d27e5 127.0.0.1:18202 master - 1585787616744 1585787612000 0 disconnected
+
+543f81b6c5d6e29b9871ddbbd07a4524508d27e5 127.0.0.1:18202 master,fail - 1585787616744 1585787612000 0 disconnected
+
+If the node-{port}.conf file is lost by a disk failure, the redis-server using that conf file creates a new uuid, and the previous uuid becomes noaddr. This noaddr uuid should be removed with the cluster forget command.
+// previous uuid of 18202
+543f81b6c5d6e29b9871ddbbd07a4524508d27e5 :0 master,fail,noaddr - 1585787799235 1585787799235 0 disconnected
+
+// new uuid of 18202
+001ce4a87de2f2fc62ff44e2b5387a3f0bb9837c 127.0.0.1:18202 master - 0 1585787800000 0 connected
+
+2) Check the status of the cluster
+1) check-distribution
+Show the distribution of master/slave in each server.
+> flashbase check-distribution
+check distribution of masters/slaves...
+SERVER NAME | M | S
+--------------------------------
+127.0.0.1 | 5 | 3
+--------------------------------
+Total nodes | 5 | 3
+
+2) find-masters
+> flashbase find-masters
+Use options(no-slave|no-slot|failovered)
+
+> flashbase find-masters no-slave
+127.0.0.1:18203
+127.0.0.1:18252
+
+> flashbase find-masters no-slot
+127.0.0.1:18202
+127.0.0.1:18253
+
+> flashbase find-masters failovered
+127.0.0.1:18250
+127.0.0.1:18252
+127.0.0.1:18253
+
+3) find-slaves
+flashbase find-slaves
+Use options(failbacked)
+
+> flashbase find-slaves failbacked
+127.0.0.1:18200
+
+4) find-masters-with-dir
+> flashbase find-masters-with-dir
+Error) Invalid arguments.
+ex. 'flashbase find-masters-with-dir 127.0.0.1 /DATA01/nvkvs/nvkvs'
+
+> flashbase find-masters-with-dir 127.0.0.1 /nvdrive0/ssd_01/nvkvs/nvkvs
+18200
+18204
+
+3) How to handle HW fault(in case of replication)
+1) cluster-failover.sh
+If some redis-servers are disconnected(killed/paused), you can fail over immediately and make the cluster status 'ok'.
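+A usage sketch (an assumption: like the other helper scripts, cluster-failover.sh lives under the cluster's sbin path and takes no arguments; check the script itself for exact options):
+> cluster-failover.sh
+> flashbase cli-all ping // verify that every node answers again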
+2) find-nodes-with-dir / find-masters-with-dir / failover-with-dir / kill-with-dir
+> flashbase find-masters-with-dir
+Error) Invalid arguments.
+ex. 'flashbase find-masters-with-dir 127.0.0.1 /DATA01/nvkvs/nvkvs'
+
+> flashbase find-masters-with-dir 127.0.0.1 /nvdrive0/ssd_02/nvkvs/nvkvs
+18200
+18204
+
+> failover-with-dir 127.0.0.1 /nvdrive0/ssd_02/nvkvs/nvkvs
+127.0.0.1:18250 will be master
+127.0.0.1:18254 will be master
+OK
+
+With kill-with-dir, kill all nodes that use the faulty disk.
+> flashbase kill-with-dir 127.0.0.1 /nvdrive0/ssd_02/nvkvs/nvkvs
+flashbase kill 18200
+flashbase kill 18204
+flashbase kill 18253
+
+> flashbase cli-all ping
+redis client for 127.0.0.1:18200
+Could not connect to Redis at 127.0.0.1:18200: Connection refused
+redis client for 127.0.0.1:18201
+PONG
+redis client for 127.0.0.1:18202
+PONG
+redis client for 127.0.0.1:18203
+PONG
+redis client for 127.0.0.1:18204
+Could not connect to Redis at 127.0.0.1:18204: Connection refused
+redis client for 127.0.0.1:18250
+PONG
+redis client for 127.0.0.1:18251
+PONG
+redis client for 127.0.0.1:18252
+PONG
+redis client for 127.0.0.1:18253
+Could not connect to Redis at 127.0.0.1:18253: Connection refused
+redis client for 127.0.0.1:18254
+PONG
+
+3) find-noaddr / forget-noaddr
+> flashbase find-noaddr // The prev uuid. Now not used anymore.
+1b5d70b57079a4549a1d2e8d0ac2bd7c50986372 :0 master,fail,noaddr - 1589853266724 1589853265000 1 disconnected
+
+> flashbase forget-noaddr // Remove the 'noaddr' uuid.
+(error) ERR Unknown node 1b5d70b57079a4549a1d2e8d0ac2bd7c50986372 // Because newly added node does not know the previous uuid.
+OK
+OK
+OK
+OK
+
+> flashbase find-noaddr // Check that the noaddr uuid is removed
+
+
+4) do-replicate
+> flashbase find-noslot > slaves
+
+> flashbase find-noslave > masters
+
+> python pairing.py slaves masters
+flashbase do-replicate 192.168.0.2:19003 192.168.0.4:19053
+flashbase do-replicate 192.168.0.2:19004 192.168.0.4:19054
+flashbase do-replicate 192.168.0.2:19005 192.168.0.4:19055
+...
+
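+A minimal sketch of what the pairing step produces, assuming the 'slaves' and 'masters' files each contain one host:port per line (the actual pairing.py may distribute the pairs differently, e.g. avoiding same-host pairs):
+$ paste slaves masters | while read SLAVE MASTER; do
+    echo "flashbase do-replicate ${SLAVE} ${MASTER}"   # print one do-replicate command per pair
+  done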
+> flashbase do-replicate 127.0.0.1:18202 127.0.0.1:18252
+Add 127.0.0.1:18202 as slave of master(127.0.0.1:18252)
+OK
+
+> flashbase cli -p 18202 info replication
+# Replication
+role:slave
+master_host:127.0.0.1
+master_port:18252
+master_link_status:down
+master_last_io_seconds_ago:-1
+master_sync_in_progress:0
+slave_repl_offset:1
+master_link_down_since_seconds:1585912329
+slave_priority:100
+slave_read_only:1
+connected_slaves:0
+master_repl_offset:0
+repl_backlog_active:0
+repl_backlog_size:1048576
+repl_backlog_first_byte_offset:0
+repl_backlog_histlen:0
+
+> flashbase do-replicate 127.0.0.1:18253 127.0.0.1:18203
+Add 127.0.0.1:18253 as slave of master(127.0.0.1:18203)
+OK
+
+> flashbase cli -p 18253 info replication
+# Replication
+role:slave
+master_host:127.0.0.1
+master_port:18203
+master_link_status:up
+master_last_io_seconds_ago:5
+master_sync_in_progress:0
+slave_repl_offset:29
+slave_priority:100
+slave_read_only:1
+connected_slaves:0
+master_repl_offset:0
+repl_backlog_active:0
+repl_backlog_size:1048576
+repl_backlog_first_byte_offset:0
+repl_backlog_histlen:0
+
+If the slave candidate is not included in the cluster, 'do-replicate' is done after 'cluster meet'.
+> flashbase do-replicate 127.0.0.1:18252 127.0.0.1:18202
+Add 127.0.0.1:18252 as slave of master(127.0.0.1:18202)
+Fail to get masters uuid
+'cluster meet' is done
+OK // 'cluster meet' is done successfully
+OK // 'cluster replicate' is done successfully
+
+5) reset-distribution
+To initialize the node distribution, use 'reset-distribution'.
+// Check the distribution of cluster nodes.
+> flashbase check-distribution
+check distribution of masters/slaves...
+SERVER NAME | M | S
+--------------------------------
+192.168.111.35 | 4 | 4
+192.168.111.38 | 0 | 8
+192.168.111.41 | 8 | 0
+--------------------------------
+Total nodes | 12 | 12
+
+...
+
+> flashbase reset-distribution
+192.168.111.38:20600
+OK
+192.168.111.38:20601
+OK
+192.168.111.38:20602
+OK
+192.168.111.38:20603
+OK
+
+...
+
+// Check the distribution of cluster nodes again.
+> flashbase check-distribution
+check distribution of masters/slaves...
+SERVER NAME | M | S
+--------------------------------
+192.168.111.35 | 4 | 4
+192.168.111.38 | 4 | 4
+192.168.111.41 | 4 | 4
+--------------------------------
+Total nodes | 12 | 12
+
+6) force-failover
+When a server needs to be shut down due to an HW fault or for maintenance, change all masters in the server to slaves by failing over to their slaves.
+> flashbase check-distribution
+check distribution of masters/slaves...
+SERVER NAME | M | S
+--------------------------------
+192.168.111.35 | 4 | 4
+192.168.111.38 | 4 | 4
+192.168.111.41 | 4 | 4
+--------------------------------
+Total nodes | 12 | 12
+
+> flashbase force-failover 192.168.111.41
+all masters in 192.168.111.41 will be slaves and their slaves will promote to masters
+192.168.111.35:20651 node will be master!
+OK
+192.168.111.38:20651 node will be master!
+OK
+192.168.111.35:20653 node will be master!
+OK
+192.168.111.38:20653 node will be master!
+OK
+
+> flashbase check-distribution
+check distribution of masters/slaves...
+SERVER NAME | M | S
+--------------------------------
+192.168.111.35 | 6 | 2
+192.168.111.38 | 6 | 2
+192.168.111.41 | 0 | 5
+--------------------------------
+Total nodes | 12 | 9
+
+4) How to handle HW fault(in case of no replication)
+After disk replacement, the nodes-{port number}.conf file is lost, so a new uuid is generated after restart.
+Because the previous uuid in the cluster is lost, it is changed to noaddr. This noaddr uuid should be removed with the cluster forget command.
+Because the restarted node with the new uuid has no slot information, a slot range should be assigned by using 'addslots'.
+1) Find the noaddr node and check its slot range.
> flashbase find-noaddr
+7c84d9bb36ae3fa4caaf75318b59d3d2f6c7e9d8 :0 master,fail,noaddr - 1596769266377 1596769157081 77 disconnected 13261-13311 // '13261-13311' is the lost slot range.
+
+2) Add the slot range to the restarted node.
+> flashbase cli -h 192.168.111.35 -p 18317 cluster addslots {13261..13311}
+
+3) Increase the epoch of the node and update the cluster information.
+> flashbase cli -h 192.168.111.35 -p 18317 cluster bumpepoch
+BUMPED 321
+
+4) Remove the noaddr node.
+> flashbase forget-noaddr
+
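+Putting the recovery steps above together, a sketch that simply reuses the host, port and slot range from the example:
+> flashbase find-noaddr
+> flashbase cli -h 192.168.111.35 -p 18317 cluster addslots {13261..13311}
+> flashbase cli -h 192.168.111.35 -p 18317 cluster bumpepoch
+> flashbase forget-noaddr
+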
+1) Redis
+2) yarn & spark
+spark.local.dir of spark-default.conf and restart thriftserver.
+3) Thriftserver
+select * from {table name} where ... limit 1;
+
+4) kafka & kaetlyn
+5) System resources
+Note
+This page describes how to start LightningDB automatically, but only for the case of an AWS EC2 Instance.
+An Amazon Machine Image (AMI) for LightningDB can be found in 'AWS Marketplace', and users can create an EC2 Instance with that AMI.
+To use LightningDB in a new Instance, the size of the root volume should be at least 15 GiB.
+To use the Web UIs of HDFS, YARN, Spark and Zeppelin, you should add the following ports to 'Edit inbound rules' of 'Security groups' in the EC2 Instance.
Service  | Port
---------|------
HDFS     | 50070
YARN     | 8088
Spark    | 4040
Zeppelin | 8080
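The same inbound rules can also be added with the AWS CLI; a sketch, assuming 'sg-xxxxxxxx' is the security group id of the Instance and that {ALLOWED_CIDR} is replaced with an appropriately restricted source range:
$ aws ec2 authorize-security-group-ingress --group-id sg-xxxxxxxx --protocol tcp --port 50070 --cidr {ALLOWED_CIDR}
$ aws ec2 authorize-security-group-ingress --group-id sg-xxxxxxxx --protocol tcp --port 8088 --cidr {ALLOWED_CIDR}
$ aws ec2 authorize-security-group-ingress --group-id sg-xxxxxxxx --protocol tcp --port 4040 --cidr {ALLOWED_CIDR}
$ aws ec2 authorize-security-group-ingress --group-id sg-xxxxxxxx --protocol tcp --port 8080 --cidr {ALLOWED_CIDR}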
Create an EC2 Instance for LightningDB and access it with its 'Public IP' or 'Public DNS'.
+A '*.pem' file is also required to access the EC2 Instance.
+$ ssh -i /path/to/.pem ec2-user@${IP_ADDRESS}
+
+When you access the EC2 Instance, the following jobs have already been done.
+Warning
+Before starting LightningDB, please check that the disk mounts are complete by using 'lsblk' as below.
+[ec2-user@ip-172-31-34-115 ~]$ lsblk
+NAME MAJ:MIN RM SIZE RO TYPE MOUNTPOINT
+xvda 202:0 0 10G 0 disk
+└─xvda1 202:1 0 10G 0 part /
+nvme0n1 259:0 0 1.7T 0 disk /nvme/data_01
+nvme1n1 259:1 0 1.7T 0 disk /nvme/data_02
+nvme3n1 259:2 0 1.7T 0 disk /nvme/data_03
+nvme2n1 259:3 0 1.7T 0 disk /nvme/data_04
+
+Tip
+To launch a Spark application on YARN, start HDFS and YARN by running 'start-dfs.sh' and 'start-yarn.sh' in order, as shown below.
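+For example (a minimal sketch, assuming the Hadoop and YARN sbin scripts are on the PATH of the ec2-user):
+$ start-dfs.sh
+$ start-yarn.sh
+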
+LightningDB provides LTCLI, which is introduced in Installation. With LTCLI, you can deploy and use LightningDB.
+LightningDB supports Zeppelin to make ingesting and querying LightningDB data convenient. The Try out with Zeppelin page provides some guides.
+Ver 2.0.0 CXL-CMS
+Ver 2.0.0
+Ver 1.4.3
+Ver 1.3.1
+Ver 1.2.3
+Ver 1.0
+Copy the link address by right-clicking, and paste it when you deploy LightningDB in LTCLI. ↩
+For installing the leaflet plugin above, please refer to here.
\n
(This may vary depending on the amount of data and the number of nodes.)
' + escapeHtml(summary) +'
' + noResultsText + '
'); + } +} + +function doSearch () { + var query = document.getElementById('mkdocs-search-query').value; + if (query.length > min_search_length) { + if (!window.Worker) { + displayResults(search(query)); + } else { + searchWorker.postMessage({query: query}); + } + } else { + // Clear results for short queries + displayResults([]); + } +} + +function initSearch () { + var search_input = document.getElementById('mkdocs-search-query'); + if (search_input) { + search_input.addEventListener("keyup", doSearch); + } + var term = getSearchTermFromLocation(); + if (term) { + search_input.value = term; + doSearch(); + } +} + +function onWorkerMessage (e) { + if (e.data.allowSearch) { + initSearch(); + } else if (e.data.results) { + var results = e.data.results; + displayResults(results); + } else if (e.data.config) { + min_search_length = e.data.config.min_search_length-1; + } +} + +if (!window.Worker) { + console.log('Web Worker API not supported'); + // load index in main thread + $.getScript(joinUrl(base_url, "search/worker.js")).done(function () { + console.log('Loaded worker'); + init(); + window.postMessage = function (msg) { + onWorkerMessage({data: msg}); + }; + }).fail(function (jqxhr, settings, exception) { + console.error('Could not load worker.js'); + }); +} else { + // Wrap search in a web worker + var searchWorker = new Worker(joinUrl(base_url, "search/worker.js")); + searchWorker.postMessage({init: true}); + searchWorker.onmessage = onWorkerMessage; +} diff --git a/search/search_index.json b/search/search_index.json new file mode 100644 index 0000000..8e61279 --- /dev/null +++ b/search/search_index.json @@ -0,0 +1 @@ +{"config":{"indexing":"full","lang":["en"],"min_search_length":3,"prebuild_index":false,"separator":"[\\s\\-]+"},"docs":[{"location":"","text":"1. LightningDB is \u00b6 A distributed in-memory DBMS for real-time big data analytics Realtime ingestion and analytics for large scale data Advantages in random small data access based on DRAM/SSD resident KV Store Optimized for time series data and geospatial data 2. Architecture \u00b6 Spark with Redis/Rocksdb key-value stores No I/O bottleneck due to redis in DRAM and RocksDB in SSDs due to the small-sized key/value I/O and DRAM/SSDs\u2019 short latency (~200us) Filter predicates push down to redis and only associated partitions are chosen to be scanned 3. Features \u00b6 Ingestion performance (500,000 records/sec/node) Extreme partitioning (up-to 2 billion partitions for a single node) Real-time query performance by using fine-grained partitions and filter acceleration (vector processing by exploiting XEON SIMD instructions) Column-store / row-store support DRAM - SSD - HDD Tiering High compression ratio and compression speed (Gzip level compression ratio w/ LZ4 level speed) Low Write Amplification for SSD lifetime Asynchronous replication with low latency and high performance Node-based scale-out(Adding new nodes and scale out without data rebalancing )","title":"Overview"},{"location":"#1-lightningdb-is","text":"A distributed in-memory DBMS for real-time big data analytics Realtime ingestion and analytics for large scale data Advantages in random small data access based on DRAM/SSD resident KV Store Optimized for time series data and geospatial data","title":"1. 
LightningDB is"},{"location":"#2-architecture","text":"Spark with Redis/Rocksdb key-value stores No I/O bottleneck due to redis in DRAM and RocksDB in SSDs due to the small-sized key/value I/O and DRAM/SSDs\u2019 short latency (~200us) Filter predicates push down to redis and only associated partitions are chosen to be scanned","title":"2. Architecture"},{"location":"#3-features","text":"Ingestion performance (500,000 records/sec/node) Extreme partitioning (up-to 2 billion partitions for a single node) Real-time query performance by using fine-grained partitions and filter acceleration (vector processing by exploiting XEON SIMD instructions) Column-store / row-store support DRAM - SSD - HDD Tiering High compression ratio and compression speed (Gzip level compression ratio w/ LZ4 level speed) Low Write Amplification for SSD lifetime Asynchronous replication with low latency and high performance Node-based scale-out(Adding new nodes and scale out without data rebalancing )","title":"3. Features"},{"location":"awards-recognition/","text":"Awards and Recognition \u00b6 2023 \u00b6 SK TECH SUMMIT 2023 IMAGE\ub97c \uc81c\ub300\ub85c \uc774\ud574\ud558\ub294 \u2018AI\u2019\ub294 \uc5c6\ub098\uc694? (Vision-Language Model\uc744 \ud65c\uc6a9\ud55c SKT\ub9cc\uc758 Vision Data Asset \uad6c\ucd95\uae30) \u00b6 Speaker: Sungho Kim, Jiwon Ryu(SK Telecom) 2022 \u00b6 NVIDIA GTC 22 Vision data warehouse and accelerating the analytics for massive vision data \u00b6 Speaker: Sungho Kim(SK Telecom), Allen Xu(NVIDIA) 2021 \u00b6 NVIDIA GTC 21 Deep-Learning Data-Pipeline Optimization for Network Data Analysis in SK Telecom by Employing Spark Rapids for Custom Data Source \u00b6 Speaker: Dooyoung Hwan(SK Telecom), Thomas Graves(NVIDIA) 2020 \u00b6 Spark AI Summit 2020 Vectorized Deep Learning Acceleration from Preprocessing to Inference and Training on Apache Spark in SK Telecom \u00b6 Speaker: Hongchan Roh(SK Telecom) 2019 \u00b6 Spark AI Summit Europe 2019 Spark AI Usecase in Telco: Network Quality Analysis and Prediction with Geospatial Visualization \u00b6 Speaker: Hongchan Roh, Dooyoung Hwang(SK Telecom)","title":"Awards and Recognition"},{"location":"awards-recognition/#awards-and-recognition","text":"","title":"Awards and Recognition"},{"location":"awards-recognition/#2023","text":"","title":"2023"},{"location":"awards-recognition/#sk-tech-summit-2023-image-ai-vision-language-model-skt-vision-data-asset","text":"Speaker: Sungho Kim, Jiwon Ryu(SK Telecom)","title":" SK TECH SUMMIT 2023 IMAGE\ub97c \uc81c\ub300\ub85c \uc774\ud574\ud558\ub294 \u2018AI\u2019\ub294 \uc5c6\ub098\uc694? 
(Vision-Language Model\uc744 \ud65c\uc6a9\ud55c SKT\ub9cc\uc758 Vision Data Asset \uad6c\ucd95\uae30)"},{"location":"awards-recognition/#2022","text":"","title":"2022"},{"location":"awards-recognition/#nvidia-gtc-22-vision-data-warehouse-and-accelerating-the-analytics-for-massive-vision-data","text":"Speaker: Sungho Kim(SK Telecom), Allen Xu(NVIDIA)","title":" NVIDIA GTC 22 Vision data warehouse and accelerating the analytics for massive vision data"},{"location":"awards-recognition/#2021","text":"","title":"2021"},{"location":"awards-recognition/#nvidia-gtc-21-deep-learning-data-pipeline-optimization-for-network-data-analysis-in-sk-telecom-by-employing-spark-rapids-for-custom-data-source","text":"Speaker: Dooyoung Hwan(SK Telecom), Thomas Graves(NVIDIA)","title":" NVIDIA GTC 21 Deep-Learning Data-Pipeline Optimization for Network Data Analysis in SK Telecom by Employing Spark Rapids for Custom Data Source"},{"location":"awards-recognition/#2020","text":"","title":"2020"},{"location":"awards-recognition/#spark-ai-summit-2020-vectorized-deep-learning-acceleration-from-preprocessing-to-inference-and-training-on-apache-spark-in-sk-telecom","text":"Speaker: Hongchan Roh(SK Telecom)","title":" Spark AI Summit 2020 Vectorized Deep Learning Acceleration from Preprocessing to Inference and Training on Apache Spark in SK Telecom"},{"location":"awards-recognition/#2019","text":"","title":"2019"},{"location":"awards-recognition/#spark-ai-summit-europe-2019-spark-ai-usecase-in-telco-network-quality-analysis-and-prediction-with-geospatial-visualization","text":"Speaker: Hongchan Roh, Dooyoung Hwang(SK Telecom)","title":" Spark AI Summit Europe 2019 Spark AI Usecase in Telco: Network Quality Analysis and Prediction\u00a0with Geospatial Visualization"},{"location":"build-lightningdb-on-k8s/","text":"Build 'LightningDB' (Admin Only) \u00b6 1. LightningDB Source Code(Private Repository) \u00b6 $ git clone https://github.com/mnms/LightningDB 2. Build \u00b6 - v1 \u00b6 Branch: release/flashbase_v1.4.3 Commands: $ ./build.sh compile $ cd nvkvs $ docker build . -t harbor.k8s.lightningdb/ltdb/nvkvs:v1.4.3 $ docker push harbor.k8s.lightningdb/ltdb/nvkvs:v1.4.3 - v2 \u00b6 Branch: release/flashbase_v2.0.0 Commands: $ ./build.sh compile debug $ cd nvkvs $ docker build . 
-t harbor.k8s.lightningdb/ltdb/nvkvs:v2.0.0 $ docker push harbor.k8s.lightningdb/ltdb/nvkvs:v2.0.0 - v2 CXL-CMS \u00b6 Branch: cms-integration Prerequisite(install daxctl): $ yum install -y kmod-devel rubygem-asciidoctor.noarch iniparser-devel.x86_64 meson.noarch // json-c (version: json-c-0.14-20200419) $ git clone https://github.com/json-c/json-c.git $ cd json-c $ git checkout json-c-0.14-20200419 -b json-c-0.14-20200419 $ mkdir json-c-build $ cd json-c-build/ $ cmake ../ $ make -j48 //Required Min Version: v75 (latest version: v78) $ git clone https://github.com/pmem/ndctl $ git checkout v75 -b v75 $ meson setup build; $ meson compile -C build; $ meson install -C build; Commands: $ ./build.sh compile debug // dax-ctl \uc744 \uc774\ubbf8\uc9c0 base \uacbd\ub85c\uc5d0 \uc124\uce58 \ud544\uc694 // \ucef4\ud30c\uc77c \uc791\uc5c5 \ub514\ub809\ud1a0\ub9ac\uac00 \"/root/ldb/LightningDB_v2_cxl/nvkvs/debug/output\"\ub77c \uac00\uc815 // ndctl github \ucef4\ud30c\uc77c \ub514\ub809\ud1a0\ub9ac\ub85c \uc774\ub3d9 $ cd ndctl $ rm -rf build $ meson -Drootprefix=/root/ldb/LightningDB_v2_cxl/nvkvs/debug/output -Dlibdir=/root/ldb/LightningDB_v2_cxl/nvkvs/debug/output/lib build -Dprefix=/root/ldb/LightningDB_v2_cxl/nvkvs/debug/output $ meson compile -C build; $ meson install -C build; $ cd nvkvs $ docker build . -t harbor.k8s.lightningdb/ltdb/nvkvs:v2-cms-integration $ docker push harbor.k8s.lightningdb/ltdb/nvkvs:v2-cms-integration Tip How to use maximum cores to compile (e.g. max cpu core:56) In 'build.sh', use cmake --build . --target install -- -j56 and mvn clean install -DskipTests -P $RELEASE_MODE $MAVEN_OPTS -T 56 Build 'ltdb-http API Server' (Admin Only) \u00b6 1. ltdb-http Source Code(Private Repository) \u00b6 $ git clone https://github.com/mnms/ltdb-http 2. Build \u00b6 - v1 \u00b6 Branch: develop Commands: $ mvn clean package -DskipTests -P release-k8s,dist-k8s,tgz -Dsite-spec=k8s -Dk8s.namespace=metavision $ cd target-k8s $ tar xzvf ltdb-http-1.0-k8s-xxx_xxx.tar.gz $ cd ltdb-http $ docker build . -t harbor.k8s.lightningdb/ltdb/ltdb-http:develop $ docker push harbor.k8s.lightningdb/ltdb/ltdb-http:develop - v2 / v2 CXL-CMS \u00b6 Branch: develop-v2 Commands: $ mvn clean package -DskipTests -P release-k8s,dist-k8s,tgz -Dsite-spec=k8s -Dk8s.namespace=metavision $ cd target-k8s $ tar xzvf ltdb-http-1.0-k8s-xxx_xxx.tar.gz $ cd ltdb-http $ docker build . -t harbor.k8s.lightningdb/ltdb/ltdb-http:develop-v2 $ docker push harbor.k8s.lightningdb/ltdb/ltdb-http:develop-v2 Build 'Thunderquery API Server' (Admin Only) \u00b6 1. Thunderquery Source Code(Private Repository) \u00b6 $ git clone https://github.com/mnms/thunderquery_api $ git clone https://github.com/mnms/thunderquery-cli 2. Build \u00b6 Branch: develop Prerequisite(install musl-gcc): $ yum install -y kmod-devel rubygem-asciidoctor.noarch iniparser-devel.x86_64 meson.noarch $ vi /etc/yum.repos.d/cert-forensics-tools.repo [cert-forensics-tools] name=Cert Forensics Tools Repository baseurl=https://forensics.cert.org/centos/cert/8/x86_64/ enabled=1 gpgcheck=1 gpgkey=https://forensics.cert.org/forensics.asc $ yum clean all $ yum makecache $ yum install musl-gcc.x86_64 Register public key to github $ cat ~/.ssh/id_rsa.pub Command: $ vi ~/.cargo/config.toml [net] git-fetch-with-cli = true $ cd thunderquery_api $ cargo install --path . --target=x86_64-unknown-linux-musl $ cd thunderquery-cli $ cargo install --path . 
--target=x86_64-unknown-linux-musl $ cd thunderquery_api ## thunderquery-cli binary \ub97c api \ub514\ub809\ud1a0\ub9ac\ub85c \ubcf5\uc0ac ## $ cp ../thunderquery-cli/target/x86_64-unknown-linux-musl/release/thunderquery-cli target/x86_64-unknown-linux-musl/release $ docker build . -t harbor.k8s.lightningdb/ltdb/thunderquery_api:develop $ docker push harbor.k8s.lightningdb/ltdb/thunderquery_api:develop","title":"Build LightningDB(Admin Only)"},{"location":"build-lightningdb-on-k8s/#build-lightningdb-admin-only","text":"","title":"Build 'LightningDB' (Admin Only)"},{"location":"build-lightningdb-on-k8s/#1-lightningdb-source-codeprivate-repository","text":"$ git clone https://github.com/mnms/LightningDB","title":"1. LightningDB Source Code(Private Repository)"},{"location":"build-lightningdb-on-k8s/#2-build","text":"","title":"2. Build"},{"location":"build-lightningdb-on-k8s/#-v1","text":"Branch: release/flashbase_v1.4.3 Commands: $ ./build.sh compile $ cd nvkvs $ docker build . -t harbor.k8s.lightningdb/ltdb/nvkvs:v1.4.3 $ docker push harbor.k8s.lightningdb/ltdb/nvkvs:v1.4.3","title":"- v1"},{"location":"build-lightningdb-on-k8s/#-v2","text":"Branch: release/flashbase_v2.0.0 Commands: $ ./build.sh compile debug $ cd nvkvs $ docker build . -t harbor.k8s.lightningdb/ltdb/nvkvs:v2.0.0 $ docker push harbor.k8s.lightningdb/ltdb/nvkvs:v2.0.0","title":"- v2"},{"location":"build-lightningdb-on-k8s/#-v2-cxl-cms","text":"Branch: cms-integration Prerequisite(install daxctl): $ yum install -y kmod-devel rubygem-asciidoctor.noarch iniparser-devel.x86_64 meson.noarch // json-c (version: json-c-0.14-20200419) $ git clone https://github.com/json-c/json-c.git $ cd json-c $ git checkout json-c-0.14-20200419 -b json-c-0.14-20200419 $ mkdir json-c-build $ cd json-c-build/ $ cmake ../ $ make -j48 //Required Min Version: v75 (latest version: v78) $ git clone https://github.com/pmem/ndctl $ git checkout v75 -b v75 $ meson setup build; $ meson compile -C build; $ meson install -C build; Commands: $ ./build.sh compile debug // dax-ctl \uc744 \uc774\ubbf8\uc9c0 base \uacbd\ub85c\uc5d0 \uc124\uce58 \ud544\uc694 // \ucef4\ud30c\uc77c \uc791\uc5c5 \ub514\ub809\ud1a0\ub9ac\uac00 \"/root/ldb/LightningDB_v2_cxl/nvkvs/debug/output\"\ub77c \uac00\uc815 // ndctl github \ucef4\ud30c\uc77c \ub514\ub809\ud1a0\ub9ac\ub85c \uc774\ub3d9 $ cd ndctl $ rm -rf build $ meson -Drootprefix=/root/ldb/LightningDB_v2_cxl/nvkvs/debug/output -Dlibdir=/root/ldb/LightningDB_v2_cxl/nvkvs/debug/output/lib build -Dprefix=/root/ldb/LightningDB_v2_cxl/nvkvs/debug/output $ meson compile -C build; $ meson install -C build; $ cd nvkvs $ docker build . -t harbor.k8s.lightningdb/ltdb/nvkvs:v2-cms-integration $ docker push harbor.k8s.lightningdb/ltdb/nvkvs:v2-cms-integration Tip How to use maximum cores to compile (e.g. max cpu core:56) In 'build.sh', use cmake --build . --target install -- -j56 and mvn clean install -DskipTests -P $RELEASE_MODE $MAVEN_OPTS -T 56","title":"- v2 CXL-CMS"},{"location":"build-lightningdb-on-k8s/#build-ltdb-http-api-server-admin-only","text":"","title":"Build 'ltdb-http API Server' (Admin Only)"},{"location":"build-lightningdb-on-k8s/#1-ltdb-http-source-codeprivate-repository","text":"$ git clone https://github.com/mnms/ltdb-http","title":"1. ltdb-http Source Code(Private Repository)"},{"location":"build-lightningdb-on-k8s/#2-build_1","text":"","title":"2. 
Build"},{"location":"build-lightningdb-on-k8s/#-v1_1","text":"Branch: develop Commands: $ mvn clean package -DskipTests -P release-k8s,dist-k8s,tgz -Dsite-spec=k8s -Dk8s.namespace=metavision $ cd target-k8s $ tar xzvf ltdb-http-1.0-k8s-xxx_xxx.tar.gz $ cd ltdb-http $ docker build . -t harbor.k8s.lightningdb/ltdb/ltdb-http:develop $ docker push harbor.k8s.lightningdb/ltdb/ltdb-http:develop","title":"- v1"},{"location":"build-lightningdb-on-k8s/#-v2-v2-cxl-cms","text":"Branch: develop-v2 Commands: $ mvn clean package -DskipTests -P release-k8s,dist-k8s,tgz -Dsite-spec=k8s -Dk8s.namespace=metavision $ cd target-k8s $ tar xzvf ltdb-http-1.0-k8s-xxx_xxx.tar.gz $ cd ltdb-http $ docker build . -t harbor.k8s.lightningdb/ltdb/ltdb-http:develop-v2 $ docker push harbor.k8s.lightningdb/ltdb/ltdb-http:develop-v2","title":"- v2 / v2 CXL-CMS"},{"location":"build-lightningdb-on-k8s/#build-thunderquery-api-server-admin-only","text":"","title":"Build 'Thunderquery API Server' (Admin Only)"},{"location":"build-lightningdb-on-k8s/#1-thunderquery-source-codeprivate-repository","text":"$ git clone https://github.com/mnms/thunderquery_api $ git clone https://github.com/mnms/thunderquery-cli","title":"1. Thunderquery Source Code(Private Repository)"},{"location":"build-lightningdb-on-k8s/#2-build_2","text":"Branch: develop Prerequisite(install musl-gcc): $ yum install -y kmod-devel rubygem-asciidoctor.noarch iniparser-devel.x86_64 meson.noarch $ vi /etc/yum.repos.d/cert-forensics-tools.repo [cert-forensics-tools] name=Cert Forensics Tools Repository baseurl=https://forensics.cert.org/centos/cert/8/x86_64/ enabled=1 gpgcheck=1 gpgkey=https://forensics.cert.org/forensics.asc $ yum clean all $ yum makecache $ yum install musl-gcc.x86_64 Register public key to github $ cat ~/.ssh/id_rsa.pub Command: $ vi ~/.cargo/config.toml [net] git-fetch-with-cli = true $ cd thunderquery_api $ cargo install --path . --target=x86_64-unknown-linux-musl $ cd thunderquery-cli $ cargo install --path . --target=x86_64-unknown-linux-musl $ cd thunderquery_api ## thunderquery-cli binary \ub97c api \ub514\ub809\ud1a0\ub9ac\ub85c \ubcf5\uc0ac ## $ cp ../thunderquery-cli/target/x86_64-unknown-linux-musl/release/thunderquery-cli target/x86_64-unknown-linux-musl/release $ docker build . -t harbor.k8s.lightningdb/ltdb/thunderquery_api:develop $ docker push harbor.k8s.lightningdb/ltdb/thunderquery_api:develop","title":"2. Build"},{"location":"cli-cli/","text":"1. ping \u00b6 You can use ping command to check the status of the nodes. Options All nodes cli ping --all A single node cli ping {hostname} {port} Examples matthew@lightningdb:21> cli ping --all alive redis 12/12 matthew@lightningdb:21> cli ping myServer 20101 PONG 2. config \u00b6 You can read or write the configuration values of the current cluster. Options Read All nodes cli config get {feature name} --all A sing node cli config get -h {hostname} -p {port} Write All nodes cli config set {feature name} {value} --all A sing node cli config set {feature name} {value} -h {hostname} -p {port} Examples Read and write the configuration value of all nodes. 
matthew@lightningdb:21> cli config get maxmemory --all +--------+----------------------+--------+ | TYPE | ADDR | RESULT | +--------+----------------------+--------+ | Master | 192.168.111.41:20100 | 300mb | | Master | 192.168.111.41:20101 | 300mb | | Master | 192.168.111.41:20102 | 300mb | | Master | 192.168.111.44:20100 | 300mb | | Master | 192.168.111.44:20101 | 300mb | | Master | 192.168.111.44:20102 | 300mb | | Slave | 192.168.111.41:20150 | 300mb | | Slave | 192.168.111.41:20151 | 300mb | | Slave | 192.168.111.41:20152 | 300mb | | Slave | 192.168.111.44:20150 | 300mb | | Slave | 192.168.111.44:20151 | 300mb | | Slave | 192.168.111.44:20152 | 300mb | +--------+----------------------+--------+ matthew@lightningdb:21> cli config set maxmemory 500mb --all success 12/12 matthew@lightningdb:21> cli config get maxmemory --all +--------+----------------------+--------+ | TYPE | ADDR | RESULT | +--------+----------------------+--------+ | Master | 192.168.111.41:20100 | 500mb | | Master | 192.168.111.41:20101 | 500mb | | Master | 192.168.111.41:20102 | 500mb | | Master | 192.168.111.44:20100 | 500mb | | Master | 192.168.111.44:20101 | 500mb | | Master | 192.168.111.44:20102 | 500mb | | Slave | 192.168.111.41:20150 | 500mb | | Slave | 192.168.111.41:20151 | 500mb | | Slave | 192.168.111.41:20152 | 500mb | | Slave | 192.168.111.44:20150 | 500mb | | Slave | 192.168.111.44:20151 | 500mb | | Slave | 192.168.111.44:20152 | 500mb | +--------+----------------------+--------+ Read and write the configuration value of a single node. matthew@lightningdb:21> cli config get maxmemory -h myServer -p 20101 500mb matthew@lightningdb:21> cli config set maxmemory 300mb -h myServer -p 20101 OK matthew@lightningdb:21> cli config get maxmemory -h myServer -p 20101 300mb matthew@lightningdb:21> 3. cluster info \u00b6 You can get the information and stats of the current cluster. matthew@lightningdb:21> cli cluster info cluster_state:ok cluster_slots_assigned:16384 cluster_slots_ok:16384 cluster_slots_pfail:0 cluster_slots_fail:0 cluster_known_nodes:12 cluster_size:6 cluster_current_epoch:14 cluster_my_epoch:6 cluster_stats_messages_ping_sent:953859 cluster_stats_messages_pong_sent:917798 cluster_stats_messages_meet_sent:10 cluster_stats_messages_sent:1871667 cluster_stats_messages_ping_received:917795 cluster_stats_messages_pong_received:951370 cluster_stats_messages_meet_received:3 cluster_stats_messages_received:1869168 4. cluster nodes \u00b6 You can get the distribution and status of each node. 
matthew@lightningdb:21> cli cluster nodes 4b8fe9d135670daabe19437e3b840b1c770ffa2f 192.168.111.44:20151 slave 985a2215d2acb3f1612751a13e0d7466d874cfe5 0 1604891127367 10 connected 4dd5dff5008ccd89cf18faef736fe6492eb34d05 192.168.111.41:20152 slave 9bff873f9f5f84cd3b78288524230b5cd1c6190f 0 1604891128000 8 connected 15b3c06c1edeb5d2eeb6c0f35c9f27cf616acd11 192.168.111.44:20101 myself,slave 4b6bc980b33dd1eecc87babfb5762eda9e7921e7 0 1604891118000 13 connected 8a800fbf3518e1a0e6b332516455ef4aa6bb3be9 192.168.111.41:20100 master - 0 1604891130372 1 connected 0-2730 9bff873f9f5f84cd3b78288524230b5cd1c6190f 192.168.111.44:20102 master - 0 1604891126000 6 connected 8193-10923 60f88a9db445997112cf8947931988152767878f 192.168.111.44:20152 slave 974c0540741d89c7569b63345faa852361043e8b 0 1604891122000 11 connected 985a2215d2acb3f1612751a13e0d7466d874cfe5 192.168.111.41:20101 master - 0 1604891125365 5 connected 2731-5461 85de73ca2aa668a79fe5636ec74e68dee6f9b36a 192.168.111.44:20100 master - 0 1604891129371 4 connected 13654-16383 974c0540741d89c7569b63345faa852361043e8b 192.168.111.41:20102 master - 0 1604891124363 2 connected 5462-8192 9c6aef212b6d68d2a0298c1902629e1fdc95f943 192.168.111.41:20150 slave 85de73ca2aa668a79fe5636ec74e68dee6f9b36a 0 1604891128370 4 connected 474303b3b9e9f7b84b157ecf52ce11e153a28716 192.168.111.44:20150 slave 8a800fbf3518e1a0e6b332516455ef4aa6bb3be9 0 1604891126366 13 connected 4b6bc980b33dd1eecc87babfb5762eda9e7921e7 192.168.111.41:20151 master - 0 1604891131375 14 connected 10924-13653 5. cluster slots \u00b6 You can get the slot information. matthew@lightningdb:21> cli cluster slots +-------+-------+----------------+--------+----------------+----------+ | start | end | m_ip | m_port | s_ip_0 | s_port_0 | +-------+-------+----------------+--------+----------------+----------+ | 0 | 2730 | 192.168.111.41 | 20100 | 192.168.111.44 | 20150 | | 2731 | 5461 | 192.168.111.41 | 20101 | 192.168.111.44 | 20151 | | 5462 | 8192 | 192.168.111.41 | 20102 | 192.168.111.44 | 20152 | | 8193 | 10923 | 192.168.111.44 | 20102 | 192.168.111.41 | 20152 | | 10924 | 13653 | 192.168.111.41 | 20151 | 192.168.111.44 | 20101 | | 13654 | 16383 | 192.168.111.44 | 20100 | 192.168.111.41 | 20150 | +-------+-------+----------------+--------+----------------+----------+","title":"Redis3 cli (LightningDB v1.x)"},{"location":"cli-cli/#1-ping","text":"You can use ping command to check the status of the nodes. Options All nodes cli ping --all A single node cli ping {hostname} {port} Examples matthew@lightningdb:21> cli ping --all alive redis 12/12 matthew@lightningdb:21> cli ping myServer 20101 PONG","title":"1. ping"},{"location":"cli-cli/#2-config","text":"You can read or write the configuration values of the current cluster. Options Read All nodes cli config get {feature name} --all A sing node cli config get -h {hostname} -p {port} Write All nodes cli config set {feature name} {value} --all A sing node cli config set {feature name} {value} -h {hostname} -p {port} Examples Read and write the configuration value of all nodes. 
matthew@lightningdb:21> cli config get maxmemory --all +--------+----------------------+--------+ | TYPE | ADDR | RESULT | +--------+----------------------+--------+ | Master | 192.168.111.41:20100 | 300mb | | Master | 192.168.111.41:20101 | 300mb | | Master | 192.168.111.41:20102 | 300mb | | Master | 192.168.111.44:20100 | 300mb | | Master | 192.168.111.44:20101 | 300mb | | Master | 192.168.111.44:20102 | 300mb | | Slave | 192.168.111.41:20150 | 300mb | | Slave | 192.168.111.41:20151 | 300mb | | Slave | 192.168.111.41:20152 | 300mb | | Slave | 192.168.111.44:20150 | 300mb | | Slave | 192.168.111.44:20151 | 300mb | | Slave | 192.168.111.44:20152 | 300mb | +--------+----------------------+--------+ matthew@lightningdb:21> cli config set maxmemory 500mb --all success 12/12 matthew@lightningdb:21> cli config get maxmemory --all +--------+----------------------+--------+ | TYPE | ADDR | RESULT | +--------+----------------------+--------+ | Master | 192.168.111.41:20100 | 500mb | | Master | 192.168.111.41:20101 | 500mb | | Master | 192.168.111.41:20102 | 500mb | | Master | 192.168.111.44:20100 | 500mb | | Master | 192.168.111.44:20101 | 500mb | | Master | 192.168.111.44:20102 | 500mb | | Slave | 192.168.111.41:20150 | 500mb | | Slave | 192.168.111.41:20151 | 500mb | | Slave | 192.168.111.41:20152 | 500mb | | Slave | 192.168.111.44:20150 | 500mb | | Slave | 192.168.111.44:20151 | 500mb | | Slave | 192.168.111.44:20152 | 500mb | +--------+----------------------+--------+ Read and write the configuration value of a single node. matthew@lightningdb:21> cli config get maxmemory -h myServer -p 20101 500mb matthew@lightningdb:21> cli config set maxmemory 300mb -h myServer -p 20101 OK matthew@lightningdb:21> cli config get maxmemory -h myServer -p 20101 300mb matthew@lightningdb:21>","title":"2. config"},{"location":"cli-cli/#3-cluster-info","text":"You can get the information and stats of the current cluster. matthew@lightningdb:21> cli cluster info cluster_state:ok cluster_slots_assigned:16384 cluster_slots_ok:16384 cluster_slots_pfail:0 cluster_slots_fail:0 cluster_known_nodes:12 cluster_size:6 cluster_current_epoch:14 cluster_my_epoch:6 cluster_stats_messages_ping_sent:953859 cluster_stats_messages_pong_sent:917798 cluster_stats_messages_meet_sent:10 cluster_stats_messages_sent:1871667 cluster_stats_messages_ping_received:917795 cluster_stats_messages_pong_received:951370 cluster_stats_messages_meet_received:3 cluster_stats_messages_received:1869168","title":"3. cluster info"},{"location":"cli-cli/#4-cluster-nodes","text":"You can get the distribution and status of each node. 
matthew@lightningdb:21> cli cluster nodes 4b8fe9d135670daabe19437e3b840b1c770ffa2f 192.168.111.44:20151 slave 985a2215d2acb3f1612751a13e0d7466d874cfe5 0 1604891127367 10 connected 4dd5dff5008ccd89cf18faef736fe6492eb34d05 192.168.111.41:20152 slave 9bff873f9f5f84cd3b78288524230b5cd1c6190f 0 1604891128000 8 connected 15b3c06c1edeb5d2eeb6c0f35c9f27cf616acd11 192.168.111.44:20101 myself,slave 4b6bc980b33dd1eecc87babfb5762eda9e7921e7 0 1604891118000 13 connected 8a800fbf3518e1a0e6b332516455ef4aa6bb3be9 192.168.111.41:20100 master - 0 1604891130372 1 connected 0-2730 9bff873f9f5f84cd3b78288524230b5cd1c6190f 192.168.111.44:20102 master - 0 1604891126000 6 connected 8193-10923 60f88a9db445997112cf8947931988152767878f 192.168.111.44:20152 slave 974c0540741d89c7569b63345faa852361043e8b 0 1604891122000 11 connected 985a2215d2acb3f1612751a13e0d7466d874cfe5 192.168.111.41:20101 master - 0 1604891125365 5 connected 2731-5461 85de73ca2aa668a79fe5636ec74e68dee6f9b36a 192.168.111.44:20100 master - 0 1604891129371 4 connected 13654-16383 974c0540741d89c7569b63345faa852361043e8b 192.168.111.41:20102 master - 0 1604891124363 2 connected 5462-8192 9c6aef212b6d68d2a0298c1902629e1fdc95f943 192.168.111.41:20150 slave 85de73ca2aa668a79fe5636ec74e68dee6f9b36a 0 1604891128370 4 connected 474303b3b9e9f7b84b157ecf52ce11e153a28716 192.168.111.44:20150 slave 8a800fbf3518e1a0e6b332516455ef4aa6bb3be9 0 1604891126366 13 connected 4b6bc980b33dd1eecc87babfb5762eda9e7921e7 192.168.111.41:20151 master - 0 1604891131375 14 connected 10924-13653","title":"4. cluster nodes"},{"location":"cli-cli/#5-cluster-slots","text":"You can get the slot information. matthew@lightningdb:21> cli cluster slots +-------+-------+----------------+--------+----------------+----------+ | start | end | m_ip | m_port | s_ip_0 | s_port_0 | +-------+-------+----------------+--------+----------------+----------+ | 0 | 2730 | 192.168.111.41 | 20100 | 192.168.111.44 | 20150 | | 2731 | 5461 | 192.168.111.41 | 20101 | 192.168.111.44 | 20151 | | 5462 | 8192 | 192.168.111.41 | 20102 | 192.168.111.44 | 20152 | | 8193 | 10923 | 192.168.111.44 | 20102 | 192.168.111.41 | 20152 | | 10924 | 13653 | 192.168.111.41 | 20151 | 192.168.111.44 | 20101 | | 13654 | 16383 | 192.168.111.44 | 20100 | 192.168.111.41 | 20150 | +-------+-------+----------------+--------+----------------+----------+","title":"5. cluster slots"},{"location":"cli-cli2/","text":"Note By default, we support all of the features provided in LightningDB v1.x, and we only point you to the ones that have been added and changed. 1. 
createTable \u00b6 Command \"TABLE.META.WRITE\" \"createTable\" \"catalog name\" \"namespace name\" \"table name\" \"schema binary\" Examples 127.0.0.1:7389> help \"TABLE.META.WRITE\" \"createTable\" TABLE.META.WRITE createTable catalog.namespace.table arrow::schema summary: Create a new table since: 2.0.0 group: table.meta 127.0.0.1:7389> \"TABLE.META.WRITE\" \"createTable\" \"cat_1.test.table\" \"\\x10\\x00\\x00\\x00\\x00\\x00\\n\\x00\\x0e\\x00\\x06\\x00\\r\\x00\\b\\x00\\n\\x00\\x00\\x00\\x00\\x00\\x04\\x00\\x10\\x00\\x00\\x00\\x00\\x01\\n\\x00\\x0c\\x00\\x00\\x00\\b\\x00\\x04\\x00\\n\\x00\\x00\\x00\\b\\x00\\x00\\x00\\xc4\\x01\\x00\\x00\\t\\x00\\x00\\x00\\x80\\x01\\x00\\x00D\\x01\\x00\\x00\\x18\\x01\\x00\\x00\\xec\\x00\\x00\\x00\\xc0\\x00\\x00\\x00\\x98\\x00\\x00\\x00h\\x00\\x00\\x00@\\x00\\x00\\x00\\x04\\x00\\x00\\x00\\xac\\xfe\\xff\\xff\\b\\x00\\x00\\x00\\x18\\x00\\x00\\x00\\x0e\\x00\\x00\\x00127.0.0.1:7389\\x00\\x00\\x13\\x00\\x00\\x00properties.location\\x00\\xe4\\xfe\\xff\\xff\\b\\x00\\x00\\x00\\x0c\\x00\\x00\\x00\\x03\\x00\\x00\\x00job\\x00\\x0b\\x00\\x00\\x00partition.1\\x00\\b\\xff\\xff\\xff\\b\\x00\\x00\\x00\\x0c\\x00\\x00\\x00\\x01\\x00\\x00\\x001\\x00\\x00\\x00\\x10\\x00\\x00\\x00internal.version\\x00\\x00\\x00\\x004\\xff\\xff\\xff\\b\\x00\\x00\\x00\\x0c\\x00\\x00\\x00\\x03\\x00\\x00\\x00age\\x00\\x0b\\x00\\x00\\x00partition.0\\x00X\\xff\\xff\\xff\\b\\x00\\x00\\x00\\x0c\\x00\\x00\\x00\\x01\\x00\\x00\\x002\\x00\\x00\\x00\\x0e\\x00\\x00\\x00partition.size\\x00\\x00\\x80\\xff\\xff\\xff\\b\\x00\\x00\\x00\\x0c\\x00\\x00\\x00\\x03\\x00\\x00\\x00512\\x00\\x0c\\x00\\x00\\x00cva.capacity\\x00\\x00\\x00\\x00\\xa8\\xff\\xff\\xff\\b\\x00\\x00\\x00\\x0c\\x00\\x00\\x00\\x02\\x00\\x00\\x0024\\x00\\x00\\x0e\\x00\\x00\\x00properties.ttl\\x00\\x00\\xd0\\xff\\xff\\xff\\b\\x00\\x00\\x00\\x10\\x00\\x00\\x00\\x04\\x00\\x00\\x002560\\x00\\x00\\x00\\x00\\x11\\x00\\x00\\x00rowgroup.capacity\\x00\\x00\\x00\\b\\x00\\x0c\\x00\\b\\x00\\x04\\x00\\b\\x00\\x00\\x00\\b\\x00\\x00\\x00\\x18\\x00\\x00\\x00\\x0e\\x00\\x00\\x00127.0.0.1:7379\\x00\\x00\\x14\\x00\\x00\\x00properties.metastore\\x00\\x00\\x00\\x00\\x03\\x00\\x00\\x00\\x88\\x00\\x00\\x004\\x00\\x00\\x00\\x04\\x00\\x00\\x00\\x96\\xff\\xff\\xff\\x14\\x00\\x00\\x00\\x14\\x00\\x00\\x00\\x14\\x00\\x00\\x00\\x00\\x00\\x05\\x01\\x10\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x84\\xff\\xff\\xff\\x03\\x00\\x00\\x00job\\x00\\xc2\\xff\\xff\\xff\\x14\\x00\\x00\\x00\\x14\\x00\\x00\\x00\\x1c\\x00\\x00\\x00\\x00\\x00\\x02\\x01 \\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\b\\x00\\x0c\\x00\\b\\x00\\a\\x00\\b\\x00\\x00\\x00\\x00\\x00\\x00\\x01 \\x00\\x00\\x00\\x03\\x00\\x00\\x00age\\x00\\x00\\x00\\x12\\x00\\x18\\x00\\x14\\x00\\x13\\x00\\x12\\x00\\x0c\\x00\\x00\\x00\\b\\x00\\x04\\x00\\x12\\x00\\x00\\x00\\x14\\x00\\x00\\x00\\x14\\x00\\x00\\x00\\x18\\x00\\x00\\x00\\x00\\x00\\x05\\x01\\x14\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x04\\x00\\x04\\x00\\x04\\x00\\x00\\x00\\x04\\x00\\x00\\x00name\\x00\\x00\\x00\\x00\" 2. 
truncateTable \u00b6 Command \"TABLE.META.WRITE\" \"truncateTable\" \"{catalog name}.{namespace name}.{table name}\" Examples 127.0.0.1:7389> help \"TABLE.META.WRITE\" \"truncateTable\" TABLE.META.WRITE truncateTable catalog.namespace.table summary: Truncate the table(Remove all data in the table) since: 2.0.0 group: table.meta 127.0.0.1:7389> 127.0.0.1:7389> TABLE.DATA.READ partitions \"cat_1.test.table\" \"*\" 1) \"21\\x1eSales Manager\" 2) \"22\\x1eTutor\" 3) \"23\\x1eBanker\" 4) \"23\\x1eProfessor\" 5) \"23\\x1eSales Manager\" 6) \"24\\x1eStudent\" 7) \"26\\x1eStudent\" 8) \"27\\x1eSales Manager\" 9) \"29\\x1eBanker\" 10) \"29\\x1eProfessor\" 11) \"32\\x1eProfessor\" 12) \"32\\x1eSales Manager\" 13) \"33\\x1eProfessor\" 14) \"36\\x1eProfessor\" 15) \"41\\x1eBanker\" 16) \"43\\x1eSales Manager\" 17) \"45\\x1eBanker\" 18) \"47\\x1eBanker\" 19) \"48\\x1eCEO\" 127.0.0.1:7389> TABLE.META.WRITE truncateTable \"cat_1.test.table\" \"OK\" 127.0.0.1:7389> TABLE.DATA.READ partitions \"cat_1.test.table\" \"*\" (empty list or set) 3. dropTable \u00b6 Command \"TABLE.META.WRITE\" \"dropTable\" \"{catalog name}.{namespace name}.{table name}\" Examples 127.0.0.1:7389> help \"TABLE.META.WRITE\" \"dropTable\" TABLE.META.WRITE dropTable catalog.namespace.table summary: Drop the table(Remove all data and the schema) since: 2.0.0 group: table.meta 127.0.0.1:7389> 127.0.0.1:7389> TABLE.META.READ showTables 1) \"cat_1.test.table\" 2) \"version: 1\" 127.0.0.1:7389> TABLE.META.WRITE dropTable \"cat_1.test.table\" \"OK\" 127.0.0.1:7389> TABLE.META.READ showTables (empty list or set) 4. dropAllTables \u00b6 Command \"TABLE.META.WRITE\" \"dropAllTables\" Examples 127.0.0.1:7389> help \"TABLE.META.WRITE\" \"dropAllTables\" TABLE.META.WRITE dropAllTables - summary: Drop all tables since: 2.0.0 group: table.meta 127.0.0.1:7389> 127.0.0.1:7389> TABLE.META.READ showTables 1) \"cat_1.test.table\" 2) \"version: 1\" 127.0.0.1:7389> TABLE.META.WRITE dropAllTables 1 tables are deleted. 5. setTableTtl \u00b6 Command \"TABLE.META.WRITE\" \"settablettl\" \"{catalog name}.{namespace name}.{table name}\" \"{ttl time(unit: msec)}\" Example 127.0.0.1:7389> help \"TABLE.META.WRITE\" \"seTtableTtl\" TABLE.META.WRITE setTableTtl catalog.namespace.table ttl(msec) summary: Set the ttl of the table since: 2.0.0 group: table.meta 127.0.0.1:7389> TABLE.META.WRITE setTableTtl \"cat_1.test.table\" 30000 OK 6. showTables \u00b6 Command \"TABLE.META.READ\" \"showTables\" Examples 127.0.0.1:7389> help TABLE.META.READ showTables TABLE.META.READ showTables - summary: Get the list of tables with their own version since: 2.0.0 group: table.meta 127.0.0.1:7389> 127.0.0.1:7389> TABLE.META.READ showTables 1) \"cat_1.test.table\" 2) \"version: 1\" 7. describeTable \u00b6 Command \"TABLE.META.READ\" \"describeTable\" \"table name\" Examples 127.0.0.1:7389> help TABLE.META.READ describeTables TABLE.META.READ describeTables catalog.namespace.table summary: Get all columns and partitions of the table since: 2.0.0 group: table.meta 127.0.0.1:7389> 127.0.0.1:7389> TABLE.META.READ showTables 1) \"cat_1.test.table\" 2) \"version: 1\" 127.0.0.1:7389> TABLE.META.READ describeTables \"cat_1.test.table\" 1) \"name: string\" 2) \"age: int32\" 3) \"job: string\" 4) \"[ partitions: age job ]\" 8. 
getTableTtl \u00b6 Command \"TABLE.META.READ\" gettablettl \"{catalog name}.{namespace name}.{table name}\" Examples 127.0.0.1:7389> help TABLE.META.READ getTableTtl TABLE.META.READ getTableTtl catalog.namespace.table summary: Get the ttl of the table since: 2.0.0 group: table.meta 127.0.0.1:7389> TABLE.META.READ getTableTtl * 1) \"cat_1.test.network_table\" 2) \"86400000\" 3) \"cat_1.test.table\" 4) \"86400000\" 127.0.0.1:7389> TABLE.META.READ getTableTtl cat_1.* 1) \"cat_1.test.network_table\" 2) \"86400000\" 3) \"cat_1.test.table\" 4) \"86400000\" 127.0.0.1:7389> TABLE.META.READ getTableTtl *.network_table 1) \"cat_1.test.network_table\" 2) \"86400000\" 127.0.0.1:7389> TABLE.META.READ getTableTtl cat_1.test.network_table 1) \"cat_1.test.network_table\" 2) \"86400000\" 127.0.0.1:7389> 9. getPartitionTtl \u00b6 Command \"TABLE.META.READ\" getPartitionTtl \"{catalog name}.{namespace name}.{table name}\" \"partition string with regular expression\" Examples 127.0.0.1:7389> help TABLE.META.READ getPartitionTtl TABLE.META.READ getPartitionTtl partition-string summary: Get the ttl of the partition in the table since: 2.0.0 group: table.meta 127.0.0.1:7389> TABLE.META.READ getPartitionTtl \"cat_1.test.table\" \"*\" 1) \"21\\x1eSales Manager\" 2) \"86350123\" 3) \"22\\x1eTutor\" 4) \"86350139\" 5) \"23\\x1eBanker\" 6) \"86350126\" 7) \"23\\x1eProfessor\" 8) \"86350125\" 9) \"23\\x1eSales Manager\" 10) \"86350137\" 11) \"24\\x1eStudent\" 12) \"86350121\" 13) \"26\\x1eStudent\" 14) \"86350124\" 15) \"27\\x1eSales Manager\" 16) \"86350132\" 17) \"29\\x1eBanker\" 18) \"86350124\" 19) \"29\\x1eProfessor\" 20) \"86350125\" 21) \"32\\x1eProfessor\" 22) \"86350127\" 23) \"32\\x1eSales Manager\" 24) \"86350123\" 25) \"33\\x1eProfessor\" 26) \"86350120\" 27) \"36\\x1eProfessor\" 28) \"86350134\" 29) \"40\\x1eBanker\" 30) \"86350119\" 31) \"41\\x1eBanker\" 32) \"86350120\" 33) \"43\\x1eSales Manager\" 34) \"86350133\" 35) \"45\\x1eBanker\" 36) \"86350128\" 37) \"47\\x1eBanker\" 38) \"86350124\" 39) \"48\\x1eCEO\" 40) \"86350138\" 127.0.0.1:7389> TABLE.META.READ getPartitionTtl \"cat_1.test.table\" \"23*\" 1) \"23\\x1eBanker\" 2) \"86343642\" 3) \"23\\x1eProfessor\" 4) \"86343641\" 5) \"23\\x1eSales Manager\" 6) \"86343653\" 127.0.0.1:7389> TABLE.META.READ getPartitionTtl \"cat_1.test.table\" \"*CEO\" 1) \"48\\x1eCEO\" 2) \"86336153\" 127.0.0.1:7389> TABLE.META.READ getPartitionTtl \"cat_1.test.table\" \"45\\x1eBanker\" 1) \"45\\x1eBanker\" 2) \"86324848\" 127.0.0.1:7389> 10. insert \u00b6 - Command - \"TABLE.DATA.WRITE\" \"Insert\" \"{catalog name}.{namespace name}.{table name}\" \"table version\" \"partition string\" \"binaries... 
...\" - Examples 127.0.0.1:7389> help \"TABLE.DATA.WRITE\" \"Insert\" TABLE.DATA.WRITE insert catalog.namespace.table table-version partition-string data summary: Insert a new data(row) since: 2.0.0 group: table.data 1636425657.602951 [0 127.0.0.1:53881] \"TABLE.DATA.WRITE\" \"Insert\" \"cat_1.test.table\" \"1\" \"40\\x1eBanker\" \"Jeannie\" \"(\\x00\\x00\\x00\" \"Banker\" 1636425657.604043 [0 127.0.0.1:53881] \"TABLE.DATA.WRITE\" \"Insert\" \"cat_1.test.table\" \"1\" \"33\\x1eProfessor\" \"Ardith\" \"!\\x00\\x00\\x00\" \"Professor\" 1636425657.604529 [0 127.0.0.1:53881] \"TABLE.DATA.WRITE\" \"Insert\" \"cat_1.test.table\" \"1\" \"41\\x1eBanker\" \"Elena\" \")\\x00\\x00\\x00\" \"Banker\" 1636425657.605351 [0 127.0.0.1:53881] \"TABLE.DATA.WRITE\" \"Insert\" \"cat_1.test.table\" \"1\" \"24\\x1eStudent\" \"Corliss\" \"\\x18\\x00\\x00\\x00\" \"Student\" 1636425657.607351 [0 127.0.0.1:53881] \"TABLE.DATA.WRITE\" \"Insert\" \"cat_1.test.table\" \"1\" \"41\\x1eBanker\" \"Kiyoko\" \")\\x00\\x00\\x00\" \"Banker\" 1636425657.608057 [0 127.0.0.1:53881] \"TABLE.DATA.WRITE\" \"Insert\" \"cat_1.test.table\" \"1\" \"21\\x1eSales Manager\" \"Hilton\" \"\\x15\\x00\\x00\\x00\" \"Sales Manager\" 1636425657.608455 [0 127.0.0.1:53881] \"TABLE.DATA.WRITE\" \"Insert\" \"cat_1.test.table\" \"1\" \"32\\x1eSales Manager\" \"Becky\" \" \\x00\\x00\\x00\" \"Sales Manager\" 1636425657.609218 [0 127.0.0.1:53881] \"TABLE.DATA.WRITE\" \"Insert\" \"cat_1.test.table\" \"1\" \"29\\x1eBanker\" \"Wendie\" \"\\x1d\\x00\\x00\\x00\" \"Banker\" 1636425657.609940 [0 127.0.0.1:53881] \"TABLE.DATA.WRITE\" \"Insert\" \"cat_1.test.table\" \"1\" \"26\\x1eStudent\" \"Carolina\" \"\\x1a\\x00\\x00\\x00\" \"Student\" 1636425657.610284 [0 127.0.0.1:53881] \"TABLE.DATA.WRITE\" \"Insert\" \"cat_1.test.table\" \"1\" \"47\\x1eBanker\" \"Laquita\" \"/\\x00\\x00\\x00\" \"Banker\" 1636425657.610638 [0 127.0.0.1:53881] \"TABLE.DATA.WRITE\" \"Insert\" \"cat_1.test.table\" \"1\" \"23\\x1eProfessor\" \"Stephani\" \"\\x17\\x00\\x00\\x00\" \"Professor\" 1636425657.610964 [0 127.0.0.1:53881] \"TABLE.DATA.WRITE\" \"Insert\" \"cat_1.test.table\" \"1\" \"29\\x1eProfessor\" \"Emile\" \"\\x1d\\x00\\x00\\x00\" \"Professor\" 1636425657.612257 [0 127.0.0.1:53881] \"TABLE.DATA.WRITE\" \"Insert\" \"cat_1.test.table\" \"1\" \"23\\x1eBanker\" \"Cherri\" \"\\x17\\x00\\x00\\x00\" \"Banker\" 1636425657.612630 [0 127.0.0.1:53881] \"TABLE.DATA.WRITE\" \"Insert\" \"cat_1.test.table\" \"1\" \"47\\x1eBanker\" \"Raleigh\" \"/\\x00\\x00\\x00\" \"Banker\" 1636425657.612943 [0 127.0.0.1:53881] \"TABLE.DATA.WRITE\" \"Insert\" \"cat_1.test.table\" \"1\" \"32\\x1eProfessor\" \"Hollis\" \" \\x00\\x00\\x00\" \"Professor\" 1636425657.614136 [0 127.0.0.1:53881] \"TABLE.DATA.WRITE\" \"Insert\" \"cat_1.test.table\" \"1\" \"45\\x1eBanker\" \"Brigette\" \"-\\x00\\x00\\x00\" \"Banker\" 1636425657.615558 [0 127.0.0.1:53881] \"TABLE.DATA.WRITE\" \"Insert\" \"cat_1.test.table\" \"1\" \"21\\x1eSales Manager\" \"Damian\" \"\\x15\\x00\\x00\\x00\" \"Sales Manager\" 1636425657.617321 [0 127.0.0.1:53881] \"TABLE.DATA.WRITE\" \"Insert\" \"cat_1.test.table\" \"1\" \"27\\x1eSales Manager\" \"Star\" \"\\x1b\\x00\\x00\\x00\" \"Sales Manager\" 1636425657.618819 [0 127.0.0.1:53881] \"TABLE.DATA.WRITE\" \"Insert\" \"cat_1.test.table\" \"1\" \"43\\x1eSales Manager\" \"Elba\" \"+\\x00\\x00\\x00\" \"Sales Manager\" 1636425657.619621 [0 127.0.0.1:53881] \"TABLE.DATA.WRITE\" \"Insert\" \"cat_1.test.table\" \"1\" \"36\\x1eProfessor\" \"Lourie\" \"$\\x00\\x00\\x00\" \"Professor\" 1636425657.622977 [0 
127.0.0.1:53881] \"TABLE.DATA.WRITE\" \"Insert\" \"cat_1.test.table\" \"1\" \"23\\x1eSales Manager\" \"\\xea\\xb0\\x80\\xeb\\x82\\x98\\xeb\\x82\\x98\\xeb\\x82\\x98\\xea\\xb0\\x80\\xeb\\x82\\x98\\xeb\\x82\\x98\" \"\\x17\\x00\\x00\\x00\" \"Sales Manager\" 1636425657.623555 [0 127.0.0.1:53881] \"TABLE.DATA.WRITE\" \"Insert\" \"cat_1.test.table\" \"1\" \"48\\x1eCEO\" \"Elon\" \"0\\x00\\x00\\x00\" \"CEO\" 1636425657.624359 [0 127.0.0.1:53881] \"TABLE.DATA.WRITE\" \"Insert\" \"cat_1.test.table\" \"1\" \"22\\x1eTutor\" \"Kijung\" \"\\x16\\x00\\x00\\x00\" \"Tutor\" 11. partitions \u00b6 a. Query with a pattern \u00b6 Commnad \"TABLE.DATA.READ\" \"partitions\" \"{catalog name}.{namespace name}.{table name}\" \"pattern(normaly '*')\" Examples 127.0.0.1:7389> help TABLE.DATA.READ partitions TABLE.DATA.READ partitions catalog.namespace.table pattern partition-filter(optional) summary: Get the list of partitions with the pattern and filter since: 2.0.0 group: table.data 127.0.0.1:7389> 127.0.0.1:7389> TABLE.DATA.READ partitions \"cat_1.test.table\" \"*\" 1) \"21\\x1eSales Manager\" 2) \"22\\x1eTutor\" 3) \"23\\x1eBanker\" 4) \"23\\x1eProfessor\" 5) \"23\\x1eSales Manager\" 6) \"24\\x1eStudent\" 7) \"26\\x1eStudent\" 8) \"27\\x1eSales Manager\" 9) \"29\\x1eBanker\" 10) \"29\\x1eProfessor\" 11) \"32\\x1eProfessor\" 12) \"32\\x1eSales Manager\" 13) \"33\\x1eProfessor\" 14) \"36\\x1eProfessor\" 15) \"40\\x1eBanker\" 16) \"41\\x1eBanker\" 17) \"43\\x1eSales Manager\" 18) \"45\\x1eBanker\" 19) \"47\\x1eBanker\" 20) \"48\\x1eCEO\" 127.0.0.1:7389> TABLE.DATA.READ partitions \"cat_1.test.table\" \"29*\" 1) \"29\\x1eBanker\" 2) \"29\\x1eProfessor\" 127.0.0.1:7389> TABLE.DATA.READ partitions \"cat_1.test.table\" \"*Professor\" 1) \"23\\x1eProfessor\" 2) \"29\\x1eProfessor\" 3) \"32\\x1eProfessor\" 4) \"33\\x1eProfessor\" 5) \"36\\x1eProfessor\" b. Query with a pattern and filters \u00b6 Command \"TABLE.DATA.READ\" \"partitions\" \"catalog name\" \"namespace name\" \"table name\" \"pattern(normaly '*')\" \"partition filter\" Examples 127.0.0.1:7389> TABLE.DATA.READ partitions \"cat_1.test.table\" \"*\" \"age\\x1e30\\x1eLTE\" 1) \"21\\x1eSales Manager\" 2) \"22\\x1eTutor\" 3) \"23\\x1eBanker\" 4) \"23\\x1eProfessor\" 5) \"23\\x1eSales Manager\" 6) \"24\\x1eStudent\" 7) \"26\\x1eStudent\" 8) \"27\\x1eSales Manager\" 9) \"29\\x1eBanker\" 10) \"29\\x1eProfessor\" 127.0.0.1:7389> TABLE.DATA.READ partitions \"cat_1.test.table\" \"*\" \"age\\x1e32\\x1eEQ\" 1) \"32\\x1eProfessor\" 2) \"32\\x1eSales Manager\" 127.0.0.1:7389> TABLE.DATA.READ partitions \"cat_1.test.table\" \"*\" \"age\\x1e32\\x1eLT\\x1ejob\\x1eCEO\\x1eLTE\\x1eAND\" 1) \"23\\x1eBanker\" 2) \"29\\x1eBanker\" 127.0.0.1:7389> TABLE.DATA.READ partitions \"cat_1.test.table\" \"*\" \"age\\x1e32\\x1eLT\\x1ejob\\x1eCEO\\x1eGTE\\x1eAND\" 1) \"21\\x1eSales Manager\" 2) \"22\\x1eTutor\" 3) \"23\\x1eProfessor\" 4) \"23\\x1eSales Manager\" 5) \"24\\x1eStudent\" 6) \"26\\x1eStudent\" 7) \"27\\x1eSales Manager\" 8) \"29\\x1eProfessor\" 127.0.0.1:7389> TABLE.DATA.READ partitions \"cat_1.test.table\" \"*\" \"age\\x1e32\\x1eGT\\x1ejob\\x1eCEO\\x1eGTE\\x1eAND\" 1) \"33\\x1eProfessor\" 2) \"36\\x1eProfessor\" 3) \"43\\x1eSales Manager\" 4) \"48\\x1eCEO\" 12. 
select \u00b6 Command \"TABLE.DATA.READ\" \"select\" \"catalog name\" \"namespace name\" \"table name\" \"pattern(normaly '*')\" \"partition filter\" \"data filter\" Examples 127.0.0.1:7389> help TABLE.DATA.READ select TABLE.DATA.READ select catalog.namespace.table projection partition-filter data-filter summary: Get the data with the pattern and filter since: 2.0.0 group: table.data 127.0.0.1:7389> TABLE.DATA.READ select xxx .... 13. getPartitionRowCount \u00b6 Command \"TABLE.DATA.READ\" \"getPartitionRowCount\" \"{catalog name}.{namespace name}.{table name}\" \"partition string with regular expression\" Examples 127.0.0.1:7389> help TABLE.DATA.READ getPartitionRowCount TABLE.DATA.READ getPartitionRowCount catalog.namespace.table partition-string summary: Get the count of the rows in the partition since: 2.0.0 group: table.data 127.0.0.1:7389> TABLE.DATA.READ getPartitionRowCount \"cat_1.test.table\" * 1) \"21\\x1eSales Manager\" 2) \"2\" 3) \"22\\x1eTutor\" 4) \"1\" 5) \"23\\x1eBanker\" 6) \"1\" 7) \"23\\x1eProfessor\" 8) \"1\" 9) \"23\\x1eSales Manager\" 10) \"1\" 11) \"24\\x1eStudent\" 12) \"1\" 13) \"26\\x1eStudent\" 14) \"1\" 15) \"27\\x1eSales Manager\" 16) \"1\" 17) \"29\\x1eBanker\" 18) \"1\" 19) \"29\\x1eProfessor\" 20) \"1\" 21) \"32\\x1eProfessor\" 22) \"1\" 23) \"32\\x1eSales Manager\" 24) \"1\" 25) \"33\\x1eProfessor\" 26) \"1\" 27) \"36\\x1eProfessor\" 28) \"1\" 29) \"40\\x1eBanker\" 30) \"1\" 31) \"41\\x1eBanker\" 32) \"2\" 33) \"43\\x1eSales Manager\" 34) \"1\" 35) \"45\\x1eBanker\" 36) \"1\" 37) \"47\\x1eBanker\" 38) \"2\" 39) \"48\\x1eCEO\" 40) \"1\" 127.0.0.1:7389> TABLE.DATA.READ getPartitionRowCount \"cat_1.test.table\" \"23*\" 1) \"23\\x1eBanker\" 2) \"1\" 3) \"23\\x1eProfessor\" 4) \"1\" 5) \"23\\x1eSales Manager\" 6) \"1\" 127.0.0.1:7389> TABLE.DATA.READ getPartitionRowCount \"cat_1.test.table\" \"*Professor\" 1) \"23\\x1eProfessor\" 2) \"1\" 3) \"29\\x1eProfessor\" 4) \"1\" 5) \"32\\x1eProfessor\" 6) \"1\" 7) \"33\\x1eProfessor\" 8) \"1\" 9) \"36\\x1eProfessor\" 10) \"1\" 127.0.0.1:7389> TABLE.DATA.READ getPartitionRowCount \"cat_1.test.table\" \"45\\x1eBanker\" 1) \"45\\x1eBanker\" 2) \"1\" 14. getPartitionRowGroup \u00b6 Command \"TABLE.DATA.READ\" \"getPartitionRowGroup\" \"{catalog name}.{namespace name}.{table name}\" \"partition string\" Examples 127.0.0.1:7389> help TABLE.DATA.READ getPartitionRowGroup TABLE.DATA.READ getPartitionRowGroup catalog.namespace.table partition-string summary: Get the count of the rows in the each row-group of the partition since: 2.0.0 group: table.data 127.0.0.1:7389> TABLE.DATA.READ getPartitionRowGroup \"cat_1.test.table\" \"21\\x1eSales Manager\" 1) \"0\" 2) \"1\" 3) \"1\" 4) \"2\" 127.0.0.1:7389> 15. 
getTableRowCount \u00b6 Command \"TABLE.DATA.READ\" \"gettablerowcount\" \"{catalog name}.{namespace name}.{table name} with regular expression\" Examples 127.0.0.1:7389> help TABLE.DATA.READ getTableRowCount TABLE.DATA.READ getTableRowCount - summary: Get the row count of each table since: 2.0.0 group: table.data 127.0.0.1:7389> TABLE.DATA.READ getTableRowCount * 1) \"cat_1.test.network_table\" 2) \"33229\" 3) \"cat_1.test.table\" 4) \"23\" 127.0.0.1:7389>","title":"Redis5 cli (LightningDB v2.x)"},{"location":"cli-cli2/#1-createtable","text":"Command \"TABLE.META.WRITE\" \"createTable\" \"catalog name\" \"namespace name\" \"table name\" \"schema binary\" Examples 127.0.0.1:7389> help \"TABLE.META.WRITE\" \"createTable\" TABLE.META.WRITE createTable catalog.namespace.table arrow::schema summary: Create a new table since: 2.0.0 group: table.meta 127.0.0.1:7389> \"TABLE.META.WRITE\" \"createTable\" \"cat_1.test.table\" \"\\x10\\x00\\x00\\x00\\x00\\x00\\n\\x00\\x0e\\x00\\x06\\x00\\r\\x00\\b\\x00\\n\\x00\\x00\\x00\\x00\\x00\\x04\\x00\\x10\\x00\\x00\\x00\\x00\\x01\\n\\x00\\x0c\\x00\\x00\\x00\\b\\x00\\x04\\x00\\n\\x00\\x00\\x00\\b\\x00\\x00\\x00\\xc4\\x01\\x00\\x00\\t\\x00\\x00\\x00\\x80\\x01\\x00\\x00D\\x01\\x00\\x00\\x18\\x01\\x00\\x00\\xec\\x00\\x00\\x00\\xc0\\x00\\x00\\x00\\x98\\x00\\x00\\x00h\\x00\\x00\\x00@\\x00\\x00\\x00\\x04\\x00\\x00\\x00\\xac\\xfe\\xff\\xff\\b\\x00\\x00\\x00\\x18\\x00\\x00\\x00\\x0e\\x00\\x00\\x00127.0.0.1:7389\\x00\\x00\\x13\\x00\\x00\\x00properties.location\\x00\\xe4\\xfe\\xff\\xff\\b\\x00\\x00\\x00\\x0c\\x00\\x00\\x00\\x03\\x00\\x00\\x00job\\x00\\x0b\\x00\\x00\\x00partition.1\\x00\\b\\xff\\xff\\xff\\b\\x00\\x00\\x00\\x0c\\x00\\x00\\x00\\x01\\x00\\x00\\x001\\x00\\x00\\x00\\x10\\x00\\x00\\x00internal.version\\x00\\x00\\x00\\x004\\xff\\xff\\xff\\b\\x00\\x00\\x00\\x0c\\x00\\x00\\x00\\x03\\x00\\x00\\x00age\\x00\\x0b\\x00\\x00\\x00partition.0\\x00X\\xff\\xff\\xff\\b\\x00\\x00\\x00\\x0c\\x00\\x00\\x00\\x01\\x00\\x00\\x002\\x00\\x00\\x00\\x0e\\x00\\x00\\x00partition.size\\x00\\x00\\x80\\xff\\xff\\xff\\b\\x00\\x00\\x00\\x0c\\x00\\x00\\x00\\x03\\x00\\x00\\x00512\\x00\\x0c\\x00\\x00\\x00cva.capacity\\x00\\x00\\x00\\x00\\xa8\\xff\\xff\\xff\\b\\x00\\x00\\x00\\x0c\\x00\\x00\\x00\\x02\\x00\\x00\\x0024\\x00\\x00\\x0e\\x00\\x00\\x00properties.ttl\\x00\\x00\\xd0\\xff\\xff\\xff\\b\\x00\\x00\\x00\\x10\\x00\\x00\\x00\\x04\\x00\\x00\\x002560\\x00\\x00\\x00\\x00\\x11\\x00\\x00\\x00rowgroup.capacity\\x00\\x00\\x00\\b\\x00\\x0c\\x00\\b\\x00\\x04\\x00\\b\\x00\\x00\\x00\\b\\x00\\x00\\x00\\x18\\x00\\x00\\x00\\x0e\\x00\\x00\\x00127.0.0.1:7379\\x00\\x00\\x14\\x00\\x00\\x00properties.metastore\\x00\\x00\\x00\\x00\\x03\\x00\\x00\\x00\\x88\\x00\\x00\\x004\\x00\\x00\\x00\\x04\\x00\\x00\\x00\\x96\\xff\\xff\\xff\\x14\\x00\\x00\\x00\\x14\\x00\\x00\\x00\\x14\\x00\\x00\\x00\\x00\\x00\\x05\\x01\\x10\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x84\\xff\\xff\\xff\\x03\\x00\\x00\\x00job\\x00\\xc2\\xff\\xff\\xff\\x14\\x00\\x00\\x00\\x14\\x00\\x00\\x00\\x1c\\x00\\x00\\x00\\x00\\x00\\x02\\x01 \\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\b\\x00\\x0c\\x00\\b\\x00\\a\\x00\\b\\x00\\x00\\x00\\x00\\x00\\x00\\x01 
\\x00\\x00\\x00\\x03\\x00\\x00\\x00age\\x00\\x00\\x00\\x12\\x00\\x18\\x00\\x14\\x00\\x13\\x00\\x12\\x00\\x0c\\x00\\x00\\x00\\b\\x00\\x04\\x00\\x12\\x00\\x00\\x00\\x14\\x00\\x00\\x00\\x14\\x00\\x00\\x00\\x18\\x00\\x00\\x00\\x00\\x00\\x05\\x01\\x14\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x04\\x00\\x04\\x00\\x04\\x00\\x00\\x00\\x04\\x00\\x00\\x00name\\x00\\x00\\x00\\x00\"","title":"1. createTable"},{"location":"cli-cli2/#2-truncatetable","text":"Command \"TABLE.META.WRITE\" \"truncateTable\" \"{catalog name}.{namespace name}.{table name}\" Examples 127.0.0.1:7389> help \"TABLE.META.WRITE\" \"truncateTable\" TABLE.META.WRITE truncateTable catalog.namespace.table summary: Truncate the table(Remove all data in the table) since: 2.0.0 group: table.meta 127.0.0.1:7389> 127.0.0.1:7389> TABLE.DATA.READ partitions \"cat_1.test.table\" \"*\" 1) \"21\\x1eSales Manager\" 2) \"22\\x1eTutor\" 3) \"23\\x1eBanker\" 4) \"23\\x1eProfessor\" 5) \"23\\x1eSales Manager\" 6) \"24\\x1eStudent\" 7) \"26\\x1eStudent\" 8) \"27\\x1eSales Manager\" 9) \"29\\x1eBanker\" 10) \"29\\x1eProfessor\" 11) \"32\\x1eProfessor\" 12) \"32\\x1eSales Manager\" 13) \"33\\x1eProfessor\" 14) \"36\\x1eProfessor\" 15) \"41\\x1eBanker\" 16) \"43\\x1eSales Manager\" 17) \"45\\x1eBanker\" 18) \"47\\x1eBanker\" 19) \"48\\x1eCEO\" 127.0.0.1:7389> TABLE.META.WRITE truncateTable \"cat_1.test.table\" \"OK\" 127.0.0.1:7389> TABLE.DATA.READ partitions \"cat_1.test.table\" \"*\" (empty list or set)","title":"2. truncateTable"},{"location":"cli-cli2/#3-droptable","text":"Command \"TABLE.META.WRITE\" \"dropTable\" \"{catalog name}.{namespace name}.{table name}\" Examples 127.0.0.1:7389> help \"TABLE.META.WRITE\" \"dropTable\" TABLE.META.WRITE dropTable catalog.namespace.table summary: Drop the table(Remove all data and the schema) since: 2.0.0 group: table.meta 127.0.0.1:7389> 127.0.0.1:7389> TABLE.META.READ showTables 1) \"cat_1.test.table\" 2) \"version: 1\" 127.0.0.1:7389> TABLE.META.WRITE dropTable \"cat_1.test.table\" \"OK\" 127.0.0.1:7389> TABLE.META.READ showTables (empty list or set)","title":"3. dropTable"},{"location":"cli-cli2/#4-dropalltables","text":"Command \"TABLE.META.WRITE\" \"dropAllTables\" Examples 127.0.0.1:7389> help \"TABLE.META.WRITE\" \"dropAllTables\" TABLE.META.WRITE dropAllTables - summary: Drop all tables since: 2.0.0 group: table.meta 127.0.0.1:7389> 127.0.0.1:7389> TABLE.META.READ showTables 1) \"cat_1.test.table\" 2) \"version: 1\" 127.0.0.1:7389> TABLE.META.WRITE dropAllTables 1 tables are deleted.","title":"4. dropAllTables"},{"location":"cli-cli2/#5-settablettl","text":"Command \"TABLE.META.WRITE\" \"settablettl\" \"{catalog name}.{namespace name}.{table name}\" \"{ttl time(unit: msec)}\" Example 127.0.0.1:7389> help \"TABLE.META.WRITE\" \"seTtableTtl\" TABLE.META.WRITE setTableTtl catalog.namespace.table ttl(msec) summary: Set the ttl of the table since: 2.0.0 group: table.meta 127.0.0.1:7389> TABLE.META.WRITE setTableTtl \"cat_1.test.table\" 30000 OK","title":"5. setTableTtl"},{"location":"cli-cli2/#6-showtables","text":"Command \"TABLE.META.READ\" \"showTables\" Examples 127.0.0.1:7389> help TABLE.META.READ showTables TABLE.META.READ showTables - summary: Get the list of tables with their own version since: 2.0.0 group: table.meta 127.0.0.1:7389> 127.0.0.1:7389> TABLE.META.READ showTables 1) \"cat_1.test.table\" 2) \"version: 1\"","title":"6. 
showTables"},{"location":"cli-cli2/#7-describetable","text":"Command \"TABLE.META.READ\" \"describeTable\" \"table name\" Examples 127.0.0.1:7389> help TABLE.META.READ describeTables TABLE.META.READ describeTables catalog.namespace.table summary: Get all columns and partitions of the table since: 2.0.0 group: table.meta 127.0.0.1:7389> 127.0.0.1:7389> TABLE.META.READ showTables 1) \"cat_1.test.table\" 2) \"version: 1\" 127.0.0.1:7389> TABLE.META.READ describeTables \"cat_1.test.table\" 1) \"name: string\" 2) \"age: int32\" 3) \"job: string\" 4) \"[ partitions: age job ]\"","title":"7. describeTable"},{"location":"cli-cli2/#8-gettablettl","text":"Command \"TABLE.META.READ\" gettablettl \"{catalog name}.{namespace name}.{table name}\" Examples 127.0.0.1:7389> help TABLE.META.READ getTableTtl TABLE.META.READ getTableTtl catalog.namespace.table summary: Get the ttl of the table since: 2.0.0 group: table.meta 127.0.0.1:7389> TABLE.META.READ getTableTtl * 1) \"cat_1.test.network_table\" 2) \"86400000\" 3) \"cat_1.test.table\" 4) \"86400000\" 127.0.0.1:7389> TABLE.META.READ getTableTtl cat_1.* 1) \"cat_1.test.network_table\" 2) \"86400000\" 3) \"cat_1.test.table\" 4) \"86400000\" 127.0.0.1:7389> TABLE.META.READ getTableTtl *.network_table 1) \"cat_1.test.network_table\" 2) \"86400000\" 127.0.0.1:7389> TABLE.META.READ getTableTtl cat_1.test.network_table 1) \"cat_1.test.network_table\" 2) \"86400000\" 127.0.0.1:7389>","title":"8. getTableTtl"},{"location":"cli-cli2/#9-getpartitionttl","text":"Command \"TABLE.META.READ\" getPartitionTtl \"{catalog name}.{namespace name}.{table name}\" \"partition string with regular expression\" Examples 127.0.0.1:7389> help TABLE.META.READ getPartitionTtl TABLE.META.READ getPartitionTtl partition-string summary: Get the ttl of the partition in the table since: 2.0.0 group: table.meta 127.0.0.1:7389> TABLE.META.READ getPartitionTtl \"cat_1.test.table\" \"*\" 1) \"21\\x1eSales Manager\" 2) \"86350123\" 3) \"22\\x1eTutor\" 4) \"86350139\" 5) \"23\\x1eBanker\" 6) \"86350126\" 7) \"23\\x1eProfessor\" 8) \"86350125\" 9) \"23\\x1eSales Manager\" 10) \"86350137\" 11) \"24\\x1eStudent\" 12) \"86350121\" 13) \"26\\x1eStudent\" 14) \"86350124\" 15) \"27\\x1eSales Manager\" 16) \"86350132\" 17) \"29\\x1eBanker\" 18) \"86350124\" 19) \"29\\x1eProfessor\" 20) \"86350125\" 21) \"32\\x1eProfessor\" 22) \"86350127\" 23) \"32\\x1eSales Manager\" 24) \"86350123\" 25) \"33\\x1eProfessor\" 26) \"86350120\" 27) \"36\\x1eProfessor\" 28) \"86350134\" 29) \"40\\x1eBanker\" 30) \"86350119\" 31) \"41\\x1eBanker\" 32) \"86350120\" 33) \"43\\x1eSales Manager\" 34) \"86350133\" 35) \"45\\x1eBanker\" 36) \"86350128\" 37) \"47\\x1eBanker\" 38) \"86350124\" 39) \"48\\x1eCEO\" 40) \"86350138\" 127.0.0.1:7389> TABLE.META.READ getPartitionTtl \"cat_1.test.table\" \"23*\" 1) \"23\\x1eBanker\" 2) \"86343642\" 3) \"23\\x1eProfessor\" 4) \"86343641\" 5) \"23\\x1eSales Manager\" 6) \"86343653\" 127.0.0.1:7389> TABLE.META.READ getPartitionTtl \"cat_1.test.table\" \"*CEO\" 1) \"48\\x1eCEO\" 2) \"86336153\" 127.0.0.1:7389> TABLE.META.READ getPartitionTtl \"cat_1.test.table\" \"45\\x1eBanker\" 1) \"45\\x1eBanker\" 2) \"86324848\" 127.0.0.1:7389>","title":"9. getPartitionTtl"},{"location":"cli-cli2/#10-insert","text":"- Command - \"TABLE.DATA.WRITE\" \"Insert\" \"{catalog name}.{namespace name}.{table name}\" \"table version\" \"partition string\" \"binaries... 
...\" - Examples 127.0.0.1:7389> help \"TABLE.DATA.WRITE\" \"Insert\" TABLE.DATA.WRITE insert catalog.namespace.table table-version partition-string data summary: Insert a new data(row) since: 2.0.0 group: table.data 1636425657.602951 [0 127.0.0.1:53881] \"TABLE.DATA.WRITE\" \"Insert\" \"cat_1.test.table\" \"1\" \"40\\x1eBanker\" \"Jeannie\" \"(\\x00\\x00\\x00\" \"Banker\" 1636425657.604043 [0 127.0.0.1:53881] \"TABLE.DATA.WRITE\" \"Insert\" \"cat_1.test.table\" \"1\" \"33\\x1eProfessor\" \"Ardith\" \"!\\x00\\x00\\x00\" \"Professor\" 1636425657.604529 [0 127.0.0.1:53881] \"TABLE.DATA.WRITE\" \"Insert\" \"cat_1.test.table\" \"1\" \"41\\x1eBanker\" \"Elena\" \")\\x00\\x00\\x00\" \"Banker\" 1636425657.605351 [0 127.0.0.1:53881] \"TABLE.DATA.WRITE\" \"Insert\" \"cat_1.test.table\" \"1\" \"24\\x1eStudent\" \"Corliss\" \"\\x18\\x00\\x00\\x00\" \"Student\" 1636425657.607351 [0 127.0.0.1:53881] \"TABLE.DATA.WRITE\" \"Insert\" \"cat_1.test.table\" \"1\" \"41\\x1eBanker\" \"Kiyoko\" \")\\x00\\x00\\x00\" \"Banker\" 1636425657.608057 [0 127.0.0.1:53881] \"TABLE.DATA.WRITE\" \"Insert\" \"cat_1.test.table\" \"1\" \"21\\x1eSales Manager\" \"Hilton\" \"\\x15\\x00\\x00\\x00\" \"Sales Manager\" 1636425657.608455 [0 127.0.0.1:53881] \"TABLE.DATA.WRITE\" \"Insert\" \"cat_1.test.table\" \"1\" \"32\\x1eSales Manager\" \"Becky\" \" \\x00\\x00\\x00\" \"Sales Manager\" 1636425657.609218 [0 127.0.0.1:53881] \"TABLE.DATA.WRITE\" \"Insert\" \"cat_1.test.table\" \"1\" \"29\\x1eBanker\" \"Wendie\" \"\\x1d\\x00\\x00\\x00\" \"Banker\" 1636425657.609940 [0 127.0.0.1:53881] \"TABLE.DATA.WRITE\" \"Insert\" \"cat_1.test.table\" \"1\" \"26\\x1eStudent\" \"Carolina\" \"\\x1a\\x00\\x00\\x00\" \"Student\" 1636425657.610284 [0 127.0.0.1:53881] \"TABLE.DATA.WRITE\" \"Insert\" \"cat_1.test.table\" \"1\" \"47\\x1eBanker\" \"Laquita\" \"/\\x00\\x00\\x00\" \"Banker\" 1636425657.610638 [0 127.0.0.1:53881] \"TABLE.DATA.WRITE\" \"Insert\" \"cat_1.test.table\" \"1\" \"23\\x1eProfessor\" \"Stephani\" \"\\x17\\x00\\x00\\x00\" \"Professor\" 1636425657.610964 [0 127.0.0.1:53881] \"TABLE.DATA.WRITE\" \"Insert\" \"cat_1.test.table\" \"1\" \"29\\x1eProfessor\" \"Emile\" \"\\x1d\\x00\\x00\\x00\" \"Professor\" 1636425657.612257 [0 127.0.0.1:53881] \"TABLE.DATA.WRITE\" \"Insert\" \"cat_1.test.table\" \"1\" \"23\\x1eBanker\" \"Cherri\" \"\\x17\\x00\\x00\\x00\" \"Banker\" 1636425657.612630 [0 127.0.0.1:53881] \"TABLE.DATA.WRITE\" \"Insert\" \"cat_1.test.table\" \"1\" \"47\\x1eBanker\" \"Raleigh\" \"/\\x00\\x00\\x00\" \"Banker\" 1636425657.612943 [0 127.0.0.1:53881] \"TABLE.DATA.WRITE\" \"Insert\" \"cat_1.test.table\" \"1\" \"32\\x1eProfessor\" \"Hollis\" \" \\x00\\x00\\x00\" \"Professor\" 1636425657.614136 [0 127.0.0.1:53881] \"TABLE.DATA.WRITE\" \"Insert\" \"cat_1.test.table\" \"1\" \"45\\x1eBanker\" \"Brigette\" \"-\\x00\\x00\\x00\" \"Banker\" 1636425657.615558 [0 127.0.0.1:53881] \"TABLE.DATA.WRITE\" \"Insert\" \"cat_1.test.table\" \"1\" \"21\\x1eSales Manager\" \"Damian\" \"\\x15\\x00\\x00\\x00\" \"Sales Manager\" 1636425657.617321 [0 127.0.0.1:53881] \"TABLE.DATA.WRITE\" \"Insert\" \"cat_1.test.table\" \"1\" \"27\\x1eSales Manager\" \"Star\" \"\\x1b\\x00\\x00\\x00\" \"Sales Manager\" 1636425657.618819 [0 127.0.0.1:53881] \"TABLE.DATA.WRITE\" \"Insert\" \"cat_1.test.table\" \"1\" \"43\\x1eSales Manager\" \"Elba\" \"+\\x00\\x00\\x00\" \"Sales Manager\" 1636425657.619621 [0 127.0.0.1:53881] \"TABLE.DATA.WRITE\" \"Insert\" \"cat_1.test.table\" \"1\" \"36\\x1eProfessor\" \"Lourie\" \"$\\x00\\x00\\x00\" \"Professor\" 1636425657.622977 [0 
127.0.0.1:53881] \"TABLE.DATA.WRITE\" \"Insert\" \"cat_1.test.table\" \"1\" \"23\\x1eSales Manager\" \"\\xea\\xb0\\x80\\xeb\\x82\\x98\\xeb\\x82\\x98\\xeb\\x82\\x98\\xea\\xb0\\x80\\xeb\\x82\\x98\\xeb\\x82\\x98\" \"\\x17\\x00\\x00\\x00\" \"Sales Manager\" 1636425657.623555 [0 127.0.0.1:53881] \"TABLE.DATA.WRITE\" \"Insert\" \"cat_1.test.table\" \"1\" \"48\\x1eCEO\" \"Elon\" \"0\\x00\\x00\\x00\" \"CEO\" 1636425657.624359 [0 127.0.0.1:53881] \"TABLE.DATA.WRITE\" \"Insert\" \"cat_1.test.table\" \"1\" \"22\\x1eTutor\" \"Kijung\" \"\\x16\\x00\\x00\\x00\" \"Tutor\"","title":"10. insert"},{"location":"cli-cli2/#11-partitions","text":"","title":"11. partitions"},{"location":"cli-cli2/#a-query-with-a-pattern","text":"Commnad \"TABLE.DATA.READ\" \"partitions\" \"{catalog name}.{namespace name}.{table name}\" \"pattern(normaly '*')\" Examples 127.0.0.1:7389> help TABLE.DATA.READ partitions TABLE.DATA.READ partitions catalog.namespace.table pattern partition-filter(optional) summary: Get the list of partitions with the pattern and filter since: 2.0.0 group: table.data 127.0.0.1:7389> 127.0.0.1:7389> TABLE.DATA.READ partitions \"cat_1.test.table\" \"*\" 1) \"21\\x1eSales Manager\" 2) \"22\\x1eTutor\" 3) \"23\\x1eBanker\" 4) \"23\\x1eProfessor\" 5) \"23\\x1eSales Manager\" 6) \"24\\x1eStudent\" 7) \"26\\x1eStudent\" 8) \"27\\x1eSales Manager\" 9) \"29\\x1eBanker\" 10) \"29\\x1eProfessor\" 11) \"32\\x1eProfessor\" 12) \"32\\x1eSales Manager\" 13) \"33\\x1eProfessor\" 14) \"36\\x1eProfessor\" 15) \"40\\x1eBanker\" 16) \"41\\x1eBanker\" 17) \"43\\x1eSales Manager\" 18) \"45\\x1eBanker\" 19) \"47\\x1eBanker\" 20) \"48\\x1eCEO\" 127.0.0.1:7389> TABLE.DATA.READ partitions \"cat_1.test.table\" \"29*\" 1) \"29\\x1eBanker\" 2) \"29\\x1eProfessor\" 127.0.0.1:7389> TABLE.DATA.READ partitions \"cat_1.test.table\" \"*Professor\" 1) \"23\\x1eProfessor\" 2) \"29\\x1eProfessor\" 3) \"32\\x1eProfessor\" 4) \"33\\x1eProfessor\" 5) \"36\\x1eProfessor\"","title":"a. Query with a pattern"},{"location":"cli-cli2/#b-query-with-a-pattern-and-filters","text":"Command \"TABLE.DATA.READ\" \"partitions\" \"catalog name\" \"namespace name\" \"table name\" \"pattern(normaly '*')\" \"partition filter\" Examples 127.0.0.1:7389> TABLE.DATA.READ partitions \"cat_1.test.table\" \"*\" \"age\\x1e30\\x1eLTE\" 1) \"21\\x1eSales Manager\" 2) \"22\\x1eTutor\" 3) \"23\\x1eBanker\" 4) \"23\\x1eProfessor\" 5) \"23\\x1eSales Manager\" 6) \"24\\x1eStudent\" 7) \"26\\x1eStudent\" 8) \"27\\x1eSales Manager\" 9) \"29\\x1eBanker\" 10) \"29\\x1eProfessor\" 127.0.0.1:7389> TABLE.DATA.READ partitions \"cat_1.test.table\" \"*\" \"age\\x1e32\\x1eEQ\" 1) \"32\\x1eProfessor\" 2) \"32\\x1eSales Manager\" 127.0.0.1:7389> TABLE.DATA.READ partitions \"cat_1.test.table\" \"*\" \"age\\x1e32\\x1eLT\\x1ejob\\x1eCEO\\x1eLTE\\x1eAND\" 1) \"23\\x1eBanker\" 2) \"29\\x1eBanker\" 127.0.0.1:7389> TABLE.DATA.READ partitions \"cat_1.test.table\" \"*\" \"age\\x1e32\\x1eLT\\x1ejob\\x1eCEO\\x1eGTE\\x1eAND\" 1) \"21\\x1eSales Manager\" 2) \"22\\x1eTutor\" 3) \"23\\x1eProfessor\" 4) \"23\\x1eSales Manager\" 5) \"24\\x1eStudent\" 6) \"26\\x1eStudent\" 7) \"27\\x1eSales Manager\" 8) \"29\\x1eProfessor\" 127.0.0.1:7389> TABLE.DATA.READ partitions \"cat_1.test.table\" \"*\" \"age\\x1e32\\x1eGT\\x1ejob\\x1eCEO\\x1eGTE\\x1eAND\" 1) \"33\\x1eProfessor\" 2) \"36\\x1eProfessor\" 3) \"43\\x1eSales Manager\" 4) \"48\\x1eCEO\"","title":"b. 
Query with a pattern and filters"},{"location":"cli-cli2/#12-select","text":"Command \"TABLE.DATA.READ\" \"select\" \"catalog name\" \"namespace name\" \"table name\" \"pattern(normaly '*')\" \"partition filter\" \"data filter\" Examples 127.0.0.1:7389> help TABLE.DATA.READ select TABLE.DATA.READ select catalog.namespace.table projection partition-filter data-filter summary: Get the data with the pattern and filter since: 2.0.0 group: table.data 127.0.0.1:7389> TABLE.DATA.READ select xxx ....","title":"12. select"},{"location":"cli-cli2/#13-getpartitionrowcount","text":"Command \"TABLE.DATA.READ\" \"getPartitionRowCount\" \"{catalog name}.{namespace name}.{table name}\" \"partition string with regular expression\" Examples 127.0.0.1:7389> help TABLE.DATA.READ getPartitionRowCount TABLE.DATA.READ getPartitionRowCount catalog.namespace.table partition-string summary: Get the count of the rows in the partition since: 2.0.0 group: table.data 127.0.0.1:7389> TABLE.DATA.READ getPartitionRowCount \"cat_1.test.table\" * 1) \"21\\x1eSales Manager\" 2) \"2\" 3) \"22\\x1eTutor\" 4) \"1\" 5) \"23\\x1eBanker\" 6) \"1\" 7) \"23\\x1eProfessor\" 8) \"1\" 9) \"23\\x1eSales Manager\" 10) \"1\" 11) \"24\\x1eStudent\" 12) \"1\" 13) \"26\\x1eStudent\" 14) \"1\" 15) \"27\\x1eSales Manager\" 16) \"1\" 17) \"29\\x1eBanker\" 18) \"1\" 19) \"29\\x1eProfessor\" 20) \"1\" 21) \"32\\x1eProfessor\" 22) \"1\" 23) \"32\\x1eSales Manager\" 24) \"1\" 25) \"33\\x1eProfessor\" 26) \"1\" 27) \"36\\x1eProfessor\" 28) \"1\" 29) \"40\\x1eBanker\" 30) \"1\" 31) \"41\\x1eBanker\" 32) \"2\" 33) \"43\\x1eSales Manager\" 34) \"1\" 35) \"45\\x1eBanker\" 36) \"1\" 37) \"47\\x1eBanker\" 38) \"2\" 39) \"48\\x1eCEO\" 40) \"1\" 127.0.0.1:7389> TABLE.DATA.READ getPartitionRowCount \"cat_1.test.table\" \"23*\" 1) \"23\\x1eBanker\" 2) \"1\" 3) \"23\\x1eProfessor\" 4) \"1\" 5) \"23\\x1eSales Manager\" 6) \"1\" 127.0.0.1:7389> TABLE.DATA.READ getPartitionRowCount \"cat_1.test.table\" \"*Professor\" 1) \"23\\x1eProfessor\" 2) \"1\" 3) \"29\\x1eProfessor\" 4) \"1\" 5) \"32\\x1eProfessor\" 6) \"1\" 7) \"33\\x1eProfessor\" 8) \"1\" 9) \"36\\x1eProfessor\" 10) \"1\" 127.0.0.1:7389> TABLE.DATA.READ getPartitionRowCount \"cat_1.test.table\" \"45\\x1eBanker\" 1) \"45\\x1eBanker\" 2) \"1\"","title":"13. getPartitionRowCount"},{"location":"cli-cli2/#14-getpartitionrowgroup","text":"Command \"TABLE.DATA.READ\" \"getPartitionRowGroup\" \"{catalog name}.{namespace name}.{table name}\" \"partition string\" Examples 127.0.0.1:7389> help TABLE.DATA.READ getPartitionRowGroup TABLE.DATA.READ getPartitionRowGroup catalog.namespace.table partition-string summary: Get the count of the rows in the each row-group of the partition since: 2.0.0 group: table.data 127.0.0.1:7389> TABLE.DATA.READ getPartitionRowGroup \"cat_1.test.table\" \"21\\x1eSales Manager\" 1) \"0\" 2) \"1\" 3) \"1\" 4) \"2\" 127.0.0.1:7389>","title":"14. getPartitionRowGroup"},{"location":"cli-cli2/#15-gettablerowcount","text":"Command \"TABLE.DATA.READ\" \"gettablerowcount\" \"{catalog name}.{namespace name}.{table name} with regular expression\" Examples 127.0.0.1:7389> help TABLE.DATA.READ getTableRowCount TABLE.DATA.READ getTableRowCount - summary: Get the row count of each table since: 2.0.0 group: table.data 127.0.0.1:7389> TABLE.DATA.READ getTableRowCount * 1) \"cat_1.test.network_table\" 2) \"33229\" 3) \"cat_1.test.table\" 4) \"23\" 127.0.0.1:7389>","title":"15. 
getTableRowCount"},{"location":"cli-cluster/","text":"Note Command Line Interface(CLI) of LightningDB supports not only deploy and start command but also many commands to access and manipulate data in LightningDB. If you want to see the list of cluster commands, use the cluster command without any option. ec2-user@lightningdb:1> cluster NAME ltcli cluster - This is cluster command SYNOPSIS ltcli cluster COMMAND DESCRIPTION This is cluster command COMMANDS COMMAND is one of the following: add_slave Add slaves to cluster additionally clean Clean cluster configure create Create cluster ls Check cluster list rebalance Rebalance restart Restart redist cluster rowcount Query and show cluster row count start Start cluster stop Stop cluster use Change selected cluster 1. Deploy and Start \u00b6 (1) Cluster configure redis-{port}.conf is generated with using redis-{master/slave}.conf.template and redis.properties files. matthew@lightningdb:21> cluster configure Check status of hosts... OK sync conf +----------------+--------+ | HOST | STATUS | +----------------+--------+ | 192.168.111.44 | OK | | 192.168.111.41 | OK | +----------------+--------+ OK (2) Cluster start Backup logs of the previous master/slave nodes All log files of previous master/slave nodes in ${SR2_HOME}/logs/redis/ 1 will be moved to ${SR2_HOME}/logs/redis/backup/ . Generate directories to save data Save aof and rdb files of redis-server and RocksDB files in ${SR2_REDIS_DATA} Start redis-server process Start master and slave redis-server with ${SR2_HOME}/conf/redis/redis-{port}.conf file Log files will be saved in ${SR2_HOME}/logs/redis/ ec2-user@lightningdb:1> cluster start Check status of hosts... OK Check cluster exist... - 127.0.0.1 OK Backup redis master log in each MASTER hosts... - 127.0.0.1 Generate redis configuration files for master hosts sync conf +-----------+--------+ | HOST | STATUS | +-----------+--------+ | 127.0.0.1 | OK | +-----------+--------+ Starting master nodes : 127.0.0.1 : 18100|18101|18102|18103|18104 ... Wait until all redis process up... cur: 5 / total: 5 Complete all redis process up Errors ErrorCode 11 Redis-server(master) process with the same port is already running. To resolve this error, use cluster stop or kill {pid of the process} . $ cluster start ... ... [ErrorCode 11] Fail to start... Must be checked running MASTER redis processes! We estimate that redis process isYou can try LightningDB in the Zeppelin notebook.
+Firstly, deploy and start the LightningDB cluster using Installation before launching the Zeppelin daemon.
+Secondly, to run LightningDB on Spark, the jars shipped with LightningDB should be passed to Spark.
+When the EC2 instance is initialized, the environment variable $SPARK_SUBMIT_OPTIONS is configured for this purpose,
+so you only need to check the setting in zeppelin-env.sh.
$ vim $ZEPPELIN_HOME/conf/zeppelin-env.sh
+...
+LIGHTNINGDB_LIB_PATH=$(eval echo $(cat $FBPATH/config | head -n 1 | awk {'print $2'}))/cluster_$(cat $FBPATH/HEAD)/tsr2-assembly-1.0.0-SNAPSHOT/lib/
+if [[ -e $LIGHTNINGDB_LIB_PATH ]]; then
+ export SPARK_SUBMIT_OPTIONS="--jars $(find $LIGHTNINGDB_LIB_PATH -name 'tsr2*' -o -name 'spark-r2*' -o -name '*jedis*' -o -name 'commons*' -o -name 'jdeferred*' -o -name 'geospark*' -o -name 'gt-*' | tr '\n' ',')"
+fi
+...
+
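+As a quick check (a hedged suggestion, not part of the original procedure, and assuming the shell has the same environment such as $FBPATH as Zeppelin), you can source zeppelin-env.sh and print the variable; if the LightningDB lib directory was found, a comma-separated jar list is printed.
+$ source $ZEPPELIN_HOME/conf/zeppelin-env.sh
+$ echo $SPARK_SUBMIT_OPTIONS
+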
+Finally, start Zeppelin daemon.
+$ cd $ZEPPELIN_HOME/bin
+$ ./zeppelin-daemon.sh start
+
+After starting the Zeppelin daemon, you can access the Zeppelin UI with a browser at https://your-server-ip:8080.
+Tip
+We recommend that you go through the tutorial in the Chrome browser.
+There is a GitHub page for the tutorial.
+The repository includes a tool for generating sample csv data and a notebook for the tutorial.
+You can import the tutorial notebook with its URL.
+https://raw.githubusercontent.com/mnms/tutorials/master/zeppelin-notebook/note.json
+ +The tutorial runs on the Spark interpreter of Zeppelin. +Please make sure that the Spark driver memory is at least 10GB in the Spark interpreter settings.
+ +Also, make sure that the timeout of a shell command is at least 120000 ms.
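+If you want to double-check these values from the shell (a hedged sketch: the property names spark.driver.memory and shell.command.timeout.millisecs are assumed from Zeppelin's standard Spark and shell interpreters, and conf/interpreter.json only exists after Zeppelin has generated it), you can grep the interpreter configuration:
+$ grep -E '"spark.driver.memory"|"shell.command.timeout.millisecs"' $ZEPPELIN_HOME/conf/interpreter.json
+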
+ + + + + + + +Tutorial
++ + + + +
++
+Loading with 'tsr2-tools': set the delimiter with the '-s' option and then use it as in the script below.
+#!/bin/bash
+if [ $# -ne 2 ];
+then
+ echo "Usage: load_data.sh data-directory json-file"
+ echo "(e.g.: load_data.sh ./data/split ./json/106.json)"
+ echo "Warning: delimiter is '|'"
+ exit
+fi
+tsr2-tools insert java_options "-Xms1g -Xmx32g" -d $1 -s "|" -t $2 -p 40 -c 1 -i
+
+[ltdb@d205 ~/tsr2-test]$ cat ./json/cell_nvkvs.json // Write the json file.
+{
+"endpoint" : "192.168.111.205:18600",
+"id" :"9999",
+"columns" : 219,
+"partitions" : [
+216, 218, 3, 4
+],
+"rowStore" : true // 적재하는 클러스터에서 'row-store-enabled'가 동일하게 'true'인지 확인 필요. 반대의 경우 둘다 'false'로 설정하면 됨.
+}
+
+[ltdb@d205 ~/tsr2-test]$ ls -alh ./test_data_one/ // Check the data to load. Both a directory and a file path can be used.
+total 8.7M
+drwxrwxr-x. 2 ltdb ltdb 50 2020-06-18 08:58:44 ./
+drwxrwxr-x. 7 ltdb ltdb 84 2020-06-18 08:58:28 ../
+-rw-rw-r--. 1 ltdb ltdb 8.7M 2020-06-18 08:58:44 ANALYSIS_CELL_ELG_20160711055950.dat
+
+[ltdb@d205 ~/tsr2-test]$ load.sh // Running 'load.sh' with no arguments prints the usage.
+Usage: load_data.sh data-directory json-file
+(e.g.: load_data.sh ./data/split ./json/106.json)
+Warning: delimiter is '|'
+[ltdb@d205 ~/tsr2-test]$ load.sh ./test_data_one/ ./json/cell_nvkvs.json
+/home/ltdb/tsr2-test/sbin/load.sh: line 9: tsr2-tools: command not found // Fails because no cluster is selected, so select the cluster with 'cfc'.
+
+[ltdb@d205 ~/tsr2-test]$ cfc 6
+[C:6][ltdb@d205 ~/tsr2-test]$ load.sh ./test_data_one/ ./json/cell_nvkvs.json // Loading starts.
+SLF4J: Class path contains multiple SLF4J bindings.
+SLF4J: Found binding in [jar:file:/home/ltdb/tsr2/cluster_6/tsr2-assembly-1.0.0-SNAPSHOT/lib/logback-classic-1.2.3.jar!/org/slf4j/impl/StaticLoggerBinder.class]
+
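+After the load finishes, one way to sanity-check the result (a hedged suggestion that reuses the 'cluster rowcount' command from the ltcli cluster command list) is to compare the row count before and after loading:
+> c 6 // alias of 'cluster use 6'; cluster 6 is the cluster used in the example above
+> cluster rowcount
+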
+Zookeeper and Kafka brokers must be installed beforehand ==> refer to 'Kafka & Kaetlyn installation'.
+The topics in use fall into the following three groups (a topic-preparation sketch follows this list):
+1. Topics for data loading
+ - Each table should be loaded through its own topic so that there is no dependency between tables.
+2. Error topic
+ - Set 'KAFKA_ERROR_REPORT_TOPIC_NAME=topic-error' via 'tsr2-kaetlyn edit'.
+ - When loading fails during consuming, the error details are written to this topic so that the loading client can check them.
+3. Result topic (optional)
+ - After consuming, the loading result can be delivered to the loading app according to the defined protocol.
+
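+A minimal sketch of preparing these topics with kafka-utils.sh (documented later on this page); it assumes a cluster has already been selected with 'cfc' and CONSUMER_GROUP_ID has been set in kafka-utils.sh, and the topic names and the partition count 18 are only examples:
+# data topic for one table
+kafka-utils.sh topic-create nvkvs3 18
+# error topic referenced by KAFKA_ERROR_REPORT_TOPIC_NAME
+kafka-utils.sh topic-create topic-error 18
+# verify partition count and replicas
+kafka-utils.sh topic-check nvkvs3
+# watch load failures reported by the consumer
+kafka-utils.sh error-monitor topic-error
+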
+Configure the consumer using the kaetlyn consumer described in 'Kafka & Kaetlyn installation'.
+The consumer settings must be made via 'tsr2-kaetlyn edit'.
+Basically, every part commented with 'needs to be modified' should be reviewed and modified.
+#!/bin/bash
+###############################################################################
+# Common variables
+SPARK_CONF=${SPARK_CONF:-$SPARK_HOME/conf}
+SPARK_BIN=${SPARK_BIN:-$SPARK_HOME/bin}
+SPARK_SBIN=${SPARK_SBIN:-$SPARK_HOME/sbin}
+SPARK_LOG=${SPARK_LOG:-$SPARK_HOME/logs}
+
+SPARK_METRICS=${SPARK_CONF}/metrics.properties
+SPARK_UI_PORT=${SPARK_UI_PORT:-14040}
+
+KAFKA_SERVER=localhost:9092
+
+###############################################################################
+# Properties for Consumer
+DRIVER_MEMORY=1g // needs to be modified
+
+EXECUTOR_MEMORY=1g // needs to be modified
+EXECUTORS=12 // needs to be modified
+EXECUTOR_CORES=12 // needs to be modified
+
+[[JSON_PATH]]=~/Flashbase/flashbase-benchmark/json/load_no_skew
+JSON_PATH=/home/ltdb/tsr2-test/json // needs to be modified; restart kaetlyn after updating the json files!
+[[HIVE_METASTORE_URI]]=thrift://localhost:9083
+HIVE_METASTORE_URI='' // needs to be modified
+KAFKA_CONSUMER_GROUP_ID=nvkvs_redis_connector // needs to be modified
+KAFKA_CONSUMING_TOPIC_LIST=nvkvs3 // needs to be modified
+JOB_GENERATION_PERIOD=1
+MAX_RATE_PER_PARTITION=20000
+KAFKA_ERROR_REPORT_TOPIC_NAME=topic-error
+TEST_MODE=false
+EXCUTOR_LOCALITY=false
+
+Normally, you can produce messages with kafka-console-producer.sh as follows:
+kafka-console-producer.sh --broker-list localhost:9092 --topic {topic name} < {filename to load}
+
+However, for Kaetlyn loading, each message must contain the header information below.
+Therefore, you have to produce the messages with headers using the kafkacat tool (see https://docs.confluent.io/3.3.0/app-development/kafkacat-usage.html#).
+Install a C++ compiler
+
+$ yum install gcc-c++
+
+
+
+$ git clone https://github.com/edenhill/librdkafka
+
+
+
+$ cd librdkafka
+
+$ ./configure
+
+$ make
+
+$ sudo make install
+
+
+
+ Move to /usr/local/lib and run the following commands.
+
+
+
+$ git clone https://github.com/edenhill/kafkacat
+
+
+
+$ cd kafkacat
+
+$ ./configure
+
+$ make
+
+$ sudo make install
+
+
+
+If the lib files cannot be found:
+
+$ ldd kafkacat
+
+
+
+Create the file /etc/ld.so.conf.d/usrlocal.conf and add the contents below.
+
+Contents:
+
+/usr/local/lib
+
+
+
+After saving, run the command below.
+
+$ ldconfig -v
+
+
+
+$ kafkacat
+
+If the kafkacat usage is printed, the installation succeeded.
+
+Once kafkacat is installed correctly, you can produce messages as follows.
+1. To load a single file:
+kafkacat -b localhost:9092 -t {topic name} -T -P -H TABLE_ID='{table id}' -H SEPARATOR='|' -l {filename to load}
+
+2. To load all files in a directory, move to that directory and then run:
+ls | xargs -n 1 kafkacat -q -b localhost:9092 -t {topic name} -P -H TABLE_ID='{table id}' -H SEPARATOR='|' -l
+
+** The basic guide is in 'Kafka & Kaetlyn installation', so refer to that page for the concepts, but kafka-utils.sh is provided for convenience, so use kafka-utils.sh in operation.
+'kafka-utils.sh' is located in the sbin directory of each cluster, so it can be used after selecting a cluster with 'cfc'.
+[C:6][ltdb@d205 ~]$ which kafka-utils.sh
+~/tsr2/cluster_6/tsr2-assembly-1.0.0-SNAPSHOT/sbin/kafka-utils.sh
+
+As shown below, kafka-utils.sh does not run if 'CONSUMER_GROUP_ID' is not set,
+[C:6][ltdb@d205 ~]$ kafka-utils.sh help
+Please, set $CONSUMER_GROUP_ID first.
+
+so you have to open 'kafka-utils.sh' and modify it as shown below.
+#!/bin/bash
+
+CONSUMER_GROUP_ID='nvkvs_redis_connector' // needs to be modified
+KAFKA_SERVER=localhost:9092
+ZOOKEEPER_SERVER=localhost:2181...
+
+You can check the available commands with 'help'.
+[C:6][ltdb@d205 ~/kafka/config]$ kafka-utils.sh help
+kafka-utils.sh offset-check
+kafka-utils.sh offset-monitor
+kafka-utils.sh offset-earliest topic_name
+kafka-utils.sh offset-latest topic_name
+kafka-utils.sh offset-move topic_name 10000
+kafka-utils.sh error-monitor error_topic_name
+kafka-utils.sh consumer-list
+kafka-utils.sh topic-check topic_name
+kafka-utils.sh topic-create topic_name 10
+kafka-utils.sh topic-delete topic_name
+kafka-utils.sh topic-config-check topic_name
+kafka-utils.sh topic-config-set topic_name config_name config_value
+kafka-utils.sh topic-config-remove topic_name config_name
+kafka-utils.sh topic-list
+kafka-utils.sh message-earliest topic_name
+kafka-utils.sh message-latest topic_name
+
+If a command requires arguments and is run without them, a usage message like the one below is printed.
+[C:6][ltdb@d205 ~/kafka/config]$ kafka-utils.sh offset-move
+Please, specify topic name & the size of moving offset (ex) kafka-utils.sh offset-move my-topic 100
+[C:6][ltdb@d205 ~/kafka/config]$ kafka-utils.sh topic-create
+Please, specify topic name and its partition count. (ex) kafka-utils.sh topic-create topic-new 10
+[C:6][ltdb@d205 ~/kafka/config]$
+
+Usage examples:
+[C:6][ltdb@d205 ~]$ kafka-utils.sh message-earliest nvkvs3
+20160711055950|ELG|2635055200|34317|5|6091|1|25|0|11|0|100.0|0.0|0|2846|3|33|0|5|0|-1000|0.0|0.0|94932|1027|0|176|35.2|40|0|7818000000|109816071|10|0|6000000.0|164843|2.75|0|2592|6000000|0.04|1288488|1303|1338|0|530|1|88.33|0|721|67948|428|0|1|108|108.0|108|0|0.0|0|0|0|-1000|1|1|100.0|62|39.0|62.9|23.0|37.1|0|0|0|0|29|10|-7022851.0|59998.0|-117.05|-6865443.5|59998.0|-114.43|4|198060.0|59998.0|22.5|3.3|0|1|5.82|3|1.94||0|0|0|0|0|0|0|0|4|0|0|0|15|14|231|140|0|0|0|0|0|0|0|0|4|0|0|0|15|13|174|110|1|0|0|0|0|0|0|0|0|0|0|0|0|0|1|0|0|0|0|0|0|0|1|0|0|0|0|0|0|0|0|0|0.0|0.0|0.0|0.0|0.0|0.0|570.0|0.0|3.0|0.0|0.0|0.0|0.0|2.0|3.0|3.0|0.0|15.73|0.0|0.0|0.0|0.0|0.0|12.0|22.0|68.0|83.0|339.0|205.0|144.0|54.0|38.0|12.0|0.0|0.0|0.0|0.0|0.0|0.0|100.0|50.55|1:22,2:7|1.0|||||1:1,17:1,23:1|13.67|0|0|0.0|0.0|-1000||-1000||-1000|11|2|05
+Processed a total of 1 messages
+
+
+[C:6][ltdb@d205 ~]$ kafka-utils.sh topic-list
+__consumer_offsets
+nvkvs3
+topic-error
+topic_name
+
+
+[C:6][ltdb@d205 ~]$ kafka-utils.sh topic-create ksh 18
+Created topic ksh.
+
+
+[C:6][ltdb@d205 ~]$ kafka-utils.sh topic-check ksh
+Topic:ksh PartitionCount:18 ReplicationFactor:2 Configs:
+ Topic: ksh Partition: 0 Leader: 1 Replicas: 1,3 Isr: 1,3
+ Topic: ksh Partition: 1 Leader: 2 Replicas: 2,1 Isr: 2,1
+ Topic: ksh Partition: 2 Leader: 3 Replicas: 3,2 Isr: 3,2
+ Topic: ksh Partition: 3 Leader: 1 Replicas: 1,2 Isr: 1,2
+ Topic: ksh Partition: 4 Leader: 2 Replicas: 2,3 Isr: 2,3
+ Topic: ksh Partition: 5 Leader: 3 Replicas: 3,1 Isr: 3,1
+ Topic: ksh Partition: 6 Leader: 1 Replicas: 1,3 Isr: 1,3
+ Topic: ksh Partition: 7 Leader: 2 Replicas: 2,1 Isr: 2,1
+ Topic: ksh Partition: 8 Leader: 3 Replicas: 3,2 Isr: 3,2
+ Topic: ksh Partition: 9 Leader: 1 Replicas: 1,2 Isr: 1,2
+ Topic: ksh Partition: 10 Leader: 2 Replicas: 2,3 Isr: 2,3
+ Topic: ksh Partition: 11 Leader: 3 Replicas: 3,1 Isr: 3,1
+ Topic: ksh Partition: 12 Leader: 1 Replicas: 1,3 Isr: 1,3
+ Topic: ksh Partition: 13 Leader: 2 Replicas: 2,1 Isr: 2,1
+ Topic: ksh Partition: 14 Leader: 3 Replicas: 3,2 Isr: 3,2
+ Topic: ksh Partition: 15 Leader: 1 Replicas: 1,2 Isr: 1,2
+ Topic: ksh Partition: 16 Leader: 2 Replicas: 2,3 Isr: 2,3
+ Topic: ksh Partition: 17 Leader: 3 Replicas: 3,1 Isr: 3,1
+
+You can run queries using thriftserver, and metadata can be managed with hive-metastore.
+Each cluster has its own thriftserver, which can be launched on a specific cluster.
+> cfc 6
+> which thriftserver
+~/tsr2/cluster_6/tsr2-assembly-1.0.0-SNAPSHOT/sbin/thriftserver
+
+Before starting it, change the settings via 'thriftserver edit'.
+#!/bin/bash
+###############################################################################
+# Common variables
+SPARK_CONF=${SPARK_CONF:-$SPARK_HOME/conf}
+SPARK_BIN=${SPARK_BIN:-$SPARK_HOME/bin}
+SPARK_SBIN=${SPARK_SBIN:-$SPARK_HOME/sbin}
+SPARK_LOG=${SPARK_LOG:-$SPARK_HOME/logs}
+
+SPARK_METRICS=${SPARK_CONF}/metrics.properties
+SPARK_UI_PORT=${SPARK_UI_PORT:-14050}
+EXECUTERS=12 // needs to be modified
+EXECUTER_CORES=32 // needs to be modified
+
+HIVE_METASTORE_URL=''
+HIVE_HOST=${HIVE_HOST:-localhost}
+HIVE_PORT=${HIVE_PORT:-13000}
+
+COMMON_CLASSPATH=$(find $SR2_LIB -name 'tsr2*' -o -name 'spark-r2*' -o -name '*jedis*' -o -name 'commons*' -o -name 'jdeferred*' \
+-o -name 'geospark*' -o -name 'gt-*' | tr '\n' ':')
+
+###############################################################################
+# Driver
+DRIVER_MEMORY=6g // needs to be modified
+DRIVER_CLASSPATH=$COMMON_CLASSPATH
+
+###############################################################################
+# Execute
+EXECUTOR_MEMORY=2g // needs to be modified
+EXECUTOR_CLASSPATH=$COMMON_CLASSPATH
+
+###############################################################################
+# Thrift Server logs
+EVENT_LOG_ENABLED=false
+EVENT_LOG_DIR=/nvdrive0/thriftserver-event-logs
+EVENT_LOG_ROLLING_DIR=/nvdrive0/thriftserver-event-logs-rolling
+EVENT_LOG_SAVE_MIN=60
+EXTRACTED_EVENT_LOG_SAVE_DAY=5
+SPARK_LOG_SAVE_MIN=2000
+##############
+
+
+thriftserver start
+
+thriftserver stop
+
+> yarn application -list
+20/06/25 17:04:35 INFO client.RMProxy: Connecting to ResourceManager at d205/192.168.111.205:18032
+Total number of applications (application-types: [] and states: [SUBMITTED, ACCEPTED, RUNNING]):1
+ Application-Id Application-Name Application-Type User Queue State Final-State Progress Tracking-URL
+application_1592880218288_0002 ThriftServer_d205_6 SPARK ltdb default RUNNING UNDEFINED 10% http://d205:14050
+
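+To open the JDBC session shown below, you can, as a hedged example, use the beeline client bundled with Spark; 13000 is the HIVE_PORT configured via 'thriftserver edit' above.
+$SPARK_HOME/bin/beeline -u jdbc:hive2://localhost:13000
+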
+0: jdbc:hive2://0.0.0.0:13000> show tables;
++-----------+-----------------------------------------------+--------------+--+
+| database | tableName | isTemporary |
++-----------+-----------------------------------------------+--------------+--+
+| default | aua_adong_cd | false |
+| default | aua_aom_log | false |
+| default | aua_cell_by10sec_sum | false |
+| default | aua_cell_by5min_sum | false |
+| default | aua_cell_cfg_inf | false |
+| default | aua_enb_nsn_ho_log | false |
+| default | aua_enb_nsn_rrc_log | false |
+| default | aua_enb_ss_csl_log | false |
+| default | aua_ra_cscore_area_5min_sum | false |
+| default | aua_ue_rf_sum | false |
+| | aua_aom_log_6fbb17bb9718a46306ec7a9766464813 | true |
++-----------+-----------------------------------------------+--------------+--+
+11 rows selected (0.045 seconds)
+
+0: jdbc:hive2://0.0.0.0:13000> show tblproperties aua_ue_rf_sum;
++---------------------------------------------+-------------------------------------------------------------------------------------------------+--+
+| key | value |
++---------------------------------------------+-------------------------------------------------------------------------------------------------+--+
+| transient_lastDdlTime | 1581872791 |
+| fb.load.kafka.bootstrap.servers | 90.90.200.182:9092,90.90.200.183:9092,90.90.200.184:9092,90.90.200.185:9092,90.90.200.186:9092 |
+| fb.transformation.column.add.IMSI_KEY | ${IMSI_NO}.length() >= 2 && ${IMSI_NO}.substring(0, 2).equals("T1") ? "T1" : "O" |
+| fb.transformation.column.add.IMSI_HASH_KEY | fnvHash(${IMSI_NO}, 5) |
+| fb.load.kafka.producer.compression.type | zstd |
+| fb.transformation.column.add.EVENT_TIME | ${EVT_DTM}.length() < 12 ? "000000000000" : ${EVT_DTM}.substring(0, 11).concat("0") |
+| fb.load.kafka.topic.name | topic-tango-dev |
+| Comment | 단말별 분석 결과 |
+| fb.load.kafka.producer.max.request.size | 1048576 |
++---------------------------------------------+-------------------------------------------------------------------------------------------------+--+
+9 rows selected (0.1 seconds)
+0: jdbc:hive2://0.0.0.0:13000>
+
+0: jdbc:hive2://0.0.0.0:13000> show create table aua_ue_rf_sum;
++-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+--+
+| createtab_stmt |
++-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+--+
+| CREATE TABLE `aua_ue_rf_sum` (`EVT_DTM` STRING, `VEND_ID` STRING, `ADONG_CD` STRING, `ENB_ID` STRING, `CELL_ID` STRING, `ENB_UE_S1AP_ID` STRING, `MME_UE_S1AP_ID` STRING, `IMSI_NO` STRING, `EVT_ID` STRING, `CALL_RESULT_CODE` STRING, `CALL_RESULT_MSG` STRING, `FREQ_TYP_CD` STRING, `CQI` STRING, `TA` STRING, `RSRP` STRING, `RSRQ` STRING, `DL_PACKET_LOSS_SUCC_CNT` STRING, `DL_PACKET_LOSS_LOST_CNT` STRING, `DL_PACKET_LOSS_RATE` STRING, `SINR_PUSCH` STRING, `SINR_PUCCH` STRING, `UE_TX_POWER` STRING, `PHR` STRING, `UL_PACKET_LOSS_SUCC_CNT` STRING, `UL_PACKET_LOSS_LOST_CNT` STRING, `UL_PACKET_LOSS_RATE` STRING, `RRC_LATENCY` STRING, `HO_LATENCY` STRING, `RRE_LATENCY` STRING, `DL_NO_RTP` STRING, `UL_NO_RTP` STRING, `ERAB_LATENCY` STRING, `RRC_ERAB_LATENCY` STRING, `EVENT_TIME` STRING, `IMSI_KEY` STRING, `IMSI_HASH_KEY` STRING, `UE_ENDC_STAGE` STRING)
+USING r2
+OPTIONS (
+`query_result_partition_cnt_limit` '2000000',
+`query_response_timeout` '1200000',
+`query_result_task_row_cnt_limit` '1000000',
+`host` '90.90.200.187',
+`serialization.format` '1',
+`query_result_total_row_cnt_limit` '2147483647',
+`group_size` '10',
+`port` '18600',
+`mode` 'nvkvs',
+`partitions` 'EVENT_TIME ENB_ID IMSI_KEY IMSI_HASH_KEY',
+`second_filter_enabled` 'no',
+`table` '6010'
+)
+|
++-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+--+
+1 row selected (0.027 seconds)
+
+0: jdbc:hive2://0.0.0.0:13000> select event_time, enb_id, vend_id from aua_ue_rf_sum limit 3;
+Error: com.skt.spark.r2.exception.UnsupportedQueryTypeException: at least one partition column should be included in where predicates; caused by 6010 table. You must check to exist partition column(s) of this table or mistype condition of partition column(s) in where clause. : EVENT_TIME,ENB_ID,IMSI_KEY,IMSI_HASH_KEY (state=,code=0)
+
+0: jdbc:hive2://0.0.0.0:13000> select event_time, enb_id, vend_id from aua_ue_rf_sum where event_time='202006250000' limit 3;
++---------------+---------------------------+----------+--+
+| event_time | enb_id | vend_id |
++---------------+---------------------------+----------+--+
+| 202006250000 | +F/Ca+X5UZSATNF0zTR9kA== | ELG |
+| 202006250000 | +F/Ca+X5UZSATNF0zTR9kA== | ELG |
+| 202006250000 | +F/Ca+X5UZSATNF0zTR9kA== | ELG |
++---------------+---------------------------+----------+--+
+3 rows selected (0.149 seconds)
+0: jdbc:hive2://0.0.0.0:13000>
+
+Before starting the Zeppelin tutorial, please make sure that Zeppelin has been configured to add the Lightning DB related jar files.
+Download the tutorial note file as shown below, or import it into Zeppelin by entering its URL.
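+As a hedged example, using the tutorial note URL given in the Zeppelin tutorial section above:
+$ wget https://raw.githubusercontent.com/mnms/tutorials/master/zeppelin-notebook/note.json
+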
+You can update LightningDB by using the 'deploy' command.
+> c 1 // alias of 'cluster use 1'
+> deploy
+(Watch out) Cluster 1 is already deployed. Do you want to deploy again? (y/n) [n]
+y
+
+(1) Select installer
+Select installer
+
+ [ INSTALLER LIST ]
+ (1) lightningdb.release.master.5a6a38.bin
+ (2) lightningdb.trial.master.dbcb9e-dirty.bin
+ (3) lightningdb.trial.master.dbcb9e.bin
+
+Please enter the number, file path or URL of the installer you want to use.
+you can also add a file in list by copy to '$FBPATH/releases/'
+1
+OK, lightningdb.release.master.5a6a38.bin
+
+(2) Restore
+Do you want to restore conf? (y/n)
+y
+
+If the current settings will be reused, type 'y'.
+(3) Check all settings finally
++-----------------+---------------------------------------------------+
+| NAME | VALUE |
++-----------------+---------------------------------------------------+
+| installer | lightningdb.release.master.5a6a38.bin |
+| nodes | nodeA |
+| | nodeB |
+| | nodeC |
+| | nodeD |
+| master ports | 18100 |
+| slave ports | 18150-18151 |
+| ssd count | 3 |
+| redis data path | ~/sata_ssd/ssd_ |
+| redis db path | ~/sata_ssd/ssd_ |
+| flash db path | ~/sata_ssd/ssd_ |
++-----------------+---------------------------------------------------+
+Do you want to proceed with the deploy according to the above information? (y/n)
+y
+Check status of hosts...
++-----------+--------+
+| HOST | STATUS |
++-----------+--------+
+| nodeA | OK |
+| nodeB | OK |
+| nodeC | OK |
+| nodeD | OK |
++-----------+--------+
+Checking for cluster exist...
++------+--------+
+| HOST | STATUS |
++------+--------+
+Backup conf of cluster 1...
+OK, cluster_1_conf_bak_<time-stamp>
+Backup info of cluster 1 at nodeA...
+OK, cluster_1_bak_<time-stamp>
+Backup info of cluster 1 at nodeB...
+OK, cluster_1_bak_<time-stamp>
+Backup info of cluster 1 at nodeC...
+OK, cluster_1_bak_<time-stamp>
+Backup info of cluster 1 at nodeD...
+OK, cluster_1_bak_<time-stamp>
+Transfer installer and execute...
+ - nodeA
+ - nodeB
+ - nodeC
+ - nodeD
+Sync conf...
+Complete to deploy cluster 1.
+Cluster 1 selected.
+
+(4) Restart
+> cluster restart
+
+After the restart, the new version will be applied.
+ + + + + + +In Zeppelin, you can import the Face data
file with the link below.
1. Put csv files into HDFS
+hdfs dfs -mkdir /face_data
+
+hdfs dfs -put /face_data/csv/* /face_data
+
+2. Load face data
+%spark
+import org.apache.spark.sql.functions._
+import org.apache.spark.sql.r2.UDF.R2UDFs
+
+val toFloatArray = udf(R2UDFs.toFloatArray)
+val fnvHash = udf(R2UDFs.fnvHash)
+
+R2UDFs.register()
+val r2Option = Map("table" -> "300",
+ "host" -> "d201",
+ "port" -> "18400",
+ "partitions" -> "nvhash",
+ "mode" -> "nvkvs",
+ "at_least_one_partition_enabled" -> "no")
+
+spark.sqlContext.read.format("csv")
+ .option("sep", "|")
+ .option("header", "false")
+ .option("inferSchema", "true")
+ .load("/face_data/csv/*.csv")
+ .withColumnRenamed("_c0", "group")
+ .withColumnRenamed("_c1", "subgroup")
+ .withColumnRenamed("_c2", "subject")
+ .withColumnRenamed("_c3", "face")
+ .withColumnRenamed("_c4", "raw_feature")
+ .withColumn("feature", toFloatArray(split(col("raw_feature"), ",")))
+ .select("group", "subgroup", "subject", "face", "feature")
+ .withColumn("nvhash", fnvHash(col("face"), functions.lit(30)))
+ .write
+ .format("r2")
+ .options(r2Option)
+ .mode("append")
+ .save()
+
+3. Create table
+%spark
+import org.apache.spark.sql.types._
+import org.apache.spark.sql.r2.UDF.R2UDFs
+
+val fields = "group_id,app_id,subject_id,face_id,feature,nvhash".split(",").map(
+ fieldName => {
+ if ("feature".equals(fieldName)) {
+ StructField(fieldName, ArrayType(FloatType))
+ } else {
+ StructField(fieldName, StringType)
+ }
+ }
+)
+val faceTableSchema = StructType(fields)
+
+
+spark.sqlContext.read.format("r2")
+ .schema(faceTableSchema)
+ .options(r2Option)
+ .load()
+ .createOrReplaceTempView("face_1m")
+
+4. Enable ‘KNN SEARCH PUSHDOWN’ feature
+%sql
+SET spark.r2.knn.pushdown=true
+
+5. KNN search using the cosineDistance UDF of Lightning DB
+%sql
+SELECT group_id, app_id, subject_id, face_id, cosineDistance(feature, toFloatArray(array(0.04074662,0.07717144,-0.01387950,0.01287790,0.04414229,0.03390900,0.03808868,0.03956917,0.00592308,-0.00935156,0.04414903,-0.01830893,-0.01918902,0.00154574,-0.02383651,-0.01054291,0.12655860,0.02075430,0.10315673,0.01371782,0.01522089,0.04304991,0.03376650,0.06703991,0.03827063,-0.00063873,0.02516229,0.07061137,0.08456459,-0.04103030,0.03004974,0.00297395,0.00295535,0.01112351,0.02805021,0.04350155,-0.00448326,0.04780317,0.10815978,-0.01784242,0.03320745,0.02912348,0.00183310,0.05318154,0.00922967,-0.04507693,0.01333585,0.00048346,-0.04612860,0.00427735,0.01232839,-0.00100568,0.03865110,0.01765136,-0.00942758,0.02383475,-0.01068696,0.08959154,0.08527020,0.03379998,-0.03852739,0.00607160,0.01309861,-0.01262910,0.00418265,0.03614477,-0.02971224,0.03703139,0.04333942,-0.03143747,0.06752674,-0.02173617,0.03583429,0.07731125,-0.02637132,-0.00395790,-0.04447101,0.03351297,0.08787052,0.00647665,0.03660145,-0.00640601,-0.01004024,0.00763107,0.04762240,-0.00068071,0.00863653,0.06126453,0.04588475,-0.03891076,0.07472295,0.02470182,0.08828569,0.01660202,0.02419317,0.09363404,0.05495216,0.01202847,0.00120040,-0.02136896,0.03100559,0.07371868,0.00986731,0.03934553,0.01289396,0.04705510,-0.02040175,0.01501585,0.00678832,0.03882410,0.02261387,0.02165552,-0.05097445,0.00240807,-0.04210005,0.00760698,-0.02904095,0.06572093,0.03549200,0.06070529,0.06948626,0.02832109,0.01669887,0.00914011,-0.00024529,-0.00124402,0.06481186,0.08246713,0.07499877,0.13112830,0.01034968,0.04224777,0.01175614,0.07395388,0.04937797,0.01561183,-0.03251049,0.05449009,0.04767901,-0.01149555,-0.02055555,-0.05990825,0.06633005,0.07592525,-0.04504610,0.03348448,0.04178635,0.01327751,0.02208084,0.08780535,-0.00799043,0.02236966,0.01560906,0.01171102,0.00814554,-0.00257578,0.08387835,-0.01018093,-0.02170087,0.03230520,0.00955880,-0.01824543,0.05438962,0.01805668,0.02112979,-0.01372666,-0.01057472,0.05453142,0.03742066,0.05534794,0.00977020,0.01991821,-0.00884413,0.09644359,0.02875310,0.10519003,0.05280351,-0.01918891,0.03197290,0.02722778,0.03450845,0.02669794,0.08618007,0.09387484,0.05103674,-0.01431658,0.00783211,-0.00434245,0.02062620,-0.00611403,0.06696083,0.01333337,-0.00156842,0.04325287,-0.05481976,0.01642864,-0.02679648,-0.00642413,0.03808333,0.06134293,0.06049823,0.03818581,0.03599750,-0.01651556,0.06601544,0.01385061,0.00730943,0.03045858,-0.00200028,0.04009718,0.04393080,-0.02568381,-0.01271287,-0.01860873,0.03669106,0.00154059,-0.04202117,0.07374570,-0.00380450,0.03164477,0.00637422,-0.02361638,0.01918917,0.01680134,0.01346881,0.02424449,-0.00504802,-0.06241146,0.08241857,0.02817723,0.02132487,0.08051144,0.06332499,0.02585857,-0.04057337,0.00279212,-0.00005161,-0.06566417,0.07860317,-0.01276221,0.06822366,-0.00191142,0.08534018,0.06014366,0.07053877,-0.01962799,0.08602677,-0.00817098,0.00302233,-0.10041475,-0.01908947,0.03235617,0.00931559,0.05451865,0.02233902,-0.01173994))) AS distance
+FROM face_1m
+ORDER BY distance DESC
+LIMIT 20
+
+
+
+
+
+
+
+ In Zeppelin, you can import the NYC TAXI Benchmark
file with the link below.
NYC_TAXI_BM_load_and_query.json
+1. Put csv files into HDFS
+%sh
+hdfs dfs -mkdir /nyc_taxi
+
+hdfs dfs -mkdir /nyc_taxi/csv
+
+hdfs dfs -put /nvme_ssd/nyc_taxi/csv_gz/csv1/* /nyc_taxi/csv
+hdfs dfs -put /nvme_ssd/nyc_taxi/csv_gz/csv2/* /nyc_taxi/csv
+hdfs dfs -put /nvme_ssd/nyc_taxi/csv_gz/csv3/* /nyc_taxi/csv
+hdfs dfs -put /nvme_ssd/nyc_taxi/csv_gz/csv4/* /nyc_taxi/csv
+hdfs dfs -put /nvme_ssd/nyc_taxi/csv_gz/csv5/* /nyc_taxi/csv
+hdfs dfs -put /nvme_ssd/nyc_taxi/csv_gz/csv6/* /nyc_taxi/csv
+
+2. Create dataframe and load data
+%spark
+
+import org.apache.spark.sql.types._
+
+val taxiSchema = StructType(Array(
+ StructField("trip_id", IntegerType, true),
+ StructField("vendor_id", StringType, true),
+ StructField("pickup_datetime", TimestampType, true),
+ StructField("dropoff_datetime", TimestampType, true),
+ StructField("store_and_fwd_flag", StringType, true),
+ StructField("rate_code_id", IntegerType, true),
+ StructField("pickup_longitude", DoubleType, true),
+ StructField("pickup_latitude", DoubleType, true),
+ StructField("dropoff_longitude", DoubleType, true),
+ StructField("dropoff_latitude", DoubleType, true),
+ StructField("passenger_count", StringType, true),
+ StructField("trip_distance", DoubleType, true),
+ StructField("fare_amount", DoubleType, true),
+ StructField("extra", DoubleType, true),
+ StructField("mta_tax", DoubleType, true),
+ StructField("tip_amount", DoubleType, true),
+ StructField("tolls_amount", DoubleType, true),
+ StructField("improvement_surcharge", DoubleType, true),
+ StructField("total_amount", DoubleType, true),
+ StructField("payment_type", StringType, true),
+ StructField("trip_type", IntegerType, true),
+ StructField("cab_type", StringType, true),
+ StructField("precipitation", DoubleType, true),
+ StructField("snow_depth", DoubleType, true),
+ StructField("snowfall", DoubleType, true),
+ StructField("max_temperature", IntegerType, true),
+ StructField("min_temperature", IntegerType, true),
+ StructField("average_wind_speed", DoubleType, true),
+ StructField("pickup_nyct2010_gid", IntegerType, true),
+ StructField("pickup_ctlabel", StringType, true),
+ StructField("pickup_borocode", IntegerType, true),
+ StructField("pickup_boroname", StringType, true),
+ StructField("pickup_ct2010", StringType, true),
+ StructField("pickup_boroct2010", StringType, true),
+ StructField("pickup_cdeligibil", StringType, true),
+ StructField("pickup_ntacode", StringType, true),
+ StructField("pickup_ntaname", StringType, true),
+ StructField("pickup_puma", StringType, true),
+ StructField("dropoff_nyct2010_gid", IntegerType, true),
+ StructField("dropoff_ctlabel", StringType, true),
+ StructField("dropoff_borocode", IntegerType, true),
+ StructField("dropoff_boroname", StringType, true),
+ StructField("dropoff_ct2010", IntegerType, true),
+ StructField("dropoff_boroct2010", StringType, true),
+ StructField("dropoff_cdeligibil", StringType, true),
+ StructField("dropoff_ntacode", StringType, true),
+ StructField("dropoff_ntaname", StringType, true),
+ StructField("dropoff_puma", StringType, true)
+ ))
+
+ val taxiDF = spark.read.format("csv")
+ .option("header", "false")
+ .option("delimiter", ",")
+ .option("mode", "FAILFAST")
+ .schema(taxiSchema)
+ .load("/nyc_taxi/csv/*.csv.gz")
+
+
+4. Create temp view for the dataframe
+%spark
+taxiDF.createOrReplaceTempView("trips")
+
+5. Transform the dataframe for Lightning DB
+%spark
+import org.apache.spark.sql.functions._
+val deltaDf = taxiDF
+ .filter($"pickup_datetime".isNotNull && $"passenger_count".isNotNull && $"cab_type".isNotNull)
+ .withColumn("pickup_yyyyMMddhh", from_unixtime(unix_timestamp($"pickup_datetime"), "yyyyMMddhh"))
+ .withColumn("round_trip_distance", round($"trip_distance"))
+
+deltaDf.printSchema()
+
+6. Create a temp view for Lightning DB with the r2 options that use Lightning DB as the data source
+%spark
+val r2Options = Map[String, String]("table" -> "100",
+ "host" -> "192.168.111.35",
+ "port" -> "18800",
+ "partitions" -> "pickup_yyyyMMddhh passenger_count cab_type",
+ "mode" -> "nvkvs",
+ "rowstore" -> "false",
+ "group_size" -> "40",
+ "at_least_one_partition_enabled" -> "no")
+spark.sqlContext.read.format("r2").schema(deltaDf.schema).options(r2Options).load().createOrReplaceTempView("fb_trips")
+
+7. Load data from the dataframe into Lightning DB
+%spark
+deltaDf.write
+ .format("r2")
+ .insertInto("fb_trips")
+
+8. Enable ‘aggregation pushdown’ feature
+%sql
+SET spark.r2.aggregation.pushdown=true
+
+9. Do ‘NYC TAXI Benchmark’
+Q1
+%sql
+SELECT cab_type, count(*) FROM fb_trips GROUP BY cab_type
+
+Q2
+%sql
+SELECT passenger_count,
+ avg(total_amount)
+FROM fb_trips
+GROUP BY passenger_count
+
+Q3
+%sql
+SELECT passenger_count,
+ substring(pickup_yyyyMMddhh, 1, 4),
+ count(*)
+FROM fb_trips
+GROUP BY passenger_count,
+ substring(pickup_yyyyMMddhh, 1, 4)
+
+Q4
+%sql
+SELECT passenger_count,
+ substring(pickup_yyyyMMddhh, 1, 4),
+ round_trip_distance,
+ count(*)
+FROM fb_trips
+GROUP BY 1,
+ 2,
+ 3
+ORDER BY 2,
+ 4 desc
+
+
+
+
+
+
+
+