Lightning DB Docs (docs.lightningdb.io)
Awards and Recognition

2023

SK TECH SUMMIT 2023: Is there no 'AI' that properly understands images? (Building SKT's own Vision Data Asset with a Vision-Language Model)

  • Speaker: Sungho Kim, Jiwon Ryu (SK Telecom)

2022

NVIDIA GTC 22: Vision data warehouse and accelerating the analytics for massive vision data

  • Speaker: Sungho Kim (SK Telecom), Allen Xu (NVIDIA)

2021

NVIDIA GTC 21: Deep-Learning Data-Pipeline Optimization for Network Data Analysis in SK Telecom by Employing Spark Rapids for Custom Data Source

  • Speaker: Dooyoung Hwang (SK Telecom), Thomas Graves (NVIDIA)

2020

Spark AI Summit 2020: Vectorized Deep Learning Acceleration from Preprocessing to Inference and Training on Apache Spark in SK Telecom

  • Speaker: Hongchan Roh (SK Telecom)

2019

Spark AI Summit Europe 2019: Spark AI Usecase in Telco: Network Quality Analysis and Prediction with Geospatial Visualization

  • Speaker: Hongchan Roh, Dooyoung Hwang (SK Telecom)
Build 'LightningDB' (Admin Only)

1. LightningDB Source Code (Private Repository)

$ git clone https://github.com/mnms/LightningDB

2. Build

- v1

  • Branch: release/flashbase_v1.4.3
  • Commands:

$ ./build.sh compile

$ cd nvkvs
$ docker build . -t harbor.k8s.lightningdb/ltdb/nvkvs:v1.4.3
$ docker push harbor.k8s.lightningdb/ltdb/nvkvs:v1.4.3
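The docker push commands in this guide assume you are already authenticated against the Harbor registry; if not, logging in first is typically required (a minimal sketch, credentials and whether login is needed depend on your environment):

$ docker login harbor.k8s.lightningdb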
- v2

  • Branch: release/flashbase_v2.0.0
  • Commands:

$ ./build.sh compile debug

$ cd nvkvs
$ docker build . -t harbor.k8s.lightningdb/ltdb/nvkvs:v2.0.0
$ docker push harbor.k8s.lightningdb/ltdb/nvkvs:v2.0.0
- v2 CXL-CMS

  • Branch: cms-integration
  • Prerequisite (install daxctl):

$ yum install -y kmod-devel rubygem-asciidoctor.noarch iniparser-devel.x86_64 meson.noarch

// json-c (version: json-c-0.14-20200419)
$ git clone https://github.com/json-c/json-c.git
$ cd json-c
$ git checkout json-c-0.14-20200419 -b json-c-0.14-20200419
$ mkdir json-c-build
$ cd json-c-build/
$ cmake ../
$ make -j48

// Required Min Version: v75 (latest version: v78)
$ git clone https://github.com/pmem/ndctl
$ cd ndctl
$ git checkout v75 -b v75
$ meson setup build;
$ meson compile -C build;
$ meson install -C build;
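To confirm that the installed ndctl/daxctl tooling meets the v75 minimum mentioned above, the version can be checked after installation (a minimal sanity check; the exact output depends on the build):

$ ndctl version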
  • Commands:

$ ./build.sh compile debug

// dax-ctl needs to be installed under the image base path
// assuming the compile working directory is "/root/ldb/LightningDB_v2_cxl/nvkvs/debug/output"
// move to the ndctl github compile directory
$ cd ndctl
$ rm -rf build
$ meson -Drootprefix=/root/ldb/LightningDB_v2_cxl/nvkvs/debug/output -Dlibdir=/root/ldb/LightningDB_v2_cxl/nvkvs/debug/output/lib build -Dprefix=/root/ldb/LightningDB_v2_cxl/nvkvs/debug/output
$ meson compile -C build;
$ meson install -C build;

$ cd nvkvs
$ docker build . -t harbor.k8s.lightningdb/ltdb/nvkvs:v2-cms-integration
$ docker push harbor.k8s.lightningdb/ltdb/nvkvs:v2-cms-integration
Tip

How to use maximum cores to compile (e.g. max cpu core: 56)

In 'build.sh', use cmake --build . --target install -- -j56 and mvn clean install -DskipTests -P $RELEASE_MODE $MAVEN_OPTS -T 56
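A minimal sketch of how those two lines might look inside build.sh, assuming the script drives a CMake build step followed by a Maven build step (only RELEASE_MODE and MAVEN_OPTS come from the original; the rest is illustrative):

# inside build.sh (illustrative excerpt)
cmake --build . --target install -- -j56
mvn clean install -DskipTests -P $RELEASE_MODE $MAVEN_OPTS -T 56

If the core count differs per machine, -j"$(nproc)" and -T "$(nproc)" can be used instead of a hard-coded 56.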

Build 'ltdb-http API Server' (Admin Only)

1. ltdb-http Source Code (Private Repository)

$ git clone https://github.com/mnms/ltdb-http

2. Build

- v1

  • Branch: develop
  • Commands:

$ mvn clean package -DskipTests -P release-k8s,dist-k8s,tgz -Dsite-spec=k8s -Dk8s.namespace=metavision
$ cd target-k8s
$ tar xzvf ltdb-http-1.0-k8s-xxx_xxx.tar.gz
$ cd ltdb-http
$ docker build . -t harbor.k8s.lightningdb/ltdb/ltdb-http:develop
$ docker push harbor.k8s.lightningdb/ltdb/ltdb-http:develop
- v2 / v2 CXL-CMS

  • Branch: develop-v2
  • Commands:

$ mvn clean package -DskipTests -P release-k8s,dist-k8s,tgz -Dsite-spec=k8s -Dk8s.namespace=metavision
$ cd target-k8s
$ tar xzvf ltdb-http-1.0-k8s-xxx_xxx.tar.gz
$ cd ltdb-http
$ docker build . -t harbor.k8s.lightningdb/ltdb/ltdb-http:develop-v2
$ docker push harbor.k8s.lightningdb/ltdb/ltdb-http:develop-v2
Build 'Thunderquery API Server' (Admin Only)

1. Thunderquery Source Code (Private Repository)

$ git clone https://github.com/mnms/thunderquery_api
$ git clone https://github.com/mnms/thunderquery-cli

2. Build

  • Branch: develop
  • Prerequisite (install musl-gcc):

$ yum install -y kmod-devel rubygem-asciidoctor.noarch iniparser-devel.x86_64 meson.noarch

$ vi /etc/yum.repos.d/cert-forensics-tools.repo

[cert-forensics-tools]
name=Cert Forensics Tools Repository
baseurl=https://forensics.cert.org/centos/cert/8/x86_64/
enabled=1
gpgcheck=1
gpgkey=https://forensics.cert.org/forensics.asc

$ yum clean all
$ yum makecache
$ yum install musl-gcc.x86_64
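To confirm the musl wrapper is available on PATH before building (a minimal check, not required by the build itself):

$ which musl-gcc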
  • Register the public key to GitHub:

$ cat ~/.ssh/id_rsa.pub
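If ~/.ssh/id_rsa.pub does not exist yet, a key pair can be generated first (a minimal sketch; the key type, size, and comment are placeholders, not requirements of the build):

$ ssh-keygen -t rsa -b 4096 -C "your_email@example.com"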
  • Command:

$ vi ~/.cargo/config.toml

[net]
git-fetch-with-cli = true

$ cd thunderquery_api
$ cargo install --path . --target=x86_64-unknown-linux-musl
$ cd ../thunderquery-cli
$ cargo install --path . --target=x86_64-unknown-linux-musl
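The cargo install commands above build against the x86_64-unknown-linux-musl target; if that target is not installed in the local toolchain yet (assuming rustup manages the toolchain), it can be added first:

$ rustup target add x86_64-unknown-linux-musl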
$ cd thunderquery_api

## copy the thunderquery-cli binary into the api directory ##
$ cp ../thunderquery-cli/target/x86_64-unknown-linux-musl/release/thunderquery-cli target/x86_64-unknown-linux-musl/release

$ docker build . -t harbor.k8s.lightningdb/ltdb/thunderquery_api:develop
$ docker push harbor.k8s.lightningdb/ltdb/thunderquery_api:develop
Redis3 cli (LightningDB v1.x)
1. ping

You can use the ping command to check the status of the nodes.

Options

  • All nodes
    • cli ping --all
  • A single node
    • cli ping {hostname} {port}

Examples

matthew@lightningdb:21> cli ping --all
alive redis 12/12

matthew@lightningdb:21> cli ping myServer 20101
PONG

2. config

You can read or write the configuration values of the current cluster.

Options

  • Read
    • All nodes
      • cli config get {feature name} --all
    • A single node
      • cli config get {feature name} -h {hostname} -p {port}
  • Write
    • All nodes
      • cli config set {feature name} {value} --all
    • A single node
      • cli config set {feature name} {value} -h {hostname} -p {port}

Examples

  • Read and write the configuration value of all nodes.
matthew@lightningdb:21> cli config get maxmemory --all
+--------+----------------------+--------+
| TYPE   | ADDR                 | RESULT |
+--------+----------------------+--------+
| Master | 192.168.111.41:20100 | 300mb  |
| Master | 192.168.111.41:20101 | 300mb  |
| Master | 192.168.111.41:20102 | 300mb  |
| Master | 192.168.111.44:20100 | 300mb  |
| Master | 192.168.111.44:20101 | 300mb  |
| Master | 192.168.111.44:20102 | 300mb  |
| Slave  | 192.168.111.41:20150 | 300mb  |
| Slave  | 192.168.111.41:20151 | 300mb  |
| Slave  | 192.168.111.41:20152 | 300mb  |
| Slave  | 192.168.111.44:20150 | 300mb  |
| Slave  | 192.168.111.44:20151 | 300mb  |
| Slave  | 192.168.111.44:20152 | 300mb  |
+--------+----------------------+--------+
matthew@lightningdb:21> cli config set maxmemory 500mb --all
success 12/12
matthew@lightningdb:21> cli config get maxmemory --all
+--------+----------------------+--------+
| TYPE   | ADDR                 | RESULT |
+--------+----------------------+--------+
| Master | 192.168.111.41:20100 | 500mb  |
| Master | 192.168.111.41:20101 | 500mb  |
| Master | 192.168.111.41:20102 | 500mb  |
| Master | 192.168.111.44:20100 | 500mb  |
| Master | 192.168.111.44:20101 | 500mb  |
| Master | 192.168.111.44:20102 | 500mb  |
| Slave  | 192.168.111.41:20150 | 500mb  |
| Slave  | 192.168.111.41:20151 | 500mb  |
| Slave  | 192.168.111.41:20152 | 500mb  |
| Slave  | 192.168.111.44:20150 | 500mb  |
| Slave  | 192.168.111.44:20151 | 500mb  |
| Slave  | 192.168.111.44:20152 | 500mb  |
+--------+----------------------+--------+

  • Read and write the configuration value of a single node.

matthew@lightningdb:21> cli config get maxmemory -h myServer -p 20101
500mb
matthew@lightningdb:21> cli config set maxmemory 300mb -h myServer -p 20101
OK
matthew@lightningdb:21> cli config get maxmemory -h myServer -p 20101
300mb
matthew@lightningdb:21>
3. cluster info

You can get the information and stats of the current cluster.

matthew@lightningdb:21> cli cluster info
cluster_state:ok
cluster_slots_assigned:16384
cluster_slots_ok:16384
cluster_slots_pfail:0
cluster_slots_fail:0
cluster_known_nodes:12
cluster_size:6
cluster_current_epoch:14
cluster_my_epoch:6
cluster_stats_messages_ping_sent:953859
cluster_stats_messages_pong_sent:917798
cluster_stats_messages_meet_sent:10
cluster_stats_messages_sent:1871667
cluster_stats_messages_ping_received:917795
cluster_stats_messages_pong_received:951370
cluster_stats_messages_meet_received:3
cluster_stats_messages_received:1869168

4. cluster nodes

You can get the distribution and status of each node.
matthew@lightningdb:21> cli cluster nodes
+4b8fe9d135670daabe19437e3b840b1c770ffa2f 192.168.111.44:20151 slave 985a2215d2acb3f1612751a13e0d7466d874cfe5 0 1604891127367 10 connected
+4dd5dff5008ccd89cf18faef736fe6492eb34d05 192.168.111.41:20152 slave 9bff873f9f5f84cd3b78288524230b5cd1c6190f 0 1604891128000 8 connected
+15b3c06c1edeb5d2eeb6c0f35c9f27cf616acd11 192.168.111.44:20101 myself,slave 4b6bc980b33dd1eecc87babfb5762eda9e7921e7 0 1604891118000 13 connected
+8a800fbf3518e1a0e6b332516455ef4aa6bb3be9 192.168.111.41:20100 master - 0 1604891130372 1 connected 0-2730
+9bff873f9f5f84cd3b78288524230b5cd1c6190f 192.168.111.44:20102 master - 0 1604891126000 6 connected 8193-10923
+60f88a9db445997112cf8947931988152767878f 192.168.111.44:20152 slave 974c0540741d89c7569b63345faa852361043e8b 0 1604891122000 11 connected
+985a2215d2acb3f1612751a13e0d7466d874cfe5 192.168.111.41:20101 master - 0 1604891125365 5 connected 2731-5461
+85de73ca2aa668a79fe5636ec74e68dee6f9b36a 192.168.111.44:20100 master - 0 1604891129371 4 connected 13654-16383
+974c0540741d89c7569b63345faa852361043e8b 192.168.111.41:20102 master - 0 1604891124363 2 connected 5462-8192
+9c6aef212b6d68d2a0298c1902629e1fdc95f943 192.168.111.41:20150 slave 85de73ca2aa668a79fe5636ec74e68dee6f9b36a 0 1604891128370 4 connected
+474303b3b9e9f7b84b157ecf52ce11e153a28716 192.168.111.44:20150 slave 8a800fbf3518e1a0e6b332516455ef4aa6bb3be9 0 1604891126366 13 connected
+4b6bc980b33dd1eecc87babfb5762eda9e7921e7 192.168.111.41:20151 master - 0 1604891131375 14 connected 10924-13653
5. cluster slots

You can get the slot information.

matthew@lightningdb:21> cli cluster slots
+-------+-------+----------------+--------+----------------+----------+
| start | end   | m_ip           | m_port | s_ip_0         | s_port_0 |
+-------+-------+----------------+--------+----------------+----------+
| 0     | 2730  | 192.168.111.41 | 20100  | 192.168.111.44 | 20150    |
| 2731  | 5461  | 192.168.111.41 | 20101  | 192.168.111.44 | 20151    |
| 5462  | 8192  | 192.168.111.41 | 20102  | 192.168.111.44 | 20152    |
| 8193  | 10923 | 192.168.111.44 | 20102  | 192.168.111.41 | 20152    |
| 10924 | 13653 | 192.168.111.41 | 20151  | 192.168.111.44 | 20101    |
| 13654 | 16383 | 192.168.111.44 | 20100  | 192.168.111.41 | 20150    |
+-------+-------+----------------+--------+----------------+----------+
Redis5 cli (LightningDB v2.x)
Note

By default, all of the features provided in LightningDB v1.x are supported. This page only covers the commands that have been added or changed.
1. createTable

  • Command
    • "TABLE.META.WRITE" "createTable" "{catalog name}.{namespace name}.{table name}" "schema binary"
    • The schema binary is a serialized Arrow schema (arrow::schema in the help output below), so it is normally produced programmatically rather than typed by hand.
  • Examples
127.0.0.1:7389> help "TABLE.META.WRITE" "createTable"
+
+  TABLE.META.WRITE createTable catalog.namespace.table arrow::schema
+  summary: Create a new table
+  since: 2.0.0
+  group: table.meta
+
+127.0.0.1:7389> "TABLE.META.WRITE" "createTable" "cat_1.test.table" "\x10\x00\x00\x00\x00\x00\n\x00\x0e\x00\x06\x00\r\x00\b\x00\n\x00\x00\x00\x00\x00\x04\x00\x10\x00\x00\x00\x00\x01\n\x00\x0c\x00\x00\x00\b\x00\x04\x00\n\x00\x00\x00\b\x00\x00\x00\xc4\x01\x00\x00\t\x00\x00\x00\x80\x01\x00\x00D\x01\x00\x00\x18\x01\x00\x00\xec\x00\x00\x00\xc0\x00\x00\x00\x98\x00\x00\x00h\x00\x00\x00@\x00\x00\x00\x04\x00\x00\x00\xac\xfe\xff\xff\b\x00\x00\x00\x18\x00\x00\x00\x0e\x00\x00\x00127.0.0.1:7389\x00\x00\x13\x00\x00\x00properties.location\x00\xe4\xfe\xff\xff\b\x00\x00\x00\x0c\x00\x00\x00\x03\x00\x00\x00job\x00\x0b\x00\x00\x00partition.1\x00\b\xff\xff\xff\b\x00\x00\x00\x0c\x00\x00\x00\x01\x00\x00\x001\x00\x00\x00\x10\x00\x00\x00internal.version\x00\x00\x00\x004\xff\xff\xff\b\x00\x00\x00\x0c\x00\x00\x00\x03\x00\x00\x00age\x00\x0b\x00\x00\x00partition.0\x00X\xff\xff\xff\b\x00\x00\x00\x0c\x00\x00\x00\x01\x00\x00\x002\x00\x00\x00\x0e\x00\x00\x00partition.size\x00\x00\x80\xff\xff\xff\b\x00\x00\x00\x0c\x00\x00\x00\x03\x00\x00\x00512\x00\x0c\x00\x00\x00cva.capacity\x00\x00\x00\x00\xa8\xff\xff\xff\b\x00\x00\x00\x0c\x00\x00\x00\x02\x00\x00\x0024\x00\x00\x0e\x00\x00\x00properties.ttl\x00\x00\xd0\xff\xff\xff\b\x00\x00\x00\x10\x00\x00\x00\x04\x00\x00\x002560\x00\x00\x00\x00\x11\x00\x00\x00rowgroup.capacity\x00\x00\x00\b\x00\x0c\x00\b\x00\x04\x00\b\x00\x00\x00\b\x00\x00\x00\x18\x00\x00\x00\x0e\x00\x00\x00127.0.0.1:7379\x00\x00\x14\x00\x00\x00properties.metastore\x00\x00\x00\x00\x03\x00\x00\x00\x88\x00\x00\x004\x00\x00\x00\x04\x00\x00\x00\x96\xff\xff\xff\x14\x00\x00\x00\x14\x00\x00\x00\x14\x00\x00\x00\x00\x00\x05\x01\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x84\xff\xff\xff\x03\x00\x00\x00job\x00\xc2\xff\xff\xff\x14\x00\x00\x00\x14\x00\x00\x00\x1c\x00\x00\x00\x00\x00\x02\x01 \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\b\x00\x0c\x00\b\x00\a\x00\b\x00\x00\x00\x00\x00\x00\x01 \x00\x00\x00\x03\x00\x00\x00age\x00\x00\x00\x12\x00\x18\x00\x14\x00\x13\x00\x12\x00\x0c\x00\x00\x00\b\x00\x04\x00\x12\x00\x00\x00\x14\x00\x00\x00\x14\x00\x00\x00\x18\x00\x00\x00\x00\x00\x05\x01\x14\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x04\x00\x04\x00\x00\x00\x04\x00\x00\x00name\x00\x00\x00\x00"
2. truncateTable

  • Command
    • "TABLE.META.WRITE" "truncateTable" "{catalog name}.{namespace name}.{table name}"
  • Examples
127.0.0.1:7389> help "TABLE.META.WRITE" "truncateTable"
+
+  TABLE.META.WRITE truncateTable catalog.namespace.table
+  summary: Truncate the table(Remove all data in the table)
+  since: 2.0.0
+  group: table.meta
+
+127.0.0.1:7389>
+127.0.0.1:7389> TABLE.DATA.READ partitions "cat_1.test.table" "*"
+ 1) "21\x1eSales Manager"
+ 2) "22\x1eTutor"
+ 3) "23\x1eBanker"
+ 4) "23\x1eProfessor"
+ 5) "23\x1eSales Manager"
+ 6) "24\x1eStudent"
+ 7) "26\x1eStudent"
+ 8) "27\x1eSales Manager"
+ 9) "29\x1eBanker"
+10) "29\x1eProfessor"
+11) "32\x1eProfessor"
+12) "32\x1eSales Manager"
+13) "33\x1eProfessor"
+14) "36\x1eProfessor"
+15) "41\x1eBanker"
+16) "43\x1eSales Manager"
+17) "45\x1eBanker"
+18) "47\x1eBanker"
+19) "48\x1eCEO"
+127.0.0.1:7389> TABLE.META.WRITE truncateTable "cat_1.test.table"
+"OK"
+127.0.0.1:7389> TABLE.DATA.READ partitions "cat_1.test.table" "*"
+(empty list or set)
3. dropTable

  • Command
    • "TABLE.META.WRITE" "dropTable" "{catalog name}.{namespace name}.{table name}"
  • Examples
127.0.0.1:7389> help "TABLE.META.WRITE" "dropTable"
+
+  TABLE.META.WRITE dropTable catalog.namespace.table
+  summary: Drop the table(Remove all data and the schema)
+  since: 2.0.0
+  group: table.meta
+
+127.0.0.1:7389>
+
+127.0.0.1:7389> TABLE.META.READ showTables
+1) "cat_1.test.table"
+2) "version: 1"
+127.0.0.1:7389> TABLE.META.WRITE dropTable "cat_1.test.table"
+"OK"
+127.0.0.1:7389> TABLE.META.READ showTables
+(empty list or set)
4. dropAllTables

  • Command
    • "TABLE.META.WRITE" "dropAllTables"
  • Examples
127.0.0.1:7389> help "TABLE.META.WRITE" "dropAllTables"
+
+  TABLE.META.WRITE dropAllTables -
+  summary: Drop all tables
+  since: 2.0.0
+  group: table.meta
+
+127.0.0.1:7389>
+127.0.0.1:7389> TABLE.META.READ showTables
+1) "cat_1.test.table"
+2) "version: 1"
+127.0.0.1:7389> TABLE.META.WRITE dropAllTables
+1 tables are deleted.
5. setTableTtl

  • Command
    • "TABLE.META.WRITE" "setTableTtl" "{catalog name}.{namespace name}.{table name}" "{ttl time(unit: msec)}"
  • Example
127.0.0.1:7389> help "TABLE.META.WRITE" "seTtableTtl"
+
+  TABLE.META.WRITE setTableTtl catalog.namespace.table ttl(msec)
+  summary: Set the ttl of the table
+  since: 2.0.0
+  group: table.meta
+
+127.0.0.1:7389> TABLE.META.WRITE setTableTtl "cat_1.test.table" 30000
+OK
6. showTables

  • Command
    • "TABLE.META.READ" "showTables"
  • Examples
127.0.0.1:7389> help TABLE.META.READ showTables
+
+  TABLE.META.READ showTables -
+  summary: Get the list of tables with their own version
+  since: 2.0.0
+  group: table.meta
+
+127.0.0.1:7389>
+127.0.0.1:7389> TABLE.META.READ showTables
+1) "cat_1.test.table"
+2) "version: 1"
7. describeTable

  • Command
    • "TABLE.META.READ" "describeTable" "{catalog name}.{namespace name}.{table name}"
  • Examples
127.0.0.1:7389> help TABLE.META.READ describeTables
+
+  TABLE.META.READ describeTables catalog.namespace.table
+  summary: Get all columns and partitions of the table
+  since: 2.0.0
+  group: table.meta
+
+127.0.0.1:7389>
+
+127.0.0.1:7389> TABLE.META.READ showTables
+1) "cat_1.test.table"
+2) "version: 1"
+
+127.0.0.1:7389> TABLE.META.READ describeTables "cat_1.test.table"
+1) "name: string"
+2) "age: int32"
+3) "job: string"
+4) "[ partitions: age job ]"
8. getTableTtl

  • Command
    • "TABLE.META.READ" "getTableTtl" "{catalog name}.{namespace name}.{table name}"
    • Wildcard patterns such as *, cat_1.*, and *.network_table are accepted, as the examples show.
  • Examples
127.0.0.1:7389> help TABLE.META.READ getTableTtl
+
+  TABLE.META.READ getTableTtl catalog.namespace.table
+  summary: Get the ttl of the table
+  since: 2.0.0
+  group: table.meta
+
+127.0.0.1:7389> TABLE.META.READ getTableTtl *
+1) "cat_1.test.network_table"
+2) "86400000"
+3) "cat_1.test.table"
+4) "86400000"
+127.0.0.1:7389> TABLE.META.READ getTableTtl cat_1.*
+1) "cat_1.test.network_table"
+2) "86400000"
+3) "cat_1.test.table"
+4) "86400000"
+127.0.0.1:7389> TABLE.META.READ getTableTtl *.network_table
+1) "cat_1.test.network_table"
+2) "86400000"
+127.0.0.1:7389> TABLE.META.READ getTableTtl cat_1.test.network_table
+1) "cat_1.test.network_table"
+2) "86400000"
+127.0.0.1:7389>
9. getPartitionTtl

  • Command
    • "TABLE.META.READ" "getPartitionTtl" "{catalog name}.{namespace name}.{table name}" "partition string with regular expression"
  • Examples
127.0.0.1:7389> help TABLE.META.READ getPartitionTtl
+
+  TABLE.META.READ getPartitionTtl partition-string
+  summary: Get the ttl of the partition in the table
+  since: 2.0.0
+  group: table.meta
+
+127.0.0.1:7389> TABLE.META.READ getPartitionTtl "cat_1.test.table" "*"
+ 1) "21\x1eSales Manager"
+ 2) "86350123"
+ 3) "22\x1eTutor"
+ 4) "86350139"
+ 5) "23\x1eBanker"
+ 6) "86350126"
+ 7) "23\x1eProfessor"
+ 8) "86350125"
+ 9) "23\x1eSales Manager"
+10) "86350137"
+11) "24\x1eStudent"
+12) "86350121"
+13) "26\x1eStudent"
+14) "86350124"
+15) "27\x1eSales Manager"
+16) "86350132"
+17) "29\x1eBanker"
+18) "86350124"
+19) "29\x1eProfessor"
+20) "86350125"
+21) "32\x1eProfessor"
+22) "86350127"
+23) "32\x1eSales Manager"
+24) "86350123"
+25) "33\x1eProfessor"
+26) "86350120"
+27) "36\x1eProfessor"
+28) "86350134"
+29) "40\x1eBanker"
+30) "86350119"
+31) "41\x1eBanker"
+32) "86350120"
+33) "43\x1eSales Manager"
+34) "86350133"
+35) "45\x1eBanker"
+36) "86350128"
+37) "47\x1eBanker"
+38) "86350124"
+39) "48\x1eCEO"
+40) "86350138"
+127.0.0.1:7389> TABLE.META.READ getPartitionTtl "cat_1.test.table" "23*"
+1) "23\x1eBanker"
+2) "86343642"
+3) "23\x1eProfessor"
+4) "86343641"
+5) "23\x1eSales Manager"
+6) "86343653"
+127.0.0.1:7389> TABLE.META.READ getPartitionTtl "cat_1.test.table" "*CEO"
+1) "48\x1eCEO"
+2) "86336153"
+127.0.0.1:7389> TABLE.META.READ getPartitionTtl "cat_1.test.table" "45\x1eBanker"
+1) "45\x1eBanker"
+2) "86324848"
+127.0.0.1:7389>
+
+
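The partition strings above join the partition column values (here age and job) with the ASCII record separator \x1e. A minimal redis-py sketch under that assumption:

import redis

SEP = "\x1e"
r = redis.Redis(host="127.0.0.1", port=7389, decode_responses=True)

# All partitions, then only the partitions whose age is 23.
print(r.execute_command("TABLE.META.READ", "getPartitionTtl", "cat_1.test.table", "*"))
print(r.execute_command("TABLE.META.READ", "getPartitionTtl", "cat_1.test.table", "23*"))

# An exact partition key: age=45, job=Banker -> "45\x1eBanker".
exact = SEP.join(["45", "Banker"])
print(r.execute_command("TABLE.META.READ", "getPartitionTtl", "cat_1.test.table", exact))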

10. insert

  • Command
    • "TABLE.DATA.WRITE" "Insert" "{catalog name}.{namespace name}.{table name}" "table version" "partition string" "binaries... ..."
  • Examples

127.0.0.1:7389> help "TABLE.DATA.WRITE" "Insert"
+
+  TABLE.DATA.WRITE insert catalog.namespace.table table-version partition-string data
+  summary: Insert a new data(row)
+  since: 2.0.0
+  group: table.data
+
+1636425657.602951 [0 127.0.0.1:53881] "TABLE.DATA.WRITE" "Insert" "cat_1.test.table" "1" "40\x1eBanker" "Jeannie" "(\x00\x00\x00" "Banker"
+1636425657.604043 [0 127.0.0.1:53881] "TABLE.DATA.WRITE" "Insert" "cat_1.test.table" "1" "33\x1eProfessor" "Ardith" "!\x00\x00\x00" "Professor"
+1636425657.604529 [0 127.0.0.1:53881] "TABLE.DATA.WRITE" "Insert" "cat_1.test.table" "1" "41\x1eBanker" "Elena" ")\x00\x00\x00" "Banker"
+1636425657.605351 [0 127.0.0.1:53881] "TABLE.DATA.WRITE" "Insert" "cat_1.test.table" "1" "24\x1eStudent" "Corliss" "\x18\x00\x00\x00" "Student"
+1636425657.607351 [0 127.0.0.1:53881] "TABLE.DATA.WRITE" "Insert" "cat_1.test.table" "1" "41\x1eBanker" "Kiyoko" ")\x00\x00\x00" "Banker"
+1636425657.608057 [0 127.0.0.1:53881] "TABLE.DATA.WRITE" "Insert" "cat_1.test.table" "1" "21\x1eSales Manager" "Hilton" "\x15\x00\x00\x00" "Sales Manager"
+1636425657.608455 [0 127.0.0.1:53881] "TABLE.DATA.WRITE" "Insert" "cat_1.test.table" "1" "32\x1eSales Manager" "Becky" " \x00\x00\x00" "Sales Manager"
+1636425657.609218 [0 127.0.0.1:53881] "TABLE.DATA.WRITE" "Insert" "cat_1.test.table" "1" "29\x1eBanker" "Wendie" "\x1d\x00\x00\x00" "Banker"
+1636425657.609940 [0 127.0.0.1:53881] "TABLE.DATA.WRITE" "Insert" "cat_1.test.table" "1" "26\x1eStudent" "Carolina" "\x1a\x00\x00\x00" "Student"
+1636425657.610284 [0 127.0.0.1:53881] "TABLE.DATA.WRITE" "Insert" "cat_1.test.table" "1" "47\x1eBanker" "Laquita" "/\x00\x00\x00" "Banker"
+1636425657.610638 [0 127.0.0.1:53881] "TABLE.DATA.WRITE" "Insert" "cat_1.test.table" "1" "23\x1eProfessor" "Stephani" "\x17\x00\x00\x00" "Professor"
+1636425657.610964 [0 127.0.0.1:53881] "TABLE.DATA.WRITE" "Insert" "cat_1.test.table" "1" "29\x1eProfessor" "Emile" "\x1d\x00\x00\x00" "Professor"
+1636425657.612257 [0 127.0.0.1:53881] "TABLE.DATA.WRITE" "Insert" "cat_1.test.table" "1" "23\x1eBanker" "Cherri" "\x17\x00\x00\x00" "Banker"
+1636425657.612630 [0 127.0.0.1:53881] "TABLE.DATA.WRITE" "Insert" "cat_1.test.table" "1" "47\x1eBanker" "Raleigh" "/\x00\x00\x00" "Banker"
+1636425657.612943 [0 127.0.0.1:53881] "TABLE.DATA.WRITE" "Insert" "cat_1.test.table" "1" "32\x1eProfessor" "Hollis" " \x00\x00\x00" "Professor"
+1636425657.614136 [0 127.0.0.1:53881] "TABLE.DATA.WRITE" "Insert" "cat_1.test.table" "1" "45\x1eBanker" "Brigette" "-\x00\x00\x00" "Banker"
+1636425657.615558 [0 127.0.0.1:53881] "TABLE.DATA.WRITE" "Insert" "cat_1.test.table" "1" "21\x1eSales Manager" "Damian" "\x15\x00\x00\x00" "Sales Manager"
+1636425657.617321 [0 127.0.0.1:53881] "TABLE.DATA.WRITE" "Insert" "cat_1.test.table" "1" "27\x1eSales Manager" "Star" "\x1b\x00\x00\x00" "Sales Manager"
+1636425657.618819 [0 127.0.0.1:53881] "TABLE.DATA.WRITE" "Insert" "cat_1.test.table" "1" "43\x1eSales Manager" "Elba" "+\x00\x00\x00" "Sales Manager"
+1636425657.619621 [0 127.0.0.1:53881] "TABLE.DATA.WRITE" "Insert" "cat_1.test.table" "1" "36\x1eProfessor" "Lourie" "$\x00\x00\x00" "Professor"
+1636425657.622977 [0 127.0.0.1:53881] "TABLE.DATA.WRITE" "Insert" "cat_1.test.table" "1" "23\x1eSales Manager" "\xea\xb0\x80\xeb\x82\x98\xeb\x82\x98\xeb\x82\x98\xea\xb0\x80\xeb\x82\x98\xeb\x82\x98" "\x17\x00\x00\x00" "Sales Manager"
+1636425657.623555 [0 127.0.0.1:53881] "TABLE.DATA.WRITE" "Insert" "cat_1.test.table" "1" "48\x1eCEO" "Elon" "0\x00\x00\x00" "CEO"
+1636425657.624359 [0 127.0.0.1:53881] "TABLE.DATA.WRITE" "Insert" "cat_1.test.table" "1" "22\x1eTutor" "Kijung" "\x16\x00\x00\x00" "Tutor"
+
+
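The MONITOR lines above suggest that the int32 column (age) is sent as 4 little-endian bytes ("(\x00\x00\x00" is 0x28 = 40) and that the partition string joins the partition column values with \x1e. A minimal redis-py sketch of one such Insert, under those assumptions:

import struct
import redis

SEP = b"\x1e"
r = redis.Redis(host="127.0.0.1", port=7389)

name, age, job = b"Jeannie", 40, b"Banker"
partition = SEP.join([str(age).encode(), job])  # b"40\x1eBanker"
age_bytes = struct.pack("<i", age)              # b"(\x00\x00\x00", little-endian int32

r.execute_command("TABLE.DATA.WRITE", "Insert",
                  "cat_1.test.table", "1",      # table name and table version
                  partition, name, age_bytes, job)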

11. partitions

+

a. Query with a pattern

  • Command
    • "TABLE.DATA.READ" "partitions" "{catalog name}.{namespace name}.{table name}" "pattern (normally '*')"
  • Examples

+
127.0.0.1:7389> help TABLE.DATA.READ partitions
+
+  TABLE.DATA.READ partitions catalog.namespace.table pattern partition-filter(optional)
+  summary: Get the list of partitions with the pattern and filter
+  since: 2.0.0
+  group: table.data
+
+127.0.0.1:7389>
+127.0.0.1:7389> TABLE.DATA.READ partitions "cat_1.test.table" "*"
+ 1) "21\x1eSales Manager"
+ 2) "22\x1eTutor"
+ 3) "23\x1eBanker"
+ 4) "23\x1eProfessor"
+ 5) "23\x1eSales Manager"
+ 6) "24\x1eStudent"
+ 7) "26\x1eStudent"
+ 8) "27\x1eSales Manager"
+ 9) "29\x1eBanker"
+10) "29\x1eProfessor"
+11) "32\x1eProfessor"
+12) "32\x1eSales Manager"
+13) "33\x1eProfessor"
+14) "36\x1eProfessor"
+15) "40\x1eBanker"
+16) "41\x1eBanker"
+17) "43\x1eSales Manager"
+18) "45\x1eBanker"
+19) "47\x1eBanker"
+20) "48\x1eCEO"
+127.0.0.1:7389> TABLE.DATA.READ partitions "cat_1.test.table" "29*"
+1) "29\x1eBanker"
+2) "29\x1eProfessor"
+127.0.0.1:7389> TABLE.DATA.READ partitions "cat_1.test.table" "*Professor"
+1) "23\x1eProfessor"
+2) "29\x1eProfessor"
+3) "32\x1eProfessor"
+4) "33\x1eProfessor"
+5) "36\x1eProfessor"
+
+

b. Query with a pattern and filters

  • Command
    • "TABLE.DATA.READ" "partitions" "{catalog name}.{namespace name}.{table name}" "pattern (normally '*')" "partition filter"
  • Examples

+
127.0.0.1:7389> TABLE.DATA.READ partitions "cat_1.test.table" "*" "age\x1e30\x1eLTE"
+ 1) "21\x1eSales Manager"
+ 2) "22\x1eTutor"
+ 3) "23\x1eBanker"
+ 4) "23\x1eProfessor"
+ 5) "23\x1eSales Manager"
+ 6) "24\x1eStudent"
+ 7) "26\x1eStudent"
+ 8) "27\x1eSales Manager"
+ 9) "29\x1eBanker"
+10) "29\x1eProfessor"
+127.0.0.1:7389> TABLE.DATA.READ partitions "cat_1.test.table" "*" "age\x1e32\x1eEQ"
+1) "32\x1eProfessor"
+2) "32\x1eSales Manager"
+
+127.0.0.1:7389> TABLE.DATA.READ partitions "cat_1.test.table"  "*" "age\x1e32\x1eLT\x1ejob\x1eCEO\x1eLTE\x1eAND"
+1) "23\x1eBanker"
+2) "29\x1eBanker"
+127.0.0.1:7389> TABLE.DATA.READ partitions "cat_1.test.table"  "*" "age\x1e32\x1eLT\x1ejob\x1eCEO\x1eGTE\x1eAND"
+1) "21\x1eSales Manager"
+2) "22\x1eTutor"
+3) "23\x1eProfessor"
+4) "23\x1eSales Manager"
+5) "24\x1eStudent"
+6) "26\x1eStudent"
+7) "27\x1eSales Manager"
+8) "29\x1eProfessor"
+127.0.0.1:7389> TABLE.DATA.READ partitions "cat_1.test.table"  "*" "age\x1e32\x1eGT\x1ejob\x1eCEO\x1eGTE\x1eAND"
+1) "33\x1eProfessor"
+2) "36\x1eProfessor"
+3) "43\x1eSales Manager"
+4) "48\x1eCEO"
+
+
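The filter strings above follow a simple layout: each condition is column\x1evalue\x1eoperator, and two conditions are combined by appending a boolean operator such as AND. A minimal redis-py sketch assuming that layout:

import redis

SEP = "\x1e"
r = redis.Redis(host="127.0.0.1", port=7389, decode_responses=True)

# age <= 30
age_lte_30 = SEP.join(["age", "30", "LTE"])
print(r.execute_command("TABLE.DATA.READ", "partitions", "cat_1.test.table", "*", age_lte_30))

# age < 32 AND job <= "CEO", written as two conditions followed by AND.
combined = SEP.join(["age", "32", "LT", "job", "CEO", "LTE", "AND"])
print(r.execute_command("TABLE.DATA.READ", "partitions", "cat_1.test.table", "*", combined))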

12. select

  • Command
    • "TABLE.DATA.READ" "select" "{catalog name}.{namespace name}.{table name}" "pattern (normally '*')" "partition filter" "data filter"
  • Examples

+
127.0.0.1:7389> help TABLE.DATA.READ select
+
+  TABLE.DATA.READ select catalog.namespace.table projection partition-filter data-filter
+  summary: Get the data with the pattern and filter
+  since: 2.0.0
+  group: table.data
+
+127.0.0.1:7389> TABLE.DATA.READ select xxx ....
+
+

13. getPartitionRowCount

  • Command
    • "TABLE.DATA.READ" "getPartitionRowCount" "{catalog name}.{namespace name}.{table name}" "partition string with regular expression"
  • Examples

+
127.0.0.1:7389> help TABLE.DATA.READ getPartitionRowCount
+
+  TABLE.DATA.READ getPartitionRowCount catalog.namespace.table partition-string
+  summary: Get the count of the rows in the partition
+  since: 2.0.0
+  group: table.data
+
+127.0.0.1:7389> TABLE.DATA.READ getPartitionRowCount "cat_1.test.table" *
+ 1) "21\x1eSales Manager"
+ 2) "2"
+ 3) "22\x1eTutor"
+ 4) "1"
+ 5) "23\x1eBanker"
+ 6) "1"
+ 7) "23\x1eProfessor"
+ 8) "1"
+ 9) "23\x1eSales Manager"
+10) "1"
+11) "24\x1eStudent"
+12) "1"
+13) "26\x1eStudent"
+14) "1"
+15) "27\x1eSales Manager"
+16) "1"
+17) "29\x1eBanker"
+18) "1"
+19) "29\x1eProfessor"
+20) "1"
+21) "32\x1eProfessor"
+22) "1"
+23) "32\x1eSales Manager"
+24) "1"
+25) "33\x1eProfessor"
+26) "1"
+27) "36\x1eProfessor"
+28) "1"
+29) "40\x1eBanker"
+30) "1"
+31) "41\x1eBanker"
+32) "2"
+33) "43\x1eSales Manager"
+34) "1"
+35) "45\x1eBanker"
+36) "1"
+37) "47\x1eBanker"
+38) "2"
+39) "48\x1eCEO"
+40) "1"
+127.0.0.1:7389> TABLE.DATA.READ getPartitionRowCount "cat_1.test.table" "23*"
+1) "23\x1eBanker"
+2) "1"
+3) "23\x1eProfessor"
+4) "1"
+5) "23\x1eSales Manager"
+6) "1"
+127.0.0.1:7389> TABLE.DATA.READ getPartitionRowCount "cat_1.test.table" "*Professor"
+ 1) "23\x1eProfessor"
+ 2) "1"
+ 3) "29\x1eProfessor"
+ 4) "1"
+ 5) "32\x1eProfessor"
+ 6) "1"
+ 7) "33\x1eProfessor"
+ 8) "1"
+ 9) "36\x1eProfessor"
+10) "1"
+127.0.0.1:7389> TABLE.DATA.READ getPartitionRowCount "cat_1.test.table" "45\x1eBanker"
+1) "45\x1eBanker"
+2) "1"
+
+
+
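Since the reply alternates partition key and row count, it can be folded into a dictionary. A minimal redis-py sketch, with the same assumptions as the earlier snippets:

import redis

r = redis.Redis(host="127.0.0.1", port=7389, decode_responses=True)

reply = r.execute_command("TABLE.DATA.READ", "getPartitionRowCount",
                          "cat_1.test.table", "*Professor")
print(dict(zip(reply[0::2], map(int, reply[1::2]))))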

14. getPartitionRowGroup

  • Command
    • "TABLE.DATA.READ" "getPartitionRowGroup" "{catalog name}.{namespace name}.{table name}" "partition string"
  • Examples

+
127.0.0.1:7389> help TABLE.DATA.READ getPartitionRowGroup
+
+  TABLE.DATA.READ getPartitionRowGroup catalog.namespace.table partition-string
+  summary: Get the count of the rows in the each row-group of the partition
+  since: 2.0.0
+  group: table.data
+
+127.0.0.1:7389> TABLE.DATA.READ getPartitionRowGroup "cat_1.test.table" "21\x1eSales Manager"
+1) "0"
+2) "1"
+3) "1"
+4) "2"
+127.0.0.1:7389>
+
+

15. getTableRowCount

  • Command
    • "TABLE.DATA.READ" "getTableRowCount" "{catalog name}.{namespace name}.{table name} with regular expression"
  • Examples

+
127.0.0.1:7389> help TABLE.DATA.READ getTableRowCount
+
+  TABLE.DATA.READ getTableRowCount -
+  summary: Get the row count of each table
+  since: 2.0.0
+  group: table.data
+
+127.0.0.1:7389> TABLE.DATA.READ getTableRowCount *
+1) "cat_1.test.network_table"
+2) "33229"
+3) "cat_1.test.table"
+4) "23"
+127.0.0.1:7389>
+
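The same name/count pairing applies per table. A minimal redis-py sketch assuming the flat reply shown above:

import redis

r = redis.Redis(host="127.0.0.1", port=7389, decode_responses=True)

reply = r.execute_command("TABLE.DATA.READ", "getTableRowCount", "*")
print(dict(zip(reply[0::2], map(int, reply[1::2]))))
# e.g. {'cat_1.test.network_table': 33229, 'cat_1.test.table': 23}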
diff --git a/cli-cluster/index.html b/cli-cluster/index.html

Cluster - Lightning DB Docs

Note

+

The Command Line Interface (CLI) of LightningDB supports not only the deploy and start commands but also many commands to access and manipulate data in LightningDB.

+
+

If you want to see the list of cluster commands, use the cluster command without any option.

+
ec2-user@lightningdb:1> cluster
+
+NAME
+    ltcli cluster - This is cluster command
+
+SYNOPSIS
+    ltcli cluster COMMAND
+
+DESCRIPTION
+    This is cluster command
+
+COMMANDS
+    COMMAND is one of the following:
+
+     add_slave
+       Add slaves to cluster additionally
+
+     clean
+       Clean cluster
+
+     configure
+
+     create
+       Create cluster
+
+     ls
+       Check cluster list
+
+     rebalance
+       Rebalance
+
+     restart
+       Restart redist cluster
+
+     rowcount
+       Query and show cluster row count
+
+     start
+       Start cluster
+
+     stop
+       Stop cluster
+
+     use
+       Change selected cluster
+
+

1. Deploy and Start

+

(1) Cluster configure

+

redis-{port}.conf is generated using the redis-{master/slave}.conf.template and redis.properties files.

+
matthew@lightningdb:21> cluster configure
+Check status of hosts...
+OK
+sync conf
++----------------+--------+
+| HOST           | STATUS |
++----------------+--------+
+| 192.168.111.44 | OK     |
+| 192.168.111.41 | OK     |
++----------------+--------+
+OK
+
+

(2) Cluster start

+
  • Backup logs of the previous master/slave nodes
    • All log files of previous master/slave nodes in ${SR2_HOME}/logs/redis/1 will be moved to ${SR2_HOME}/logs/redis/backup/.
  • Generate directories to save data
    • Save aof and rdb files of redis-server and RocksDB files in ${SR2_REDIS_DATA}
  • Start redis-server processes
    • Start master and slave redis-servers with the ${SR2_HOME}/conf/redis/redis-{port}.conf file
  • Log files will be saved in ${SR2_HOME}/logs/redis/
+
ec2-user@lightningdb:1> cluster start
+Check status of hosts...
+OK
+Check cluster exist...
+ - 127.0.0.1
+OK
+Backup redis master log in each MASTER hosts...
+ - 127.0.0.1
+Generate redis configuration files for master hosts
+sync conf
++-----------+--------+
+| HOST      | STATUS |
++-----------+--------+
+| 127.0.0.1 | OK     |
++-----------+--------+
+Starting master nodes : 127.0.0.1 : 18100|18101|18102|18103|18104 ...
+Wait until all redis process up...
+cur: 5 / total: 5
+Complete all redis process up
+
+

Errors

+
    +
  • ErrorCode 11
  • +
+

A redis-server (master) process with the same port is already running. To resolve this error, use cluster stop or kill {pid of the process}.

+
$ cluster start
+...
+...
+[ErrorCode 11] Fail to start... Must be checked running MASTER redis processes!
+We estimate that redis process is <alive-redis-count>.
+
+
    +
  • ErrorCode 12
  • +
+

A redis-server (slave) process with the same port is already running. To resolve this error, use cluster stop or kill {pid of the process}.

+
$ cluster start
+...
+[ErrorCode 12] Fail to start... Must be checked running SLAVE redis processes!
+We estimate that redis process is <alive-redis-count>.
+
+
    +
  • Conf file not exist
  • +
+

The conf file is not found. To resolve this error, use cluster configure and then cluster start.

+
$ cluster start
+...
+FileNotExistError: ${SR2_HOME}/conf/redis/redis-{port}.conf
+
+
  • Max try error
    • For detailed information, please check the log files.
+
$ cluster start
+...
+ClusterRedisError: Fail to start redis: max try exceed
+Recommendation Command: 'monitor'
+
+

(3) Cluster create

+

After checking the information of the cluster, create a cluster of LightningDB.

+

Case 1) When redis-server processes are running, create a cluster only.

+
ec2-user@lightningdb:1>cluster create
+Check status of hosts...
+OK
+>>> Creating cluster
++-----------+-------+--------+
+| HOST      | PORT  | TYPE   |
++-----------+-------+--------+
+| 127.0.0.1 | 18100 | MASTER |
+| 127.0.0.1 | 18101 | MASTER |
+| 127.0.0.1 | 18102 | MASTER |
+| 127.0.0.1 | 18103 | MASTER |
+| 127.0.0.1 | 18104 | MASTER |
++-----------+-------+--------+
+replicas: 0
+
+Do you want to proceed with the create according to the above information? (y/n)
+y
+Cluster meet...
+ - 127.0.0.1:18100
+ - 127.0.0.1:18103
+ - 127.0.0.1:18104
+ - 127.0.0.1:18101
+ - 127.0.0.1:18102
+Adding slots...
+ - 127.0.0.1:18100, 3280
+ - 127.0.0.1:18103, 3276
+ - 127.0.0.1:18104, 3276
+ - 127.0.0.1:18101, 3276
+ - 127.0.0.1:18102, 3276
+Check cluster state and asign slot...
+Ok
+create cluster complete.
+
+

Case 2) When redis-server processes are not running, create a cluster after launching redis-server processes with cluster start command.

+
ec2-user@lightningdb:4>cluster create
+Check status of hosts...
+OK
+Backup redis master log in each MASTER hosts...
+ - 127.0.0.1
+create redis data directory in each MASTER hosts
+ - 127.0.0.1
+sync conf
++-----------+--------+
+| HOST      | STATUS |
++-----------+--------+
+| 127.0.0.1 | OK     |
++-----------+--------+
+OK
+Starting master nodes : 127.0.0.1 : 18100|18101|18102|18103|18104 ...
+Wait until all redis process up...
+cur: 5 / total: 5
+Complete all redis process up
+>>> Creating cluster
++-----------+-------+--------+
+| HOST      | PORT  | TYPE   |
++-----------+-------+--------+
+| 127.0.0.1 | 18100 | MASTER |
+| 127.0.0.1 | 18101 | MASTER |
+| 127.0.0.1 | 18102 | MASTER |
+| 127.0.0.1 | 18103 | MASTER |
+| 127.0.0.1 | 18104 | MASTER |
++-----------+-------+--------+
+replicas: 0
+
+Do you want to proceed with the create according to the above information? (y/n)
+y
+Cluster meet...
+ - 127.0.0.1:18103
+ - 127.0.0.1:18104
+ - 127.0.0.1:18101
+ - 127.0.0.1:18102
+ - 127.0.0.1:18100
+Adding slots...
+ - 127.0.0.1:18103, 3280
+ - 127.0.0.1:18104, 3276
+ - 127.0.0.1:18101, 3276
+ - 127.0.0.1:18102, 3276
+ - 127.0.0.1:18100, 3276
+Check cluster state and asign slot...
+Ok
+create cluster complete.
+
+

Errors

+

When redis servers are not running, this error (Errno 111) will occur. To resolve it, run the cluster start command first.

+
ec2-user@lightningdb:1>cluster create
+Check status of hosts...
+OK
+>>> Creating cluster
++-----------+-------+--------+
+| HOST      | PORT  | TYPE   |
++-----------+-------+--------+
+| 127.0.0.1 | 18100 | MASTER |
+| 127.0.0.1 | 18101 | MASTER |
+| 127.0.0.1 | 18102 | MASTER |
+| 127.0.0.1 | 18103 | MASTER |
+| 127.0.0.1 | 18104 | MASTER |
++-----------+-------+--------+
+replicas: 0
+
+Do you want to proceed with the create according to the above information? (y/n)
+y
+127.0.0.1:18100 - [Errno 111] Connection refused
+
+

(4) Cluster stop

+

Gracefully kill all redis-servers (master/slave) with SIGINT.

+
ec2-user@lightningdb:1> cluster stop
+Check status of hosts...
+OK
+Stopping master cluster of redis...
+cur: 5 / total: 5
+cur: 0 / total: 5
+Complete all redis process down
+
+

Options

+
    +
  • Forcibly kill all redis-servers (master/slave) with SIGKILL
  • +
+
--force
+
+

(5) Cluster clean

+

Remove the conf files for redis-server and all data (aof, rdb, RocksDB) of LightningDB.

+
ec2-user@lightningdb:1> cluster clean
+Removing redis generated master configuration files
+ - 127.0.0.1
+Removing flash db directory, appendonly and dump.rdb files in master
+ - 127.0.0.1
+Removing master node configuration
+ - 127.0.0.1
+
+

(6) Cluster restart​

+

Run cluster stop and then cluster start.

+

Options

+
    +
  • Forcibly kill all redis-servers (master/slave) with SIGKILL and then start again.
  • +
+
--force-stop
+
+
    +
  • Remove all data(aof, rdb, RocksDB, conf files) before starting again.
  • +
+
--reset
+
+
    +
  • Run cluster create as well. This option should be used together with --reset.
  • +
+
--cluster
+
+

(7) Update version

+

You can update LightningDB by using the 'deploy' command.

+
> c 1 // alias of 'cluster use 1'
+> deploy
+(Watch out) Cluster 1 is already deployed. Do you want to deploy again? (y/n) [n]
+y
+
+
    +
  • Select installer
  • +
+
Select installer
+
+    [ INSTALLER LIST ]
+    (1) lightningdb.release.master.5a6a38.bin
+    (2) lightningdb.trial.master.dbcb9e-dirty.bin
+    (3) lightningdb.trial.master.dbcb9e.bin
+
+Please enter the number, file path or URL of the installer you want to use.
+you can also add a file in list by copy to '$FBPATH/releases/'
+1
+OK, lightningdb.release.master.5a6a38.bin
+
+
    +
  • Restore
  • +
+
Do you want to restore conf? (y/n)
+y
+
+

If you want to reuse the current settings, type 'y'.

+
    +
  • Check all settings finally
      +
    • Backup path of cluster: ${base-directory}/backup/cluster_${cluster-id}_bak_${time-stamp}
    • +
    • Backup path of conf files: $FBAPTH/conf_backup/cluster_${cluster-id}_conf_bak_${time-stamp}
    • +
    +
  • +
+
+-----------------+---------------------------------------------------+
+| NAME            | VALUE                                             |
++-----------------+---------------------------------------------------+
+| installer       | lightningdb.release.master.5a6a38.bin             |
+| nodes           | nodeA                                             |
+|                 | nodeB                                             |
+|                 | nodeC                                             |
+|                 | nodeD                                             |
+| master ports    | 18100                                             |
+| slave ports     | 18150-18151                                       |
+| ssd count       | 3                                                 |
+| redis data path | ~/sata_ssd/ssd_                                   |
+| redis db path   | ~/sata_ssd/ssd_                                   |
+| flash db path   | ~/sata_ssd/ssd_                                   |
++-----------------+---------------------------------------------------+
+Do you want to proceed with the deploy accroding to the above information? (y/n)
+y
+Check status of hosts...
++-----------+--------+
+| HOST      | STATUS |
++-----------+--------+
+| nodeA     | OK     |
+| nodeB     | OK     |
+| nodeC     | OK     |
+| nodeD     | OK     |
++-----------+--------+
+Checking for cluster exist...
++------+--------+
+| HOST | STATUS |
++------+--------+
+Backup conf of cluster 1...
+OK, cluster_1_conf_bak_<time-stamp>
+Backup info of cluster 1 at nodeA...
+OK, cluster_1_bak_<time-stamp>
+Backup info of cluster 1 at nodeB...
+OK, cluster_1_bak_<time-stamp>
+Backup info of cluster 1 at nodeC...
+OK, cluster_1_bak_<time-stamp>
+Backup info of cluster 1 at nodeD...
+OK, cluster_1_bak_<time-stamp>
+Transfer installer and execute...
+ - nodeA
+ - nodeB
+ - nodeC
+ - nodeD
+Sync conf...
+Complete to deploy cluster 1.
+Cluster 1 selected.
+
+
    +
  • Restart
  • +
+
> cluster restart
+
+

After the restart, the new version will be applied.

+

2. Monitor

+

(1) Cluster use

+

Change the cluster that LTCLI uses, with the cluster use or c command.

+

Examples

+
ec2-user@lightningdb:2> cluster use 1
+Cluster '1' selected.
+ec2-user@lightningdb:1> c 2
+Cluster '2' selected.
+
+

(2) Cluster ls

+

List the deployed clusters.

+

Examples

+
ec2-user@lightningdb:2> cluster ls
+[1, 2]
+
+

(3) Cluster rowcount

+

Check the count of records that are stored in the cluster.

+

Examples

+
ec2-user@lightningdb:1> cluster rowcount
+0
+
+

(4) Cluster tree

+

You can check the status of master and slave nodes and see which master and slave nodes are linked to each other.

+

Examples

+
ec2-user@lightningdb:9> cluster tree
+127.0.0.1:18900(connected)
+|__ 127.0.0.1:18950(connected)
+
+127.0.0.1:18901(connected)
+|__ 127.0.0.1:18951(connected)
+
+127.0.0.1:18902(connected)
+|__ 127.0.0.1:18952(connected)
+
+127.0.0.1:18903(connected)
+|__ 127.0.0.1:18953(connected)
+
+127.0.0.1:18904(connected)
+|__ 127.0.0.1:18954(connected)
+
+127.0.0.1:18905(connected)
+|__ 127.0.0.1:18955(connected)
+
+127.0.0.1:18906(connected)
+|__ 127.0.0.1:18956(connected)
+
+

(5) Cluster distribution

+

The distribution of master/slave nodes is displayed with their hostnames (IP addresses).

+

Examples

+
matthew@lightningdb:21> cluster distribution
++-----------------------+--------+-------+
+| HOST                  | MASTER | SLAVE |
++-----------------------+--------+-------+
+| fbg04(192.168.111.41) | 4      | 2     |
+| fbg05(192.168.111.44) | 2      | 4     |
+| TOTAL                 | 6      | 6     |
++-----------------------+--------+-------+
+
+

3. Failover

+

(1) Cluster failover_list

+
  • failovered masters
    • Nodes that were initialized as slaves by the cluster but have become masters by failover.
  • no-slave masters
    • Masters without slaves. You need to replicate the failbacked slaves to these nodes.
  • no-slot masters
    • Masters not yet added into the cluster, or masters without slots.
  • failbacked slaves
    • Nodes that were initialized as masters but have become slaves by failback.
+

Examples

+
matthew@lightningdb:21> cluster failover_list
+
+1) failovered masters:
+192.168.111.44:20152
+192.168.111.44:20153
+192.168.111.44:20156
+
+2) no-slave masters:
+192.168.111.44:20100
+192.168.111.41:20101
+
+3) no-slot masters:
+192.168.111.44:20152
+
+4) failbacked slaves:
+192.168.111.41:20102
+192.168.111.41:20105
+
+

(2) Cluster do_replicate

+

You can add a node as the slave of a master node with cluster do_replicate {slave's IP}:{slave's Port} {master's IP}:{master's Port}.

+

The IP addresses of masters or slaves can be replaced with their hostnames.

+

Examples

+
matthew@lightningdb:21> cluster tree
+192.168.111.44:20101(connected)
+|__ 192.168.111.44:20151(connected)
+
+192.168.111.44:20102(connected)
+|__ 192.168.111.44:20152(connected)
+
+192.168.111.44:20150(connected)
+|__ 192.168.111.44:20100(connected)
+
+matthew@lightningdb:21> cluster do_replicate 192.168.111.44:20100 192.168.111.44:20101
+Start to replicate...
+
+OK
+
+matthew@lightningdb:21> cluster tree
+192.168.111.44:20101(connected)
+|__ 192.168.111.44:20100(connected)
+|__ 192.168.111.44:20151(connected)
+
+192.168.111.44:20102(connected)
+|__ 192.168.111.44:20152(connected)
+
+192.168.111.44:20150(connected)
+
+

with hostnames,

+
matthew@lightningdb:21> cluster do_replicate fbg05:20100 fbg05:20101
+Start to replicate...
+
+OK
+
+

(3) Cluster find_noaddr & cluster forget_noaddr

+

You can find and remove 'noaddr' nodes in the current cluster.

+

'noaddr' nodes are no longer valid nodes.

+

Examples

+
matthew@lightningdb:21> cluster find_noaddr
+
++------------------------------------------+
+| UUID                                     |
++------------------------------------------+
+| 40675af73cd8fa1272a20fe9536ad19c398b5bca |
++------------------------------------------+
+
+matthew@lightningdb:21> cluster forget_noaddr
+
+"27" nodes have forgot "40675af73cd8fa1272a20fe9536ad19c398b5bca"
+
+matthew@lightningdb:21> cluster find_noaddr
+
++------+
+| UUID |
++------+
+
+

(4) Cluster failover

+

If a master node is killed, its slave node will automatically be promoted after 'cluster-node-time'2.

+

User can promote the slave node immediately by using the 'cluster failover' command.

+

Examples

+

Step 1) Check the status of the cluster

+

In this case, '127.0.0.1:18902' node is killed.

+
ec2-user@lightningdb:9> cluster tree
+127.0.0.1:18900(connected)
+|__ 127.0.0.1:18950(connected)
+
+127.0.0.1:18901(connected)
+|__ 127.0.0.1:18951(connected)
+
+127.0.0.1:18902(disconnected)   <--- Killed!
+|__ 127.0.0.1:18952(connected)
+
+127.0.0.1:18903(connected)
+|__ 127.0.0.1:18953(connected)
+
+127.0.0.1:18904(connected)
+|__ 127.0.0.1:18954(connected)
+
+127.0.0.1:18905(connected)
+|__ 127.0.0.1:18955(connected)
+
+127.0.0.1:18906(connected)
+|__ 127.0.0.1:18956(connected)
+
+

Step 2) Do failover with 'cluster failover' command

+
ec2-user@lightningdb:9> cluster failover
+failover 127.0.0.1:18952 for 127.0.0.1:18902
+OK
+ec2-user@lightningdb:9> cluster tree
+127.0.0.1:18900(connected)
+|__ 127.0.0.1:18950(connected)
+
+127.0.0.1:18901(connected)
+|__ 127.0.0.1:18951(connected)
+
+127.0.0.1:18902(disconnected)   <--- Killed!
+
+127.0.0.1:18903(connected)
+|__ 127.0.0.1:18953(connected)
+
+127.0.0.1:18904(connected)
+|__ 127.0.0.1:18954(connected)
+
+127.0.0.1:18905(connected)
+|__ 127.0.0.1:18955(connected)
+
+127.0.0.1:18906(connected)
+|__ 127.0.0.1:18956(connected)
+
+127.0.0.1:18952(connected)      <--- Promoted to master!
+
+

(5) Cluster failback

+

With the 'cluster failback' command, the killed node is restarted and added to the cluster as a slave node.

+

Examples

+
ec2-user@lightningdb:9> cluster failback
+run 127.0.0.1:18902
+ec2-user@lightningdb:9> cluster tree
+127.0.0.1:18900(connected)
+|__ 127.0.0.1:18950(connected)
+
+127.0.0.1:18901(connected)
+|__ 127.0.0.1:18951(connected)
+
+127.0.0.1:18903(connected)
+|__ 127.0.0.1:18953(connected)
+
+127.0.0.1:18904(connected)
+|__ 127.0.0.1:18954(connected)
+
+127.0.0.1:18905(connected)
+|__ 127.0.0.1:18955(connected)
+
+127.0.0.1:18906(connected)
+|__ 127.0.0.1:18956(connected)
+
+127.0.0.1:18952(connected)       <--- Promoted to master!
+|__ 127.0.0.1:18902(connected)   <--- Failbacked. Now this node is slave!
+
+

(6) Cluster reset_distribution

+

To initialize the node distribution, use 'cluster reset_distribution'.

+

Examples

+
matthew@lightningdb:21> cluster failover_list
+1) failovered masters:
+192.168.111.44:20152
+
+2) no-slave masters:
+
+3) no-slot masters:
+
+4) failbacked slaves:
+192.168.111.41:20101
+
+matthew@lightningdb:21> cluster reset_distribution
+'192.168.111.41:20101' will be master...
+
+OK
+
+matthew@lightningdb:21> cluster failover_list
+1) failovered masters:
+
+2) no-slave masters:
+
+3) no-slot masters:
+
+4) failbacked slaves:
+
+

(7) Cluster nodes_with_dir & Cluster masters_with_dir

+
  • Cluster nodes_with_dir
    • List all nodes that are using the disk with the HW fault.
  • Cluster masters_with_dir
    • List all masters that are using the disk with the HW fault.
+

Examples

+

+matthew@lightningdb:21> cluster nodes_with_dir 192.168.111.44 matthew03
++----------------+-------+------------------------------------------+
+| HOST           | PORT  | PATH                                     |
++----------------+-------+------------------------------------------+
+| 192.168.111.44 | 20102 | /sata_ssd/ssd_02/matthew03/nvkvs/matthew |
+| 192.168.111.44 | 20105 | /sata_ssd/ssd_02/matthew03/nvkvs/matthew |
+| 192.168.111.44 | 20150 | /sata_ssd/ssd_02/matthew03/nvkvs/matthew |
+| 192.168.111.44 | 20153 | /sata_ssd/ssd_02/matthew03/nvkvs/matthew |
+| 192.168.111.44 | 20156 | /sata_ssd/ssd_02/matthew03/nvkvs/matthew |
++----------------+-------+------------------------------------------+
+
+matthew@lightningdb:21> cluster masters_with_dir 192.168.111.44 matthew03
++----------------+-------+------------------------------------------+
+| HOST           | PORT  | PATH                                     |
++----------------+-------+------------------------------------------+
+| 192.168.111.44 | 20102 | /sata_ssd/ssd_02/matthew03/nvkvs/matthew |
+| 192.168.111.44 | 20105 | /sata_ssd/ssd_02/matthew03/nvkvs/matthew |
++----------------+-------+------------------------------------------+
+
+

with hostnames,

+
matthew@lightningdb:21> cluster nodes_with_dir fbg05 matthew02
++-------+-------+------------------------------------------+
+| HOST  | PORT  | PATH                                     |
++-------+-------+------------------------------------------+
+| fbg05 | 20101 | /sata_ssd/ssd_02/matthew02/nvkvs/matthew |
+| fbg05 | 20152 | /sata_ssd/ssd_02/matthew02/nvkvs/matthew |
++-------+-------+------------------------------------------+
+matthew@lightningdb:21> cluster masters_with_dir fbg05 matthew02
++-------+-------+------------------------------------------+
+| HOST  | PORT  | PATH                                     |
++-------+-------+------------------------------------------+
+| fbg05 | 20101 | /sata_ssd/ssd_02/matthew02/nvkvs/matthew |
++-------+-------+------------------------------------------+
+
+

(8) Cluster failover_with_dir

+

Do failover so that the masters using the disk become slaves.

+

Examples

+
matthew@lightningdb:21> cluster masters_with_dir 192.168.111.44 matthew03
++----------------+-------+------------------------------------------+
+| HOST           | PORT  | PATH                                     |
++----------------+-------+------------------------------------------+
+| 192.168.111.44 | 20102 | /sata_ssd/ssd_02/matthew03/nvkvs/matthew |
+| 192.168.111.44 | 20105 | /sata_ssd/ssd_02/matthew03/nvkvs/matthew |
++----------------+-------+------------------------------------------+
+
+matthew@lightningdb:21> cluster failover_list
+1) failovered masters:
+
+2) no-slave masters:
+
+3) no-slot masters:
+
+4) failbacked slaves:
+
+matthew@lightningdb:21> cluster failover_with_dir 192.168.111.44 matthew03
+'192.168.111.41:20152' will be master...
+OK
+
+'192.168.111.41:20155' will be master...
+OK
+
+matthew@lightningdb:21> cluster failover_list
+1) failovered masters:
+192.168.111.41:20152
+192.168.111.41:20155
+
+2) no-slave masters:
+
+3) no-slot masters:
+
+4) failbacked slaves:
+192.168.111.44:20102
+192.168.111.44:20105
+
+matthew@lightningdb:21> cluster masters_with_dir 192.168.111.44 matthew03
++------+------+------+
+| HOST | PORT | PATH |
++------+------+------+
+
+

with hostnames,

+
matthew@lightningdb:21> cluster masters_with_dir fbg05 matthew01
++-------+-------+------------------------------------------+
+| HOST  | PORT  | PATH                                     |
++-------+-------+------------------------------------------+
+| fbg05 | 20151 | /sata_ssd/ssd_02/matthew01/nvkvs/matthew |
++-------+-------+------------------------------------------+
+matthew@lightningdb:21> cluster tree
+192.168.111.44:20102(connected)
+|__ 192.168.111.44:20152(connected)
+
+192.168.111.44:20150(connected)
+|__ 192.168.111.44:20100(connected)
+
+192.168.111.44:20151(connected)
+|__ 192.168.111.44:20101(connected)
+
+matthew@lightningdb:21> cluster failover_with_dir fbg05 matthew01
+'192.168.111.44:20101' will be master...
+OK
+
+
+matthew@lightningdb:21> cluster tree
+192.168.111.44:20101(connected)
+|__ 192.168.111.44:20151(connected)
+
+192.168.111.44:20102(connected)
+|__ 192.168.111.44:20152(connected)
+
+192.168.111.44:20150(connected)
+|__ 192.168.111.44:20100(connected)
+
+

(9) Cluster force_failover

+

When a server needs to be shut down due to a HW fault or maintenance, change all masters in the server to slaves by failing over to their slaves.

+

Examples

+
matthew@lightningdb:21> cluster distribution
++----------------+--------+-------+
+| HOST           | MASTER | SLAVE |
++----------------+--------+-------+
+| 192.168.111.44 | 7      | 7     |
+| 192.168.111.41 | 7      | 7     |
+| TOTAL          | 14     | 14    |
++----------------+--------+-------+
+
+
+matthew@lightningdb:21> cluster force_failover 192.168.111.41
+'192.168.111.44:20150' will be master...
+OK
+
+'192.168.111.44:20151' will be master...
+OK
+
+'192.168.111.44:20152' will be master...
+OK
+
+'192.168.111.44:20153' will be master...
+OK
+
+'192.168.111.44:20154' will be master...
+OK
+
+'192.168.111.44:20155' will be master...
+OK
+
+'192.168.111.44:20156' will be master...
+OK
+
+matthew@lightningdb:21> cluster distribution
++----------------+--------+-------+
+| HOST           | MASTER | SLAVE |
++----------------+--------+-------+
+| 192.168.111.44 | 14     | 0     |
+| 192.168.111.41 | 0      | 14    |
+| TOTAL          | 14     | 14    |
++----------------+--------+-------+
+matthew@lightningdb:21>
+
+

4. Scale out

+

(1) Cluster add_slave

+
+

Warning

+

Before using the add-slave command, ingestion to master nodes should be stopped. After replication and sync between master and slave are completed, ingestion will be available again.

+
+

You can add slaves to a cluster that is configured only with masters and has no redundancy.

+
  • Create a cluster only with masters
    • This is the procedure for configuring the test environment. If a cluster with only masters already exists, go to the 'Add slave info' step.
  • Proceed with the deploy.
    • Enter 0 for replicas, as shown below, when deploying.

ec2-user@lightningdb:2> deploy 3
+Select installer
+
+    [ INSTALLER LIST ]
+    (1) lightningdb.dev.master.5a6a38.bin
+
+Please enter the number, file path or url of the installer you want to use.
+you can also add file in list by copy to '$FBPATH/releases/'
+https://flashbase.s3.ap-northeast-2.amazonaws.com/lightningdb.release.master.5a6a38.bin
+Downloading lightningdb.release.master.5a6a38.bin
+[==================================================] 100%
+OK, lightningdb.release.master.5a6a38.bin
+Please type host list separated by comma(,) [127.0.0.1]
+
+OK, ['127.0.0.1']
+How many masters would you like to create on each host? [5]
+
+OK, 5
+Please type ports separate with comma(,) and use hyphen(-) for range. [18300-18304]
+
+OK, ['18300-18304']
+How many replicas would you like to create on each master? [0]
+
+OK, 0
+How many ssd would you like to use? [3]
+
+OK, 3
+Type prefix of db path [~/sata_ssd/ssd_]
+
+OK, ~/sata_ssd/ssd_
++--------------+---------------------------------+
+| NAME         | VALUE                           |
++--------------+---------------------------------+
+| installer    | lightningdb.dev.master.5a6a38.bin |
+| hosts        | 127.0.0.1                       |
+| master ports | 18300-18304                     |
+| ssd count    | 3                               |
+| db path      | ~/sata_ssd/ssd_                 |
++--------------+---------------------------------+
+Do you want to proceed with the deploy accroding to the above information? (y/n)
+y
+Check status of hosts...
++-----------+--------+
+| HOST      | STATUS |
++-----------+--------+
+| 127.0.0.1 | OK     |
++-----------+--------+
+OK
+Checking for cluster exist...
++-----------+--------+
+| HOST      | STATUS |
++-----------+--------+
+| 127.0.0.1 | CLEAN  |
++-----------+--------+
+OK
+Transfer installer and execute...
+ - 127.0.0.1
+Sync conf...
+Complete to deploy cluster 3.
+Cluster '3' selected.
+
+
    +
  • When the deploy is complete, start and create the cluster.
  • +
+
ec2-user@lightningdb:3> cluster start
+Check status of hosts...
+OK
+Check cluster exist...
+ - 127.0.0.1
+OK
+Backup redis master log in each MASTER hosts...
+ - 127.0.0.1
+create redis data directory in each MASTER hosts
+ - 127.0.0.1
+sync conf
++-----------+--------+
+| HOST      | STATUS |
++-----------+--------+
+| 127.0.0.1 | OK     |
++-----------+--------+
+OK
+Starting master nodes : 127.0.0.1 : 18300|18301|18302|18303|18304 ...
+Wait until all redis process up...
+cur: 5 / total: 5
+Complete all redis process up
+ec2-user@lightningdb:3> cluster create
+Check status of hosts...
+OK
+>>> Creating cluster
++-----------+-------+--------+
+| HOST      | PORT  | TYPE   |
++-----------+-------+--------+
+| 127.0.0.1 | 18300 | MASTER |
+| 127.0.0.1 | 18301 | MASTER |
+| 127.0.0.1 | 18302 | MASTER |
+| 127.0.0.1 | 18303 | MASTER |
+| 127.0.0.1 | 18304 | MASTER |
++-----------+-------+--------+
+replicas: 0
+
+Do you want to proceed with the create according to the above information? (y/n)
+y
+Cluster meet...
+ - 127.0.0.1:18300
+ - 127.0.0.1:18303
+ - 127.0.0.1:18304
+ - 127.0.0.1:18301
+ - 127.0.0.1:18302
+Adding slots...
+ - 127.0.0.1:18300, 3280
+ - 127.0.0.1:18303, 3276
+ - 127.0.0.1:18304, 3276
+ - 127.0.0.1:18301, 3276
+ - 127.0.0.1:18302, 3276
+Check cluster state and asign slot...
+Ok
+create cluster complete.
+ec2-user@lightningdb:3>
+
+
    +
  • Add slave info
  • +
+

Open the conf file.

+
ec2-user@lightningdb:3> conf cluster
+
+

You can modify redis.properties by entering the command as shown above.

+
#!/bin/bash
+
+## Master hosts and ports
+export SR2_REDIS_MASTER_HOSTS=( "127.0.0.1" )
+export SR2_REDIS_MASTER_PORTS=( $(seq 18300 18304) )
+
+## Slave hosts and ports (optional)
+[[export]] SR2_REDIS_SLAVE_HOSTS=( "127.0.0.1" )
+[[export]] SR2_REDIS_SLAVE_PORTS=( $(seq 18600 18609) )
+
+## only single data directory in redis db and flash db
+## Must exist below variables; 'SR2_REDIS_DATA', 'SR2_REDIS_DB_PATH' and 'SR2_FLASH_DB_PATH'
+[[export]] SR2_REDIS_DATA="/nvdrive0/nvkvs/redis"
+[[export]] SR2_REDIS_DB_PATH="/nvdrive0/nvkvs/redis"
+[[export]] SR2_FLASH_DB_PATH="/nvdrive0/nvkvs/flash"
+
+## multiple data directory in redis db and flash db
+export SSD_COUNT=3
+[[export]] HDD_COUNT=3
+export SR2_REDIS_DATA="~/sata_ssd/ssd_"
+export SR2_REDIS_DB_PATH="~/sata_ssd/ssd_"
+export SR2_FLASH_DB_PATH="~/sata_ssd/ssd_"
+
+#######################################################
+# Example : only SSD data directory
+[[export]] SSD_COUNT=3
+[[export]] SR2_REDIS_DATA="/ssd_"
+[[export]] SR2_REDIS_DB_PATH="/ssd_"
+[[export]] SR2_FLASH_DB_PATH="/ssd_"
+#######################################################
+
+

Modify SR2_REDIS_SLAVE_HOSTS and SR2_REDIS_SLAVE_PORTS as shown below.

+
#!/bin/bash
+
+## Master hosts and ports
+export SR2_REDIS_MASTER_HOSTS=( "127.0.0.1" )
+export SR2_REDIS_MASTER_PORTS=( $(seq 18300 18304) )
+
+## Slave hosts and ports (optional)
+export SR2_REDIS_SLAVE_HOSTS=( "127.0.0.1" )
+export SR2_REDIS_SLAVE_PORTS=( $(seq 18350 18354) )
+
+## only single data directory in redis db and flash db
+## Must exist below variables; 'SR2_REDIS_DATA', 'SR2_REDIS_DB_PATH' and 'SR2_FLASH_DB_PATH'
+[[export]] SR2_REDIS_DATA="/nvdrive0/nvkvs/redis"
+[[export]] SR2_REDIS_DB_PATH="/nvdrive0/nvkvs/redis"
+[[export]] SR2_FLASH_DB_PATH="/nvdrive0/nvkvs/flash"
+
+## multiple data directory in redis db and flash db
+export SSD_COUNT=3
+[[export]] HDD_COUNT=3
+export SR2_REDIS_DATA="~/sata_ssd/ssd_"
+export SR2_REDIS_DB_PATH="~/sata_ssd/ssd_"
+export SR2_FLASH_DB_PATH="~/sata_ssd/ssd_"
+
+#######################################################
+# Example : only SSD data directory
+[[export]] SSD_COUNT=3
+[[export]] SR2_REDIS_DATA="/ssd_"
+[[export]] SR2_REDIS_DB_PATH="/ssd_"
+[[export]] SR2_FLASH_DB_PATH="/ssd_"
+#######################################################
+
+

Save the modification and exit.

+
ec2-user@lightningdb:3> conf cluster
+Check status of hosts...
+OK
+sync conf
+OK
+Complete edit
+
+
    +
  • Execute cluster add-slave command
  • +
+
ec2-user@lightningdb:3> cluster add-slave
+Check status of hosts...
+OK
+Check cluster exist...
+ - 127.0.0.1
+OK
+clean redis conf, node conf, db data of master
+clean redis conf, node conf, db data of slave
+ - 127.0.0.1
+Backup redis slave log in each SLAVE hosts...
+ - 127.0.0.1
+create redis data directory in each SLAVE hosts
+ - 127.0.0.1
+sync conf
+OK
+Starting slave nodes : 127.0.0.1 : 18350|18351|18352|18353|18354 ...
+Wait until all redis process up...
+cur: 10 / total: 10
+Complete all redis process up
+replicate [M] 127.0.0.1 18300 - [S] 127.0.0.1 18350
+replicate [M] 127.0.0.1 18301 - [S] 127.0.0.1 18351
+replicate [M] 127.0.0.1 18302 - [S] 127.0.0.1 18352
+replicate [M] 127.0.0.1 18303 - [S] 127.0.0.1 18353
+replicate [M] 127.0.0.1 18304 - [S] 127.0.0.1 18354
+5 / 5 meet complete.
+
+
    +
  • Check configuration information
  • +
+
ec2-user@lightningdb:3> cli cluster nodes
+0549ec03031213f95121ceff6c9c13800aef848c 127.0.0.1:18303 master - 0 1574132251126 3 connected 3280-6555
+1b09519d37ebb1c09095158b4f1c9f318ddfc747 127.0.0.1:18352 slave a6a8013cf0032f0f36baec3162122b3d993dd2c8 0 1574132251025 6 connected
+c7dc4815e24054104dff61cac6b13256a84ac4ae 127.0.0.1:18353 slave 0549ec03031213f95121ceff6c9c13800aef848c 0 1574132251126 3 connected
+0ab96cb79165ddca7d7134f80aea844bd49ae2e1 127.0.0.1:18351 slave 7e97f8a8799e1e28feee630b47319e6f5e1cfaa7 0 1574132250724 4 connected
+7e97f8a8799e1e28feee630b47319e6f5e1cfaa7 127.0.0.1:18301 master - 0 1574132250524 4 connected 9832-13107
+e67005a46984445e559a1408dd0a4b24a8c92259 127.0.0.1:18304 master - 0 1574132251126 5 connected 6556-9831
+a6a8013cf0032f0f36baec3162122b3d993dd2c8 127.0.0.1:18302 master - 0 1574132251126 2 connected 13108-16383
+492cdf4b1dedab5fb94e7129da2a0e05f6c46c4f 127.0.0.1:18350 slave 83b7ef98b80a05a4ee795ae6b399c8cde54ad04e 0 1574132251126 6 connected
+f9f7fcee9009f25618e63d2771ee2529f814c131 127.0.0.1:18354 slave e67005a46984445e559a1408dd0a4b24a8c92259 0 1574132250724 5 connected
+83b7ef98b80a05a4ee795ae6b399c8cde54ad04e 127.0.0.1:18300 myself,master - 0 1574132250000 1 connected 0-3279
+
+
+

(2) Scale out

+

You can scale out the current cluster with a new server.

+

Examples

+
    +
  • Check the current distribution of masters/slaves in each server.
  • +
+

+matthew@lightningdb:21> cluster distribution
++-----------------------+--------+-------+
+| HOST                  | MASTER | SLAVE |
++-----------------------+--------+-------+
+| fbg04(192.168.111.41) |  3     |  3    |
+| TOTAL                 |  3     |  3    |
++-----------------------+--------+-------+
+
+
    +
  • Scale out with the new server.
  • +
+
matthew@lightningdb:21> cluster scaleout
+Please type hosts to scaleout separated by comma(,) [127.0.0.1]
+fbg05
+OK, ['fbg05']
+Check status of hosts...
+OK
+Checking cluster exist...
+ - fbg04
+ - fbg05
+OK
++-------+-------+--------+
+| HOST  | PORT  | TYPE   |
++-------+-------+--------+
+| fbg04 | 20100 | MASTER |
+| fbg04 | 20101 | MASTER |
+| fbg04 | 20102 | MASTER |
+| fbg05 | 20100 | MASTER |
+| fbg05 | 20101 | MASTER |
+| fbg05 | 20102 | MASTER |
+| fbg04 | 20150 | SLAVE  |
+| fbg04 | 20151 | SLAVE  |
+| fbg04 | 20152 | SLAVE  |
+| fbg05 | 20150 | SLAVE  |
+| fbg05 | 20151 | SLAVE  |
+| fbg05 | 20152 | SLAVE  |
++-------+-------+--------+
+replicas: 1
+Do you want to proceed with replicate according to the above information? (y/n)
+y
+Backup redis master log in each MASTER hosts...
+ - fbg04
+ - fbg05
+Backup redis slave log in each SLAVE hosts...
+ - fbg04
+ - fbg05
+create redis data directory in each MASTER
+ - fbg04
+ - fbg05
+create redis data directory in each SLAVE
+ - fbg04
+ - fbg05
+sync conf
+OK
+Starting master nodes : fbg04 : 20100|20101|20102 ...
+Starting master nodes : fbg05 : 20100|20101|20102 ...
+Starting slave nodes : fbg04 : 20150|20151|20152 ...
+Starting slave nodes : fbg05 : 20150|20151|20152 ...
+Wait until all redis process up...
+alive redis 12/12
+Complete all redis process up.
+Replicate [M] fbg04:20100 - [S] fbg05:20150
+Replicate [M] fbg04:20101 - [S] fbg05:20151
+Replicate [M] fbg04:20102 - [S] fbg05:20152
+Replicate [M] fbg05:20100 - [S] fbg04:20150
+Replicate [M] fbg05:20101 - [S] fbg04:20151
+Replicate [M] fbg05:20102 - [S] fbg04:20152
+6 / 6 replicate completion.
+M: 47f7f65f36fbf1eb89e29ce1fd2facd8bb646f15 192.168.111.41 20100 slots:5462-10922 (5461 slots)
+M: 2ee3d14c92321132e12cddb90dde8240ea6b8768 192.168.111.44 20101 slots: (0 slots)
+S: 0516e827969880b2322ae112e70e809b395c6d46 192.168.111.44 20151 slots: (0 slots)
+S: fd1466ec198951cbe7e172ae34bd5b3db66aa309 192.168.111.44 20150 slots: (0 slots)
+S: 28e4d04419c90c7b1bb4b067f9e15d4012d313b1 192.168.111.44 20152 slots: (0 slots)
+S: 56e1d3ab563b23bbf857a8f502d1c4b24ce74a3c 192.168.111.41 20151 slots: (0 slots)
+M: 00d9cea97499097645eecd0bddf0f4679a6f1be1 192.168.111.44 20100 slots: (0 slots)
+S: 9a21e798fc8d69a4b04910b9e4b87a69417d33fe 192.168.111.41 20150 slots: (0 slots)
+M: 6afbfe0ed8d701d269d8b2837253678d3452fb70 192.168.111.41 20102 slots:0-5461 (5462 slots)
+M: 7e2e3de6daebd6e144365d58db19629cfb1b87d1 192.168.111.41 20101 slots:10923-16383 (5461 slots)
+S: 1df738824e9d41622158a4102ba4aab355225747 192.168.111.41 20152 slots: (0 slots)
+M: 71334ecc4e6e1a707b0f7f6c85f0a75ece45f891 192.168.111.44 20102 slots: (0 slots)
+>>> Performing Cluster Check (using node 192.168.111.41:20100)
+[OK] All nodes agree about slots configuration.
+>>> Check for open slots...
+>>> Check slots coverage...
+[OK] All 16384 slots covered
+err_perc: 50.009156
+err_perc: 50.018308
+err_perc: 50.009156
+>>> Rebalancing across 6 nodes. Total weight = 6
+2ee3d14c92321132e12cddb90dde8240ea6b8768 balance is -2732
+00d9cea97499097645eecd0bddf0f4679a6f1be1 balance is -2731
+71334ecc4e6e1a707b0f7f6c85f0a75ece45f891 balance is -2731
+47f7f65f36fbf1eb89e29ce1fd2facd8bb646f15 balance is 2731
+7e2e3de6daebd6e144365d58db19629cfb1b87d1 balance is 2731
+6afbfe0ed8d701d269d8b2837253678d3452fb70 balance is 2732
+Moving 2732 slots from 6afbfe0ed8d701d269d8b2837253678d3452fb70 to 2ee3d14c92321132e12cddb90dde8240ea6b8768
+############################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################
+
+Moving 2731 slots from 7e2e3de6daebd6e144365d58db19629cfb1b87d1 to 00d9cea97499097645eecd0bddf0f4679a6f1be1
+###########################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################
+
+Moving 2731 slots from 47f7f65f36fbf1eb89e29ce1fd2facd8bb646f15 to 71334ecc4e6e1a707b0f7f6c85f0a75ece45f891
+###########################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################
+
+OK
+
+
    +
  • The result of scale out
  • +
+
matthew@lightningdb:21> cluster distribution
++-----------------------+--------+-------+
+| HOST                  | MASTER | SLAVE |
++-----------------------+--------+-------+
+| fbg04(192.168.111.41) | 3      | 3     |
+| fbg05(192.168.111.44) | 3      | 3     |
+| TOTAL                 | 6      | 6     |
++-----------------------+--------+-------+
+
+
+
+
  1. If a user types 'cfc 1', ${SR2_HOME} will be '~/tsr2/cluster_1/tsr2-assembly-1.0.0-SNAPSHOT'.
  2. 'cluster-node-time' can be set using the 'config set' command. Its default is 1,200,000 msec.
diff --git a/cli-conf/index.html b/cli-conf/index.html

Configuration - Lightning DB Docs

With conf commands, you can configure the cluster.

+

You can open the template file with one of the options below (cluster/master/thriftserver).

+

After saving the template file, the configuration will be synchronized with all nodes in the current cluster.

+

1. cluster

+

conf cluster will open the redis.properties file of the current cluster.

+
matthew@lightningdb:21> conf cluster
+Check status of hosts...
+OK
+Sync conf...
+OK
+Complete edit.
+
+
    +
  • Example of redis.properties
  • +
+
#!/bin/bash
+
+## Master hosts and ports
+export SR2_REDIS_MASTER_HOSTS=( "192.168.111.41" "192.168.111.44" )
+export SR2_REDIS_MASTER_PORTS=( $(seq 20100 20102) )
+
+## Slave hosts and ports (optional)
+export SR2_REDIS_SLAVE_HOSTS=( "192.168.111.41"  "192.168.111.44" )
+export SR2_REDIS_SLAVE_PORTS=( $(seq 20150 20152) )
+
+
+## multiple data directory in redis db and flash db
+export SSD_COUNT=3
+export SR2_REDIS_DATA="/sata_ssd/ssd_02/matthew"
+export SR2_REDIS_DB_PATH="/sata_ssd/ssd_02/matthew"
+export SR2_FLASH_DB_PATH="/sata_ssd/ssd_02/matthew"
+
+

2. master

+

conf master will open the redis-master.conf.template file of the current cluster. This file configures all redis-servers in the current cluster.

+
matthew@lightningdb:21> conf master
+Check status of hosts...
+OK
+Sync conf...
+OK
+Complete edit.
+
+
    +
  • Example of redis-master.conf.template
  • +
+
# In short... if you have slaves attached it is suggested that you set a lower
+# limit for maxmemory so that there is some free RAM on the system for slave
+# output buffers (but this is not needed if the policy is 'noeviction').
+#
+# maxmemory <bytes>
+# maxmemory should be greater than 51mb in TSR2
+maxmemory 300mb
+
+

3. thriftserver

+

conf thriftserver will open the thriftserver.properties file of the current thriftserver.

+
matthew@lightningdb:21> conf thriftserver
+Check status of hosts...
+OK
+Sync conf...
+OK
+Complete edit.
+
+
    +
  • Example of thriftserver.properties
  • +
+
#!/bin/bash
+###############################################################################
+# Common variables
+SPARK_CONF=${SPARK_CONF:-$SPARK_HOME/conf}
+SPARK_BIN=${SPARK_BIN:-$SPARK_HOME/bin}
+SPARK_SBIN=${SPARK_SBIN:-$SPARK_HOME/sbin}
+SPARK_LOG=${SPARK_LOG:-$SPARK_HOME/logs}
+
+SPARK_METRICS=${SPARK_CONF}/metrics.properties
+SPARK_UI_PORT=${SPARK_UI_PORT:-14050}
+EXECUTERS=12
+EXECUTER_CORES=32
+
+HIVE_METASTORE_URL=''
+HIVE_HOST=${HIVE_HOST:-localhost}
+HIVE_PORT=${HIVE_PORT:-13000}
+
+COMMON_CLASSPATH=$(find $SR2_LIB -name 'tsr2*' -o -name 'spark-r2*' -o -name '*jedis*' -o -name 'commons*' -o -name 'jdeferred*' \
+-o -name 'geospark*' -o -name 'gt-*' | tr '\n' ':')
+
+###############################################################################
+# Driver
+DRIVER_MEMORY=6g
+DRIVER_CLASSPATH=$COMMON_CLASSPATH
+
+###############################################################################
+# Execute
+EXECUTOR_MEMORY=2g
+EXECUTOR_CLASSPATH=$COMMON_CLASSPATH
+
+###############################################################################
+# Thrift Server logs
+EVENT_LOG_ENABLED=false
+EVENT_LOG_DIR=/nvdrive0/thriftserver-event-logs
+EVENT_LOG_ROLLING_DIR=/nvdrive0/thriftserver-event-logs-rolling
+EVENT_LOG_SAVE_MIN=60
+EXTRACTED_EVENT_LOG_SAVE_DAY=5
+SPARK_LOG_SAVE_MIN=2000
+##############
+
+########################
+# Thrift Name
+cluster_id=$(echo $SR2_HOME | awk -F "cluster_" '{print $2}' | awk -F '/' '{print $1}')
+host=$(hostname)
+THRIFT_NAME="ThriftServer_${host}_${cluster_id}"
+########################
+
+###############################################################################
+# AGGREGATION PUSHDOWN
+AGG_PUSHDOWN=true
+###############################################################################
+
+

4. sync

+

With the sync {IP address} or sync {hostname} command, you can load the configurations of all clusters from the remote server to localhost.

+
matthew@lightningdb:21> sync fbg04
+Localhost already has the information on the cluster 21. Do you want to overwrite? (y/n) [n]
+y
+Localhost already has the information on the cluster 20. Do you want to overwrite? (y/n) [n]
+n
+Importing cluster complete...
+
+
\ No newline at end of file
diff --git a/cli-thriftserver/index.html b/cli-thriftserver/index.html
new file mode 100644
index 0000000..d9a68cd
--- /dev/null
+++ b/cli-thriftserver/index.html
@@ -0,0 +1,968 @@
+Thriftserver - Lightning DB Docs
+

If you want to see the list of Thrift Server commands, use the thriftserver command without any option.

+
NAME
+    ltcli thriftserver
+
+SYNOPSIS
+    ltcli thriftserver COMMAND
+
+COMMANDS
+    COMMAND is one of the following:
+
+     beeline
+       Connect to thriftserver command line
+
+     monitor
+       Show thriftserver log
+
+     restart
+       Thriftserver restart
+
+     start
+       Start thriftserver
+
+     stop
+       Stop thriftserver
+
+

1. Thriftserver beeline

+

Connect to the thrift server

+
ec2-user@lightningdb:1> thriftserver beeline
+Connecting...
+Connecting to jdbc:hive2://localhost:13000
+19/11/19 04:45:18 INFO jdbc.Utils: Supplied authorities: localhost:13000
+19/11/19 04:45:18 INFO jdbc.Utils: Resolved authority: localhost:13000
+19/11/19 04:45:18 INFO jdbc.HiveConnection: Will try to open client transport with JDBC Uri: jdbc:hive2://localhost:13000
+Connected to: Spark SQL (version 2.3.1)
+Driver: Hive JDBC (version 1.2.1.spark2)
+Transaction isolation: TRANSACTION_REPEATABLE_READ
+Beeline version 1.2.1.spark2 by Apache Hive
+0: jdbc:hive2://localhost:13000> show tables;
++-----------+------------+--------------+--+
+| database  | tableName  | isTemporary  |
++-----------+------------+--------------+--+
++-----------+------------+--------------+--+
+No rows selected (0.55 seconds)
+
+

The default DB URL to connect to is jdbc:hive2://$HIVE_HOST:$HIVE_PORT.

+

You can modify $HIVE_HOST and $HIVE_PORT with the conf thriftserver command.

+
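
You can also connect with beeline directly by passing the JDBC URL yourself. This is only a sketch: the address below is a placeholder for your own $HIVE_HOST and $HIVE_PORT values, and it assumes the beeline shipped under $SPARK_HOME/bin.

+
# Connect beeline to the thrift server at a custom host and port
+# (replace localhost:13000 with your own $HIVE_HOST:$HIVE_PORT)
+$SPARK_HOME/bin/beeline -u jdbc:hive2://localhost:13000
+
+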

2. Thriftserver monitor

+

You can view the logs of the thrift server in real-time.

+
ec2-user@lightningdb:1> thriftserver monitor
+Press Ctrl-C for exit.
+19/11/19 04:43:33 INFO storage.BlockManagerMasterEndpoint: Registering block manager ip-172-31-39-147.ap-northeast-2.compute.internal:35909 with 912.3 MB RAM, BlockManagerId(4, ip-172-31-39-147.ap-northeast-2.compute.internal, 35909, None)
+19/11/19 04:43:33 INFO cluster.YarnSchedulerBackend$YarnDriverEndpoint: Registered executor NettyRpcEndpointRef(spark-client://Executor) (172.31.39.147:53604) with ID 5
+19/11/19 04:43:33 INFO storage.BlockManagerMasterEndpoint: Registering block manager
+...
+
+

3. Thriftserver restart

+

Restart the thrift server.

+
ec2-user@lightningdb:1> thriftserver restart
+no org.apache.spark.sql.hive.thriftserver.HiveThriftServer2 to stop
+starting org.apache.spark.sql.hive.thriftserver.HiveThriftServer2, logging to /opt/spark/logs/spark-ec2-user-org.apache.spark.sql.hive.thriftserver.HiveThriftServer2-1-ip-172-31-39-147.ap-northeast-2.compute.internal.out
+
+

4. Start thriftserver

+

Run the thrift server.

+
ec2-user@lightningdb:1> thriftserver start
+starting org.apache.spark.sql.hive.thriftserver.HiveThriftServer2, logging to /opt/spark/logs/spark-ec2-user-org.apache.spark.sql.hive.thriftserver.HiveThriftServer2-1-ip-172-31-39-147.ap-northeast-2.compute.internal.out
+
+

You can view the logs through the monitor command.

+

5. Stop thriftserver

+

Shut down the thrift server.

+
ec2-user@lightningdb:1> thriftserver stop
+stopping org.apache.spark.sql.hive.thriftserver.HiveThriftServer2
+
+
\ No newline at end of file
diff --git a/cli-version/index.html b/cli-version/index.html
new file mode 100644
index 0000000..9763669
--- /dev/null
+++ b/cli-version/index.html
@@ -0,0 +1,912 @@
+Version - Lightning DB Docs
+

1. LTCLI

+

You can check the version of the LTCLI tool.

+
$ ltcli --version
+ltcli version 1.1.5
+
+

2. Lightning DB

+

You can check the version of Lightning DB that is deployed in each cluster.

+
$ ltcli
+Cluster '21' selected.
+matthew@lightningdb:21> cluster version
+- build date : 20200820-173819
+- branch: release.flashbase_v1.2.3
+- last commit-id: 45814d
+- output binary: lightningdb.release.release.flashbase_v1.2.3.45814d.bin
+matthew@lightningdb:21>
+
+
\ No newline at end of file
diff --git a/command-line-interface/index.html b/command-line-interface/index.html
new file mode 100644
index 0000000..eeced98
--- /dev/null
+++ b/command-line-interface/index.html
@@ -0,0 +1,1718 @@
+Command line interface - Lightning DB Docs
+
+

Note

+

The Command Line Interface (CLI) of LightningDB supports not only the deploy and start commands but also many commands to access and manipulate data in LightningDB.

+
+

1. Cluster Commands

+

If you want to see the list of cluster commands, use the cluster command without any option.

+
ec2-user@lightningdb:1> cluster
+
+NAME
+    ltcli cluster - This is cluster command
+
+SYNOPSIS
+    ltcli cluster COMMAND
+
+DESCRIPTION
+    This is cluster command
+
+COMMANDS
+    COMMAND is one of the following:
+
+     add_slave
+       Add slaves to cluster additionally
+
+     clean
+       Clean cluster
+
+     configure
+
+     create
+       Create cluster
+
+     ls
+       Check cluster list
+
+     rebalance
+       Rebalance
+
+     restart
+       Restart redist cluster
+
+     rowcount
+       Query and show cluster row count
+
+     start
+       Start cluster
+
+     stop
+       Stop cluster
+
+     use
+       Change selected cluster
+
+

(1) Cluster configure

+

redis-{port}.conf is generated using the redis-{master/slave}.conf.template and redis.properties files.

+
> cluster configure
+
+

(2) Cluster start

+
    +
  • Backup logs of the previous master/slave nodes
      +
    • All log files of previous master/slave nodes in ${SR2_HOME}/logs/redis/1 will be moved to ${SR2_HOME}/logs/redis/backup/.
    • +
    +
  • +
  • Generate directories to save data
      +
    • Save aof and rdb files of redis-server and RocksDB files in ${SR2_REDIS_DATA}
    • +
    +
  • +
  • Start redis-server process
      +
    • Start master and slave redis-server with ${SR2_HOME}/conf/redis/redis-{port}.conf file
    • +
    +
  • +
  • Log files will be saved in ${SR2_HOME}/logs/redis/
  • +
+
ec2-user@lightningdb:1> cluster start
+Check status of hosts...
+OK
+Check cluster exist...
+ - 127.0.0.1
+OK
+Backup redis master log in each MASTER hosts...
+ - 127.0.0.1
+Generate redis configuration files for master hosts
+sync conf
++-----------+--------+
+| HOST      | STATUS |
++-----------+--------+
+| 127.0.0.1 | OK     |
++-----------+--------+
+Starting master nodes : 127.0.0.1 : 18100|18101|18102|18103|18104 ...
+Wait until all redis process up...
+cur: 5 / total: 5
+Complete all redis process up
+
+

Errors

+
    +
  • ErrorCode 11
  • +
+

A redis-server (master) process with the same port is already running. To resolve this error, use cluster stop or kill {pid of the process}.

+
$ cluster start
+...
+...
+[ErrorCode 11] Fail to start... Must be checked running MASTER redis processes!
+We estimate that redis process is <alive-redis-count>.
+
+
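
If you need to find which process is holding the port, a sketch like the one below can help; port 18100 is only an illustration, so use the port reported in your own error message.

+
# Find the leftover redis-server process on the conflicting port, then stop it
+ps -ef | grep redis-server | grep 18100
+kill {pid of the process}
+
+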
    +
  • ErrorCode 12
  • +
+

A redis-server (slave) process with the same port is already running. To resolve this error, use cluster stop or kill {pid of the process}.

+
$ cluster start
+...
+[ErrorCode 12] Fail to start... Must be checked running SLAVE redis processes!
+We estimate that redis process is <alive-redis-count>.
+
+
    +
  • Conf file not exist
  • +
+

The conf file is not found. To resolve this error, run cluster configure and then cluster start.

+
$ cluster start
+...
+FileNotExistError: ${SR2_HOME}/conf/redis/redis-{port}.conf
+
+
    +
  • Max try error. For detailed information, please check the log files.
  • +
+
$ cluster start
+...
+ClusterRedisError: Fail to start redis: max try exceed
+Recommendation Command: 'monitor'
+
+

(3) Cluster create

+

After checking the information of the cluster, create a cluster of LightningDB.

+

Case 1) When redis-server processes are running, create a cluster only.

+
ec2-user@lightningdb:1>cluster create
+Check status of hosts...
+OK
+>>> Creating cluster
++-----------+-------+--------+
+| HOST      | PORT  | TYPE   |
++-----------+-------+--------+
+| 127.0.0.1 | 18100 | MASTER |
+| 127.0.0.1 | 18101 | MASTER |
+| 127.0.0.1 | 18102 | MASTER |
+| 127.0.0.1 | 18103 | MASTER |
+| 127.0.0.1 | 18104 | MASTER |
++-----------+-------+--------+
+replicas: 0
+
+Do you want to proceed with the create according to the above information? (y/n)
+y
+Cluster meet...
+ - 127.0.0.1:18100
+ - 127.0.0.1:18103
+ - 127.0.0.1:18104
+ - 127.0.0.1:18101
+ - 127.0.0.1:18102
+Adding slots...
+ - 127.0.0.1:18100, 3280
+ - 127.0.0.1:18103, 3276
+ - 127.0.0.1:18104, 3276
+ - 127.0.0.1:18101, 3276
+ - 127.0.0.1:18102, 3276
+Check cluster state and asign slot...
+Ok
+create cluster complete.
+
+

Case 2) When redis-server processes are not running, create a cluster after launching redis-server processes with cluster start command.

+
ec2-user@lightningdb:4>cluster create
+Check status of hosts...
+OK
+Backup redis master log in each MASTER hosts...
+ - 127.0.0.1
+create redis data directory in each MASTER hosts
+ - 127.0.0.1
+sync conf
++-----------+--------+
+| HOST      | STATUS |
++-----------+--------+
+| 127.0.0.1 | OK     |
++-----------+--------+
+OK
+Starting master nodes : 127.0.0.1 : 18100|18101|18102|18103|18104 ...
+Wait until all redis process up...
+cur: 5 / total: 5
+Complete all redis process up
+>>> Creating cluster
++-----------+-------+--------+
+| HOST      | PORT  | TYPE   |
++-----------+-------+--------+
+| 127.0.0.1 | 18100 | MASTER |
+| 127.0.0.1 | 18101 | MASTER |
+| 127.0.0.1 | 18102 | MASTER |
+| 127.0.0.1 | 18103 | MASTER |
+| 127.0.0.1 | 18104 | MASTER |
++-----------+-------+--------+
+replicas: 0
+
+Do you want to proceed with the create according to the above information? (y/n)
+y
+Cluster meet...
+ - 127.0.0.1:18103
+ - 127.0.0.1:18104
+ - 127.0.0.1:18101
+ - 127.0.0.1:18102
+ - 127.0.0.1:18100
+Adding slots...
+ - 127.0.0.1:18103, 3280
+ - 127.0.0.1:18104, 3276
+ - 127.0.0.1:18101, 3276
+ - 127.0.0.1:18102, 3276
+ - 127.0.0.1:18100, 3276
+Check cluster state and asign slot...
+Ok
+create cluster complete.
+
+

Errors

+

When redis servers are not running, this error (Errno 111) will occur. To resolve it, run the cluster start command first.

+
ec2-user@lightningdb:1>cluster create
+Check status of hosts...
+OK
+>>> Creating cluster
++-----------+-------+--------+
+| HOST      | PORT  | TYPE   |
++-----------+-------+--------+
+| 127.0.0.1 | 18100 | MASTER |
+| 127.0.0.1 | 18101 | MASTER |
+| 127.0.0.1 | 18102 | MASTER |
+| 127.0.0.1 | 18103 | MASTER |
+| 127.0.0.1 | 18104 | MASTER |
++-----------+-------+--------+
+replicas: 0
+
+Do you want to proceed with the create according to the above information? (y/n)
+y
+127.0.0.1:18100 - [Errno 111] Connection refused
+
+

(4) Cluster stop

+

Gracefully kill all redis-servers (master/slave) with SIGINT.

+
ec2-user@lightningdb:1> cluster stop
+Check status of hosts...
+OK
+Stopping master cluster of redis...
+cur: 5 / total: 5
+cur: 0 / total: 5
+Complete all redis process down
+
+

Options

+
    +
  • Force to kill all redis-servers(master/slave) with SIGKILL
  • +
+
--force
+
+

(5) Cluster clean

+

Remove the conf files for redis-server and all data (aof, rdb, RocksDB) of LightningDB.

+
ec2-user@lightningdb:1> cluster clean
+Removing redis generated master configuration files
+ - 127.0.0.1
+Removing flash db directory, appendonly and dump.rdb files in master
+ - 127.0.0.1
+Removing master node configuration
+ - 127.0.0.1
+
+

(6) Cluster restart​

+

Run cluster stop and then cluster start.

+

Options

+
    +
  • Force to kill all redis-servers(master/slave) with SIGKILL and then start again.
  • +
+
--force-stop
+
+
    +
  • Remove all data(aof, rdb, RocksDB, conf files) before starting again.
  • +
+
--reset
+
+
    +
  • Run cluster create as well. This option should be used together with --reset, as in the sketch after this list.
  • +
+
--cluster
+
+
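
A hedged usage example that combines the options above (the prompt is illustrative only):

+
ec2-user@lightningdb:1> cluster restart --force-stop --reset --cluster
+
+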

(7) Cluster ls

+

List the deployed clusters.

+
ec2-user@lightningdb:2> cluster ls
+[1, 2]
+
+

(8) Cluster use

+

Change the cluster that LTCLI currently operates on, using the cluster use or c command.

+
ec2-user@lightningdb:2> cluster use 1
+Cluster '1' selected.
+ec2-user@lightningdb:1> c 2
+Cluster '2' selected.
+
+

(9) Cluster add_slave

+
+

Warning

+

Before using the add-slave command, ingestion to master nodes should be stopped. After replication and sync between master and slave are completed, ingestion will be available again.

+
+

You can add slaves to a cluster that is configured only with masters, without redundancy.

+
    +
  • +

    Create cluster only with masters

    +
      +
    • This is the procedure for configuring a test environment. If a cluster with only masters already exists, skip to the Add slave info step.
    • +
    +
  • +
  • +

    Proceed with the deploy.

    +
      +
    • Enter 0 for replicas, as shown below, when deploying.
    • +
    +
  • +
+
ec2-user@lightningdb:2> deploy 3
+Select installer
+
+    [ INSTALLER LIST ]
+    (1) lightningdb.dev.master.5a6a38.bin
+
+Please enter the number, file path or url of the installer you want to use.
+you can also add file in list by copy to '$FBPATH/releases/'
+https://flashbase.s3.ap-northeast-2.amazonaws.com/lightningdb.release.master.5a6a38.bin
+Downloading lightningdb.release.master.5a6a38.bin
+[==================================================] 100%
+OK, lightningdb.release.master.5a6a38.bin
+Please type host list separated by comma(,) [127.0.0.1]
+
+OK, ['127.0.0.1']
+How many masters would you like to create on each host? [5]
+
+OK, 5
+Please type ports separate with comma(,) and use hyphen(-) for range. [18300-18304]
+
+OK, ['18300-18304']
+How many replicas would you like to create on each master? [0]
+
+OK, 0
+How many ssd would you like to use? [3]
+
+OK, 3
+Type prefix of db path [~/sata_ssd/ssd_]
+
+OK, ~/sata_ssd/ssd_
++--------------+---------------------------------+
+| NAME         | VALUE                           |
++--------------+---------------------------------+
+| installer    | lightningdb.dev.master.5a6a38.bin |
+| hosts        | 127.0.0.1                       |
+| master ports | 18300-18304                     |
+| ssd count    | 3                               |
+| db path      | ~/sata_ssd/ssd_                 |
++--------------+---------------------------------+
+Do you want to proceed with the deploy accroding to the above information? (y/n)
+y
+Check status of hosts...
++-----------+--------+
+| HOST      | STATUS |
++-----------+--------+
+| 127.0.0.1 | OK     |
++-----------+--------+
+OK
+Checking for cluster exist...
++-----------+--------+
+| HOST      | STATUS |
++-----------+--------+
+| 127.0.0.1 | CLEAN  |
++-----------+--------+
+OK
+Transfer installer and execute...
+ - 127.0.0.1
+Sync conf...
+Complete to deploy cluster 3.
+Cluster '3' selected.
+
+
    +
  • When the deploy is complete, start and create the cluster.
  • +
+
ec2-user@lightningdb:3> cluster start
+Check status of hosts...
+OK
+Check cluster exist...
+ - 127.0.0.1
+OK
+Backup redis master log in each MASTER hosts...
+ - 127.0.0.1
+create redis data directory in each MASTER hosts
+ - 127.0.0.1
+sync conf
++-----------+--------+
+| HOST      | STATUS |
++-----------+--------+
+| 127.0.0.1 | OK     |
++-----------+--------+
+OK
+Starting master nodes : 127.0.0.1 : 18300|18301|18302|18303|18304 ...
+Wait until all redis process up...
+cur: 5 / total: 5
+Complete all redis process up
+ec2-user@lightningdb:3> cluster create
+Check status of hosts...
+OK
+>>> Creating cluster
++-----------+-------+--------+
+| HOST      | PORT  | TYPE   |
++-----------+-------+--------+
+| 127.0.0.1 | 18300 | MASTER |
+| 127.0.0.1 | 18301 | MASTER |
+| 127.0.0.1 | 18302 | MASTER |
+| 127.0.0.1 | 18303 | MASTER |
+| 127.0.0.1 | 18304 | MASTER |
++-----------+-------+--------+
+replicas: 0
+
+Do you want to proceed with the create according to the above information? (y/n)
+y
+Cluster meet...
+ - 127.0.0.1:18300
+ - 127.0.0.1:18303
+ - 127.0.0.1:18304
+ - 127.0.0.1:18301
+ - 127.0.0.1:18302
+Adding slots...
+ - 127.0.0.1:18300, 3280
+ - 127.0.0.1:18303, 3276
+ - 127.0.0.1:18304, 3276
+ - 127.0.0.1:18301, 3276
+ - 127.0.0.1:18302, 3276
+Check cluster state and asign slot...
+Ok
+create cluster complete.
+ec2-user@lightningdb:3>
+
+
    +
  • Add slave info
  • +
+

Open the conf file.

+
ec2-user@lightningdb:3> conf cluster
+
+

You can modify redis.properties by entering the command as shown above.

+
#!/bin/bash
+
+## Master hosts and ports
+export SR2_REDIS_MASTER_HOSTS=( "127.0.0.1" )
+export SR2_REDIS_MASTER_PORTS=( $(seq 18300 18304) )
+
+## Slave hosts and ports (optional)
+[[export]] SR2_REDIS_SLAVE_HOSTS=( "127.0.0.1" )
+[[export]] SR2_REDIS_SLAVE_PORTS=( $(seq 18600 18609) )
+
+## only single data directory in redis db and flash db
+## Must exist below variables; 'SR2_REDIS_DATA', 'SR2_REDIS_DB_PATH' and 'SR2_FLASH_DB_PATH'
+[[export]] SR2_REDIS_DATA="/nvdrive0/nvkvs/redis"
+[[export]] SR2_REDIS_DB_PATH="/nvdrive0/nvkvs/redis"
+[[export]] SR2_FLASH_DB_PATH="/nvdrive0/nvkvs/flash"
+
+## multiple data directory in redis db and flash db
+export SSD_COUNT=3
+[[export]] HDD_COUNT=3
+export SR2_REDIS_DATA="~/sata_ssd/ssd_"
+export SR2_REDIS_DB_PATH="~/sata_ssd/ssd_"
+export SR2_FLASH_DB_PATH="~/sata_ssd/ssd_"
+
+#######################################################
+# Example : only SSD data directory
+[[export]] SSD_COUNT=3
+[[export]] SR2_REDIS_DATA="/ssd_"
+[[export]] SR2_REDIS_DB_PATH="/ssd_"
+[[export]] SR2_FLASH_DB_PATH="/ssd_"
+#######################################################
+
+

Modify SR2_REDIS_SLAVE_HOSTS and SR2_REDIS_SLAVE_PORTS as shown below.

+
#!/bin/bash
+
+## Master hosts and ports
+export SR2_REDIS_MASTER_HOSTS=( "127.0.0.1" )
+export SR2_REDIS_MASTER_PORTS=( $(seq 18300 18304) )
+
+## Slave hosts and ports (optional)
+export SR2_REDIS_SLAVE_HOSTS=( "127.0.0.1" )
+export SR2_REDIS_SLAVE_PORTS=( $(seq 18350 18354) )
+
+## only single data directory in redis db and flash db
+## Must exist below variables; 'SR2_REDIS_DATA', 'SR2_REDIS_DB_PATH' and 'SR2_FLASH_DB_PATH'
+[[export]] SR2_REDIS_DATA="/nvdrive0/nvkvs/redis"
+[[export]] SR2_REDIS_DB_PATH="/nvdrive0/nvkvs/redis"
+[[export]] SR2_FLASH_DB_PATH="/nvdrive0/nvkvs/flash"
+
+## multiple data directory in redis db and flash db
+export SSD_COUNT=3
+[[export]] HDD_COUNT=3
+export SR2_REDIS_DATA="~/sata_ssd/ssd_"
+export SR2_REDIS_DB_PATH="~/sata_ssd/ssd_"
+export SR2_FLASH_DB_PATH="~/sata_ssd/ssd_"
+
+#######################################################
+# Example : only SSD data directory
+[[export]] SSD_COUNT=3
+[[export]] SR2_REDIS_DATA="/ssd_"
+[[export]] SR2_REDIS_DB_PATH="/ssd_"
+[[export]] SR2_FLASH_DB_PATH="/ssd_"
+#######################################################
+
+

Save the modification and exit.

+
ec2-user@lightningdb:3> conf cluster
+Check status of hosts...
+OK
+sync conf
+OK
+Complete edit
+
+
    +
  • Execute cluster add-slave command
  • +
+
ec2-user@lightningdb:3> cluster add-slave
+Check status of hosts...
+OK
+Check cluster exist...
+ - 127.0.0.1
+OK
+clean redis conf, node conf, db data of master
+clean redis conf, node conf, db data of slave
+ - 127.0.0.1
+Backup redis slave log in each SLAVE hosts...
+ - 127.0.0.1
+create redis data directory in each SLAVE hosts
+ - 127.0.0.1
+sync conf
+OK
+Starting slave nodes : 127.0.0.1 : 18350|18351|18352|18353|18354 ...
+Wait until all redis process up...
+cur: 10 / total: 10
+Complete all redis process up
+replicate [M] 127.0.0.1 18300 - [S] 127.0.0.1 18350
+replicate [M] 127.0.0.1 18301 - [S] 127.0.0.1 18351
+replicate [M] 127.0.0.1 18302 - [S] 127.0.0.1 18352
+replicate [M] 127.0.0.1 18303 - [S] 127.0.0.1 18353
+replicate [M] 127.0.0.1 18304 - [S] 127.0.0.1 18354
+5 / 5 meet complete.
+
+
    +
  • Check configuration information
  • +
+
ec2-user@lightningdb:3> cli cluster nodes
+0549ec03031213f95121ceff6c9c13800aef848c 127.0.0.1:18303 master - 0 1574132251126 3 connected 3280-6555
+1b09519d37ebb1c09095158b4f1c9f318ddfc747 127.0.0.1:18352 slave a6a8013cf0032f0f36baec3162122b3d993dd2c8 0 1574132251025 6 connected
+c7dc4815e24054104dff61cac6b13256a84ac4ae 127.0.0.1:18353 slave 0549ec03031213f95121ceff6c9c13800aef848c 0 1574132251126 3 connected
+0ab96cb79165ddca7d7134f80aea844bd49ae2e1 127.0.0.1:18351 slave 7e97f8a8799e1e28feee630b47319e6f5e1cfaa7 0 1574132250724 4 connected
+7e97f8a8799e1e28feee630b47319e6f5e1cfaa7 127.0.0.1:18301 master - 0 1574132250524 4 connected 9832-13107
+e67005a46984445e559a1408dd0a4b24a8c92259 127.0.0.1:18304 master - 0 1574132251126 5 connected 6556-9831
+a6a8013cf0032f0f36baec3162122b3d993dd2c8 127.0.0.1:18302 master - 0 1574132251126 2 connected 13108-16383
+492cdf4b1dedab5fb94e7129da2a0e05f6c46c4f 127.0.0.1:18350 slave 83b7ef98b80a05a4ee795ae6b399c8cde54ad04e 0 1574132251126 6 connected
+f9f7fcee9009f25618e63d2771ee2529f814c131 127.0.0.1:18354 slave e67005a46984445e559a1408dd0a4b24a8c92259 0 1574132250724 5 connected
+83b7ef98b80a05a4ee795ae6b399c8cde54ad04e 127.0.0.1:18300 myself,master - 0 1574132250000 1 connected 0-3279
+
+
+

(10) Cluster rowcount

+

Check the count of records that are stored in the cluster.

+
ec2-user@lightningdb:1> cluster rowcount
+0
+
+

(11) Check the status of cluster

+

With the following commands, you can check the status of the cluster.

+
    +
  • Send PING
  • +
+
ec2-user@lightningdb:1> cli ping --all
+alive redis 10/10
+
+

If a node does not reply, the failed node will be displayed as below.

+
+-------+-----------------+--------+
+| TYPE  | ADDR            | RESULT |
++-------+-----------------+--------+
+| Slave | 127.0.0.1:18352 | FAIL   |
++-------+-----------------+--------+
+alive redis 9/10
+
+
    +
  • Check the status of the cluster
  • +
+
ec2-user@lightningdb:1> cli cluster info
+cluster_state:ok
+cluster_slots_assigned:16384
+cluster_slots_ok:16384
+cluster_slots_pfail:0
+cluster_slots_fail:0
+cluster_known_nodes:5
+cluster_size:5
+cluster_current_epoch:4
+cluster_my_epoch:2
+cluster_stats_messages_ping_sent:12
+cluster_stats_messages_pong_sent:14
+cluster_stats_messages_sent:26
+cluster_stats_messages_ping_received:10
+cluster_stats_messages_pong_received:12
+cluster_stats_messages_meet_received:4
+cluster_stats_messages_received:26
+
+
    +
  • Check the list of nodes that make up the cluster.
  • +
+
ec2-user@lightningdb:1> cli cluster nodes
+559af5e90c3f2c92f19c927c29166c268d938e8f 127.0.0.1:18104 master - 0 1574127926000 4 connected 6556-9831
+174e2a62722273fb83814c2f12e2769086c3d185 127.0.0.1:18101 myself,master - 0 1574127925000 3 connected 9832-13107
+35ab4d3f7f487c5332d7943dbf4b20d5840053ea 127.0.0.1:18100 master - 0 1574127926000 1 connected 0-3279
+f39ed05ace18e97f74c745636ea1d171ac1d456f 127.0.0.1:18103 master - 0 1574127927172 0 connected 3280-6555
+9fd612b86a9ce1b647ba9170b8f4a8bfa5c875fc 127.0.0.1:18102 master - 0 1574127926171 2 connected 13108-16383
+
+

(12) Cluster tree

+

You can check the status of the master and slave nodes and see which master and slave nodes are linked to each other.

+
ec2-user@lightningdb:9> cluster tree
+127.0.0.1:18900(connected)
+|__ 127.0.0.1:18950(connected)
+
+127.0.0.1:18901(connected)
+|__ 127.0.0.1:18951(connected)
+
+127.0.0.1:18902(connected)
+|__ 127.0.0.1:18952(connected)
+
+127.0.0.1:18903(connected)
+|__ 127.0.0.1:18953(connected)
+
+127.0.0.1:18904(connected)
+|__ 127.0.0.1:18954(connected)
+
+127.0.0.1:18905(connected)
+|__ 127.0.0.1:18955(connected)
+
+127.0.0.1:18906(connected)
+|__ 127.0.0.1:18956(connected)
+
+

(13) Cluster failover

+

If a master node is killed, its slave node will automatically be promoted after 'cluster-node-time'2.

+

You can promote the slave node immediately by using the 'cluster failover' command.

+

Step 1) Check the status of the cluster

+

In this case, '127.0.0.1:18902' node is killed.

+
ec2-user@lightningdb:9> cluster tree
+127.0.0.1:18900(connected)
+|__ 127.0.0.1:18950(connected)
+
+127.0.0.1:18901(connected)
+|__ 127.0.0.1:18951(connected)
+
+127.0.0.1:18902(disconnected)   <--- Killed!
+|__ 127.0.0.1:18952(connected)
+
+127.0.0.1:18903(connected)
+|__ 127.0.0.1:18953(connected)
+
+127.0.0.1:18904(connected)
+|__ 127.0.0.1:18954(connected)
+
+127.0.0.1:18905(connected)
+|__ 127.0.0.1:18955(connected)
+
+127.0.0.1:18906(connected)
+|__ 127.0.0.1:18956(connected)
+
+

Step 2) Do failover with 'cluster failover' command

+
ec2-user@lightningdb:9> cluster failover
+failover 127.0.0.1:18952 for 127.0.0.1:18902
+OK
+ec2-user@lightningdb:9> cluster tree
+127.0.0.1:18900(connected)
+|__ 127.0.0.1:18950(connected)
+
+127.0.0.1:18901(connected)
+|__ 127.0.0.1:18951(connected)
+
+127.0.0.1:18902(disconnected)   <--- Killed!
+
+127.0.0.1:18903(connected)
+|__ 127.0.0.1:18953(connected)
+
+127.0.0.1:18904(connected)
+|__ 127.0.0.1:18954(connected)
+
+127.0.0.1:18905(connected)
+|__ 127.0.0.1:18955(connected)
+
+127.0.0.1:18906(connected)
+|__ 127.0.0.1:18956(connected)
+
+127.0.0.1:18952(connected)      <--- Promoted to master!
+
+

(14) Cluster failback

+

With the 'cluster failback' command, the killed node is restarted and added back to the cluster as a slave node.

+
ec2-user@lightningdb:9> cluster failback
+run 127.0.0.1:18902
+ec2-user@lightningdb:9> cluster tree
+127.0.0.1:18900(connected)
+|__ 127.0.0.1:18950(connected)
+
+127.0.0.1:18901(connected)
+|__ 127.0.0.1:18951(connected)
+
+127.0.0.1:18903(connected)
+|__ 127.0.0.1:18953(connected)
+
+127.0.0.1:18904(connected)
+|__ 127.0.0.1:18954(connected)
+
+127.0.0.1:18905(connected)
+|__ 127.0.0.1:18955(connected)
+
+127.0.0.1:18906(connected)
+|__ 127.0.0.1:18956(connected)
+
+127.0.0.1:18952(connected)       <--- Promoted to master!
+|__ 127.0.0.1:18902(connected)   <--- Failbacked. Now this node is slave!
+
+

2. Thrift Server Commands

+

If you want to see the list of Thrift Server commands, use the thriftserver command without any option.

+
NAME
+    ltcli thriftserver
+
+SYNOPSIS
+    ltcli thriftserver COMMAND
+
+COMMANDS
+    COMMAND is one of the following:
+
+     beeline
+       Connect to thriftserver command line
+
+     monitor
+       Show thriftserver log
+
+     restart
+       Thriftserver restart
+
+     start
+       Start thriftserver
+
+     stop
+       Stop thriftserver
+
+

(1) Thriftserver beeline

+

Connect to the thrift server

+
ec2-user@lightningdb:1> thriftserver beeline
+Connecting...
+Connecting to jdbc:hive2://localhost:13000
+19/11/19 04:45:18 INFO jdbc.Utils: Supplied authorities: localhost:13000
+19/11/19 04:45:18 INFO jdbc.Utils: Resolved authority: localhost:13000
+19/11/19 04:45:18 INFO jdbc.HiveConnection: Will try to open client transport with JDBC Uri: jdbc:hive2://localhost:13000
+Connected to: Spark SQL (version 2.3.1)
+Driver: Hive JDBC (version 1.2.1.spark2)
+Transaction isolation: TRANSACTION_REPEATABLE_READ
+Beeline version 1.2.1.spark2 by Apache Hive
+0: jdbc:hive2://localhost:13000> show tables;
++-----------+------------+--------------+--+
+| database  | tableName  | isTemporary  |
++-----------+------------+--------------+--+
++-----------+------------+--------------+--+
+No rows selected (0.55 seconds)
+
+

The default DB URL to connect to is jdbc:hive2://$HIVE_HOST:$HIVE_PORT.

+

You can modify $HIVE_HOST and $HIVE_PORT with the conf thriftserver command.

+

(2) Thriftserver monitor

+

You can view the logs of the thrift server in real-time.

+
ec2-user@lightningdb:1> thriftserver monitor
+Press Ctrl-C for exit.
+19/11/19 04:43:33 INFO storage.BlockManagerMasterEndpoint: Registering block manager ip-172-31-39-147.ap-northeast-2.compute.internal:35909 with 912.3 MB RAM, BlockManagerId(4, ip-172-31-39-147.ap-northeast-2.compute.internal, 35909, None)
+19/11/19 04:43:33 INFO cluster.YarnSchedulerBackend$YarnDriverEndpoint: Registered executor NettyRpcEndpointRef(spark-client://Executor) (172.31.39.147:53604) with ID 5
+19/11/19 04:43:33 INFO storage.BlockManagerMasterEndpoint: Registering block manager
+...
+
+

(3) Thriftserver restart

+

Restart the thrift server.

+
ec2-user@lightningdb:1> thriftserver restart
+no org.apache.spark.sql.hive.thriftserver.HiveThriftServer2 to stop
+starting org.apache.spark.sql.hive.thriftserver.HiveThriftServer2, logging to /opt/spark/logs/spark-ec2-user-org.apache.spark.sql.hive.thriftserver.HiveThriftServer2-1-ip-172-31-39-147.ap-northeast-2.compute.internal.out
+
+

(4) Start thriftserver

+

Run the thrift server.

+
ec2-user@lightningdb:1> thriftserver start
+starting org.apache.spark.sql.hive.thriftserver.HiveThriftServer2, logging to /opt/spark/logs/spark-ec2-user-org.apache.spark.sql.hive.thriftserver.HiveThriftServer2-1-ip-172-31-39-147.ap-northeast-2.compute.internal.out
+
+

You can view the logs through the monitor command.

+

(5) Stop thriftserver

+

Shut down the thrift server.

+
ec2-user@lightningdb:1> thriftserver stop
+stopping org.apache.spark.sql.hive.thriftserver.HiveThriftServer2
+
+

(6) Conf thriftserver

+
ec2-user@lightningdb:1> conf thriftserver
+
+#!/bin/bash
+###############################################################################
+# Common variables
+SPARK_CONF=${SPARK_CONF:-$SPARK_HOME/conf}
+SPARK_BIN=${SPARK_BIN:-$SPARK_HOME/bin}
+SPARK_SBIN=${SPARK_SBIN:-$SPARK_HOME/sbin}
+SPARK_LOG=${SPARK_LOG:-$SPARK_HOME/logs}
+
+SPARK_METRICS=${SPARK_CONF}/metrics.properties
+SPARK_UI_PORT=${SPARK_UI_PORT:-14050}
+EXECUTERS=12
+EXECUTER_CORES=32
+
+HIVE_METASTORE_URL=''
+HIVE_HOST=${HIVE_HOST:-localhost}
+HIVE_PORT=${HIVE_PORT:-13000}
+
+COMMON_CLASSPATH=$(find $SR2_LIB -name 'tsr2*' -o -name 'spark-r2*' -o -name '*jedis*' -o -name 'commons*' -o -name 'jdeferred*' \
+-o -name 'geospark*' -o -name 'gt-*' | tr '\n' ':')
+
+###############################################################################
+# Driver
+DRIVER_MEMORY=6g
+DRIVER_CLASSPATH=$COMMON_CLASSPATH
+
+###############################################################################
+# Execute
+EXECUTOR_MEMORY=2g
+EXECUTOR_CLASSPATH=$COMMON_CLASSPATH
+
+###############################################################################
+# Thrift Server logs
+EVENT_LOG_ENABLED=false
+EVENT_LOG_DIR=/nvdrive0/thriftserver-event-logs
+EVENT_LOG_ROLLING_DIR=/nvdrive0/thriftserver-event-logs-rolling
+EVENT_LOG_SAVE_MIN=60
+EXTRACTED_EVENT_LOG_SAVE_DAY=5
+SPARK_LOG_SAVE_MIN=2000
+##############
+
+
+
+
  1. If user types 'cfc 1', ${SR2_HOME} will be '~/tsr2/cluster_1/tsr2-assembly-1.0.0-SNAPSHOT'.

  2. 'cluster-node-time' can be set using the 'config set' command. Its default value is 1,200,000 msec.
+
+
\ No newline at end of file
diff --git a/data-ingestion-and-querying/index.html b/data-ingestion-and-querying/index.html
new file mode 100644
index 0000000..d40010e
--- /dev/null
+++ b/data-ingestion-and-querying/index.html
@@ -0,0 +1,975 @@
+Data Ingestion and querying - Lightning DB Docs
+

1. Create a table

+

You can create tables in the metastore using standard DDL.

+
CREATE TABLE `pcell` (
+    `event_time` STRING,
+    `m_10_under` DOUBLE,
+    `m_10_19` DOUBLE,
+    `m_20_29` DOUBLE,
+    `m_30_39` DOUBLE,
+    `m_40_49` DOUBLE,
+    `m_50_59` DOUBLE,
+    `m_60_over` DOUBLE,
+    `longitude` DOUBLE,
+    `lattitude` DOUBLE,
+    `geohash` STRING)
+USING r2
+OPTIONS (
+  `table` '100',
+  `host` 'localhost',
+  `port` '18100',
+  `partitions` 'event_time geohash',
+  `mode` 'nvkvs',
+  `at_least_one_partition_enabled` 'no',
+  `rowstore` 'true'
+  )
+
+

There are various options used to describe storage properties.

+
    +
  • +

    table : Positive Integer. The identification of the table. Redis identifies a table with this value.

    +
  • +
  • +

    host/port : The host/port of representative Redis Node. Using this host and port, Spark builds a Redis cluster client that retrieves and inserts data to the Redis cluster.

    +
  • +
  • +

    partitions : The partition columns. The partition column values are used to distribute data across the Redis cluster. That is, the partition column values are concatenated with a colon (:) and used as the Redis KEY, which is the criterion for distributing data. For more information, refer to the Keys distribution model page of Redis.

    +
  • +
+
+

Tip

+

Choosing the partition columns properly is crucial for performance because it determines how data is sharded across the Redis nodes. Try to distribute the KEYs evenly over the 16384 slots of Redis and to map at least 200 rows to each KEY (see the sketch after this list).

+
+
    +
  • +

    mode : 'nvkvs' for this field

    +
  • +
  • +

    at_least_one_partition_enabled : yes or no. If yes, queries that do not have a partition filter are not permitted.

    +
  • +
  • +

    rowstore : true or false. If true, all columns are merged and stored in RocksDB as one column. This improves ingestion performance. However, query performance can drop because of the overhead of parsing columns in the Redis layer when retrieving data from RocksDB.

    +
  • +
+
+

Tip

+

The metastore of LightningDB only contains the metadata/schema of tables. The actual data is stored in Lightning DB, which consists of Redis & RocksDB (abbreviation: r2), while the table information is stored in the metastore.

+
+
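
Two quick back-of-the-envelope checks for the options above. The sizing arithmetic follows directly from the tip, and the query is only a sketch: the table and column names come from the pcell DDL, while the partition value and the thrift server address are placeholders.

+
# Rough sizing from the tip above: covering all 16384 slots with at least
+# 200 rows per KEY needs on the order of 16384 * 200 = 3,276,800 rows.
+
+# With at_least_one_partition_enabled = 'yes', a query must filter on a
+# partition column (event_time below) or it will be rejected.
+$SPARK_HOME/bin/beeline -u jdbc:hive2://localhost:13000 -e "SELECT count(*) FROM pcell WHERE event_time = '201912250000'"
+
+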

2. Data Ingestion

+

(1) Insert data with DataFrameWriter

+

You can use DataFrameWriter to write data into LightningDB.

+

Currently, LightningDB only supports "Append mode".

+
// Create source DataFrame.
+val df = spark.sqlContext.read.format("csv")
+    .option("header", "false")
+    .option("inferSchema", "true")
+    .load("/nvme/data_01/csv/")
+
+// "pcell" is a name of table which has R2 options.
+df.write.insertInto("pcell")
+
+

(2) Insert data with INSERT INTO SELECT query

+
-- pcell : table with R2 option
+-- csv_table : table with csv option
+-- udf : UDF can be used to transform original data.
+INSERT INTO pcell SELECT *, udf(event_time) FROM csv_table
+
+

3. Querying

+

You can query data with SparkSQL interfaces such as DataFrames and Spark ThriftServer. +Please refer to Spark SQL guide page.

+
\ No newline at end of file
diff --git a/deploy-lightningdb-on-k8s/index.html b/deploy-lightningdb-on-k8s/index.html
new file mode 100644
index 0000000..6a9699d
--- /dev/null
+++ b/deploy-lightningdb-on-k8s/index.html
@@ -0,0 +1,1156 @@
+Deploy LightningDB - Lightning DB Docs
+

Deploy LightningDB and API Server

+

1. Kubernetes manifest github

+
$ git clone https://github.com/mnms/metavision2_k8s_manifests
+
+

2. Install LightningDB v1

+
    +
  • Install ltdb-operator
  • +
+
$ cd ltdb-operator
+$ kubectl create -f ltdb-operator-controller-manager.yaml
+
+
    +
  • deploy LightningDB with CRD
  • +
+
$ cd ltdb
+$ kubectl create -f ltdb.yaml -n {namespace}
+
+
    +
  • Notes
      +
    • Adjust the cluster settings via size / storageClass / maxMemory, etc.
    • +
    • AOF/RDB are enabled by default
    • +
    • redis/rocksdb mount locations in the filesystem
        +
      • /tmp-redis_rocksdb_integration_0: redis's aof/rdb, conf
      • +
      • /tmp-redis_rocksdb_integration_1: rocksdb's sst/wal
      • +
      +
    • +
    • Explicit RDB command: bash flashbase cli-all bgsave
    • +
    • For on-premise installations, system tuning is applied as described in the URL below. Check whether the same tuning is needed when operating on k8s as well.
        +
      • https://docs.lightningdb.io/get-started-with-scratch/
      • +
      +
    • +
    • Deletion
        +
      • An STS does not delete its PVCs automatically, so to remove the cluster completely you must delete the corresponding PVCs manually.
      • +
      +
    • +
    +
  • +
+
$ kubectl delete -f ltdb.yaml
+or
+$ kubectl delete ltdb ltdb -n metavision
+$ for i in {0..39}; do kubectl delete pvc "ltdb-data-logging-ltdb-$i" -n metavision; done
+$ for i in {0..39}; do kubectl delete pvc "ltdb-data-ltdb-$i" -n metavision; done
+
+

3. Install LightningDB v2 / Thunderquery

+
$ cd ltdbv2
+$ kubectl create -f ltdbv2-all-in-one.yaml
+$ kubectl -n metavision exec -it ltdbv2-0 -- redis-cli --cluster-yes --cluster create `kubectl -n metavision get po -o wide -l app=ltdbv2 | grep ltdbv2 | awk '{print $6":6379"}' | tr '\n' ' '`
+
+
    +
  • Notes
      +
    • Manual installation without the operator
    • +
    • The namespace is explicitly set to metavision. If you want a different namespace, modify that part.
    • +
    • In the latest version, even when using ANN you do not need to change maxmemory-policy to noeviction; the normal eviction rule keeps working.
    • +
    • AOF/RDB are enabled by default
    • +
    • redis/rocksdb mount locations in the filesystem
        +
      • /tmp/redis: redis's aof/rdb, conf, rocksdb's sst/wal
      • +
      +
    • +
    • Explicit RDB command: flashbase cli-all bgrewriteaof
    • +
    • Deletion
        +
      • An STS does not delete its PVCs automatically, so to remove the cluster completely you must delete the corresponding PVCs manually.
      • +
      +
    • +
    +
  • +
+
$ kubectl delete -f ltdbv2-all-in-one.yaml
+$ for i in {0..99}; do kubectl delete pvc "ltdbv2-pvc-ltdbv2-$i" -n metavision; done
+
+

4. Install ltdb-http v1

+
$ cd ltdb-http
+$ ls -alh
+total 32
+drwxr-xr-x   6 1111462  1437349805   192B  8 31 17:53 .
+drwxr-xr-x  11 1111462  1437349805   352B  8 31 17:54 ..
+-rw-r--r--   1 1111462  1437349805   1.3K  8 31 17:53 ltdb-http-configmap.yaml
+-rw-r--r--   1 1111462  1437349805   1.5K  8 31 17:53 ltdb-http.yaml
+-rw-r--r--   1 1111462  1437349805   259B  8 31 17:53 pvc.yaml
+-rw-r--r--   1 1111462  1437349805   342B  8 31 17:53 spark-rbac.yaml
+
+
    +
  • Apply ltdb-http.yaml last, after the other files.
  • +
+
kubectl -n metavision apply -f ltdb-http-configmap.yaml
+kubectl -n metavision apply -f spark-rbac.yaml
+kubectl -n metavision apply -f pvc.yaml
+
+kubectl -n metavision apply -f ltdb-http.yaml  // apply this last...
+
+

5. Install ltdb-http v2

+
    +
  • Reference: https://www.notion.so/ltdb/LTDB-HTTP-V2-0-K8S-b47ad5741e9a43668c7bee4d40e1616e?pvs=4
  • +
  • If Iceberg is not used, ltdb-postgresql.yaml can be excluded.
  • +
  • The namespace is explicitly set to metavision. If you want a different namespace, modify that part.
  • +
  • If you want to use the S3 feature, app/s3-secret.yaml must be installed (it is already installed on the Bundang 9F TB).
  • +
  • The S3 region is set to ap-northeast-2 by default.
  • +
+
$ cd ltdbv2-http
+$ kubectl create -f ltdb-http-configmap.yaml
+$ kubectl create -f ltdb-http.yaml
+$ kubectl create -f ltdbv2-http-vs.yaml
+
+
    +
  • Deletion
  • +
+
$ kubectl delete -f ltdbv2-http-vs.yaml
+$ kubectl delete -f ltdb-http.yaml
+$ kubectl delete -f ltdb-http-configmap.yaml
+
+

6. Install ltdb-http v2 CXL-CMS

+
$ cd hynix
+$ kubectl create -f ltdbv2.yaml
+$ kubectl -n hynix exec -it ltdbv2-0 -- redis-cli --cluster-yes --cluster create `kubectl -n hynix get po -o wide -l app=ltdbv2 | grep ltdbv2 | awk '{print $6":6379"}' | tr '\n' ' '`
+$ kubectl create -f thunderquery.yaml
+$ kubectl create -f ltdbv2-http.yaml
+$ kubectl create -f istio-ingress.yaml
+
+
    +
  • Notes
      +
    • The config values added for cxl-cms are shown below. Since there is no CSI driver for the cxl-cms dev environment, tests must be run by manually setting the pod count and Node Affinity in the STS.
    • +
    • They are specified in the form dax-device-name /dev/xxx, cms-device-name /dev/yyy.
    • +
    +
  • +
+
$ vi ltdbv2.yaml
+...
+cms-enabled no
+dax-device-name no
+cms-device-name no
+
+
    +
  • Deletion
      +
    • An STS does not delete its PVCs automatically, so to remove the cluster completely you must delete the corresponding PVCs manually.
    • +
    +
  • +
+
$ cd hynix
+$ kubectl delete -f ltdbv2-http.yaml
+$ kubectl delete -f thunderquery.yaml
+$ kubectl delete -f ltdbv2.yaml
+for i in {0..9}; do kubectl delete pvc "ltdbv2-pvc-ltdbv2-$i" -n hynix; done
+$ kubectl delete -f istio-ingress.yaml
+
+
\ No newline at end of file
diff --git a/get-started-with-scratch/index.html b/get-started-with-scratch/index.html
new file mode 100644
index 0000000..01f0ccf
--- /dev/null
+++ b/get-started-with-scratch/index.html
@@ -0,0 +1,1094 @@
+Manual Installation - Lightning DB Docs
+
+

Note

+

This page describes how to start LightningDB on CentOS manually. If you are using an AWS EC2 instance, please refer to Installation.

+
+

1. Optimizing System Parameters

+

(1) Edit /etc/sysctl.conf as follows

+
...
+vm.swappiness = 0
+vm.overcommit_memory = 1
+vm.overcommit_ratio = 50
+fs.file-max = 6815744
+net.ipv4.ip_local_port_range = 32768 65535
+net.core.rmem_default = 262144
+net.core.wmem_default = 262144
+net.core.rmem_max = 16777216
+net.core.wmem_max = 16777216
+net.ipv4.tcp_max_syn_backlog = 4096
+net.core.somaxconn = 65535
+...
+
+
+

Tip

+

To apply the settings at runtime, use sudo sysctl -p

+
+

(2) Edit /etc/security/limits.conf

+
...
+* soft core     -1
+* soft nofile 262144
+* hard nofile 262144
+* soft nproc 131072
+* hard nproc 131072
+[account name] * soft nofile 262144
+[account name] * hard nofile 262144
+[account name] * soft nproc 131072
+[account name] * hard nproc 131072
+...
+
+
+

Tip

+

To apply the settings at runtime, use ulimit -n 65535 and ulimit -u 131072

+
+

(3) Edit /etc/fstab

+

Remove the swap partition (comment out the swap partition with # and reboot)

+
...
+[[/dev/mapper/centos-swap]] swap swap defaults 0 0
+...
+
+
+

Tip

+

To apply the change at runtime, use swapoff -a

+
+

(4) /etc/init.d/disable-transparent-hugepages

+
[root@fbg01 ~] cat /etc/init.d/disable-transparent-hugepages
+#!/bin/bash
+### BEGIN INIT INFO
+# Provides:          disable-transparent-hugepages
+# Required-Start:    $local_fs
+# Required-Stop:
+# X-Start-Before:    mongod mongodb-mms-automation-agent
+# Default-Start:     2 3 4 5
+# Default-Stop:      0 1 6
+# Short-Description: Disable Linux transparent huge pages
+# Description:       Disable Linux transparent huge pages, to improve
+#                    database performance.
+### END INIT INFO
+
+case $1 in
+start)
+    if [ -d /sys/kernel/mm/transparent_hugepage ]; then
+    thp_path=/sys/kernel/mm/transparent_hugepage
+    elif [ -d /sys/kernel/mm/redhat_transparent_hugepage ]; then
+    thp_path=/sys/kernel/mm/redhat_transparent_hugepage
+    else
+    return 0
+    fi
+
+    echo 'never' > ${thp_path}/enabled
+    echo 'never' > ${thp_path}/defrag
+
+    re='^[0-1]+$'
+    if [[ $(cat ${thp_path}/khugepaged/defrag) =~ $re ]]
+    then
+    # RHEL 7
+    echo 0  > ${thp_path}/khugepaged/defrag
+    else
+    # RHEL 6
+    echo 'no' > ${thp_path}/khugepaged/defrag
+    fi
+
+    unset re
+    unset thp_path
+    ;;
+esac
+[root@fbg01 ~]
+[root@fbg01 ~]
+[root@fbg01 ~] chmod 755 /etc/init.d/disable-transparent-hugepages
+[root@fbg01 ~] chkconfig --add disable-transparent-hugepages
+
+

2. Setup Prerequisites

+

- bash, unzip, ssh

+

- JDK 1.8 or higher

+

- gcc 4.8.5 or higher

+

- glibc 2.17 or higher

+
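
A minimal sketch for installing the JDK, gcc, and glibc requirements above on CentOS 7 (the package names are the stock CentOS 7 ones and may differ on other distributions):

+
sudo yum install -y java-1.8.0-openjdk-devel gcc gcc-c++ glibc
+
+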

- epel-release

+
sudo yum install epel-release
+
+

- boost, boost-thread, boost-devel

+
sudo yum install boost boost-thread boost-devel
+
+

- Exchange SSH Key

+

SSH keys should be exchanged between all servers where LightningDB will be deployed.

+
ssh-keygen -t rsa
+chmod 0600 ~/.ssh/authorized_keys
+cat .ssh/id_rsa.pub | ssh {server name} "cat >> .ssh/authorized_keys"
+
+

- Intel MKL library

+

(1) Intel MKL 2019 library install

+
    +
  • Go to the website: https://software.intel.com/en-us/mkl/choose-download/macos
  • +
  • Register and login
  • +
  • Select product named "Intel * Math Kernel Library for Linux" or "Intel * Math Kernel Library for Mac" from the select box "Choose Product to Download"
  • +
  • Choose a Version "2019 Update 2" and download
  • +
  • Unzip the file and execute the install.sh file with the root account (or sudo)
  • +
+
    sudo ./install.sh
+
+
    +
  • Choose custom install and configure the install directory /opt/intel (with sudo, /opt/intel is the default installation path, just confirm it)
  • +
+
matthew@fbg05 /opt/intel $ pwd
+/opt/intel
+
+matthew@fbg05 /opt/intel $ ls -alh
+합계 0
+drwxr-xr-x  10 root root 307  3월 22 01:34 .
+drwxr-xr-x.  5 root root  83  3월 22 01:34 ..
+drwxr-xr-x   6 root root  72  3월 22 01:35 .pset
+drwxr-xr-x   2 root root  53  3월 22 01:34 bin
+lrwxrwxrwx   1 root root  28  3월 22 01:34 compilers_and_libraries -> compilers_and_libraries_2019
+drwxr-xr-x   3 root root  19  3월 22 01:34 compilers_and_libraries_2019
+drwxr-xr-x   4 root root  36  1월 24 23:04 compilers_and_libraries_2019.2.187
+drwxr-xr-x   6 root root  63  1월 24 22:50 conda_channel
+drwxr-xr-x   4 root root  26  1월 24 23:01 documentation_2019
+lrwxrwxrwx   1 root root  33  3월 22 01:34 lib -> compilers_and_libraries/linux/lib
+lrwxrwxrwx   1 root root  33  3월 22 01:34 mkl -> compilers_and_libraries/linux/mkl
+lrwxrwxrwx   1 root root  29  3월 22 01:34 parallel_studio_xe_2019 -> parallel_studio_xe_2019.2.057
+drwxr-xr-x   5 root root 216  3월 22 01:34 parallel_studio_xe_2019.2.057
+drwxr-xr-x   3 root root  16  3월 22 01:34 samples_2019
+lrwxrwxrwx   1 root root  33  3월 22 01:34 tbb -> compilers_and_libraries/linux/tbb
+
+

(2) Intel MKL 2019 library environment settings

+
    +
  • Append the following statement into ~/.bashrc
  • +
+
# INTEL MKL environment variables ($MKLROOT can be checked with: export | grep MKL)
+source /opt/intel/mkl/bin/mklvars.sh intel64
+
+

- Apache Hadoop 2.6.0 (or higher)

+

- Apache Spark 2.3 on Hadoop 2.6

+

- ntp, for clock synchronization between servers over packet-switched, variable-latency data networks (see the sketch below).

+
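
A minimal sketch for CentOS 7, assuming yum and systemd are available:

+
sudo yum install -y ntp
+sudo systemctl enable --now ntpd
+
+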

- Settings for core dump (optional)

+

(1) Installing ABRT and starting its services

+
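
A minimal sketch, assuming the stock CentOS abrt packages are used:

+
sudo yum install -y abrt abrt-cli
+sudo systemctl enable --now abrtd
+
+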

(2) Set core dump file size

+
ulimit -c unlimited
+
+

(3) Change the path of core dump files

+
echo /tmp/core.%p > /proc/sys/kernel/core_pattern
+
+

3. Session configuration files

+

'~/.bashrc'

+

Add the following:

+
# .bashrc
+
+if [ -f /etc/bashrc ]; then
+. /etc/bashrc
+fi
+
+# User specific environment and startup programs
+
+PATH=$PATH:$HOME/.local/bin:$HOME/bin
+
+HADOOP_HOME=/home/nvkvs/hadoop
+HADOOP_CONF_DIR=$HADOOP_HOME/etc/hadoop
+YARN_CONF_DIR=$HADOOP_HOME/etc/hadoop
+SPARK_HOME=/home/nvkvs/spark
+
+PATH=$PATH:$HADOOP_HOME/bin:$HADOOP_HOME/sbin:$SPARK_HOME/bin:$SPARK_HOME/sbin:$HOME/sbin
+
+export PATH SPARK_HOME HADOOP_HOME HADOOP_CONF_DIR YARN_CONF_DIR
+alias cfc='source ~/.use_cluster'
+
+

4. Install and Start LightningDB

+

With LTCLI provided by LightningDB, users can deploy and use LightningDB.

+

Install LTCLI with the following command.

+
$ pip install ltcli --upgrade --user
+
+

After the installation is completed, start LTCLI as described in Commands.
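
For example, launching LTCLI drops you into its interactive prompt, from which the commands described in this documentation (deploy, cluster, conf, thriftserver, ...) can be run. The prompt and cluster number below are illustrative only:

+
$ ltcli
+matthew@lightningdb:21> cluster ls
+
+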

+
\ No newline at end of file
diff --git a/how-to-scaleout/index.html b/how-to-scaleout/index.html
new file mode 100644
index 0000000..f776eb3
--- /dev/null
+++ b/how-to-scaleout/index.html
@@ -0,0 +1,1037 @@
+Scaleout with LightningDB - Lightning DB Docs
+
+

Note

+

This document describes how to use the 'flashbase' script for scale-out. With LTCLI, you can operate Lightning DB more easily and with more features, so we recommend LTCLI over the 'flashbase' script whenever possible.

+
+

1. Check the distribution of slots

+

You can use the 'redis-trib.rb check {master's IP}:{master's Port} | grep slots | grep master' command to check the slots assigned to each master. Any master can be used for '{master's IP}:{master's Port}'.

+
$ redis-trib.rb check 192.168.111.201:18800 | grep slots | grep master
+
+   slots:0-818 (819 slots) master
+
+   slots:3277-4095 (819 slots) master
+
+   slots:5734-6553 (820 slots) master
+
+   slots:7373-8191 (819 slots) master
+
+   slots:13926-14745 (820 slots) master
+
+   slots:4096-4914 (819 slots) master
+
+   slots:8192-9010 (819 slots) master
+
+   slots:2458-3276 (819 slots) master
+
+   slots:9011-9829 (819 slots) master
+
+   slots:10650-11468 (819 slots) master
+
+   slots:11469-12287 (819 slots) master
+
+   slots:1638-2457 (820 slots) master
+
+   slots:12288-13106 (819 slots) master
+
+   slots:15565-16383 (819 slots) master
+
+   slots:9830-10649 (820 slots) master
+
+   slots:819-1637 (819 slots) master
+
+   slots:6554-7372 (819 slots) master
+
+   slots:4915-5733 (819 slots) master
+
+   slots:13107-13925 (819 slots) master
+
+   slots:14746-15564 (819 slots) master
+
+

2. Check the distribution of redis-servers

+
$ flashbase check-distribution
+
+check distribution of masters/slaves...
+
+SERVER NAME     | M | S
+
+--------------------------------
+
+192.168.111.201 | 10 | 10
+
+192.168.111.202 | 10 | 10
+
+--------------------------------
+
+Total nodes     | 20 | 20
+
+
+

3. Scale out

+

Open 'redis.properties' with the 'flashbase edit' command.

+
$ flashbase edit
+
+

Add a new node ("192.168.111.203").

+

As-is

+
#!/bin/bash
+
+## Master hosts and ports
+export SR2_REDIS_MASTER_HOSTS=( "192.168.111.201" "192.168.111.202" )
+export SR2_REDIS_MASTER_PORTS=( $(seq 18800 18809) )
+
+## Slave hosts and ports (optional)
+export SR2_REDIS_SLAVE_HOSTS=( "192.168.111.201" "192.168.111.202" )
+export SR2_REDIS_SLAVE_PORTS=( $(seq 18850 18859) )
+
+

To-be

+
#!/bin/bash
+
+## Master hosts and ports
+
+export SR2_REDIS_MASTER_HOSTS=( "192.168.111.201" "192.168.111.202" "192.168.111.203" )
+export SR2_REDIS_MASTER_PORTS=( $(seq 18800 18809) )
+
+## Slave hosts and ports (optional)
+export SR2_REDIS_SLAVE_HOSTS=( "192.168.111.201" "192.168.111.202" "192.168.111.203" )
+export SR2_REDIS_SLAVE_PORTS=( $(seq 18850 18859) )
+
+

Scale out the cluster with the 'flashbase scale-out {new node's IP}' command. If you add more than one node, you can run it like 'flashbase scale-out 192.168.111.203 192.168.111.204 192.168.111.205'.

+
$ flashbase scale-out 192.168.111.203
+
+

4. Check the new distribution of slots

+
$ redis-trib.rb check 192.168.111.201:18800 | grep master | grep slot
+   slots:273-818 (546 slots) master
+   slots:11742-12287 (546 slots) master
+   slots:0-272,10650-10921,14198-14199 (547 slots) master
+   slots:10922,11469-11741,14746-15018 (547 slots) master
+   slots:6827-7372 (546 slots) master
+   slots:1912-2457 (546 slots) master
+   slots:6008-6553 (546 slots) master
+   slots:7646-8191 (546 slots) master
+   slots:1911,5734-6007,13926-14197 (547 slots) master
+   slots:5188-5733 (546 slots) master
+   slots:13380-13925 (546 slots) master
+   slots:1092-1637 (546 slots) master
+   slots:1638-1910,9830-10103 (547 slots) master
+   slots:3550-4095 (546 slots) master
+   slots:7373-7645,8192-8464 (546 slots) master
+   slots:14200-14745 (546 slots) master
+   slots:2458-2730,4096-4368 (546 slots) master
+   slots:4369-4914 (546 slots) master
+   slots:9284-9829 (546 slots) master
+   slots:12561-13106 (546 slots) master
+   slots:6554-6826,15565-15837 (546 slots) master
+   slots:9011-9283,12288-12560 (546 slots) master
+   slots:4915-5187,13107-13379 (546 slots) master
+   slots:15019-15564 (546 slots) master
+   slots:10923-11468 (546 slots) master
+   slots:819-1091,3277-3549 (546 slots) master
+   slots:8465-9010 (546 slots) master
+   slots:2731-3276 (546 slots) master
+   slots:15838-16383 (546 slots) master
+   slots:10104-10649 (546 slots) master
+
+

5. Check the new distribution of redis-servers

+
$ fb check-distribution
+check distribution of masters/slaves...
+SERVER NAME     |   M   |   S
+--------------------------------
+192.168.111.201 |   10  |   10
+192.168.111.202 |   10  |   10
+192.168.111.203 |   10  |   10
+--------------------------------
+Total nodes     |   30  |   30
+
+ + + + + + + + + + + \ No newline at end of file diff --git a/how-to-use-flashbase/index.html b/how-to-use-flashbase/index.html new file mode 100644 index 0000000..dcd6bcc --- /dev/null +++ b/how-to-use-flashbase/index.html @@ -0,0 +1,1625 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + Install with LightningDB - Lightning DB Docs + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

Note

+

This document explains how to use the 'flashbase' script for installation and operation. +If you use LTCLI, you can deploy and operate Lightning DB more easily and with more functionality. +Therefore, we recommend LTCLI over the 'flashbase' script whenever possible.

+
+

1. Deploy

+

You can download the recommended version of Lightning DB from the Release Notes.

+

Deploy the Lightning DB binary using deploy-flashbase.sh.

+

Type ./deploy-flashbase.sh {binary path} {cluster list} to deploy.

+
> ./deploy-flashbase.sh ./lightningdb.release.release.flashbase_v1.2.3.95bfc6.bin 1 2 // deploy cluster 1 and cluster 2 with lightningdb.release.release.flashbase_v1.2.3.95bfc6.bin
+
+DATEMIN: 20200811113038
+INSTALLER PATH: ./lightningdb.release.release.flashbase_v1.2.3.95bfc6.bin
+INSTALLER NAME: lightningdb.release.release.flashbase_v1.2.3.95bfc6.bin
+======================================================
+DEPLOY CLUSTER 1
+
+CLUSTER_DIR: /Users/myaccount/tsr2/cluster_1
+SR2_HOME: /Users/myaccount/tsr2/cluster_1/tsr2-assembly-1.0.0-SNAPSHOT
+SR2_CONF: /Users/myaccount/tsr2/cluster_1/tsr2-assembly-1.0.0-SNAPSHOT/conf
+BACKUP_DIR: /Users/myaccount/tsr2/cluster_1_bak_20200811113038
+CONF_BACKUP_DIR: /Users/myaccount/tsr2/cluster_1_conf_bak_20200811113038
+======================================================
+backup...
+
+DEPLOY NODE localhost
+lightningdb.release.release.flashbase_v1.2.3.95bfc6.bin                                      100%  126MB 256.8MB/s   00:00
+\e[01;32mInstalling tsr2 as full...\e[00m
+Skip to create \e[01:31m/Users/myaccount/tsr2/cluster_1\e[00m
+\e[01;32mUnarchieving to /Users/myaccount/tsr2/cluster_1...\e[00m
+\e[01;32mMaking required directories...\e[00m
+\e[01;32mProcessing a native library linkage...\e[00m
+\e[01;31mNo ldconfig in $PATH. Fix the problem and try again\e[00m
+building file list ... done
+logback-kaetlyn.xml.template
+logback.xml
+redis-master.conf.template
+redis-slave.conf.template
+redis.conf.sample
+redis.properties
+sentinel.conf.template
+thriftserver.properties
+tsr2-kaetlyn.properties
+redis/
+redis/redis-18500.conf
+redis/redis-18501.conf
+redis/redis-18502.conf
+redis/redis-18503.conf
+redis/redis-18504.conf
+redis/redis-18505.conf
+redis/redis-18506.conf
+redis/redis-18507.conf
+redis/redis-18508.conf
+redis/redis-18509.conf
+redis/redis-18600.conf
+redis/redis-18601.conf
+redis/redis-18602.conf
+redis/redis-18603.conf
+redis/redis-18604.conf
+redis/redis-18605.conf
+redis/redis-18606.conf
+redis/redis-18607.conf
+redis/redis-18608.conf
+redis/redis-18609.conf
+sample-configure/
+sample-configure/etc/
+sample-configure/etc/sysctl.conf.sample
+sample-configure/etc/profile.d/
+sample-configure/etc/profile.d/jdk.sh.sample
+sample-configure/hadoop/
+sample-configure/hadoop/core-site.xml.sample
+sample-configure/hadoop/hdfs-site.xml.sample
+sample-configure/hadoop/mapred-site.xml.sample
+sample-configure/hadoop/slaves.sample
+sample-configure/hadoop/yarn-site.xml.sample
+sample-configure/spark/
+sample-configure/spark/log4j.properties.sample
+sample-configure/spark/metrics.properties.sample
+sample-configure/spark/scheduler-site.xml.sample
+sample-configure/spark/spark-defaults.conf.sample
+
+sent 995838 bytes  received 2532 bytes  1996740.00 bytes/sec
+total size is 1161578  speedup is 1.16
+
+======================================================
+DEPLOY CLUSTER 2
+
+CLUSTER_DIR: /Users/myaccount/tsr2/cluster_2
+SR2_HOME: /Users/myaccount/tsr2/cluster_2/tsr2-assembly-1.0.0-SNAPSHOT
+SR2_CONF: /Users/myaccount/tsr2/cluster_2/tsr2-assembly-1.0.0-SNAPSHOT/conf
+BACKUP_DIR: /Users/myaccount/tsr2/cluster_2_bak_20200811113038
+CONF_BACKUP_DIR: /Users/myaccount/tsr2/cluster_2_conf_bak_20200811113038
+======================================================
+backup...
+
+DEPLOY NODE localhost
+lightningdb.release.release.flashbase_v1.2.3.95bfc6.bin                                      100%  126MB 232.7MB/s   00:00
+\e[01;32mInstalling tsr2 as full...\e[00m
+Skip to create \e[01:31m/Users/myaccount/tsr2/cluster_2\e[00m
+\e[01;32mUnarchieving to /Users/myaccount/tsr2/cluster_2...\e[00m
+\e[01;32mMaking required directories...\e[00m
+\e[01;32mProcessing a native library linkage...\e[00m
+\e[01;31mNo ldconfig in $PATH. Fix the problem and try again\e[00m
+building file list ... done
+logback-kaetlyn.xml.template
+logback.xml
+redis-master.conf.template
+redis-slave.conf.template
+redis.conf.sample
+redis.properties
+sentinel.conf.template
+thriftserver.properties
+tsr2-kaetlyn.properties
+redis/
+redis/redis-18200.conf
+redis/redis-18201.conf
+redis/redis-18202.conf
+redis/redis-18203.conf
+redis/redis-18204.conf
+redis/redis-18205.conf
+redis/redis-18206.conf
+redis/redis-18207.conf
+redis/redis-18208.conf
+redis/redis-18209.conf
+redis/redis-18250.conf
+redis/redis-18251.conf
+redis/redis-18252.conf
+redis/redis-18253.conf
+redis/redis-18254.conf
+redis/redis-18255.conf
+redis/redis-18256.conf
+redis/redis-18257.conf
+redis/redis-18258.conf
+redis/redis-18259.conf
+sample-configure/
+sample-configure/etc/
+sample-configure/etc/sysctl.conf.sample
+sample-configure/etc/profile.d/
+sample-configure/etc/profile.d/jdk.sh.sample
+sample-configure/hadoop/
+sample-configure/hadoop/core-site.xml.sample
+sample-configure/hadoop/hdfs-site.xml.sample
+sample-configure/hadoop/mapred-site.xml.sample
+sample-configure/hadoop/slaves.sample
+sample-configure/hadoop/yarn-site.xml.sample
+sample-configure/spark/
+sample-configure/spark/log4j.properties.sample
+sample-configure/spark/metrics.properties.sample
+sample-configure/spark/scheduler-site.xml.sample
+sample-configure/spark/spark-defaults.conf.sample
+
+sent 992400 bytes  received 2532 bytes  663288.00 bytes/sec
+total size is 1165442  speedup is 1.17
+
+

2. Create and start a cluster

+

If you've deployed Lightning DB successfully, you can create and start the clusters.

+
    +
  • Choose the cluster to use
  • +
+

To choose the cluster, the .use_cluster script is used.

+
source ~/.use_cluster.sh 1 // 'source ~/.use_cluster.sh {cluster number}
+
+

If you add an alias in .bashrc.sh as below, you can change the cluster easily.

+
alias cfc="source ~/.use_cluster"
+
+

and type cfc {cluster number} to use the specified cluster.

+
cfc 1
+
+
    +
  • Configure the cluster for initializing
  • +
+

Open and modify the redis.properties file of the cluster by typing 'flashbase edit'.

+
#!/bin/bash
+
+## Master hosts and ports
+export SR2_REDIS_MASTER_HOSTS=( "127.0.0.1" )          // need to configure
+export SR2_REDIS_MASTER_PORTS=( $(seq 18100 18109) )   // need to configure
+
+## Slave hosts and ports (optional)
+export SR2_REDIS_SLAVE_HOSTS=( "127.0.0.1" )            // need to configure in case of replication
+export SR2_REDIS_SLAVE_PORTS=( $(seq 18150 18159) )     // need to configure in case of replication
+
+## only single data directory in redis db and flash db
+## The following variables must exist: 'SR2_REDIS_DATA', 'SR2_REDIS_DB_PATH' and 'SR2_FLASH_DB_PATH'
+[[export]] SR2_REDIS_DATA="/nvdrive0/nvkvs/redis"
+[[export]] SR2_REDIS_DB_PATH="/nvdrive0/nvkvs/redis"
+[[export]] SR2_FLASH_DB_PATH="/nvdrive0/nvkvs/flash"
+
+## multiple data directory in redis db and flash db
+export SSD_COUNT=3     // need to configure
+[[export]] HDD_COUNT=3
+export SR2_REDIS_DATA="/sata_ssd/ssd_"      // need to configure. With these settings, '/sata_ssd/ssd_01', '/sata_ssd/ssd_02' and '/sata_ssd/ssd_03' are used.
+export SR2_REDIS_DB_PATH="/sata_ssd/ssd_"   // need to configure
+export SR2_FLASH_DB_PATH="/sata_ssd/ssd_"   // need to configure
+
+#######################################################
+# Example : only SSD data directory
+[[export]] SSD_COUNT=3
+[[export]] SR2_REDIS_DATA="/ssd_"
+[[export]] SR2_REDIS_DB_PATH="/ssd_"
+[[export]] SR2_FLASH_DB_PATH="/ssd_"
+#######################################################
+
+
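
As a rough illustration only (not part of the actual flashbase scripts), the multiple-data-directory settings above combine the path prefix with SSD_COUNT into numbered directories such as '/sata_ssd/ssd_01' ~ '/sata_ssd/ssd_03':

+
# sketch: expand the prefix with SSD_COUNT (assumes GNU/BSD seq is available)
+for i in $(seq -f "%02g" 1 $SSD_COUNT); do
+  echo "${SR2_REDIS_DATA}${i}"
+done
+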
    +
  • Create the cluster
  • +
+

Type flashbase restart --reset --cluster --yes.

+
> flashbase restart --reset --cluster --yes
+\e[01;32mStopping master cluster of redis...\e[00m
+\e[01;33m - Stopping 127.0.0.1[*]...\e[00m
+\e[01;32mStopping slave cluster of redis...\e[00m
+\e[01;33m - Stopping 127.0.0.1[*]...\e[00m
+\e[01;32mRemoving master node configuration in \e[00m
+\e[01;32m - 127.0.0.1\e[00m
+\e[01;32mRemoving slave node configuration in \e[00m
+\e[01;32m - 127.0.0.1\e[00m
+\e[01;32mRemoving redis generated MASTER configuration files...\e[00m
+\e[01;32m - 127.0.0.1 \e[00m
+\e[01;32mRemoving redis generated SLAVE configuration files...\e[00m
+\e[01;32m - 127.0.0.1 \e[00m
+\e[01;33m
+Redis flashdb path is "/sata_ssd/ssd_#{SSD_NUMBER}/nvkvs/myaccount/db/db-#{PORT}-#{DB_NUMBER}".\e[00m
+\e[01;33mRedis dump.rdb path is "/sata_ssd/ssd_#{SSD_NUMBER}/nvkvs/myaccount/dump/dump-#{PORT}.*".\e[00m
+\e[01;33mRedis aof path is "/sata_ssd/ssd_#{SSD_NUMBER}/nvkvs/myaccount/appendonly-#{PORT}.aof".
+\e[00m
+\e[01;32mRemoving flash db directory, appendonly and dump.rdb files in MASTER NODE;\e[00m
+\e[01;32m - 127.0.0.1 \e[00m
+\e[01;32mRemoving flash db directory, appendonly and dump.rdb files in SLAVE NODE;\e[00m
+\e[01;32m - 127.0.0.1 \e[00m
+\e[01;32mGenerate redis configuration files for MASTER hosts\e[00m
+\e[01;32mGenerate redis configuration files for SLAVE hosts\e[00m
+\e[01;32m- Master nodes\e[00m
+\e[01;32m -- Copying to 127.0.0.1...\e[00m
+\e[01;32m- Slave nodes\e[00m
+\e[01;32m -- Copying to 127.0.0.1...\e[00m
+\e[01;32mSuccess to configure redis.\e[00m
+netstat: t4: unknown or uninstrumented protocol
+netstat: t4: unknown or uninstrumented protocol
+\e[01;32mBackup redis master log in each MASTER hosts... \e[00m
+\e[01;33m - 127.0.0.1\e[00m
+\e[01;33m - 127.0.0.1\e[00m
+\e[01;32mStarting master nodes : 127.0.0.1 : \e[00m\e[01;32m[18100, 18101, 18102, 18103, 18104, 18105, 18106, 18107, 18108, 18109]...\e[00m
+\e[01;32mStarting slave nodes : 127.0.0.1 : \e[00m\e[01;32m[18150, 18151, 18152, 18153, 18154, 18155, 18156, 18157, 18158, 18159]...\e[00m
+total_master_num: 10
+total_slave_num: 10
+num_replica: 1
+>>> Creating cluster
+>>> Performing hash slots allocation on 20 nodes...
+Using 10 masters:
+127.0.0.1:18100
+127.0.0.1:18101
+127.0.0.1:18102
+127.0.0.1:18103
+127.0.0.1:18104
+127.0.0.1:18105
+127.0.0.1:18106
+127.0.0.1:18107
+127.0.0.1:18108
+127.0.0.1:18109
+Adding replica 127.0.0.1:18150 to 127.0.0.1:18100
+Adding replica 127.0.0.1:18151 to 127.0.0.1:18101
+Adding replica 127.0.0.1:18152 to 127.0.0.1:18102
+Adding replica 127.0.0.1:18153 to 127.0.0.1:18103
+Adding replica 127.0.0.1:18154 to 127.0.0.1:18104
+Adding replica 127.0.0.1:18155 to 127.0.0.1:18105
+Adding replica 127.0.0.1:18156 to 127.0.0.1:18106
+Adding replica 127.0.0.1:18157 to 127.0.0.1:18107
+Adding replica 127.0.0.1:18158 to 127.0.0.1:18108
+Adding replica 127.0.0.1:18159 to 127.0.0.1:18109
+M: 7e72dff98fdda09cf97e02420727fd8b6564b6ae 127.0.0.1:18100
+   slots:0-1637 (1638 slots) master
+M: c3b5e673033758d77680e4534855686649fe5daa 127.0.0.1:18101
+   slots:1638-3276 (1639 slots) master
+M: ba39bada8a2e393f76d265ea02d3e078c9406a93 127.0.0.1:18102
+   slots:3277-4914 (1638 slots) master
+M: 16da3917eff32cde8942660324c7374117902b01 127.0.0.1:18103
+   slots:4915-6553 (1639 slots) master
+M: 5ed447baf1f1c6c454459c24809ffc197809cb6b 127.0.0.1:18104
+   slots:6554-8191 (1638 slots) master
+M: d4cdcfdfdfb966a74a1bafce8969f956b5312094 127.0.0.1:18105
+   slots:8192-9829 (1638 slots) master
+M: 6f89f0b44f0a515865173984b95fc3f6fe4e7d72 127.0.0.1:18106
+   slots:9830-11468 (1639 slots) master
+M: d531628bf7b2afdc095e445d21dedc2549cc4590 127.0.0.1:18107
+   slots:11469-13106 (1638 slots) master
+M: ae71f4430fba6a019e4111c3d26e27e225764200 127.0.0.1:18108
+   slots:13107-14745 (1639 slots) master
+M: b3734a60336856f8c4ef08efe763ae3ac32bb94a 127.0.0.1:18109
+   slots:14746-16383 (1638 slots) master
+S: 128a527bba2823e547e8138a77aebcfec7e55342 127.0.0.1:18150
+   replicates 7e72dff98fdda09cf97e02420727fd8b6564b6ae
+S: ab72ae8dafc8a3f3229157cf5965bbfa1db6c726 127.0.0.1:18151
+   replicates c3b5e673033758d77680e4534855686649fe5daa
+S: f6670f4b8570758d509b5a0341a5151abea599ea 127.0.0.1:18152
+   replicates ba39bada8a2e393f76d265ea02d3e078c9406a93
+S: f004736cb50724f089289af34bd8da2e98b07a0b 127.0.0.1:18153
+   replicates 16da3917eff32cde8942660324c7374117902b01
+S: 8d0061ff0bc8fcc0e8a9fa5db8d6ab0b7b7ba9d0 127.0.0.1:18154
+   replicates 5ed447baf1f1c6c454459c24809ffc197809cb6b
+S: 208496ceb24eba1e26611071e185007b1ad552c5 127.0.0.1:18155
+   replicates d4cdcfdfdfb966a74a1bafce8969f956b5312094
+S: 3d3af1bf3dec40fe0d5dbe1314638733dadb686e 127.0.0.1:18156
+   replicates 6f89f0b44f0a515865173984b95fc3f6fe4e7d72
+S: bbcba7c269fb8162e0f7ef5807e079ba06fc032b 127.0.0.1:18157
+   replicates d531628bf7b2afdc095e445d21dedc2549cc4590
+S: 6b3a7f40f36cbe7aaad8ffffa58aefbf591d4967 127.0.0.1:18158
+   replicates ae71f4430fba6a019e4111c3d26e27e225764200
+S: 11f3c47b736e37b274bbdef95a580a0c89bc9d9b 127.0.0.1:18159
+   replicates b3734a60336856f8c4ef08efe763ae3ac32bb94a
+Can I set the above configuration? (type 'yes' to accept): >>> Nodes configuration updated
+>>> Assign a different config epoch to each node
+>>> Sending CLUSTER MEET messages to join the cluster
+Waiting for the cluster to join..................................................................................
+>>> Performing Cluster Check (using node 127.0.0.1:18100)
+M: 7e72dff98fdda09cf97e02420727fd8b6564b6ae 127.0.0.1:18100
+   slots:0-1637 (1638 slots) master
+M: c3b5e673033758d77680e4534855686649fe5daa 127.0.0.1:18101
+   slots:1638-3276 (1639 slots) master
+M: ba39bada8a2e393f76d265ea02d3e078c9406a93 127.0.0.1:18102
+   slots:3277-4914 (1638 slots) master
+M: 16da3917eff32cde8942660324c7374117902b01 127.0.0.1:18103
+   slots:4915-6553 (1639 slots) master
+M: 5ed447baf1f1c6c454459c24809ffc197809cb6b 127.0.0.1:18104
+   slots:6554-8191 (1638 slots) master
+M: d4cdcfdfdfb966a74a1bafce8969f956b5312094 127.0.0.1:18105
+   slots:8192-9829 (1638 slots) master
+M: 6f89f0b44f0a515865173984b95fc3f6fe4e7d72 127.0.0.1:18106
+   slots:9830-11468 (1639 slots) master
+M: d531628bf7b2afdc095e445d21dedc2549cc4590 127.0.0.1:18107
+   slots:11469-13106 (1638 slots) master
+M: ae71f4430fba6a019e4111c3d26e27e225764200 127.0.0.1:18108
+   slots:13107-14745 (1639 slots) master
+M: b3734a60336856f8c4ef08efe763ae3ac32bb94a 127.0.0.1:18109
+   slots:14746-16383 (1638 slots) master
+M: 128a527bba2823e547e8138a77aebcfec7e55342 127.0.0.1:18150
+   slots: (0 slots) master
+   replicates 7e72dff98fdda09cf97e02420727fd8b6564b6ae
+M: ab72ae8dafc8a3f3229157cf5965bbfa1db6c726 127.0.0.1:18151
+   slots: (0 slots) master
+   replicates c3b5e673033758d77680e4534855686649fe5daa
+M: f6670f4b8570758d509b5a0341a5151abea599ea 127.0.0.1:18152
+   slots: (0 slots) master
+   replicates ba39bada8a2e393f76d265ea02d3e078c9406a93
+M: f004736cb50724f089289af34bd8da2e98b07a0b 127.0.0.1:18153
+   slots: (0 slots) master
+   replicates 16da3917eff32cde8942660324c7374117902b01
+M: 8d0061ff0bc8fcc0e8a9fa5db8d6ab0b7b7ba9d0 127.0.0.1:18154
+   slots: (0 slots) master
+   replicates 5ed447baf1f1c6c454459c24809ffc197809cb6b
+M: 208496ceb24eba1e26611071e185007b1ad552c5 127.0.0.1:18155
+   slots: (0 slots) master
+   replicates d4cdcfdfdfb966a74a1bafce8969f956b5312094
+M: 3d3af1bf3dec40fe0d5dbe1314638733dadb686e 127.0.0.1:18156
+   slots: (0 slots) master
+   replicates 6f89f0b44f0a515865173984b95fc3f6fe4e7d72
+M: bbcba7c269fb8162e0f7ef5807e079ba06fc032b 127.0.0.1:18157
+   slots: (0 slots) master
+   replicates d531628bf7b2afdc095e445d21dedc2549cc4590
+M: 6b3a7f40f36cbe7aaad8ffffa58aefbf591d4967 127.0.0.1:18158
+   slots: (0 slots) master
+   replicates ae71f4430fba6a019e4111c3d26e27e225764200
+M: 11f3c47b736e37b274bbdef95a580a0c89bc9d9b 127.0.0.1:18159
+   slots: (0 slots) master
+   replicates b3734a60336856f8c4ef08efe763ae3ac32bb94a
+[OK] All nodes agree about slots configuration.
+>>> Check for open slots...
+>>> Check slots coverage...
+[OK] All 16384 slots covered.
+
+

3. Operations

+
    +
  • PING
  • +
+

You can simply check the status of a node with the PING command.

+
> flashbase cli -h localhost -p 18101
+localhost:18101> ping
+PONG
+localhost:18101>
+
+

Using flashbase cli-all, you can check the status of all nodes.

+
> flashbase cli-all ping
+redis client for 127.0.0.1:18100
+PONG
+redis client for 127.0.0.1:18101
+PONG
+redis client for 127.0.0.1:18102
+PONG
+redis client for 127.0.0.1:18103
+PONG
+redis client for 127.0.0.1:18104
+PONG
+redis client for 127.0.0.1:18105
+PONG
+redis client for 127.0.0.1:18106
+PONG
+redis client for 127.0.0.1:18107
+PONG
+redis client for 127.0.0.1:18108
+PONG
+redis client for 127.0.0.1:18109
+PONG
+redis client for 127.0.0.1:18150
+PONG
+redis client for 127.0.0.1:18151
+PONG
+redis client for 127.0.0.1:18152
+PONG
+redis client for 127.0.0.1:18153
+PONG
+redis client for 127.0.0.1:18154
+PONG
+redis client for 127.0.0.1:18155
+PONG
+redis client for 127.0.0.1:18156
+PONG
+redis client for 127.0.0.1:18157
+PONG
+redis client for 127.0.0.1:18158
+PONG
+redis client for 127.0.0.1:18159
+PONG
+
+
    +
  • INFO
  • +
+

With the INFO command, you can get all the information about each node.

+
> flashbase cli -h localhost -p 18101
+localhost:18101> info all
+# Server
+redis_version:3.0.7
+redis_git_sha1:29d44e4d
+redis_git_dirty:0
+redis_build_id:e5a4dd48086abff2
+redis_mode:cluster
+os:Darwin 18.7.0 x86_64
+arch_bits:64
+multiplexing_api:kqueue
+gcc_version:4.2.1
+process_id:42593
+run_id:ea34cce757c61d65e344b6c1094b940c3ab46110
+tcp_port:18101
+uptime_in_seconds:516
+uptime_in_days:0
+hz:10
+lru_clock:3282808
+config_file:/Users/myaccount/tsr2/cluster_1/tsr2-assembly-1.0.0-SNAPSHOT/conf/redis/redis-18101.conf
+
+# Clients
+connected_clients:1
+client_longest_output_list:0
+client_biggest_input_buf:0
+blocked_clients:0
+
+
+# Memory
+isOOM:false
+used_memory:20752816
+used_memory_human:19.79M
+used_memory_rss:23941120
+used_memory_peak:20752816
+used_memory_peak_human:19.79M
+used_memory_lua:36864
+used_memory_rocksdb_total:100663872
+used_memory_rocksdb_block_cache:100663296
+used_memory_rocksdb_mem_table:576
+used_memory_rocksdb_table_readers:0
+used_memory_rocksdb_pinned_block:0
+meta_data_memory:64
+percent_of_meta_data_memory:0
+used_memory_client_buffer_peak:0
+mem_fragmentation_ratio:1.15
+mem_allocator:libc
+
+# Persistence
+loading:0
+rdb_changes_since_last_save:0
+rdb_bgsave_in_progress:0
+rdb_last_save_time:1597117812
+rdb_last_bgsave_status:ok
+rdb_last_bgsave_time_sec:-1
+rdb_current_bgsave_time_sec:-1
+aof_enabled:1
+aof_rewrite_in_progress:0
+aof_rewrite_scheduled:0
+aof_last_rewrite_time_sec:-1
+aof_current_rewrite_time_sec:-1
+aof_last_bgrewrite_status:ok
+aof_last_write_status:ok
+aof_current_size:0
+aof_base_size:0
+aof_pending_rewrite:0
+aof_buffer_length:0
+aof_rewrite_buffer_length:0
+aof_pending_bio_fsync:0
+aof_delayed_fsync:0
+
+# Stats
+total_connections_received:5
+total_commands_processed:513
+instantaneous_ops_per_sec:0
+total_net_input_bytes:33954
+total_net_output_bytes:173640
+instantaneous_input_kbps:0.02
+instantaneous_output_kbps:0.00
+rejected_connections:0
+sync_full:1
+sync_partial_ok:0
+sync_partial_err:0
+expired_keys:0
+evicted_keys:0
+keyspace_hits:0
+keyspace_misses:0
+pubsub_channels:0
+pubsub_patterns:0
+latest_fork_usec:1159
+migrate_cached_sockets:0
+
+# Replication
+role:master
+connected_slaves:1
+slave0:ip=127.0.0.1,port=18151,state=online,offset=589,lag=1
+master_repl_offset:589
+repl_backlog_active:1
+repl_backlog_size:1048576
+repl_backlog_first_byte_offset:2
+repl_backlog_histlen:588
+
+# CPU
+used_cpu_sys:0.42
+used_cpu_user:0.56
+used_cpu_sys_children:0.00
+used_cpu_user_children:0.00
+
+# Commandstats
+cmdstat_ping:calls=4,usec=19,usec_per_call=4.75,usec_std=1.00,usec_max=10
+cmdstat_psync:calls=1,usec=17,usec_per_call=17.00,usec_std=0.00,usec_max=17
+cmdstat_replconf:calls=416,usec=644,usec_per_call=1.55,usec_std=1.00,usec_max=11
+cmdstat_info:calls=2,usec=312,usec_per_call=156.00,usec_std=5.00,usec_max=183
+cmdstat_cluster:calls=90,usec=122372,usec_per_call=1359.69,usec_std=19.00,usec_max=1802
+
+# Cluster
+cluster_enabled:1
+
+# Keyspace
+
+# Tablespace
+
+# Eviction
+evictStat:sleeps=0,fullRowgroup=0,80Rowgroup=0,60Rowgroup=0,40Rowgroup=0,20Rowgroup=0,00Rowgroup=0
+recentEvictStat:recent 200 rowgroups' avg full percent:0
+
+# Storage(Disk Usage)
+DB0_TTL(sec):2592000
+DB0_size(KByte):200
+DB0_numFiles:0
+
+# CompressionRatios
+CVA_compress_algorithm:zstd
+CVA_comp_avg_ratio cannot be calculated because of not enough # of samples
+localhost:18101>
+
+

You can also check a specific section of information for each node.

+
localhost:18101> info memory
+# Memory
+isOOM:false
+used_memory:20751904
+used_memory_human:19.79M
+used_memory_rss:23949312
+used_memory_peak:20752816
+used_memory_peak_human:19.79M
+used_memory_lua:36864
+used_memory_rocksdb_total:100663872
+used_memory_rocksdb_block_cache:100663296
+used_memory_rocksdb_mem_table:576
+used_memory_rocksdb_table_readers:0
+used_memory_rocksdb_pinned_block:0
+meta_data_memory:64
+percent_of_meta_data_memory:0
+used_memory_client_buffer_peak:0
+mem_fragmentation_ratio:1.15
+mem_allocator:libc
+localhost:18101>
+localhost:18101> info storage
+# Storage(Disk Usage)
+DB0_TTL(sec):2592000
+DB0_size(KByte):200
+DB0_numFiles:0
+localhost:18101>
+
+
    +
  • CLUSTER
  • +
+

You can check the status of the cluster with the CLUSTER command.

+
localhost:18101> cluster info
+cluster_state:ok
+cluster_slots_assigned:16384
+cluster_slots_ok:16384
+cluster_slots_pfail:0
+cluster_slots_fail:0
+cluster_known_nodes:20
+cluster_size:10
+cluster_current_epoch:20
+cluster_my_epoch:2
+cluster_stats_messages_ping_sent:665
+cluster_stats_messages_pong_sent:679
+cluster_stats_messages_meet_sent:15
+cluster_stats_messages_sent:1359
+cluster_stats_messages_ping_received:675
+cluster_stats_messages_pong_received:680
+cluster_stats_messages_meet_received:4
+cluster_stats_messages_received:1359
+localhost:18101>
+localhost:18101> cluster nodes
+d531628bf7b2afdc095e445d21dedc2549cc4590 127.0.0.1:18107 master - 0 1597118527011 8 connected 11469-13106
+16da3917eff32cde8942660324c7374117902b01 127.0.0.1:18103 master - 0 1597118524000 4 connected 4915-6553
+7e72dff98fdda09cf97e02420727fd8b6564b6ae 127.0.0.1:18100 master - 0 1597118521882 1 connected 0-1637
+6b3a7f40f36cbe7aaad8ffffa58aefbf591d4967 127.0.0.1:18158 slave ae71f4430fba6a019e4111c3d26e27e225764200 0 1597118520862 19 connected
+d4cdcfdfdfb966a74a1bafce8969f956b5312094 127.0.0.1:18105 master - 0 1597118526000 6 connected 8192-9829
+11f3c47b736e37b274bbdef95a580a0c89bc9d9b 127.0.0.1:18159 slave b3734a60336856f8c4ef08efe763ae3ac32bb94a 0 1597118520000 20 connected
+5ed447baf1f1c6c454459c24809ffc197809cb6b 127.0.0.1:18104 master - 0 1597118523932 5 connected 6554-8191
+8d0061ff0bc8fcc0e8a9fa5db8d6ab0b7b7ba9d0 127.0.0.1:18154 slave 5ed447baf1f1c6c454459c24809ffc197809cb6b 0 1597118521000 15 connected
+b3734a60336856f8c4ef08efe763ae3ac32bb94a 127.0.0.1:18109 master - 0 1597118528026 10 connected 14746-16383
+f6670f4b8570758d509b5a0341a5151abea599ea 127.0.0.1:18152 slave ba39bada8a2e393f76d265ea02d3e078c9406a93 0 1597118524959 13 connected
+128a527bba2823e547e8138a77aebcfec7e55342 127.0.0.1:18150 slave 7e72dff98fdda09cf97e02420727fd8b6564b6ae 0 1597118524000 11 connected
+c3b5e673033758d77680e4534855686649fe5daa 127.0.0.1:18101 myself,master - 0 1597118523000 2 connected 1638-3276
+6f89f0b44f0a515865173984b95fc3f6fe4e7d72 127.0.0.1:18106 master - 0 1597118522000 7 connected 9830-11468
+ba39bada8a2e393f76d265ea02d3e078c9406a93 127.0.0.1:18102 master - 0 1597118520000 3 connected 3277-4914
+f004736cb50724f089289af34bd8da2e98b07a0b 127.0.0.1:18153 slave 16da3917eff32cde8942660324c7374117902b01 0 1597118524000 14 connected
+ae71f4430fba6a019e4111c3d26e27e225764200 127.0.0.1:18108 master - 0 1597118525985 9 connected 13107-14745
+ab72ae8dafc8a3f3229157cf5965bbfa1db6c726 127.0.0.1:18151 slave c3b5e673033758d77680e4534855686649fe5daa 0 1597118523000 12 connected
+208496ceb24eba1e26611071e185007b1ad552c5 127.0.0.1:18155 slave d4cdcfdfdfb966a74a1bafce8969f956b5312094 0 1597118520000 16 connected
+bbcba7c269fb8162e0f7ef5807e079ba06fc032b 127.0.0.1:18157 slave d531628bf7b2afdc095e445d21dedc2549cc4590 0 1597118513713 18 connected
+3d3af1bf3dec40fe0d5dbe1314638733dadb686e 127.0.0.1:18156 slave 6f89f0b44f0a515865173984b95fc3f6fe4e7d72 0 1597118523000 17 connected
+localhost:18101>
+localhost:18101> cluster slots
+ 1) 1) (integer) 11469
+    2) (integer) 13106
+    3) 1) "127.0.0.1"
+       2) (integer) 18107
+    4) 1) "127.0.0.1"
+       2) (integer) 18157
+ 2) 1) (integer) 4915
+    2) (integer) 6553
+    3) 1) "127.0.0.1"
+       2) (integer) 18103
+    4) 1) "127.0.0.1"
+       2) (integer) 18153
+ 3) 1) (integer) 0
+    2) (integer) 1637
+    3) 1) "127.0.0.1"
+       2) (integer) 18100
+    4) 1) "127.0.0.1"
+       2) (integer) 18150
+ 4) 1) (integer) 8192
+    2) (integer) 9829
+    3) 1) "127.0.0.1"
+       2) (integer) 18105
+    4) 1) "127.0.0.1"
+       2) (integer) 18155
+ 5) 1) (integer) 6554
+    2) (integer) 8191
+    3) 1) "127.0.0.1"
+       2) (integer) 18104
+    4) 1) "127.0.0.1"
+       2) (integer) 18154
+ 6) 1) (integer) 14746
+    2) (integer) 16383
+    3) 1) "127.0.0.1"
+       2) (integer) 18109
+    4) 1) "127.0.0.1"
+       2) (integer) 18159
+ 7) 1) (integer) 1638
+    2) (integer) 3276
+    3) 1) "127.0.0.1"
+       2) (integer) 18101
+    4) 1) "127.0.0.1"
+       2) (integer) 18151
+ 8) 1) (integer) 9830
+    2) (integer) 11468
+    3) 1) "127.0.0.1"
+       2) (integer) 18106
+    4) 1) "127.0.0.1"
+       2) (integer) 18156
+ 9) 1) (integer) 3277
+    2) (integer) 4914
+    3) 1) "127.0.0.1"
+       2) (integer) 18102
+    4) 1) "127.0.0.1"
+       2) (integer) 18152
+10) 1) (integer) 13107
+    2) (integer) 14745
+    3) 1) "127.0.0.1"
+       2) (integer) 18108
+    4) 1) "127.0.0.1"
+       2) (integer) 18158
+localhost:18101>
+
+
    +
  • CONFIG
  • +
+

With the CONFIG command, you can get or set the configuration of each feature.

+

1) Get

+
localhost:18101> config get maxmemory
+1) "maxmemory"
+2) "300mb"
+localhost:18101> config set maxmemory 310mb
+OK
+
+

2) Set

+
localhost:18101> config set maxmemory 310mb
+OK
+localhost:18101> config get maxmemory
+1) "maxmemory"
+2) "310mb"
+
+

3) Rewrite

+

With the config set command, the configuration is changed only in memory.

+

To save the modification on disk, use config rewrite after setting.

+
localhost:18101> config rewrite
+OK
+localhost:18101>
+
+

4) DIR

+

With the DIR command, you can check the path of the directory that each node uses to save its .rdb, .aof, db and *.conf files.

+
localhost:18101> config get dir
+1) "dir"
+2) "/sata_ssd/ssd_03/nvkvs/myaccount"
+
+ + + + + + + + + + + \ No newline at end of file diff --git a/how-to-use-lightningdb-on-k8s/index.html b/how-to-use-lightningdb-on-k8s/index.html new file mode 100644 index 0000000..f68e049 --- /dev/null +++ b/how-to-use-lightningdb-on-k8s/index.html @@ -0,0 +1,1132 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + How to use LightningDB - Lightning DB Docs + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

How to use LightningDB on Kubernetes

+

1. ltdb-http v2 - thrift beeline

+
kubectl -n metavision exec -it pod/ltdbv2-beeline-857f578cd9-d7kc4 -- beeline.sh
+
+0: jdbc:hive2://ltdbv2-http-svc:13000> select * from files limit 3;
+
+
+
    +
  • Create table (not using ANN)
  • +
+
CREATE TABLE IF NOT EXISTS ltdb.metavision.img_feats_noann(
+id BIGINT,
+is_s3 BOOLEAN,
+bucket STRING,
+obj_key STRING,
+features ARRAY<FLOAT>,
+meta STRING
+) USING lightning
+LOCATION '127.0.0.1:18500'
+TBLPROPERTIES ('partition.size'=2, 'partition.0'='bucket','partition.1'='id');
+
+
    +
  • Create table for ANN
  • +
+
CREATE TABLE IF NOT EXISTS ltdb.metavision.img_feats(
+id BIGINT,
+is_s3 BOOLEAN,
+bucket STRING,
+obj_key STRING,
+features ARRAY<FLOAT>,
+meta STRING
+) USING lightning
+LOCATION 'ltdbv2:6379'
+TBLPROPERTIES ('partition.size'=2, 'partition.0'='bucket','partition.1'='id', 'feature_idx'='4', 'ann_type'='1', 'feature_dim'='1024', 'ef_construction'='500', 'ann_max_elem'='10000', 'ann_m'='20');
+
+
    +
  • Ingest ANN data (from parquet)
      +
    • Create a temporary table to receive the parquet data
    • +
    +
  • +
+
CREATE TABLE IF NOT EXISTS ltdb.parquet.temptable(
+id BIGINT,
+is_s3 BOOLEAN,
+bucket STRING,
+obj_key STRING,
+features ARRAY<FLOAT>,
+meta STRING
+) USING parquet LOCATION 's3a://upload-data/real/vision-ai-private-data_6.csv.ViT-H-14.laion2b_s32b_b79k.975.parquet';
+
+
    +
  • Insert data
  • +
+
INSERT INTO ltdb.metavision.img_feats
+SELECT
+(CAST(RANDOM() * 1000000 AS INTEGER) % 400) AS id,
+is_s3,
+CONCAT('metavision-', bucket) AS bucket,
+obj_key,
+features,
+meta
+FROM
+ltdb.parquet.temptable
+LIMIT 100;
+
+
    +
  • Query data
  • +
+
SELECT * FROM ltdb.metavision.img_feats;
+SELECT count(obj_key) FROM ltdb.metavision.img_feats;
+
+
    +
  • Describe table
  • +
+
DESCRIBE formatted ltdb.metavision.img_feats;
+
+
    +
  • Drop table
  • +
+
DROP TABLE IF EXISTS ltdb.parquet.temptable;
+DROP TABLE IF EXISTS ltdb.metavision.img_feats;
+
+

2. Thunderquery CLI tool

+
kubectl -n metavision exec -it thunderquery-68544ff5f7-9shjv -- thunderquery-cli ltdbv2-0.ltdbv2
+
+
    +
  • ANN command
  • +
+
select bucket, obj_key, ann(features, [-0.009953999, -0.0006904541, -0.006250763, -0.009839512, 0.012631393, 0.024262842, -0.029540457, -0.01707404, 0.0061618676, 0.029112583, ... , -0.011023628]) as ann_result from ltdb.metavision.img_feats limit 2;
+
+
    +
  • KNN command
  • +
+
select bucket, obj_key, euclideandistance(features, [-0.009953999, -0.0006904541, -0.006250763, -0.009839512, 0.012631393, 0.024262842, -0.029540457, -0.01707404, 0.0061618676, 0.029112583, ... , -0.011023628]) as knn_result from ltdb.metavision.img_feats limit 2;
+
+

3. REST API

+
    +
  • Create table
  • +
+
$ curl --location --request POST http://metavision.k8s.lightningdb/ltdbv2-http/ingest/table \
+--header "Content-Type: text/plain" \
+--data "{
+'table': 'ltdb.metavision.img_feats',
+'schema': [{'name': 'id', 'typ': 'BIGINT'},
+{'name': 'is_s3', 'typ': 'BOOLEAN'},
+{'name': 'bucket', 'typ': 'STRING'},
+{'name': 'obj_key', 'typ': 'STRING'},
+{'name': 'features', 'typ': 'ARRAY<FLOAT>'},
+{'name': 'meta', 'typ': 'STRING'}],
+'loc': 'ltdbv2:6379',
+'props': [{'key': 'partition.size', 'val': '2'},
+{'key': 'partition.0', 'val': 'bucket'},
+{'key': 'partition.1', 'val': 'id'},
+{'key': 'feature_idx', 'val': '4'},
+{'key': 'ann_type', 'val': '1'},
+{'key': 'feature_dim', 'val': '1024'},
+{'key': 'ef_construction', 'val': '500'},
+{'key': 'ann_max_elem', 'val': '10000'},
+{'key': 'ann_m', 'val': '20'}]
+}"
+
+
    +
  • Ingest ANN data (from parquet)
  • +
+
$ curl --location --request POST http://metavision.k8s.lightningdb/ltdbv2-http/ingest/data \
+--header "Content-Type: text/plain" \
+--data "{
+'src_format': 'parquet',
+'src_loc': 's3a://upload-data/real/vision-ai-private-data_6.csv.ViT-H-14.laion2b_s32b_b79k.975.parquet',
+'dest_table': 'ltdb.metavision.img_feats',
+'limit': 100,
+'src_cols_with_random': [{'name': 'id', 'range': 400}],
+'src_cols_to_modify': [{'name': 'bucket', 'prefix': 'metavision-'}]
+}"
+
+
    +
  • Query data
  • +
+
$ curl --location --request POST http://metavision.k8s.lightningdb/ltdbv2-http/query \
+--header "Content-Type: text/plain" \
+--data "SELECT count(obj_key) FROM ltdb.metavision.img_feats"
+
+
    +
  • Describe table
  • +
+
$ curl --location --request GET http://metavision.k8s.lightningdb/ltdbv2-http/ingest/table/ltdb.metavision.img_feats 
+
+
    +
  • Drop table
  • +
+
$ curl --location --request DELETE http://metavision.k8s.lightningdb/ltdbv2-http/ingest/table/ltdb.metavision.img_feats 
+
+
    +
  • ANN command
  • +
+
$ curl -d 'select bucket, obj_key, ann(features, [-0.009953999, -0.0006904541, -0.006250763, -0.009839512, 0.012631393, 0.024262842, -0.029540457, -0.01707404, 0.0061618676, 0.029112583, ... , -0.011023628]) as ann_result from ltdb.metavision.img_feats limit 2;' http://metavision.k8s.lightningdb/thunderquery/sql 
+
+
    +
  • KNN command
  • +
+
$ curl -d 'select bucket, obj_key, euclideandistance(features, [-0.009953999, -0.0006904541, -0.006250763, -0.009839512, 0.012631393, 0.024262842, -0.029540457, -0.01707404, 0.0061618676, ... , -0.011023628]) as ann_result from ltdb.metavision.img_feats limit 2;' http://metavision.k8s.lightningdb/thunderquery/sql 
+
+ + + + + + + + + + + \ No newline at end of file diff --git a/images/LightningDB_archi.png b/images/LightningDB_archi.png new file mode 100644 index 0000000..18038ff Binary files /dev/null and b/images/LightningDB_archi.png differ diff --git a/images/aws_marketplace.png b/images/aws_marketplace.png new file mode 100644 index 0000000..118ba1e Binary files /dev/null and b/images/aws_marketplace.png differ diff --git a/images/curr-build-env.png b/images/curr-build-env.png new file mode 100644 index 0000000..a84047d Binary files /dev/null and b/images/curr-build-env.png differ diff --git a/images/flashbase-architecture1.png b/images/flashbase-architecture1.png new file mode 100644 index 0000000..4b8ab7a Binary files /dev/null and b/images/flashbase-architecture1.png differ diff --git a/images/flashbase-architecture2.png b/images/flashbase-architecture2.png new file mode 100644 index 0000000..5ecba0b Binary files /dev/null and b/images/flashbase-architecture2.png differ diff --git a/images/import_notebook.gif b/images/import_notebook.gif new file mode 100644 index 0000000..f8b5b2a Binary files /dev/null and b/images/import_notebook.gif differ diff --git a/images/lightningdb_architecture.png b/images/lightningdb_architecture.png new file mode 100644 index 0000000..d5d4d13 Binary files /dev/null and b/images/lightningdb_architecture.png differ diff --git a/images/ltdb-deployment-env.png b/images/ltdb-deployment-env.png new file mode 100644 index 0000000..62b9b05 Binary files /dev/null and b/images/ltdb-deployment-env.png differ diff --git a/images/shell-timeout.png b/images/shell-timeout.png new file mode 100644 index 0000000..96385ba Binary files /dev/null and b/images/shell-timeout.png differ diff --git a/images/spark-interpreter.png b/images/spark-interpreter.png new file mode 100644 index 0000000..c4d507b Binary files /dev/null and b/images/spark-interpreter.png differ diff --git a/images/tutorial-aggr-pushdown.png b/images/tutorial-aggr-pushdown.png new file mode 100644 index 0000000..e33e332 Binary files /dev/null and b/images/tutorial-aggr-pushdown.png differ diff --git a/images/tutorial-architecture.png b/images/tutorial-architecture.png new file mode 100644 index 0000000..445e8c1 Binary files /dev/null and b/images/tutorial-architecture.png differ diff --git a/images/tutorial-failover.png b/images/tutorial-failover.png new file mode 100644 index 0000000..f538244 Binary files /dev/null and b/images/tutorial-failover.png differ diff --git a/images/tutorial-features.png b/images/tutorial-features.png new file mode 100644 index 0000000..5ae8a26 Binary files /dev/null and b/images/tutorial-features.png differ diff --git a/images/tutorial-minmax_AggregateMultiBinary.png b/images/tutorial-minmax_AggregateMultiBinary.png new file mode 100644 index 0000000..a8624e2 Binary files /dev/null and b/images/tutorial-minmax_AggregateMultiBinary.png differ diff --git a/images/tutorial-minmax_PrunedFilteredScan.png b/images/tutorial-minmax_PrunedFilteredScan.png new file mode 100644 index 0000000..27ab91a Binary files /dev/null and b/images/tutorial-minmax_PrunedFilteredScan.png differ diff --git a/images/tutorial-partitioning.png b/images/tutorial-partitioning.png new file mode 100644 index 0000000..4f452b6 Binary files /dev/null and b/images/tutorial-partitioning.png differ diff --git a/images/tutorial-query-accel.png b/images/tutorial-query-accel.png new file mode 100644 index 0000000..60afc73 Binary files /dev/null and b/images/tutorial-query-accel.png differ diff --git 
a/images/tutorial-query-accel2.png b/images/tutorial-query-accel2.png new file mode 100644 index 0000000..e124255 Binary files /dev/null and b/images/tutorial-query-accel2.png differ diff --git a/images/tutorial-replication.png b/images/tutorial-replication.png new file mode 100644 index 0000000..1f184b4 Binary files /dev/null and b/images/tutorial-replication.png differ diff --git a/index.html b/index.html new file mode 100644 index 0000000..f1d37b9 --- /dev/null +++ b/index.html @@ -0,0 +1,920 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + Overview - Lightning DB Docs + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

1. LightningDB is

+

A distributed in-memory DBMS for real-time big data analytics

+
    +
  • Realtime ingestion and analytics for large scale data
  • +
  • Advantages in random small data access based on DRAM/SSD resident KV Store
  • +
  • Optimized for time series data and geospatial data
  • +
+

2. Architecture

+

Spark with Redis/Rocksdb key-value stores

+
    +
  • No I/O bottleneck, thanks to Redis in DRAM and RocksDB on SSDs, small-sized key/value I/O, and the short latency of DRAM/SSDs (~200us)
  • +
  • Filter predicates are pushed down to Redis, and only the associated partitions are chosen to be scanned
  • +
+

flashbase-architecture2

+

3. Features

+
    +
  • Ingestion performance (500,000 records/sec/node)
  • +
  • Extreme partitioning (up to 2 billion partitions for a single node)
  • +
  • Real-time query performance by using fine-grained partitions and filter acceleration (vector processing by exploiting XEON SIMD instructions)
  • +
  • Column-store / row-store support
  • +
  • DRAM - SSD - HDD Tiering
  • +
  • High compression ratio and compression speed (Gzip level compression ratio w/ LZ4 level speed)
  • +
  • Low Write Amplification for SSD lifetime
  • +
  • Asynchronous replication with low latency and high performance
  • +
  • Node-based scale-out (adding new nodes and scaling out without data rebalancing)
  • +
+ + + + + + + + + + + \ No newline at end of file diff --git a/install-ltcli/index.html b/install-ltcli/index.html new file mode 100644 index 0000000..e9deeed --- /dev/null +++ b/install-ltcli/index.html @@ -0,0 +1,1207 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + Installation - Lightning DB Docs + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

1. How to run LTCLI

+

If you are using LTCLI for the first time after the EC2 instance was created, please update LTCLI as shown below.

+
pip install ltcli --upgrade --user
+
+

(1) Run

+

To run LTCLI, ${FBPATH} should be set. If it is not set, the following error message will be shown.

+
To start using LTCLI, you should set env FBPATH
+ex)
+export FBPATH=$HOME/.flashbase
+
+
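
One way to set it permanently is to add the export line from the message above to your shell profile (which profile file you use is up to you):

+
$ echo 'export FBPATH=$HOME/.flashbase' >> ~/.bash_profile
+$ source ~/.bash_profile
+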
+

Tip

+

In the case of an EC2 instance, this path is set automatically.

+
+

Run LTCLI by typing 'ltcli'

+
$ ltcli
+
+

When LTCLI starts for the first time, you need to confirm 'base_directory'.

+

[~/tsr2]1 is the default value.

+
Type base directory of LightningDB [~/tsr2]
+~/tsr2
+OK, ~/tsr2
+
+

In '${FBPATH}/.flashbase/config', you can modify 'base_directory'.

+
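
For example, you can open that file with any editor and adjust the value:

+
$ vi ${FBPATH}/.flashbase/config
+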

If you log in to LTCLI normally, LTCLI starts on the last visited cluster. +On the first login, '-' is shown instead of a cluster number.

+
root@lightningdb:->
+
+...
+...
+
+root@lightningdb:1>
+
+
+

Tip

+

On this page, '$' means that you are in the CentOS shell and '>' means that you are in LTCLI.

+
+

(2) Log messages

+

Log messages of LTCLI will be saved in '$FBPATH/logs/fb-roate.log'.

+
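
To follow the log in real time, you can simply tail the file mentioned above:

+
$ tail -f $FBPATH/logs/fb-roate.log
+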

Its maximum file size is 1 GiB, and the log is rolled over when this size limit is exceeded.

+

2. Deploy LightningDB

+

Deploy is the procedure by which LightningDB is installed with the specified cluster number.

+

You can create a LightningDB cluster with the following command.

+
> deploy 1
+
+

After the deploy command, you will be prompted for the following information; each prompt offers its last used value as the default.

+
    +
  • Installer
  • +
  • Host
  • +
  • Number of masters
  • +
  • Replicas
  • +
  • Number of SSD(disk)
  • +
  • The prefix of DB path (used for 'redis data', 'redis DB path' and 'flash DB path')
  • +
+

Use the option below if you do not want to save the last used values.

+
> deploy --history-save=False
+
+

(1) Select installer

+
Select installer
+
+    [ INSTALLER LIST ]
+    (1) [DOWNLOAD] lightningdb.release.master.5a6a38.bin
+    (2) [LOCAL] lightningdb.release.master.dbcb9e.bin
+    (3) [LOCAL] lightningdb.trial.master.dbcb9e-dirty.bin
+    (4) [LOCAL] lightningdb.trial.master.dbcb9e.bin
+
+Please enter the number, file path or url of the installer you want to use.
+you can also add file in list by copy to '$FBPATH/releases/'
+1
+OK, lightningdb.release.master.5a6a38.bin
+
+
+

Tip

+

LOCAL means an installer file under the path '$FBPATH/releases/' on your local machine. +DOWNLOAD refers to a file that can be downloaded; up to 5 files are displayed, newest first. To confirm the recommended Lightning DB version, see the Release Notes.

+
+

Select a number to use that file. A file of type DOWNLOAD will be used after it is downloaded. The downloaded file is saved under '$FBPATH/releases'.

+
Select installer
+
+    [ INSTALLER LIST ]
+    (empty)
+
+Please enter file path or url of the installer you want to use
+you can also add file in list by copy to '$FBPATH/releases/'
+https://flashbase.s3.ap-northeast-2.amazonaws.com/latest/lightningdb.release.master.5a6a38.bin
+Downloading lightningdb.release.master.5a6a38.bin
+[==================================================] 100%
+OK, lightningdb.release.master.5a6a38.bin
+
+

If the installer list is empty as above, you can also enter a file path or URL. If you enter a URL, the file is downloaded and then used. The downloaded file is saved under '$FBPATH/releases'.

+

(2) Type Hosts

+

An IP address or hostname can be used. In the case of several hosts, separate the list with commas(',').

+
Please type host list separated by comma(,) [127.0.0.1]
+
+OK, ['127.0.0.1']
+
+

(3) Type Masters

+
How many masters would you like to create on each host? [10]
+
+OK, 10
+Please type ports separate with a comma(,) and use a hyphen(-) for range. [18100-18109]
+
+OK, ['18100-18109']
+
+

Define how many master processes will be created in the cluster per server.

+
+

Tip

+

To create a cluster, at least 3 master processes are required.

+
+

(4) Type information of slave

+
How many replicas would you like to create on each master? [0]
+
+OK, 0
+
+

Define how many slave processes will be created for a master process.

+

(5) Type the count of SSD(disk) and the path of DB files

+
How many ssd would you like to use? [4]
+
+OK, 4
+Type prefix of db path [/nvme/data_]
+
+OK, /nvme/data_
+
+

With this setting, LightningDB will use 4 disk paths('/nvme/data_01', '/nvme/data_02', '/nvme/data_03', '/nvme/data_04').

+
+

Tip

+

In order to use this setting, an 'nvkvs' directory must be created under every disk path and the ownership/permission settings (chown/chmod) for the directory must be configured as follows.

+
+
$ pwd
+/nvme/data_01
+$ mkdir nvkvs
+$chown ltdb nvkvs   // The current user is 'ltdb'
+$chmod 755 nvkvs
+$ ls -alh
+drwxr-xr-x 10 ltdb  ltdb  4.0K  4월 27 14:34 .
+drwxr-xr-x 33 ltdb  ltdb  4.0K  2월  4 10:19 ..
+drwxrwxr-x  3 ltdb  ltdb  4.0K  6월  5 18:36 nvkvs  // The current user is 'ltdb'
+...
+
+
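
Repeating this setup for all four disk paths can be done with a short loop; this is only a sketch, and the 'ltdb' account name is taken from the example above (replace it with your own user):

+
for d in /nvme/data_01 /nvme/data_02 /nvme/data_03 /nvme/data_04; do
+  mkdir -p "$d/nvkvs"
+  chown ltdb "$d/nvkvs"
+  chmod 755 "$d/nvkvs"
+done
+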

(6) Check all settings finally

+

Finally, all settings will be shown and confirmation will be requested like below.

+
+--------------+---------------------------------------+
+| NAME         | VALUE                                 |
++--------------+---------------------------------------+
+| installer    | lightningdb.release.master.5a6a38.bin |
+| hosts        | 127.0.0.1                             |
+| master ports | 18100-18109                           |
+| ssd count    | 4                                     |
+| db path      | /nvme/data_                           |
++--------------+---------------------------------------+
+Do you want to proceed with the deploy accroding to the above information? (y/n)
+y
+
+

(7) Deploy cluster

+

After deployment is completed, the following messages are shown and LTCLI for the cluster is activated.

+
Check status of hosts...
++-----------+--------+
+| HOST      | STATUS |
++-----------+--------+
+| 127.0.0.1 | OK     |
++-----------+--------+
+OK
+Checking for cluster exist...
++-----------+--------+
+| HOST      | STATUS |
++-----------+--------+
+| 127.0.0.1 | CLEAN  |
++-----------+--------+
+OK
+Transfer installer and execute...
+ - 127.0.0.1
+Sync conf...
+Complete to deploy cluster 1.
+Cluster '1' selected.
+
+

When an error occurs during deployment, error messages like the ones below will be shown.

+

(8) Errors

+

Host connection error

+
Check status of hosts...
++-------+------------------+
+| HOST  | STATUS           |
++-------+------------------+
+| nodeA | OK               |
+| nodeB | SSH ERROR        |
+| nodeC | UNKNOWN HOST     |
+| nodeD | CONNECTION ERROR |
++-------+------------------+
+There are unavailable host.
+
+
    +
  • SSH ERROR
      +
    • SSH access error. Please check SSH KEY exchange or the status of SSH client/server.
    • +
    +
  • +
  • UNKNOWN HOST
      +
    • Cannot resolve an IP address from the hostname. Please check that the hostname is correct.
    • +
    +
  • +
  • CONNECTION ERROR
      +
    • Please check the status of the host(server) and the server's inbound/outbound network settings.
    • +
    +
  • +
+

Cluster already exists

+
Checking for cluster exist...
++-------+---------------+
+| HOST  | STATUS        |
++-------+---------------+
+| nodeA | CLEAN         |
+| nodeB | CLEAN         |
+| nodeC | CLUSTER EXIST |
+| nodeD | CLUSTER EXIST |
++-------+---------------+
+Cluster information exist on some hosts.
+
+
    +
  • CLUSTER EXIST
      +
    • LightningDB is already deployed in the cluster of the host.
    • +
    +
  • +
+

Localhost not included

+
 Check status of hosts...
+  +-------+------------------+
+  | HOST  | STATUS           |
+  +-------+------------------+
+  | nodeB | OK               |
+  | nodeC | OK               |
+  | nodeD | OK               |
+  +-------+------------------+
+  Must include localhost.
+
+

This error occurs if localhost(127.0.0.1) is not included in the host information. In that case, please add localhost to the host list.

+

3. Start LightningDB

+

Create a LightningDB cluster using the 'cluster create' command.

+
ec2-user@lightningdb:1> cluster create
+Check status of hosts...
+OK
+Backup redis master log in each MASTER hosts...
+ - 127.0.0.1
+create redis data directory in each MASTER hosts
+ - 127.0.0.1
+sync conf
++-----------+--------+
+| HOST      | STATUS |
++-----------+--------+
+| 127.0.0.1 | OK     |
++-----------+--------+
+OK
+Starting master nodes : 127.0.0.1 : 18100|18101|18102|18103|18104|18105|18106|18107|18108|18109 ...
+Wait until all redis process up...
+cur: 10 / total: 10
+Complete all redis process up
+>>> Creating cluster
++-----------+-------+--------+
+| HOST      | PORT  | TYPE   |
++-----------+-------+--------+
+| 127.0.0.1 | 18100 | MASTER |
+| 127.0.0.1 | 18101 | MASTER |
+| 127.0.0.1 | 18102 | MASTER |
+| 127.0.0.1 | 18103 | MASTER |
+| 127.0.0.1 | 18104 | MASTER |
+| 127.0.0.1 | 18105 | MASTER |
+| 127.0.0.1 | 18106 | MASTER |
+| 127.0.0.1 | 18107 | MASTER |
+| 127.0.0.1 | 18108 | MASTER |
+| 127.0.0.1 | 18109 | MASTER |
++-----------+-------+--------+
+replicas: 0
+
+Do you want to proceed with the create according to the above information? (y/n)
+y
+Cluster meet...
+ - 127.0.0.1:18107
+ - 127.0.0.1:18106
+ - 127.0.0.1:18101
+ - 127.0.0.1:18100
+ - 127.0.0.1:18103
+ - 127.0.0.1:18109
+ - 127.0.0.1:18102
+ - 127.0.0.1:18108
+ - 127.0.0.1:18105
+ - 127.0.0.1:18104
+Adding slots...
+ - 127.0.0.1:18107, 1642
+ - 127.0.0.1:18106, 1638
+ - 127.0.0.1:18101, 1638
+ - 127.0.0.1:18100, 1638
+ - 127.0.0.1:18103, 1638
+ - 127.0.0.1:18109, 1638
+ - 127.0.0.1:18102, 1638
+ - 127.0.0.1:18108, 1638
+ - 127.0.0.1:18105, 1638
+ - 127.0.0.1:18104, 1638
+Check cluster state and asign slot...
+Ok
+create cluster complete.
+ec2-user@lightningdb:1> cli ping --all
+alive redis 10/10
+
+ec2-user@lightningdb:1>
+
+

From now on, you can try ingestion and queries in LightningDB with Zeppelin. For further information about LTCLI commands, please refer to Command Line.

+
+
+
    +
  1. +

    If you type 'enter' without any text, the default value is applied. In some cases, the default value will not be provided. 

    +
  2. +
+
+ + + + + + + + + + + \ No newline at end of file diff --git a/kaetlyn/index.html b/kaetlyn/index.html new file mode 100644 index 0000000..e496cc6 --- /dev/null +++ b/kaetlyn/index.html @@ -0,0 +1,1264 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + Data ingestion with KAFKA - Lightning DB Docs + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

1. Kafka broker

+

1. Kafka Cluster

+

(1) Install kafka

+
    +
  • Install Kafka on each server that is part of the Kafka cluster.
  • +
  • Add the $KAFKA_HOME path to ~/.bash_profile.
  • +
+

(2) Install zookeeper

+
    +
  • On each server of the Kafka cluster, set the $dataDir and $server.1 ~ $server.n properties in $KAFKA_HOME/config/zookeeper.properties.
  • +
  • For example, if you configure the Kafka cluster with my-server1 and my-server2, set the server.1 and server.2 fields.
  • +
+
dataDir=/hdd_01/zookeeper
+# the port at which the clients will connect
+clientPort=2181
+# disable the per-ip limit on the number of connections since this is a non-production config
+maxClientCnxns=0
+
+initLimit=5
+syncLimit=2
+
+# Zookeeper will use these ports (2891, etc.) to connect the individual follower nodes to the leader nodes.
+# The other ports (3881, etc.) are used for leader election in the ensemble.
+server.1=my-server1:2888:3888
+server.2=my-server2:2888:3888
+
+
    +
  • On each server, write its own id into ${dataDir}/myid.
  • +
  • For example, use echo "1" > ${dataDir}/myid on my-server1 and echo "2" > ${dataDir}/myid on my-server2.
  • +
  • Start ZooKeeper on each server.
  • +
+
    > $KAFKA_HOME/bin/zookeeper-server-start.sh config/zookeeper.properties &
+
+

(3) Start kafka broker

+
    +
  • Edit $KAFKA_HOME/config/server.properties on each server.
  • +
  • Set the broker ID in my-server1.
  • +
+
    broker.id=1     // '2' in case of my-server2
+
+
    +
  • Configure the ZooKeeper IP and PORT: use ',' as the separator.
  • +
+
    zookeeper.connect=my-server1:2181,my-server2:2181
+
+
    +
  • Configure the paths for Kafka data: add a directory on each disk for load balancing.
  • +
+
    log.dirs=/hdd_01/kafka,/hdd_02/kafka,/hdd_03/kafka,/hdd_04/kafka
+
+
    +
  • Configure the retention time for keeping records and the retention size limit for each partition.
  • +
+
    # default value: 168
+    log.retention.hours=168 
+
+    # '-1' means 'unlimited'.
+    log.retention.bytes=-1
+
+
    +
  • Configure the maximum size of a message.
  • +
+
    # If a size of a produced message exceed this limit, the exception is thrown.
+    # If you want to create a message with many rows, increase this value and restart broker.
+    # default value: 1000012 byte
+    message.max.bytes=1000012
+
+
    +
  • Start the Kafka server on each server.
  • +
+
    > $KAFKA_HOME/bin/kafka-server-start.sh config/server.properties &
+
+
    +
  • Create a topic.
  • +
+
    # --zookeeper localhost:2181 : Need zookeeper host & clientPort, because topics and partition information are stored in zookeeper.
+    # --topic nvkvs : For example, set 'nvkvs' as topic name.
+    # --partitions 16 : For example, set 2 partitions in each disk and use 16 partitions((# of cluster nodes) X (# of disks in each node) X 2 = 2 X 4 X 2 = 16).
+    # --replication-factor 2 : Create 1 follower for each partition.
+    > $KAFKA_HOME/bin/kafka-topics.sh --create --zookeeper localhost:2181 --replication-factor 2 --partitions 16 --topic nvkvs
+    # Check a generated topic: A broker.id of Replicas is different with a broker.id of Leader.
+    > $KAFKA_HOME/bin/kafka-topics.sh --zookeeper localhost:2181 --describe --topic nvkvs
+
+    Topic:nvkvs PartitionCount:16   ReplicationFactor:2 Configs:
+    Topic: nvkvs    Partition: 0    Leader: 0   Replicas: 0,1   Isr: 1,0
+    Topic: nvkvs    Partition: 1    Leader: 1   Replicas: 1,0   Isr: 1,0
+    Topic: nvkvs    Partition: 2    Leader: 0   Replicas: 0,1   Isr: 1,0
+    Topic: nvkvs    Partition: 3    Leader: 1   Replicas: 1,0   Isr: 1,0
+    Topic: nvkvs    Partition: 4    Leader: 0   Replicas: 0,1   Isr: 1,0
+    Topic: nvkvs    Partition: 5    Leader: 1   Replicas: 1,0   Isr: 1,0
+    Topic: nvkvs    Partition: 6    Leader: 0   Replicas: 0,1   Isr: 1,0
+    Topic: nvkvs    Partition: 7    Leader: 1   Replicas: 1,0   Isr: 1,0
+    Topic: nvkvs    Partition: 8    Leader: 0   Replicas: 0,1   Isr: 1,0
+    Topic: nvkvs    Partition: 9    Leader: 1   Replicas: 1,0   Isr: 1,0
+    Topic: nvkvs    Partition: 10   Leader: 0   Replicas: 0,1   Isr: 1,0
+    Topic: nvkvs    Partition: 11   Leader: 1   Replicas: 1,0   Isr: 1,0
+    Topic: nvkvs    Partition: 12   Leader: 0   Replicas: 0,1   Isr: 1,0
+    Topic: nvkvs    Partition: 13   Leader: 1   Replicas: 1,0   Isr: 1,0
+    Topic: nvkvs    Partition: 14   Leader: 0   Replicas: 0,1   Isr: 1,0
+    Topic: nvkvs    Partition: 15   Leader: 1   Replicas: 1,0   Isr: 1,0
+
+
    +
  • Delete topic / Modify the number of partitions.
  • +
+
    # Topic delete Command
+    > $KAFKA_HOME/bin/kafka-topics.sh --zookeeper localhost:2181 --delete --topic nvkvs
+
+    # Topic partition modification
+    > $KAFKA_HOME/bin/kafka-topics.sh --zookeeper localhost:2181/chroot --alter --topic nvkvs --partitions 6
+
+

2. Kafka Topic Information

+
    +
  • Consumer list
  • +
+
    > $KAFKA_HOME/bin/kafka-consumer-groups.sh  --list --bootstrap-server localhost:9092
+
+
    +
  • Console consumer start
  • +
+
    > $KAFKA_HOME/bin/kafka-console-consumer.sh --bootstrap-server localhost:9092 --topic nvkvs --from-beginning
+
+
    +
  • Consumer offset check
  • +
+
    # Add '--group {consumer group name}'
+    > $KAFKA_HOME/bin/kafka-consumer-groups.sh --bootstrap-server localhost:9092 --describe --group (Consumer group name)
+
+    TOPIC           PARTITION  CURRENT-OFFSET  LOG-END-OFFSET  LAG             CONSUMER-ID     HOST            CLIENT-ID
+    nvkvs           4          272904          272904          0               -               -               -
+    nvkvs           12         272904          272904          0               -               -               -
+    nvkvs           15         273113          273113          0               -               -               -
+    nvkvs           6          272906          272906          0               -               -               -
+    nvkvs           0          272907          272907          0               -               -               -
+    nvkvs           8          272905          272905          0               -               -               -
+    nvkvs           3          273111          273111          0               -               -               -
+    nvkvs           9          273111          273111          0               -               -               -
+    nvkvs           13         273111          273111          0               -               -               -
+    nvkvs           10         272912          272912          0               -               -               -
+    nvkvs           1          273111          273111          0               -               -               -
+    nvkvs           11         273112          273112          0               -               -               -
+    nvkvs           14         272904          272904          0               -               -               -
+    nvkvs           7          273110          273110          0               -               -               -
+    nvkvs           5          273111          273111          0               -               -               -
+    nvkvs           2          272905          272905          0               -               -               -
+
+
    +
  • Consumer offset modification
  • +
+
    # --shift-by <positive_or_negative_integer>
+    # --group <name of the group to shift>
+    > $KAFKA_HOME/bin/kafka-consumer-groups.sh --bootstrap-server localhost:9092 --reset-offsets --shift-by -10000 --execute --group (Consumer group name) --topic nvkvs
+
+
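Besides '--shift-by', the same tool supports resetting to the earliest or latest offset (these are standard kafka-consumer-groups.sh options). For example:

    # Reset the consumer group to the earliest available offset ('--to-latest' works the same way)
+    > $KAFKA_HOME/bin/kafka-consumer-groups.sh --bootstrap-server localhost:9092 --reset-offsets --to-earliest --execute --group (Consumer group name) --topic nvkvs
+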

2. Kafka consumer

+

1. Kaetlyn Consumer

+
    +
  • tsr2-kaetlyn edit
  • +
+
  KAFKA_SERVER : host:port of the Kafka broker.
+  DRIVER_MEMORY, EXECUTOR_MEMORY : Memory of the Spark driver/executor in YARN. After start, check the 'FGC' count with 'jstat -gc' and tune these values (a jstat sketch follows the configuration example below).
+  EXECUTERS, EXECUTER_CORES : Basically, as many consumers are created as there are Kafka partitions. With this rule in mind, tune the number of EXECUTERS and EXECUTER_CORES.
+  JSON_PATH : The path of the TABLE json file. HDFS paths are not supported; this is a relative path from tsr2-kaetlyn.
+  KAFKA_CONSUMER_GROUP_ID : Consumer group id.
+  KAFKA_CONSUMING_TOPIC_LIST : Topic list with ',' as the separator.
+  JOB_GENERATION_PERIOD : The consumer checks the latest offset and launches a consuming job with this period.
+  MAX_RATE_PER_PARTITION : The maximum number of offsets that a consumer processes per partition within a job period.
+
+
> cfc 1 (or c01)
+> tsr2-kaetlyn edit
+
+#!/bin/bash
+###############################################################################
+# Common variables
+SPARK_CONF=${SPARK_CONF:-$SPARK_HOME/conf}
+SPARK_BIN=${SPARK_BIN:-$SPARK_HOME/bin}
+SPARK_SBIN=${SPARK_SBIN:-$SPARK_HOME/sbin}
+SPARK_LOG=${SPARK_LOG:-$SPARK_HOME/logs}
+
+SPARK_METRICS=${SPARK_CONF}/metrics.properties
+SPARK_UI_PORT=${SPARK_UI_PORT:-14040}
+
+KAFKA_SERVER=my-server1:9092
+
+###############################################################################
+# Properties for Consumer
+DRIVER_MEMORY=2g
+
+EXECUTOR_MEMORY=2g
+EXECUTERS=16
+EXECUTER_CORES=4
+
+JSON_PATH=~/Flashbase/flashbase-benchmark/json/load_no_skew
+KAFKA_CONSUMER_GROUP_ID=nvkvs_redis_connector
+KAFKA_CONSUMING_TOPIC_LIST=nvkvs
+JOB_GENERATION_PERIOD=1
+MAX_RATE_PER_PARTITION=100
+...
+
+
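For reference, a minimal sketch of checking the 'FGC' (full GC) count mentioned above with 'jstat -gc'; the way of finding the executor PID below is an assumption and may differ in your environment:

    # On the worker node, find the executor JVM pid (hypothetical grep pattern)
+    > jps | grep CoarseGrainedExecutorBackend
+
+    # Print GC statistics every 5 seconds; watch the 'FGC' column (full GC count)
+    > jstat -gc {executor pid} 5000
+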

2. Kaetlyn Consumer start/stop

+
    +
  • Because a kaetlyn consumer is a Spark application running on a YARN cluster, Hadoop/YARN and Spark should already be installed.
  • +
  • Start and monitor Driver Log
  • +
+
> tsr2-kaetlyn consumer start
+> tsr2-kaetlyn consumer monitor
+
+
    +
  • If the consumer starts successfully, the state of the application in YARN is set to RUNNING.
  • +
+
> yarn application -list
+
+
    +
  • Stop : With SIGTERM, the current job is stopped and the Kafka offset is updated.
  • +
+
> tsr2-kaetlyn consumer stop
+
+

3. Kaetlyn Log level modification

+
    +
  • Kaetlyn uses logback as its logger. After the kaetlyn consumer starts, the file '$SPARK_HOME/conf/logback-kaetlyn.xml' is generated.
  • +
  • To modify the driver log level, edit this file.
  • +
+
  > vi $SPARK_HOME/conf/logback-kaetlyn.xml
+
+
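A hedged sketch of locating the log-level settings in that file before editing, assuming the generated file uses standard logback 'level' attributes:

    > grep -n 'level=' $SPARK_HOME/conf/logback-kaetlyn.xml
+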

3. Kafka producer

+

Start kafka producer.

+
kafka-console-producer.sh --broker-list localhost:9092 --topic {topic name} < {filename to ingest}
+
+

To produce messages for a kaetlyn consumer, the following 2 header fields should be included.

+
TABLE_ID
+SEPARATOR
+
+

If you use 'kafkacat', you can produce messages with these additional header fields. (https://docs.confluent.io/3.3.0/app-development/kafkacat-usage.html# )

+

1. How to install kafkacat

+
    +
  • c++ compiler
  • +
+
$yum install gcc-c++
+
+
    +
  • Download source codes
  • +
+
$ git clone https://github.com/edenhill/librdkafka
+
+
    +
  • Build and install
  • +
+
$ cd librdkafka
+$ ./configure
+$ make
+$ sudo make install
+
+
    +
  • Move to '/usr/local/lib' and execute the commands below.
  • +
+
$ git clone https://github.com/edenhill/kafkacat
+$ cd kafkacat
+$ ./configure
+$ make
+$ sudo make install
+
+
    +
  • How to find Lib path
  • +
+
$ ldd kafkacat
+
+
    +
  • Create and edit /etc/ld.so.conf.d/usrlocal.conf
  • +
+
Contents:
+/usr/local/lib
+
+
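Instead of editing the file by hand, the same content can be written with a one-liner (requires root):

    $ echo '/usr/local/lib' | sudo tee /etc/ld.so.conf.d/usrlocal.conf
+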
    +
  • Save and execute
  • +
+
$ ldconfig -v
+
+
    +
  • If the 'kafkacat' usage output is shown, kafkacat has been installed successfully.
  • +
+
$kafkacat
+
+

2. Producing with kafkacat

+

1) Produce a single file

+
kafkacat -b localhost:9092 -t {topic name} -T -P -H TABLE_ID='{table id}' -H  SEPARATOR='|' -l {filename}
+
+

2) Produce all files in a directory

+

After moving to the directory path,

+
ls | xargs -n 1 kafkacat -q -b localhost:9092 -t {topic name} -P -H TABLE_ID='{table id}' -H  SEPARATOR='|' -l
+
+

3. kafka-utils.sh

+

With kafka-utils.sh, you can check the status of the Kafka broker.

+

Because 'kafka-utils.sh' exists under the sbin path of each cluster, you can use it after selecting the cluster with 'cfc {cluster number}'.

+
[C:6][ltdb@d205 ~]$ which kafka-utils.sh
+~/tsr2/cluster_6/tsr2-assembly-1.0.0-SNAPSHOT/sbin/kafka-utils.sh
+
+

kafka-utils.sh is enabled only after 'CONSUMER_GROUP_ID' is set.

+
[C:6][ltdb@d205 ~]$ kafka-utils.sh help
+Please, set $CONSUMER_GROUP_ID first.
+
+

Set 'CONSUMER_GROUP_ID' in 'kafka-utils.sh'.

+
#!/bin/bash
+
+CONSUMER_GROUP_ID='nvkvs_redis_connector'  # Need to modify
+KAFKA_SERVER=localhost:9092
+ZOOKEEPER_SERVER=localhost:2181...
+
+
[C:6][ltdb@d205 ~/kafka/config]$ kafka-utils.sh help
+kafka-utils.sh offset-check
+kafka-utils.sh offset-monitor
+kafka-utils.sh offset-earliest topic_name
+kafka-utils.sh offset-latest topic_name
+kafka-utils.sh offset-move topic_name 10000
+kafka-utils.sh error-monitor error_topic_name
+kafka-utils.sh consumer-list
+kafka-utils.sh topic-check topic_name
+kafka-utils.sh topic-create topic_name 10
+kafka-utils.sh topic-delete topic_name
+kafka-utils.sh topic-config-check topic_name
+kafka-utils.sh topic-config-set topic_name config_name config_value
+kafka-utils.sh topic-config-remove topic_name config_name
+kafka-utils.sh topic-list
+kafka-utils.sh message-earliest topic_name
+kafka-utils.sh message-latest topic_name
+
+

If a command needs arguments, an error message like the ones below is shown.

+
[C:6][ltdb@d205 ~/kafka/config]$ kafka-utils.sh offset-move 
+Please, specify topic name & the size of moving offset (ex) kafka-utils.sh offset-move my-topic 100
+[C:6][ltdb@d205 ~/kafka/config]$ kafka-utils.sh topic-create
+Please, specify topic name and its partition count. (ex) kafka-utils.sh topic-create topic-new 10
+[C:6][ltdb@d205 ~/kafka/config]$
+
+

For example,

+
[C:6][ltdb@d205 ~]$ kafka-utils.sh message-earliest nvkvs3
+20160711055950|ELG|2635055200|34317|5|6091|1|25|0|11|0|100.0|0.0|0|2846|3|33|0|5|0|-1000|0.0|0.0|94932|1027|0|176|35.2|40|0|7818000000|109816071|10|0|6000000.0|164843|2.75|0|2592|6000000|0.04|1288488|1303|1338|0|530|1|88.33|0|721|67948|428|0|1|108|108.0|108|0|0.0|0|0|0|-1000|1|1|100.0|62|39.0|62.9|23.0|37.1|0|0|0|0|29|10|-7022851.0|59998.0|-117.05|-6865443.5|59998.0|-114.43|4|198060.0|59998.0|22.5|3.3|0|1|5.82|3|1.94||0|0|0|0|0|0|0|0|4|0|0|0|15|14|231|140|0|0|0|0|0|0|0|0|4|0|0|0|15|13|174|110|1|0|0|0|0|0|0|0|0|0|0|0|0|0|1|0|0|0|0|0|0|0|1|0|0|0|0|0|0|0|0|0|0.0|0.0|0.0|0.0|0.0|0.0|570.0|0.0|3.0|0.0|0.0|0.0|0.0|2.0|3.0|3.0|0.0|15.73|0.0|0.0|0.0|0.0|0.0|12.0|22.0|68.0|83.0|339.0|205.0|144.0|54.0|38.0|12.0|0.0|0.0|0.0|0.0|0.0|0.0|100.0|50.55|1:22,2:7|1.0|||||1:1,17:1,23:1|13.67|0|0|0.0|0.0|-1000||-1000||-1000|11|2|05
+Processed a total of 1 messages
+
+
+[C:6][ltdb@d205 ~]$ kafka-utils.sh topic-list
+__consumer_offsets
+nvkvs3
+topic-error
+topic_name
+
+
+[C:6][ltdb@d205 ~]$ kafka-utils.sh topic-create ksh 18
+Created topic ksh.
+
+
+[C:6][ltdb@d205 ~]$ kafka-utils.sh topic-check  ksh
+Topic:ksh   PartitionCount:18   ReplicationFactor:2 Configs:
+    Topic: ksh  Partition: 0    Leader: 1   Replicas: 1,3   Isr: 1,3
+    Topic: ksh  Partition: 1    Leader: 2   Replicas: 2,1   Isr: 2,1
+    Topic: ksh  Partition: 2    Leader: 3   Replicas: 3,2   Isr: 3,2
+    Topic: ksh  Partition: 3    Leader: 1   Replicas: 1,2   Isr: 1,2
+    Topic: ksh  Partition: 4    Leader: 2   Replicas: 2,3   Isr: 2,3
+    Topic: ksh  Partition: 5    Leader: 3   Replicas: 3,1   Isr: 3,1
+    Topic: ksh  Partition: 6    Leader: 1   Replicas: 1,3   Isr: 1,3
+    Topic: ksh  Partition: 7    Leader: 2   Replicas: 2,1   Isr: 2,1
+    Topic: ksh  Partition: 8    Leader: 3   Replicas: 3,2   Isr: 3,2
+    Topic: ksh  Partition: 9    Leader: 1   Replicas: 1,2   Isr: 1,2
+    Topic: ksh  Partition: 10   Leader: 2   Replicas: 2,3   Isr: 2,3
+    Topic: ksh  Partition: 11   Leader: 3   Replicas: 3,1   Isr: 3,1
+    Topic: ksh  Partition: 12   Leader: 1   Replicas: 1,3   Isr: 1,3
+    Topic: ksh  Partition: 13   Leader: 2   Replicas: 2,1   Isr: 2,1
+    Topic: ksh  Partition: 14   Leader: 3   Replicas: 3,2   Isr: 3,2
+    Topic: ksh  Partition: 15   Leader: 1   Replicas: 1,2   Isr: 1,2
+    Topic: ksh  Partition: 16   Leader: 2   Replicas: 2,3   Isr: 2,3
+    Topic: ksh  Partition: 17   Leader: 3   Replicas: 3,1   Isr: 3,1
+
+ + + + + + + + + + + \ No newline at end of file diff --git a/manage-failover/index.html b/manage-failover/index.html new file mode 100644 index 0000000..0de5f92 --- /dev/null +++ b/manage-failover/index.html @@ -0,0 +1,1279 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + Failover with LightningDB - Lightning DB Docs + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

Note

+

This document describes how to use the 'flashbase' script for failover. With LTCLI, you can check the failure status and operate Lightning DB more easily and powerfully. Therefore, if possible, we recommend LTCLI rather than the 'flashbase' script.

+
+

1. Prerequisite

+

1) Redis

+
    +
  • Check 'flashbase cluster-rowcount'
  • +
  • Check 'flashbase cli-all config get flash-db-ttl'
  • +
  • Check 'flashbase cli-all info keyspace' // 'memKeys'(the number of in-memory data keys)
  • +
  • Check 'flashbase cli-all info tablespace' // 'totalRowgroups', 'totalRows'
  • +
  • Check 'flashbase cli-all info eviction' // 'avg full percent'
  • +
+
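A minimal sketch of saving these values to timestamped files so they can be compared after the failover; the file names are arbitrary:

    > flashbase cluster-rowcount > rowcount_$(date +%Y%m%d%H%M).log
+    > flashbase cli-all config get flash-db-ttl > flash-db-ttl_$(date +%Y%m%d%H%M).log
+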

2) Thriftserver

+
    +
  • Check cron jobs with 'crontab -e'.
  • +
  • Check table schema and query.
  • +
+
select * from {table name} where ... limit 1;
+
+

3) System resources

+
    +
  • Check available memory (nmon, 'free -h')
  • +
  • Check the status of disks (nmon, 'df -h')
  • +
+

2. Check the status and failover

+

1) Background

+
    +
  • If a redis-server is killed, the status of the node is changed to 'disconnected'.
  • +
+
543f81b6c5d6e29b9871ddbbd07a4524508d27e5 127.0.0.1:18202 master - 1585787616744 1585787612000 0 disconnected
+
+
    +
  • After a single node detects that a redis-server is disconnected, the status of the redis-server is changed to pFail.
  • +
  • After all nodes in the cluster detect that the node is disconnected, the status of the redis-server is changed to Fail.
  • +
  • If the node is replicated, its slave is failed over.
  • +
  • With 'cluster-failover.sh', you can perform failover regardless of the status (pFail/Fail).
  • +
+
543f81b6c5d6e29b9871ddbbd07a4524508d27e5 127.0.0.1:18202 master,fail - 1585787616744 1585787612000 0 disconnected
+
+
    +
  • If the node-{port}.conf file is lost because of a disk failure, the redis-server using that conf file creates a new uuid.
  • +
  • Because the previous uuid in the cluster is lost, the uuid is changed to noaddr. This noaddr uuid should be removed with the cluster forget command.
  • +
+
// previous uuid of 18202
+543f81b6c5d6e29b9871ddbbd07a4524508d27e5 :0 master,fail,noaddr - 1585787799235 1585787799235 0 disconnected
+
+// new uuid of 18202
+001ce4a87de2f2fc62ff44e2b5387a3f0bb9837c 127.0.0.1:18202 master - 0 1585787800000 0 connected
+
+
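For reference, such a noaddr uuid can also be removed manually with the raw Redis CLUSTER FORGET command (the 'forget-noaddr' command described later wraps this); the uuid below is the example one above:

    > flashbase cli-all cluster forget 543f81b6c5d6e29b9871ddbbd07a4524508d27e5
+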

2) Check the status of the cluster

+

1) check-distribution

+

Show the distribution of master/slave in each server.

+
> flashbase check-distribution
+check distribution of masters/slaves...
+SERVER NAME | M | S
+--------------------------------
+127.0.0.1   | 5 | 3
+--------------------------------
+Total nodes | 5 | 3
+
+

2) find-masters

+
    +
  • options
  • +
+
> flashbase find-masters
+Use options(no-slave|no-slot|failovered)
+
+
    +
  • no-slave (masters without slaves. The failbacked slaves need to be added to these nodes)
  • +
+
> flashbase find-masters no-slave
+127.0.0.1:18203
+127.0.0.1:18252
+
+
    +
  • no-slot (masters not yet added to the cluster, or masters without any slot)
  • +
+
> flashbase find-masters no-slot
+127.0.0.1:18202
+127.0.0.1:18253
+
+
    +
  • failovered (When the cluster was initialized, this node was a slave. Now it is a master because of failover)
  • +
+
> flashbase find-masters failovered
+127.0.0.1:18250
+127.0.0.1:18252
+127.0.0.1:18253
+
+

3) find-slaves

+
    +
  • options
  • +
+
flashbase find-slaves
+Use options(failbacked)
+
+
    +
  • failbacked (When the cluster was initialized, this node was a master. Now it is a slave because of failback)
  • +
+
> flashbase find-slaves failbacked
+127.0.0.1:18200
+
+

4) find-masters-with-dir

+
    +
  • List the redis-servers that use the disk with the HW fault.
  • +
  • After the HW fault, some of these nodes are already killed and the others will be killed within a few minutes.
  • +
+
> flashbase find-masters-with-dir
+Error) Invalid arguments.
+ex. 'flashbase find-masters-with-dir 127.0.0.1 /DATA01/nvkvs/nvkvs'
+
+> flashbase find-masters-with-dir 127.0.0.1 /nvdrive0/ssd_01/nvkvs/nvkvs
+18200
+18204
+
+

3) How to handle HW fault(in case of replication)

+

1) cluster-failover.sh

+

If some redis-servers are disconnected (killed/paused), you can perform failover immediately and return the status of the cluster to 'ok'.

+

2) find-nodes-with-dir / find-masters-with-dir / failover-with-dir / kill-with-dir

+
    +
  • List all nodes or masters that are using the disk with the HW fault.
  • +
+
> flashbase find-masters-with-dir
+Error) Invalid arguments.
+ex. 'flashbase find-masters-with-dir 127.0.0.1 /DATA01/nvkvs/nvkvs'
+
+> flashbase find-masters-with-dir 127.0.0.1 /nvdrive0/ssd_02/nvkvs/nvkvs
+18200
+18204
+
+
    +
  • Perform failover so that each master using the faulty disk becomes a slave
  • +
+
> failover-with-dir 127.0.0.1 /nvdrive0/ssd_02/nvkvs/nvkvs
+127.0.0.1:18250 will be master
+127.0.0.1:18254 will be master
+OK
+
+
    +
  • With kill-with-dir, kill all nodes that use the faulty disk.
  • +
+
> flashbase kill-with-dir 127.0.0.1 /nvdrive0/ssd_02/nvkvs/nvkvs
+flashbase kill 18200
+flashbase kill 18204
+flashbase kill 18253
+
+> flashbase cli-all ping
+redis client for 127.0.0.1:18200
+Could not connect to Redis at 127.0.0.1:18200: Connection refused
+redis client for 127.0.0.1:18201
+PONG
+redis client for 127.0.0.1:18202
+PONG
+redis client for 127.0.0.1:18203
+PONG
+redis client for 127.0.0.1:18204
+Could not connect to Redis at 127.0.0.1:18204: Connection refused
+redis client for 127.0.0.1:18250
+PONG
+redis client for 127.0.0.1:18251
+PONG
+redis client for 127.0.0.1:18252
+PONG
+redis client for 127.0.0.1:18253
+Could not connect to Redis at 127.0.0.1:18253: Connection refused
+redis client for 127.0.0.1:18254
+PONG
+
+

3) find-noaddr / forget-noaddr

+
    +
  • Remove 'noaddr' node
  • +
+
> flashbase find-noaddr  // The prev uuid. Now not used anymore.
+1b5d70b57079a4549a1d2e8d0ac2bd7c50986372 :0 master,fail,noaddr - 1589853266724 1589853265000 1 disconnected
+
+> flashbase forget-noaddr // Remove the 'noaddr' uuid.
+(error) ERR Unknown node 1b5d70b57079a4549a1d2e8d0ac2bd7c50986372  // Because newly added node does not know the previous uuid.
+OK
+OK
+OK
+OK
+
+> flashbase find-noaddr // Check that the noaddr uuid is removed
+
+
+

4) do-replicate

+
    +
  • First of all, make the master/slave pairs. If there are many nodes to replicate, pairing.py is helpful.
  • +
+
> flashbase find-noslot > slaves
+
+> flashbase find-noslave > masters
+
+> python pairing.py slaves masters
+flashbase do-replicate 192.168.0.2:19003 192.168.0.4:19053
+flashbase do-replicate 192.168.0.2:19004 192.168.0.4:19054
+flashbase do-replicate 192.168.0.2:19005 192.168.0.4:19055
+...
+
+
    +
  • Add a no-slot master as the slave of a no-slave master (replicate)
  • +
+
> flashbase do-replicate 127.0.0.1:18202 127.0.0.1:18252
+Add 127.0.0.1:18202 as slave of master(127.0.0.1:18252)
+OK
+
+> flashbase cli -p 18202 info replication
+# Replication
+role:slave
+master_host:127.0.0.1
+master_port:18252
+master_link_status:down
+master_last_io_seconds_ago:-1
+master_sync_in_progress:0
+slave_repl_offset:1
+master_link_down_since_seconds:1585912329
+slave_priority:100
+slave_read_only:1
+connected_slaves:0
+master_repl_offset:0
+repl_backlog_active:0
+repl_backlog_size:1048576
+repl_backlog_first_byte_offset:0
+repl_backlog_histlen:0
+
+> flashbase do-replicate 127.0.0.1:18253 127.0.0.1:18203
+Add 127.0.0.1:18253 as slave of master(127.0.0.1:18203)
+OK
+
+> flashbase cli -p 18253 info replication
+# Replication
+role:slave
+master_host:127.0.0.1
+master_port:18203
+master_link_status:up
+master_last_io_seconds_ago:5
+master_sync_in_progress:0
+slave_repl_offset:29
+slave_priority:100
+slave_read_only:1
+connected_slaves:0
+master_repl_offset:0
+repl_backlog_active:0
+repl_backlog_size:1048576
+repl_backlog_first_byte_offset:0
+repl_backlog_histlen:0
+
+

If the slave candidate is not included in the cluster, 'do-replicate' is done after 'cluster meet'.

+
> flashbase do-replicate 127.0.0.1:18252 127.0.0.1:18202
+Add 127.0.0.1:18252 as slave of master(127.0.0.1:18202)
+Fail to get masters uuid
+'cluster meet' is done
+OK // 'cluster meet' is done successfully
+OK // 'cluster replicate' is done successfully
+
+

5) reset-distribution

+

To initialize the node distribution, use 'reset-distribution'.

+
// Check the distribution of cluster nodes.
+> flashbase check-distribution
+check distribution of masters/slaves...
+SERVER NAME | M | S
+--------------------------------
+192.168.111.35 | 4 | 4
+192.168.111.38 | 0 | 8
+192.168.111.41 | 8 | 0
+--------------------------------
+Total nodes | 12 | 12
+
+...
+
+> flashbase reset-distribution
+192.168.111.38:20600
+OK
+192.168.111.38:20601
+OK
+192.168.111.38:20602
+OK
+192.168.111.38:20603
+OK
+
+...
+
+// Check the distribution of cluster nodes again.
+> flashbase check-distribution
+check distribution of masters/slaves...
+SERVER NAME | M | S
+--------------------------------
+192.168.111.35 | 4 | 4
+192.168.111.38 | 4 | 4
+192.168.111.41 | 4 | 4
+--------------------------------
+Total nodes | 12 | 12
+
+

6) force-failover

+

When a server needs to be shut down because of a HW fault or for maintenance, change all masters in the server to slaves by failing over their slaves.

+
> flashbase check-distribution
+check distribution of masters/slaves...
+SERVER NAME | M | S
+--------------------------------
+192.168.111.35 | 4 | 4
+192.168.111.38 | 4 | 4
+192.168.111.41 | 4 | 4
+--------------------------------
+Total nodes | 12 | 12
+
+> flashbase force-failover 192.168.111.41
+all masters in 192.168.111.41 will be slaves and their slaves will promote to masters
+192.168.111.35:20651 node will be master!
+OK
+192.168.111.38:20651 node will be master!
+OK
+192.168.111.35:20653 node will be master!
+OK
+192.168.111.38:20653 node will be master!
+OK
+
+> flashbase check-distribution
+check distribution of masters/slaves...
+SERVER NAME | M | S
+--------------------------------
+192.168.111.35 | 6 | 2
+192.168.111.38 | 6 | 2
+192.168.111.41 | 0 | 5
+--------------------------------
+Total nodes | 12 | 9
+
+

4) How to handle HW fault(in case of no replication)

+

After disk replacement, nodes-{port number}.conf is lost.

+

Therefore a new uuid is generated after restart.

+

Because the previous uuid in the cluster is lost, the uuid is changed to noaddr. This noaddr uuid should be removed with the cluster forget command.

+

Because the restarted node with the new uuid has no slot information, a slot range should be assigned by using 'addslots'.

+

1) Find noaddr node and check its slot range.

+
> flashbase find-noaddr
+7c84d9bb36ae3fa4caaf75318b59d3d2f6c7e9d8 :0 master,fail,noaddr - 1596769266377 1596769157081 77 disconnected 13261-13311 // '13261-13311' is the lost slot range.
+
+

2) Add the slot range to the restarted node.

+
> flashbase cli -h 192.168.111.35 -p 18317 cluster addslots {13261..13311}
+
+

3) Increase the epoch of the node and update the cluster information.

+
> flashbase cli -h 192.168.111.35 -p 18317 cluster bumpepoch
+BUMPED 321
+
+

4) Remove the noaddr node.

+
> flashbase forget-noaddr
+
+

3. Check the status

+

1) Redis

+
    +
  • Compare 'flashbase cluster-rowcount' with the previous result.
  • +
  • Compare 'flashbase cli-all config get flash-db-ttl' with the previous result.
  • +
  • flashbase cli-all cluster info | grep state:ok | wc -l
  • +
  • flashbase cli -h {ip} -p {port} cluster nodes
  • +
  • flashbase cli-all info memory | grep isOOM:true
  • +
+
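A hedged sketch of running the last two checks and what to expect; the expected results are assumptions based on a healthy cluster:

    > flashbase cli-all cluster info | grep state:ok | wc -l    # expected: one line per running redis-server
+    > flashbase cli-all info memory | grep isOOM:true           # expected: no output
+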

2) yarn & spark

+
    +
  • Check the web UI or 'yarn application -list'.
  • +
  • In case of spark,
      +
    • Remove the disk with the HW fault from 'spark.local.dir' in spark-default.conf and restart the thriftserver.
    • +
    +
  • +
+

3) Thriftserver

+
    +
  • Check cron jobs with 'crontab -e'.
  • +
  • Check table schema and query.
  • +
+
select * from {table name} where ... limit 1;
+
+

4) kafka & kaetlyn

+
    +
  • kafka-utils.sh help // list up options
  • +
  • kafka-utils.sh topic-check {topic name} // Check the distribution of Leaders
  • +
  • kafka-utils.sh offset-check // Consumer LAG of each partition
  • +
+

5) System resources

+
    +
  • Check available memory
  • +
  • Check the status of disks
  • +
+ + + + + + + + + + + \ No newline at end of file diff --git a/prerequisite/index.html b/prerequisite/index.html new file mode 100644 index 0000000..149d1a0 --- /dev/null +++ b/prerequisite/index.html @@ -0,0 +1,973 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + Prerequisite - Lightning DB Docs + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

Note

+

This page describes how to start LightningDB automatically, only for the case of an AWS EC2 Instance.

+
+

1. Create EC2 Instance

+

The Amazon Machine Image (AMI) for LightningDB can be found in 'AWS Marketplace', and users can create an EC2 Instance with this AMI.

+

aws marketplace

+

To use LightningDB in a new Instance, the size of the root volume should be at least 15 GiB.

+

To use the Web UI of HDFS, YARN, Spark, and Zeppelin, you should add the following ports under 'Edit inbound rules' in the 'Security groups' of the EC2 Instance.

Service    Port
HDFS       50070
YARN       8088
Spark      4040
Zeppelin   8080
+
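For reference, a hedged sketch of opening one of these ports with the AWS CLI instead of the console; the security group id is a placeholder and the CIDR should be narrowed to your own network:

    $ aws ec2 authorize-security-group-ingress --group-id sg-0123456789abcdef0 --protocol tcp --port 8080 --cidr 203.0.113.0/24   # Zeppelin; repeat for 50070, 8088 and 4040
+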

2. Access EC2 Instance

+

Create an EC2 Instance for LightningDB and access it with its 'Public IP' or 'Public DNS'.

+

A '*.pem' key file is also required to access the EC2 Instance.

+
$ ssh -i /path/to/.pem ec2-user@${IP_ADDRESS}
+
+
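If ssh rejects the key with an 'UNPROTECTED PRIVATE KEY FILE' warning, tighten the permissions of the '*.pem' file first (standard ssh behavior, not specific to LightningDB):

    $ chmod 400 /path/to/.pem
+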

3. Setup environment

+

When you access the EC2 Instance, the following jobs are already done.

+
    +
  • Create and exchange SSH KEY for user authentication
  • +
  • Mount disks
  • +
+
+

Warning

+

Before starting LightningDB, please check that the disk mounts are complete using 'lsblk' as shown below.

+
+
[ec2-user@ip-172-31-34-115 ~]$ lsblk
+NAME    MAJ:MIN RM  SIZE RO TYPE MOUNTPOINT
+xvda    202:0    0   10G  0 disk
+└─xvda1 202:1    0   10G  0 part /
+nvme0n1 259:0    0  1.7T  0 disk /nvme/data_01
+nvme1n1 259:1    0  1.7T  0 disk /nvme/data_02
+nvme3n1 259:2    0  1.7T  0 disk /nvme/data_03
+nvme2n1 259:3    0  1.7T  0 disk /nvme/data_04
+
+
    +
  • Set Hadoop configurations (core-site.xml, hdfs-site.xml, yarn-site.xml).
      +
    • These settings are default values for getting started with Hadoop.
    • +
    • To optimize resources or performance, users need to modify some settings; refer to Hadoop Get Started.
    • +
    +
  • +
  • Set Spark configuration (spark-default.conf.template).
      +
    • To optimize resources and performance, users also need to modify some settings; refer to Spark Configuration.
    • +
    +
  • +
+
+

Tip

+

To launch a Spark application on YARN, start HDFS and YARN by running 'start-dfs.sh' and 'start-yarn.sh' in order.

+
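A minimal sketch, assuming the standard Hadoop layout and that HADOOP_HOME points to the Hadoop installation:

    $ $HADOOP_HOME/sbin/start-dfs.sh
+    $ $HADOOP_HOME/sbin/start-yarn.sh
+    # Check that the NameNode/DataNode and ResourceManager/NodeManager processes are running
+    $ jps
+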
+

4. Start LightningDB

+

LightningDB provides LTCLI, which is introduced in Installation. With LTCLI, you can deploy and use LightningDB.

+

LightningDB supports Zeppelin for convenient ingestion and querying of LightningDB data. The 'Try out with Zeppelin' page provides some guides.

+ + + + + + + + + + + \ No newline at end of file diff --git a/release-note/index.html b/release-note/index.html new file mode 100644 index 0000000..dbdd04f --- /dev/null +++ b/release-note/index.html @@ -0,0 +1,979 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + Release Notes - Lightning DB Docs + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

1. Recommended Version

+

LightningDB ver 1.4.21

+

2. Release Notes

+

Ver 2.0.0 CXL-CMS

+
    +
  • Date: 2024.03.21
  • +
  • Download: LightningDB ver 2.0.0 CXL-CMS
  • +
  • License: free
  • +
  • Description
      +
    • Support Interfaces for CXL memory and CMS of SK Hynix
    • +
    +
  • +
+

Ver 2.0.0

+
    +
  • Date: 2024.01.11
  • +
  • Download: LightningDB ver 2.0.0
  • +
  • License: free
  • +
  • Description
      +
    • Support ANN (Approximate Nearest Neighbor) Search
    • +
    • Support Thunderquery CLI without Spark
    • +
    • Use Apache Spark 3.x and CatalogPlugin
    • +
    • Use Apache Arrow
    • +
    +
  • +
+

Ver 1.4.3

+
    +
  • Date: 2024.03.21
  • +
  • Download: LightningDB ver 1.4.3
  • +
  • License: free
  • +
  • Description
      +
    • Support Kubernetes
    • +
    +
  • +
+

Ver 1.3.1

+
    +
  • Date: 2021.06.03
  • +
  • Download: LightningDB ver 1.3.1
  • +
  • License: free
  • +
  • Description
      +
    • Support Exact KNN Search
    • +
    • Recommended Spark version: 2.3.4
    • +
    • Support the permanent storage
    • +
    +
  • +
+

Ver 1.2.3

+
    +
  • Date: 2020.07.21
  • +
  • Download: LightningDB ver 1.2.3
  • +
  • License: free
  • +
  • Description
      +
    • Nondisruptive scale-out(Adding new nodes)
    • +
    • Aggregation Pushdown
    • +
    • Optimize memory usage about clustering
    • +
    • Adaptive RDD partitioning scheme by filters('node base' or 'redis key base')
    • +
    • Support geoSpatial queries based on OGC standards
    • +
    • Update OSGeo repository address
    • +
    +
  • +
+

Ver 1.0

+
    +
  • Date: 2019.11.20
  • +
  • Download: LightningDB ver 1.0
  • +
  • License: free
  • +
  • Description
      +
    • Initial version
    • +
    • Support LTCLI
    • +
    • Support geoSpatial functions
    • +
    +
  • +
+
+
+
    +
  1. +

    Right-click to copy the link address, and paste it when you deploy LightningDB in LTCLI.

    +
  2. +
+
+ + + + + + + + + + + \ No newline at end of file diff --git a/scripts/ddl_fb_test_101.sql b/scripts/ddl_fb_test_101.sql new file mode 100644 index 0000000..e4df476 --- /dev/null +++ b/scripts/ddl_fb_test_101.sql @@ -0,0 +1,13 @@ +drop table if exists fb_test; +create table if not exists fb_test +(user_id string, +name string, +company string, +country string, +event_date string, +data1 string, +data2 string, +data3 string, +data4 string, +data5 string) +using r2 options (table '101', host 'localhost', port '18102', partitions 'user_id country event_date', mode 'nvkvs', group_size '10', query_result_partition_cnt_limit '40000', query_result_task_row_cnt_limit '10000', query_result_total_row_cnt_limit '100000000'); diff --git a/scripts/deploy-flashbase.sh b/scripts/deploy-flashbase.sh new file mode 100644 index 0000000..3656c44 --- /dev/null +++ b/scripts/deploy-flashbase.sh @@ -0,0 +1,52 @@ +#nodes=("flashbase01" "flashbase02" "flashbase03" "flashbase04" "flashbase05" "flashbase06") +nodes=( "localhost") + +INSTALLER_PATH=$1 +shift + +[[ $INSTALLER_PATH == "" ]] && echo "NO ARGS" && echo "cmd " && exit 1 +[[ ! -e $INSTALLER_PATH ]] && echo "NO FILE: $INSTALLER_PATH" && exit 1 + +INSTALLER_BIN=$(basename $INSTALLER_PATH) +DATEMIN=`date +%Y%m%d%H%M%S` +TSR2_DIR=~/tsr2 +echo "DATEMIN: $DATEMIN" +echo "INSTALLER PATH: $INSTALLER_PATH" +echo "INSTALLER NAME: $INSTALLER_BIN" + +for cluster_num in $@; +do + CLUSTER_DIR=$TSR2_DIR/cluster_${cluster_num} + BACKUP_DIR="${CLUSTER_DIR}_bak_$DATEMIN" + CONF_BACKUP_DIR="${CLUSTER_DIR}_conf_bak_$DATEMIN" + SR2_HOME=${CLUSTER_DIR}/tsr2-assembly-1.0.0-SNAPSHOT + SR2_CONF=${SR2_HOME}/conf + + echo "======================================================" + echo "DEPLOY CLUSTER $cluster_num" + echo "" + echo "CLUSTER_DIR: $CLUSTER_DIR" + echo "SR2_HOME: $SR2_HOME" + echo "SR2_CONF: $SR2_CONF" + echo "BACKUP_DIR: $BACKUP_DIR" + echo "CONF_BACKUP_DIR: $CONF_BACKUP_DIR" + echo "======================================================" + echo "backup..." 
+ mkdir -p ${CONF_BACKUP_DIR} + cp -rf ${SR2_CONF}/* $CONF_BACKUP_DIR + + echo "" + + for node in ${nodes[@]}; + do + echo "DEPLOY NODE $node" + ssh $node "mv ${CLUSTER_DIR} ${BACKUP_DIR}" + ssh $node "mkdir -p ${CLUSTER_DIR}" + scp -r $INSTALLER_PATH $node:${CLUSTER_DIR} + ssh $node "PATH=${PATH}:/usr/sbin; ${CLUSTER_DIR}/${INSTALLER_BIN} --full ${CLUSTER_DIR}" + rsync -avr $CONF_BACKUP_DIR/* $node:${SR2_CONF} + done + + echo "" +done + diff --git a/scripts/pairing.py b/scripts/pairing.py new file mode 100644 index 0000000..b98f132 --- /dev/null +++ b/scripts/pairing.py @@ -0,0 +1,61 @@ +import sys + +if len(sys.argv) < 3: + print("Usage: " + sys.argv[0] + " [masters' path] [slaves' path]") + sys.exit(1) + +masters = [x.strip() for x in open(sys.argv[1])] +slaves = [x.strip() for x in open(sys.argv[2])] + +master_dict = {} +slave_dict = {} + +for master in masters: + split = master.split(":") + if master_dict.get(split[0], "failed") == "failed": + master_dict[split[0]] = [split[1]] + else: + master_dict[split[0]].append(split[1]) + +for slave in slaves: + split = slave.split(":") + if slave_dict.get(split[0], "failed") == "failed": + slave_dict[split[0]] = [split[1]] + else: + slave_dict[split[0]].append(split[1]) + +for node in master_dict.keys(): + for slave_node in slave_dict.keys(): + if node != slave_node and len(master_dict[node]) == len(slave_dict[slave_node]): + master_dict[node].sort() + slave_dict[slave_node].sort() + for i in range(len(master_dict[node])): + print("flashbase do-replicate " + slave_node + ":" + slave_dict[slave_node].pop(0) + " " + node + ":" + + master_dict[node].pop(0)) + break + +for node in master_dict.keys(): + if len(master_dict[node]) == 0: + master_dict.pop(node) + +for node in slave_dict.keys(): + if len(slave_dict[node]) == 0: + slave_dict.pop(node) + +master_list = master_dict.keys() +slave_list = slave_dict.keys() + +master_list.sort(key = lambda x : len(master_dict[x]), reverse=True) +slave_list.sort(key = lambda x : len(master_dict[x]), reverse=True) + +for node in master_list: + for slave_node in slave_list: + if node != slave_node and len(slave_dict[slave_node]) > 0: + master_dict[node].sort() + slave_dict[slave_node].sort() + size = len(master_dict[node]) if len(master_dict[node]) < len(slave_dict[slave_node]) else len(slave_dict[slave_node]) + for i in range(size): + print("flashbase do-replicate " + slave_node + ":" + slave_dict[slave_node].pop(0) + " " + node + ":" + + master_dict[node].pop(0)) + if len(master_dict[node]) == 0: + break diff --git a/scripts/test.json b/scripts/test.json new file mode 100644 index 0000000..a554568 --- /dev/null +++ b/scripts/test.json @@ -0,0 +1,9 @@ + { + "endpoint": "127.0.0.1:18101", + "id": "101", + "columns": 10, + "partitions": [ + 3, 0 + ], + "rowStore" : true + } diff --git a/scripts/test_data.csv b/scripts/test_data.csv new file mode 100644 index 0000000..e5d516c --- /dev/null +++ b/scripts/test_data.csv @@ -0,0 +1,100 @@ +171128|Maite, Ava, Chancellor, Abdul|Eu Erat Institute|Japan|1620122681899|JNH97HVS1XE|1|1|eros nec tellus. Nunc lectus|sed leo. Cras vehicula aliquet libero. Integer +190105|Silas, Demetrius, Evelyn, Jessamine|Pede Ultrices Institute|Djibouti|1693051552499|OUT72QHJ3RR|2|2|Lorem ipsum dolor sit amet,|habitant morbi tristique senectus et netus et +190406|Lance, Ann, Amy, Wynter|Sed Pharetra Inc.|Serbia|1683070290199|EBH91ZLL9DN|3|10|sed libero. 
Proin sed|Donec consectetuer mauris id +190116|Audrey, Latifah, Lesley, Branden|Egestas Fusce Aliquet Corporation|Papua New Guinea|1666031514199|CLM05OKD6JU|4|7|Ut nec urna et arcu|ut aliquam iaculis, lacus pede sagittis augue, eu tempor erat +180927|Denton, Sloane, Arsenio, Garth|Sed Eu Eros Limited|Finland|1621061102299|USO29JIO4FC|5|9|in faucibus orci luctus et ultrices posuere cubilia Curae;|Donec sollicitudin adipiscing ligula. +180818|Maite, Christian, Tad, Illiana|Ultrices Corp.|Mexico|1681090140399|PUY52MAG4EK|6|5|ut quam|Fusce feugiat. Lorem +181126|Evan, Hakeem, Paki, Dolan|Amet Risus Foundation|Sierra Leone|1636091070099|AQX29EAH8WO|7|7|dictum. Proin eget odio. Aliquam vulputate ullamcorper magna.|ac turpis egestas. Aliquam fringilla +180918|Elijah, Giselle, Kaseem, Darius|Convallis Erat Eget Foundation|Kuwait|1635071957399|SNB89ZHM0BH|8|9|Nullam|Phasellus ornare. Fusce mollis. Duis sit +190418|Chester, Gwendolyn, Rafael, Celeste|Nec Diam Duis Industries|Chile|1684020225199|QPG40FFD5XK|9|7|et magnis dis parturient montes, nascetur ridiculus mus. Proin|Nulla tempor +171203|Azalia, Otto, Emmanuel, Gabriel|Mattis Semper LLP|Guinea|1603100541199|BGO88QJG7KP|10|4|Donec non justo. Proin|velit egestas lacinia. Sed congue, elit +170930|Brenna, Quintessa, Axel, Hammett|Id Company|Afghanistan|1692120964999|PXV42MXT5EW|11|3|bibendum sed, est. Nunc laoreet|Phasellus ornare. Fusce mollis. Duis sit amet +180320|Tatum, Eliana, Iola, Colby|Mi Eleifend Egestas Institute|American Samoa|1680111178699|QYV83PRB0JW|12|8|tortor at risus. Nunc ac sem ut dolor|non, lobortis quis, pede. Suspendisse dui. Fusce diam +190405|Isaiah, Ruby, Yael, Abra|Orci Ut Consulting|Kenya|1686080698599|QUH92GBD9HB|13|8|est, vitae sodales nisi magna sed dui. Fusce aliquam, enim|magna a +171102|Theodore, Holly, Carter, Fulton|Nisi Nibh Lacinia Industries|Chile|1617010935199|ZEK46GWB7HN|14|5|lacus. Quisque purus sapien,|tempor lorem, eget mollis lectus pede et risus. Quisque libero +171208|Alfonso, Clarke, Kenyon, Bradley|Euismod Enim Inc.|Andorra|1662030155399|BYY56LOO3YZ|15|2|nisl. Quisque fringilla euismod enim. Etiam gravida molestie|eu +170810|Halla, Alec, Madeline, Georgia|At Risus Nunc Limited|Northern Mariana Islands|1641080547999|UGU76UDB5UU|16|7|orci quis lectus. Nullam suscipit, est ac facilisis facilisis, magna|tellus sem mollis dui, in sodales elit erat vitae risus. +180520|Dalton, Fuller, Leroy, Sebastian|Tristique Incorporated|Lebanon|1648050813699|YLP33GJC2EM|17|4|justo nec ante. Maecenas mi felis, adipiscing fringilla, porttitor vulputate,|ipsum non arcu. Vivamus sit amet risus. Donec egestas. Aliquam +190718|Kenyon, Jeremy, Hedda, Wayne|Sit Consulting|Saint Martin|1655052520899|ZKV28BVO2UJ|18|1|sed pede nec ante blandit viverra. Donec tempus, lorem|a purus. Duis elementum, dui quis accumsan convallis, ante lectus +180921|Sloane, Avram, Sawyer, Colorado|Pede Cum Sociis Foundation|Philippines|1657101795199|SQE79QFG1UI|19|2|natoque penatibus et magnis dis parturient|egestas a, scelerisque sed, sapien. +190605|Perry, Noble, Craig, Phoebe|Et Limited|Antarctica|1626110233099|DSC12RLW5QQ|20|5|Suspendisse ac metus vitae velit egestas lacinia. Sed|eu tempor erat +180901|Ori, Keith, Trevor, William|Vulputate Mauris Company|Viet Nam|1616011192799|OAA66YCK6OR|21|8|Ut|risus. Nunc ac sem ut +181125|Marvin, Berk, Connor, Britanney|Donec Dignissim Magna Company|Djibouti|1662041755899|TOY60WWP1BV|22|10|Morbi metus. Vivamus euismod urna. Nullam|mollis vitae, posuere at, velit. 
Cras lorem lorem, luctus +170812|Kiayada, Hilel, Hop, Erin|At Libero Institute|Australia|1606012554099|TDJ85TIS5DH|23|2|faucibus orci luctus et ultrices posuere cubilia Curae; Phasellus ornare.|parturient montes, nascetur ridiculus mus. Donec dignissim magna +190515|Edan, Hiroko, Gray, Kenneth|Ullamcorper Eu Euismod Company|Montenegro|1683060707099|ENR73BUJ3YI|24|3|justo eu arcu. Morbi sit amet massa. Quisque porttitor|in faucibus orci luctus et ultrices posuere cubilia Curae; +190609|Montana, Quintessa, Ferris, Claudia|Urna Ltd|Viet Nam|1649061968499|HTK97GWY1BQ|25|1|nibh lacinia|Morbi metus. Vivamus euismod urna. Nullam lobortis quam +171118|Hilel, Ciara, Germaine, Baxter|Lacus Quisque Purus LLC|Israel|1684122522799|GUE73NOR8DB|26|5|eu tellus. Phasellus elit pede, malesuada|turpis non enim. +190216|Graham, Lila, Jasper, Imani|Eu Ligula Aenean Industries|Bangladesh|1660120308899|ACY36RWS1SB|27|8|Suspendisse sed dolor. Fusce mi lorem, vehicula|rhoncus. Donec est. Nunc ullamcorper, velit in aliquet lobortis, nisi +190808|Alisa, Vernon, Gregory, Dale|Ultrices Foundation|Peru|1633051811499|DKY94BEN1QF|28|6|vitae erat|ipsum. Phasellus vitae +180124|Ahmed, Henry, Genevieve, Odessa|Dis Parturient Corp.|Guinea|1607102076599|QTP12BFI2XU|29|9|dolor quam, elementum at, egestas a, scelerisque sed,|nec tempus mauris erat eget ipsum. Suspendisse sagittis. Nullam +171012|Germane, Shaine, McKenzie, Tucker|Lacinia Sed Congue Industries|Taiwan|1658060399599|WBI16ZTV6ZT|30|7|feugiat metus sit amet ante. Vivamus|Sed dictum. Proin eget odio. Aliquam vulputate ullamcorper magna. +190322|Dahlia, Tiger, Montana, Quintessa|Vel Convallis In Institute|Maldives|1665050292799|HID39XTJ0LW|31|10|sem magna nec quam. Curabitur vel|id, mollis nec, cursus +180726|Drew, Adrienne, Blaze, Jade|At Nisi PC|Chad|1637081315399|MKT50ZSU1QN|32|10|sapien. Cras dolor dolor,|Nulla eget metus eu erat semper rutrum. Fusce dolor +180810|Christen, Abdul, Destiny, Noelani|Id Ante Institute|Tunisia|1608020722999|MMT88JUR2WH|33|1|Cras interdum. Nunc sollicitudin commodo ipsum. Suspendisse non leo.|magna a neque. Nullam ut nisi a +190312|Cassidy, Zahir, Colin, Jared|Eu LLP|Chile|1652011207799|XNT44USL5EN|34|7|fringilla ornare placerat, orci lacus vestibulum lorem, sit amet ultricies|tellus lorem +181110|Kiara, Yvonne, Shafira, Dalton|Pellentesque Habitant Morbi Company|Japan|1640022802999|NRJ96ECT6PL|35|7|natoque penatibus et magnis dis parturient montes,|eu odio tristique pharetra. Quisque ac +180316|Velma, Rebecca, Tamara, Elliott|Gravida Corp.|Senegal|1670111273899|LHI28IZC6IE|36|7|neque|morbi tristique senectus et netus et +180524|Roanna, Aladdin, Gillian, Claudia|Eget Ltd|Guinea|1681070891299|BNS10FAM9RY|37|3|ultrices posuere cubilia Curae; Donec tincidunt. Donec vitae erat|Suspendisse aliquet molestie tellus. Aenean egestas hendrerit neque. +180912|Quentin, Sophia, Mark, Holly|Velit PC|Nauru|1670031601799|WNR35MBI9NW|38|6|at|sagittis. Duis gravida. Praesent eu +190302|Lamar, Melvin, Hop, Marah|Fermentum Arcu Vestibulum Foundation|Saudi Arabia|1663111091299|SBO48FHQ1BJ|39|2|vel, convallis in, cursus et, eros. Proin ultrices. Duis volutpat|ornare. Fusce mollis. Duis +170804|Amery, Castor, Mufutau, Lois|Consectetuer Incorporated|Guyana|1623040412699|NRB82CRU9WQ|40|2|Suspendisse ac metus vitae velit egestas lacinia. Sed congue, elit|Aliquam gravida mauris ut mi. Duis risus odio, +180303|Savannah, Finn, Jaime, Kelsey|Sed Incorporated|France|1666011388299|GLJ42XIO2KW|41|7|eget massa. Suspendisse eleifend. Cras sed leo. 
Cras|tristique senectus et netus et +171221|Megan, Ava, Tiger, Azalia|Feugiat Lorem LLP|Korea, South|1693112634099|LNK69KXU7BP|42|10|per inceptos hymenaeos. Mauris ut quam vel|tellus +190509|Ruby, Declan, Regina, Justin|Mi Company|Ethiopia|1604022306199|TCO99QTY7AP|43|4|lorem ac risus. Morbi metus. Vivamus euismod urna. Nullam lobortis|Cras vulputate velit eu +180730|Curran, Quamar, Rigel, Daria|Dui Company|Libya|1624020124999|DZE95HQI0XL|44|5|non, cursus non, egestas a,|Mauris non dui nec urna suscipit nonummy. Fusce fermentum +190603|Alma, Kirestin, Hilary, Jordan|Sed Et Libero Ltd|Bahrain|1611032970899|QXI06NPR8NR|45|6|egestas. Aliquam fringilla cursus purus. Nullam scelerisque neque sed sem|felis orci, adipiscing non, luctus sit amet, +180227|Chaim, Sean, Lacey, Uriah|Proin Vel Institute|Cape Verde|1616102911099|ZEB73AGM2VL|46|6|eu|Quisque varius. +190531|Emerson, Hayden, Coby, Yoshi|Tincidunt Dui Corporation|Kuwait|1665090731599|PCH73IIX0AJ|47|8|nisi magna sed dui. Fusce aliquam,|magnis dis parturient montes, nascetur ridiculus +190429|Merrill, Janna, Cruz, Wynter|Tellus PC|India|1659050593499|VKJ72JYA8PD|48|7|nec, leo. Morbi neque tellus,|est, mollis non, +180518|Ocean, Anastasia, Rhonda, Jeremy|Integer Incorporated|Comoros|1692110786099|IZE88YQU4FM|49|5|Aliquam|Vivamus +190328|Richard, Haviva, Dylan, Patrick|Nulla Tempor Limited|Russian Federation|1655020193399|OUX97TCS8LY|50|10|nisi. Mauris nulla. Integer urna. Vivamus|libero est, +180402|Claudia, Walker, Noble, Lenore|Sapien Cursus In Limited|Turkey|1658052126199|YQL33AAC9HP|51|5|mi|neque sed dictum eleifend, nunc risus varius orci, in +190401|Ursula, Emerson, Denton, Wyoming|Lobortis Quam Company|Ecuador|1638120251599|GTC02SWP8EJ|52|8|dictum|ultrices +190726|Sierra, Cherokee, Mufutau, Tad|Mollis Dui In Associates|Cuba|1695080400399|MJL43HHL1WI|53|2|sollicitudin|magna. Ut tincidunt orci quis lectus. Nullam suscipit, est ac +190604|Caldwell, Kenyon, Latifah, Declan|Tempor Arcu LLC|Falkland Islands|1669071816499|ROK31XFZ0SE|54|4|elit, a feugiat tellus lorem eu metus. In lorem.|consequat +170922|Wang, Jasper, Macy, Callum|Libero Company|Somalia|1652011690599|NMG70INC6KI|55|2|pharetra,|Duis risus odio, auctor vitae, aliquet nec, imperdiet nec, leo. +190323|Elvis, Zane, Joseph, Maisie|Feugiat Corp.|Brazil|1624050541999|FSI16NGX9BE|56|3|eget magna.|enim nec tempus scelerisque, +180206|Constance, Eugenia, Kelsie, Lewis|Integer In Magna PC|Korea, North|1682100536299|YRK00ITG2VI|57|6|sem, consequat nec, mollis vitae, posuere|quam. Pellentesque habitant morbi tristique senectus et +170804|Aurora, Bryar, Desiree, India|Adipiscing Enim Mi Incorporated|Sao Tome and Principe|1639081264399|WRL06CLW8XI|58|10|quis accumsan convallis, ante lectus convallis est, vitae sodales|Curabitur dictum. Phasellus in felis. Nulla tempor augue ac ipsum. +190421|Kenyon, Timon, Keely, Cora|Turpis Nec Mauris LLC|Vanuatu|1601101902699|YTH30UPY5JW|59|6|fringilla mi lacinia mattis. Integer eu lacus. Quisque imperdiet,|Aliquam gravida +171123|Caesar, Nicholas, Sybill, Talon|Fusce Feugiat Lorem Associates|Pakistan|1663032896999|SUR71ZHE6YS|60|9|iaculis quis, pede.|lorem, vehicula et, rutrum eu, ultrices sit amet, risus. Donec +180504|Jana, Simon, Stone, Juliet|Id Corp.|Dominican Republic|1610011098499|TRD03GIS6JN|61|3|a, arcu. Sed et libero. Proin mi.|quis, pede. Praesent eu dui. Cum sociis natoque penatibus +180415|Lesley, Cedric, Alexandra, Callum|Ut Quam Vel Corp.|Montenegro|1636031140599|GHE88GOL1BE|62|3|orci lobortis augue scelerisque mollis. 
Phasellus|Aliquam gravida mauris ut mi. Duis +180822|Paki, Nayda, Keegan, Clinton|Nunc Pulvinar Company|Somalia|1621012252699|ULI90CWC1KH|63|2|fringilla ornare placerat, orci lacus vestibulum lorem, sit amet|gravida. Praesent eu nulla at sem molestie sodales. +190306|Debra, Tiger, Macey, Arsenio|Vehicula Pellentesque PC|Monaco|1659120468899|JCB88FKA8XK|64|6|lacus. Quisque purus sapien, gravida|lacus. Mauris +181114|Aretha, Clayton, Charde, Porter|Interdum Enim Incorporated|Guinea-Bissau|1632011780799|DCS07JPS0KW|65|7|placerat,|ac facilisis facilisis, magna tellus faucibus leo, +180823|Francesca, Herman, Vielka, India|Ultrices Iaculis Associates|Poland|1620042766799|ZTA20FEE9YW|66|2|blandit congue. In scelerisque scelerisque dui. Suspendisse ac metus vitae|a mi fringilla mi lacinia mattis. Integer eu lacus. +170907|Ori, Martena, Roary, Omar|Enim LLP|Cuba|1689122251699|CNJ76OKE8QF|67|9|purus|ut aliquam +190808|Aladdin, Rhona, Orla, Madaline|Suspendisse Ac Metus LLC|Saint Vincent and The Grenadines|1621050925699|RQK45QVM4CS|68|2|elit pede, malesuada vel, venenatis vel, faucibus|sociis natoque penatibus et +190722|Eaton, Shad, Lionel, Giacomo|Et Magnis LLP|Kyrgyzstan|1657040273399|LXY21QMG2RN|69|7|tincidunt pede ac urna. Ut tincidunt vehicula risus.|Morbi accumsan laoreet ipsum. Curabitur consequat, lectus sit amet luctus +180219|Sophia, Brenden, Edward, Dahlia|Diam Luctus Lobortis Incorporated|Côte D'Ivoire (Ivory Coast)|1612031015299|ZJF31UXK5PI|70|2|mi pede, nonummy ut, molestie in, tempus eu, ligula. Aenean|nunc sed libero. +181214|Dominic, Mohammad, Melissa, Maia|Duis Dignissim Industries|Tonga|1687031543199|LAE81JYL5YJ|71|7|dapibus quam quis diam. Pellentesque habitant|arcu. Vivamus sit amet risus. +181121|Kirk, Thor, Sage, Shana|Ac Inc.|India|1671080437199|CKQ60RDI9HV|72|10|tempor bibendum. Donec felis|Nullam vitae +170815|Sarah, Francis, Adena, Driscoll|Fringilla Ornare Placerat Industries|Bahamas|1644090227699|DTT75FAP2RJ|73|2|faucibus id, libero. Donec consectetuer mauris|Nullam lobortis quam a felis ullamcorper viverra. Maecenas iaculis +190602|Nathan, Mari, Britanney, Cathleen|Ullamcorper Foundation|New Caledonia|1690050975999|HSH59SGZ8PL|74|6|nunc. In at pede. Cras vulputate velit|in aliquet lobortis, nisi nibh lacinia orci, +170810|Teegan, Rosalyn, Iola, Valentine|Gravida Sit Corp.|Viet Nam|1687091774499|YHV12DEU1SU|75|1|leo. Vivamus nibh dolor,|egestas. Fusce aliquet magna +171231|Chava, Kyra, Raja, Oren|Vivamus Incorporated|Saint Barthélemy|1690042666199|RXU48QDX6JG|76|6|et nunc. Quisque ornare tortor|Proin nisl sem, +171214|Maryam, Phillip, Lillian, Henry|Sit Amet Faucibus PC|Singapore|1668021703999|CXB39KZC8ZD|77|5|ligula eu enim.|mollis non, cursus non, egestas a, dui. Cras +170927|Callie, Miriam, Carson, Faith|Vel Vulputate Eu Foundation|Niger|1677070137099|WSK59IXF7NB|78|3|congue. In scelerisque|Nulla interdum. Curabitur dictum. Phasellus in felis. Nulla tempor augue +180709|Paloma, Yael, Sebastian, Drew|Lorem Ipsum Consulting|Tokelau|1693051794999|VHL56FFE3RN|79|7|lacus. Etiam|faucibus orci luctus +190219|Carl, Macaulay, Brent, Lael|Vestibulum Nec Euismod Company|Honduras|1682011611899|ECS73TCR2JY|80|1|est. Nunc ullamcorper, velit in|mattis semper, dui lectus rutrum urna, nec luctus felis +171225|Cullen, Risa, Keith, Kenyon|Nullam LLC|Saint Vincent and The Grenadines|1681060476999|BPD48HRD3DC|81|8|porttitor scelerisque|tellus id nunc interdum feugiat. 
Sed nec metus facilisis +170807|Flynn, Chaim, Wilma, Cameron|Arcu Imperdiet Corporation|Tuvalu|1612091856899|PHU58RPR1LB|82|1|fringilla|ipsum. Donec sollicitudin adipiscing +171205|Clayton, Cullen, Mia, Iris|Dictum Eu Placerat Corp.|Falkland Islands|1687020772199|IMJ41BVM2NX|83|1|enim consequat|egestas blandit. +190714|Thaddeus, Courtney, Alexis, Bell|Ultrices LLP|Nicaragua|1642072229899|ZAC25PQJ4IV|84|6|accumsan|ornare, libero at auctor +190510|Tanek, Jeanette, Mira, Ima|Libero Dui Incorporated|New Zealand|1697080707099|CIV64KIU6QR|85|8|ac metus vitae velit|nulla vulputate dui, nec tempus mauris erat +181020|Sacha, Mufutau, Chaney, Iliana|Integer Tincidunt PC|Belgium|1648100961899|CGN99LLB1WQ|86|10|fringilla mi lacinia mattis.|Sed nunc est, mollis +190703|Thor, Myra, Jolene, Elmo|Pellentesque PC|Liechtenstein|1692092027199|HYH12XZL5BF|87|10|et|congue. In scelerisque +180521|Kuame, Scarlett, Macaulay, Cadman|Donec Est Corporation|Turks and Caicos Islands|1636050431099|BYN83REO8CF|88|1|lacus. Mauris non dui nec|tincidunt aliquam arcu. Aliquam +181224|Aquila, Harding, Lyle, Francesca|Volutpat Industries|Zambia|1662070325099|SPK37GQW2RG|89|8|Donec dignissim magna|molestie orci tincidunt adipiscing. Mauris molestie +190108|Cecilia, Macon, Cynthia, Lavinia|Mus Proin Consulting|Eritrea|1624070296599|HOM50BVI9OU|90|3|malesuada ut, sem. Nulla interdum.|bibendum. Donec felis orci, adipiscing +190803|Raphael, Zelda, Maisie, Florence|Nec Eleifend Non Limited|Trinidad and Tobago|1677012246799|RNH00TBN7OR|91|5|nonummy. Fusce fermentum fermentum arcu. Vestibulum ante ipsum|magnis dis parturient montes, nascetur +190529|Dorothy, Orla, MacKenzie, Miriam|Neque Ltd|Iraq|1622102547999|KMR58PLD9WF|92|5|Duis cursus, diam at pretium|augue ac ipsum. Phasellus vitae mauris sit amet +190721|Wynne, Hedwig, Dean, Jael|Erat Vitae Risus Corporation|Singapore|1600041165399|GDI14MNW6BQ|93|5|Phasellus at augue id ante|egestas. Sed pharetra, felis eget varius ultrices, mauris ipsum +180601|Keefe, Bo, Dorothy, James|Ligula Associates|Tajikistan|1642122318299|AHU18XOF8MT|94|4|risus. Morbi metus. Vivamus euismod urna.|Proin non massa +171104|Ignatius, Marcia, Lilah, Asher|Sed Associates|Bahamas|1678100301599|EYV07QDL3FF|95|10|mollis|et, rutrum non, hendrerit id, ante. Nunc mauris sapien, cursus +180501|Tasha, Cedric, Beverly, Octavius|Malesuada Fringilla Est LLC|Seychelles|1614112691899|WUM85THU0RB|96|4|urna suscipit nonummy. Fusce fermentum fermentum|nec, malesuada ut, sem. Nulla +180101|Aimee, Asher, Maggie, Addison|Dictum Foundation|Cayman Islands|1678091216599|ATF69MKL1XM|97|1|lobortis tellus justo sit amet nulla. Donec non justo.|euismod ac, fermentum vel, mauris. Integer sem elit, +190615|Hayley, Ainsley, Jemima, Edan|Fusce Diam Institute|Cameroon|1620051899899|ULX81GII2QH|98|10|a neque. Nullam ut nisi a odio semper cursus.|dui. Fusce aliquam, enim +181106|Calvin, Nelle, Vielka, Inga|Duis PC|Chad|1684100965299|MHL78UIB1BU|99|9|quis urna. Nunc quis arcu vel quam dignissim|egestas hendrerit neque. In ornare sagittis +180429|Kellie, Buckminster, Cole, Myles|Lobortis Ultrices PC|British Indian Ocean Territory|1640021514799|DCJ20YII5FQ|100|5|fringilla est. Mauris eu turpis. Nulla aliquet. Proin velit. Sed|commodo ipsum. Suspendisse non leo. 
Vivamus nibh diff --git a/scripts/tutorial_ko.json b/scripts/tutorial_ko.json new file mode 100644 index 0000000..97ddbd9 --- /dev/null +++ b/scripts/tutorial_ko.json @@ -0,0 +1 @@ +{"paragraphs":[{"text":"%md\n## 환영합니다!\n\n### 이 튜토리얼에서는 Lightning DB를 사용하여 Data를 적재하고 불러오는 방법을 배워보도록 하겠습니다.","user":"anonymous","dateUpdated":"2020-06-26T15:32:34+0900","config":{"tableHide":false,"editorSetting":{"language":"markdown","editOnDblClick":true,"completionKey":"TAB","completionSupport":false},"colWidth":12,"editorMode":"ace/mode/markdown","fontSize":9,"editorHide":true,"results":{},"enabled":true},"settings":{"params":{},"forms":{}},"results":{"code":"SUCCESS","msg":[{"type":"HTML","data":"
\n

환영합니다!

\n

이 튜토리얼에서는 Lightning DB를 사용하여 Data를 적재하고 불러오는 방법을 배워보도록 하겠습니다.

\n
"}]},"apps":[],"jobName":"paragraph_1593153154826_1921315873","id":"20200625-141839_323834833","dateCreated":"2020-06-26T15:32:34+0900","status":"READY","errorMessage":"","progressUpdateIntervalMs":500,"focus":true,"$$hashKey":"object:172"},{"text":"%md\n### 0. 준비 (1)\n\n- [Lightning DB 튜토리얼 저장소](https://github.com/mnms/tutorials)를 복제합니다\n- 이 튜토리얼 저장소는 샘플 데이터를 생성하기 위한 코드를 포함하고 있습니다.","user":"anonymous","dateUpdated":"2020-06-26T15:32:34+0900","config":{"tableHide":false,"editorSetting":{"language":"markdown","editOnDblClick":true,"completionKey":"TAB","completionSupport":false},"colWidth":12,"editorMode":"ace/mode/markdown","fontSize":9,"editorHide":true,"results":{},"enabled":true},"settings":{"params":{},"forms":{}},"results":{"code":"SUCCESS","msg":[{"type":"HTML","data":"
\n

0. 준비 (1)

\n\n
"}]},"apps":[],"jobName":"paragraph_1593153154829_380083617","id":"20191113-084312_71033313","dateCreated":"2020-06-26T15:32:34+0900","status":"READY","errorMessage":"","progressUpdateIntervalMs":500,"$$hashKey":"object:173"},{"text":"%sh # Shell Command\n\ncd ~/\ngit clone https://github.com/mnms/tutorials.git ltdb-tutorials","user":"anonymous","dateUpdated":"2020-06-26T15:32:34+0900","config":{"tableHide":false,"editorSetting":{"language":"sh","editOnDblClick":false,"completionKey":"TAB","completionSupport":false},"colWidth":12,"editorMode":"ace/mode/sh","fontSize":9,"editorHide":false,"results":{},"enabled":true},"settings":{"params":{},"forms":{}},"results":{"code":"SUCCESS","msg":[{"type":"TEXT","data":"Cloning into 'ltdb-tutorials'...\n"}]},"apps":[],"jobName":"paragraph_1593153154829_1099707681","id":"20191118-044249_1240062393","dateCreated":"2020-06-26T15:32:34+0900","status":"READY","errorMessage":"","progressUpdateIntervalMs":500,"$$hashKey":"object:174"},{"text":"%sh\n\nls -alF ~/ltdb-tutorials\n# ls -alF ~/ltdb-tutorials/prep-sample-data/","user":"anonymous","dateUpdated":"2020-06-26T15:32:34+0900","config":{"editorSetting":{"language":"sh","editOnDblClick":false,"completionKey":"TAB","completionSupport":false},"colWidth":12,"editorMode":"ace/mode/sh","fontSize":9,"results":{},"enabled":true},"settings":{"params":{},"forms":{}},"results":{"code":"SUCCESS","msg":[{"type":"TEXT","data":"total 8\ndrwxrwxr-x 5 ltdb ltdb 81 Jun 26 09:00 ./\ndrwx------ 34 ltdb ltdb 4096 Jun 26 09:09 ../\ndrwxrwxr-x 8 ltdb ltdb 152 Jun 26 09:00 .git/\n-rw-rw-r-- 1 ltdb ltdb 74 Jun 26 09:00 .gitignore\ndrwxrwxr-x 3 ltdb ltdb 53 Jun 26 09:00 prep-sample-data/\ndrwxrwxr-x 2 ltdb ltdb 22 Jun 26 09:00 zeppelin-notebook/\n"}]},"apps":[],"jobName":"paragraph_1593153154830_-860175536","id":"20191114-042102_932829872","dateCreated":"2020-06-26T15:32:34+0900","status":"READY","errorMessage":"","progressUpdateIntervalMs":500,"$$hashKey":"object:175"},{"text":"%md\n### 0. 준비 (2)\n\n- [샘플 데이터 생성 방법](https://github.com/mnms/tutorials/tree/master/prep-sample-data)을 참고하여 샘플 데이터를 생성합니다.\n- 아래와 같은 명령어를 실행하여 미리 생성한 샘플 데이터를 사용하도록 하겠습니다. (72개 CSV파일 / 전체 약 31백만건)","user":"anonymous","dateUpdated":"2020-06-26T15:32:34+0900","config":{"tableHide":false,"editorSetting":{"language":"markdown","editOnDblClick":true,"completionKey":"TAB","completionSupport":false},"colWidth":12,"editorMode":"ace/mode/markdown","fontSize":9,"editorHide":true,"results":{},"enabled":true},"settings":{"params":{},"forms":{}},"results":{"code":"SUCCESS","msg":[{"type":"HTML","data":"
\n

0. 준비 (2)

\n
    \n
  • 샘플 데이터 생성 방법을 참고하여 샘플 데이터를 생성합니다.
  • \n
  • 아래와 같은 명령어를 실행하여 미리 생성한 샘플 데이터를 사용하도록 하겠습니다. (72개 CSV파일 / 전체 약 31백만건)
  • \n
\n
"}]},"apps":[],"jobName":"paragraph_1593153154830_-28103094","id":"20191118-054547_1991349652","dateCreated":"2020-06-26T15:32:34+0900","status":"READY","errorMessage":"","progressUpdateIntervalMs":500,"$$hashKey":"object:176"},{"text":"%sh\n\ncd ~/ltdb-tutorials/prep-sample-data\n\npython3 prep.py --start 202001010000 --count 72 --scale 1 --worker 24 --outdir ~/ltdb-sample-data","user":"anonymous","dateUpdated":"2020-06-26T15:35:18+0900","config":{"editorSetting":{"language":"sh","editOnDblClick":false,"completionKey":"TAB","completionSupport":false},"colWidth":12,"editorMode":"ace/mode/sh","fontSize":9,"results":{},"enabled":true},"settings":{"params":{},"forms":{}},"apps":[],"jobName":"paragraph_1593153154831_1215651008","id":"20200625-141922_723506974","dateCreated":"2020-06-26T15:32:34+0900","status":"READY","errorMessage":"","progressUpdateIntervalMs":500,"$$hashKey":"object:177"},{"text":"%md\n### 0. 준비(3)\n\n- 샘플 데이터를 Spark Dataframe으로 불러옵니다.","user":"anonymous","dateUpdated":"2020-06-26T15:32:34+0900","config":{"tableHide":false,"editorSetting":{"language":"markdown","editOnDblClick":true,"completionKey":"TAB","completionSupport":false},"colWidth":12,"editorMode":"ace/mode/markdown","fontSize":9,"editorHide":true,"results":{},"enabled":true},"settings":{"params":{},"forms":{}},"results":{"code":"SUCCESS","msg":[{"type":"HTML","data":"
\n

0. 준비(3)

\n
    \n
  • 샘플 데이터를 Spark Dataframe으로 불러옵니다.
  • \n
\n
"}]},"apps":[],"jobName":"paragraph_1593153154833_-2110104548","id":"20191118-044237_1397043835","dateCreated":"2020-06-26T15:32:34+0900","status":"READY","errorMessage":"","progressUpdateIntervalMs":500,"$$hashKey":"object:178"},{"text":"%spark\n\nval csvDf = spark.sqlContext.read\n .format(\"csv\")\n .option(\"header\", \"true\")\n .option(\"inferSchema\", \"true\")\n .load(\"file:///some/path/to/ltdb-sample-data\")\n\ncsvDf.count()","user":"anonymous","dateUpdated":"2020-06-26T16:13:09+0900","config":{"editorSetting":{"language":"scala","editOnDblClick":false,"completionKey":"TAB","completionSupport":true},"colWidth":12,"editorMode":"ace/mode/scala","fontSize":9,"results":{},"enabled":true},"settings":{"params":{},"forms":{}},"results":{"code":"SUCCESS","msg":[{"type":"TEXT","data":"csvDf: org.apache.spark.sql.DataFrame = [event_time: bigint, name: string ... 6 more fields]\nres9: Long = 30979632\n"}]},"apps":[],"jobName":"paragraph_1593153154833_820765613","id":"20200625-142156_1724342740","dateCreated":"2020-06-26T15:32:34+0900","status":"FINISHED","progressUpdateIntervalMs":500,"$$hashKey":"object:179","dateFinished":"2020-06-26T16:13:00+0900","dateStarted":"2020-06-26T16:12:53+0900"},{"text":"%spark\n\ncsvDf.show()","user":"anonymous","dateUpdated":"2020-06-26T16:13:09+0900","config":{"editorSetting":{"language":"scala","editOnDblClick":false,"completionKey":"TAB","completionSupport":true},"colWidth":12,"editorMode":"ace/mode/scala","fontSize":9,"results":{},"enabled":true},"settings":{"params":{},"forms":{}},"results":{"code":"SUCCESS","msg":[{"type":"TEXT","data":"+------------+--------+----------+----+-------------------+--------+---------+--------------+\n| event_time| name| sur_name| sex| city|latitude|longitude| country|\n+------------+--------+----------+----+-------------------+--------+---------+--------------+\n|202001010000|Santiago|SCHWANBECK| boy|City of Westminster| 51.5| -0.1167|United Kingdom|\n|202001010000| Felipe| LIMONES| boy| Koutiala| 12.3904| -5.47| Mali|\n|202001010000| Myrtis| MELLUM|girl| Charleston| 38.3484| -81.6323| United States|\n|202001010000| Marleen| HOLDSCLAW|girl| Tuscaloosa| 33.2349| -87.5267| United States|\n|202001010000| Deann| NICOARA|girl| Itanhaem| -24.18| -46.8| Brazil|\n|202001010000| Marcus| TRAIGLE| boy| Izhevsk| 56.85| 53.23| Russia|\n|202001010000|Victoria| PERRELLO|girl| Sejong| 36.6092| 127.2919| Korea, South|\n|202001010000| Jeanne|EARLEYWINE|girl| Bohinjska Bistrica| 46.2769| 13.955| Slovenia|\n|202001010000| Daniel| GOUVEA| boy| Shaker Heights| 41.4744| -81.5496| United States|\n|202001010000| Major| CHRESTMAN| boy| Independence| 37.2122| -95.7324| United States|\n|202001010000|Bertrand| DEMART| boy| Clinton| 34.4774| -81.864| United States|\n|202001010000| Albert| KINCART| boy| Surigao| 9.7843| 125.4888| Philippines|\n|202001010000| Reuben| MILOS| boy| Kyaukphyu| 19.429| 93.5494| Burma|\n|202001010000| Josie| SCHWADER|girl| Angren| 41.0304| 70.1549| Uzbekistan|\n|202001010000| Dollie| EZPINOZA|girl| Katsina| 12.9904| 7.6| Nigeria|\n|202001010000| Leanna| GLEE|girl| Canillo| 42.567| 1.5981| Andorra|\n|202001010000| Almira| DANTUONO|girl| Addison| 41.9314| -88.0085| United States|\n|202001010000| Cherie| RUK|girl| Trondheim| 63.4167| 10.4167| Norway|\n|202001010000| Carmel| ALMEN|girl| Saint Paul Park| 44.836| -92.9949| United States|\n|202001010000| Regina| LINEBERRY|girl| Aurangabad| 24.7704| 84.38| India|\n+------------+--------+----------+----+-------------------+--------+---------+--------------+\nonly showing top 20 
rows\n\n"}]},"apps":[],"jobName":"paragraph_1593153154834_-1595890550","id":"20200625-142228_1960064863","dateCreated":"2020-06-26T15:32:34+0900","status":"FINISHED","progressUpdateIntervalMs":500,"$$hashKey":"object:180","dateFinished":"2020-06-26T16:13:10+0900","dateStarted":"2020-06-26T16:13:09+0900"},{"text":"%md\n### 1. DataFrame의 스키마 확인\n\n* Lightning DB에 새로운 테이블을 생성하기 전에, CSV DataFrame의 스키마를 확인합니다.\n","user":"anonymous","dateUpdated":"2020-06-26T15:32:34+0900","config":{"tableHide":false,"editorSetting":{"language":"markdown","editOnDblClick":true,"completionKey":"TAB","completionSupport":false},"colWidth":12,"editorMode":"ace/mode/markdown","fontSize":9,"editorHide":true,"results":{},"enabled":true},"settings":{"params":{},"forms":{}},"results":{"code":"SUCCESS","msg":[{"type":"HTML","data":"
1. Check the DataFrame schema

  • Before creating a new table in Lightning DB, check the schema of the CSV DataFrame, as in the sketch below.
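For reference, a minimal way to inspect the schema from a %spark paragraph (csvDf is the CSV DataFrame loaded earlier in this notebook):

    // Print the inferred schema of the CSV DataFrame before creating the table
    csvDf.printSchema()
    // Or keep it as a StructType so it can be reused when defining the r2 table
    val csvSchema = csvDf.schema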
"}]},"apps":[],"jobName":"paragraph_1593153154835_-353598348","id":"20191118-055616_1455560182","dateCreated":"2020-06-26T15:32:34+0900","status":"READY","errorMessage":"","progressUpdateIntervalMs":500,"$$hashKey":"object:181"},{"text":"%spark\n\ncsvDf.schema","user":"anonymous","dateUpdated":"2020-06-26T15:32:34+0900","config":{"editorSetting":{"language":"scala","editOnDblClick":false,"completionKey":"TAB","completionSupport":true},"colWidth":12,"editorMode":"ace/mode/scala","fontSize":9,"results":{},"enabled":true},"settings":{"params":{},"forms":{}},"results":{"code":"SUCCESS","msg":[{"type":"TEXT","data":"res12: org.apache.spark.sql.types.StructType = StructType(StructField(event_time,LongType,true), StructField(name,StringType,true), StructField(sur_name,StringType,true), StructField(sex,StringType,true), StructField(city,StringType,true), StructField(latitude,DoubleType,true), StructField(longitude,DoubleType,true), StructField(country,StringType,true))\n"}]},"apps":[],"jobName":"paragraph_1593153154835_-1130004358","id":"20200625-142632_662293077","dateCreated":"2020-06-26T15:32:34+0900","status":"READY","errorMessage":"","progressUpdateIntervalMs":500,"$$hashKey":"object:182"},{"text":"%md\n### 2. 신규 테이블 생성\n\n* r2를 사용하여 테이블을 생성합니다. (r2는 redis + rocksdb를 뜻합니다)\n* 사용하는 옵션들은 [이 곳](https://docs.lightningdb.io/data-ingestion-and-querying/)에서 확인할 수 있습니다.\n* 아래 예제에서는 EVENT_TIME과 CITY를 파티션 컬럼으로 사용하였습니다.\n* 파티션 컬럼은 데이터를 고르게 적재하는 것은 물론, 질의 시 데이터를 빠르게 찾을 수 있도록 적절하게 설정해주는 것이 필요합니다.\n","user":"anonymous","dateUpdated":"2020-06-26T15:32:34+0900","config":{"tableHide":false,"editorSetting":{"language":"markdown","editOnDblClick":true,"completionKey":"TAB","completionSupport":false},"colWidth":12,"editorMode":"ace/mode/markdown","fontSize":9,"editorHide":true,"results":{},"enabled":true},"settings":{"params":{},"forms":{}},"results":{"code":"SUCCESS","msg":[{"type":"HTML","data":"
2. Create a new table

  • Create the table using r2 (r2 stands for redis + rocksdb).
  • The available options are described at https://docs.lightningdb.io/data-ingestion-and-querying/.
  • In the example below, EVENT_TIME and CITY are used as the partition columns.
  • Partition columns should be chosen so that data is distributed evenly and queries can locate the relevant data quickly.
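A trimmed sketch of the table creation that follows, using the values from this notebook (table id 1, host d201, port 18700); the full option list appears in the next paragraph:

    // Map the r2 (redis + rocksdb) data source to a Spark DataFrame.
    // "partitions" names the partition columns; the schema is reused from the CSV DataFrame.
    val params = Map(
      "table"      -> "1",                // table id
      "host"       -> "d201",             // Lightning DB host used in this notebook
      "port"       -> "18700",            // Lightning DB port
      "partitions" -> "event_time city",  // partition columns
      "mode"       -> "nvkvs"
    )

    val r2Df = spark.read.format("r2")
      .options(params)
      .schema(csvDf.schema)
      .load()

    r2Df.createOrReplaceTempView("ltdb_tutorial")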
"}]},"apps":[],"jobName":"paragraph_1593153154836_703675994","id":"20200625-144416_52402019","dateCreated":"2020-06-26T15:32:34+0900","status":"READY","errorMessage":"","progressUpdateIntervalMs":500,"$$hashKey":"object:183"},{"text":"%spark\n\nimport org.apache.spark.sql.types._\n\nval params = Map(\n \"table\" -> \"1\", // table id\n \"host\" -> \"d201\", // host\n \"port\" -> \"18700\", // port\n \"partitions\" -> \"event_time city\", // partition\n \"mode\" -> \"nvkvs\",\n \"group_query_enabled\" -> \"no\",\n \"group_size\" -> \"44\",\n \"query_result_partition_cnt_limit\" -> \"400000000\",\n \"query_result_task_row_cnt_limit\" -> \"10000000\",\n \"query_result_total_row_cnt_limit\" -> \"2147483647\",\n \"at_least_one_partition_enabled\" -> \"no\" // 운영서버에는 yes로 설정\n )\n\nval r2Df = spark.read.format(\"r2\")\n .options(params)\n .schema(csvDf.schema)\n .load()\n\nr2Df.createOrReplaceTempView(\"ltdb_tutorial\")\nr2Df.printSchema()","user":"anonymous","dateUpdated":"2020-06-26T16:13:14+0900","config":{"editorSetting":{"language":"scala","editOnDblClick":false,"completionKey":"TAB","completionSupport":true},"colWidth":12,"editorMode":"ace/mode/scala","fontSize":9,"results":{},"enabled":true},"settings":{"params":{},"forms":{}},"results":{"code":"SUCCESS","msg":[{"type":"TEXT","data":"root\n |-- event_time: long (nullable = true)\n |-- name: string (nullable = true)\n |-- sur_name: string (nullable = true)\n |-- sex: string (nullable = true)\n |-- city: string (nullable = true)\n |-- latitude: double (nullable = true)\n |-- longitude: double (nullable = true)\n |-- country: string (nullable = true)\n\nimport org.apache.spark.sql.types._\nparams: scala.collection.immutable.Map[String,String] = Map(query_result_partition_cnt_limit -> 400000000, query_result_task_row_cnt_limit -> 10000000, host -> d201, at_least_one_partition_enabled -> no, query_result_total_row_cnt_limit -> 2147483647, group_size -> 44, port -> 18700, group_query_enabled -> no, mode -> nvkvs, partitions -> event_time city, table -> 1)\nr2Df: org.apache.spark.sql.DataFrame = [event_time: bigint, name: string ... 6 more fields]\n"}]},"apps":[],"jobName":"paragraph_1593153154837_-1274181638","id":"20200625-142632_1024922279","dateCreated":"2020-06-26T15:32:34+0900","status":"FINISHED","progressUpdateIntervalMs":500,"$$hashKey":"object:184","dateFinished":"2020-06-26T16:13:14+0900","dateStarted":"2020-06-26T16:13:14+0900"},{"text":"%spark\n\nspark.sql(\"SHOW TABLES\").show()\n\nspark.sql(\"SELECT COUNT(*) FROM ltdb_tutorial\").show()","user":"anonymous","dateUpdated":"2020-06-26T16:13:28+0900","config":{"editorSetting":{"language":"scala","editOnDblClick":false,"completionKey":"TAB","completionSupport":true},"colWidth":12,"editorMode":"ace/mode/scala","fontSize":9,"results":{},"enabled":true},"settings":{"params":{},"forms":{}},"results":{"code":"SUCCESS","msg":[{"type":"TEXT","data":"+--------+-------------+-----------+\n|database| tableName|isTemporary|\n+--------+-------------+-----------+\n| |ltdb_tutorial| true|\n+--------+-------------+-----------+\n\n+--------+\n|count(1)|\n+--------+\n+--------+\n\n"}]},"apps":[],"jobName":"paragraph_1593153154840_1706995273","id":"20200625-144809_1147858936","dateCreated":"2020-06-26T15:32:34+0900","dateStarted":"2020-06-26T16:13:28+0900","dateFinished":"2020-06-26T16:13:28+0900","status":"FINISHED","progressUpdateIntervalMs":500,"$$hashKey":"object:185"},{"text":"%md\n### 3. 
데이터 적재\n\n* CSV DataFrame을 사용하여 새로 생성한 Lightning DB 테이블에 데이터를 적재합니다.","user":"anonymous","dateUpdated":"2020-06-26T15:32:34+0900","config":{"tableHide":false,"editorSetting":{"language":"markdown","editOnDblClick":true,"completionKey":"TAB","completionSupport":false},"colWidth":12,"editorMode":"ace/mode/markdown","fontSize":9,"editorHide":true,"results":{},"enabled":true},"settings":{"params":{},"forms":{}},"results":{"code":"SUCCESS","msg":[{"type":"HTML","data":"
3. Ingest the data

  • Load the CSV DataFrame into the newly created Lightning DB table, as sketched below.
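A minimal sketch of the ingestion step, assuming csvDf and the ltdb_tutorial temporary view created above:

    // Write the CSV rows into the Lightning DB table through the temporary view
    csvDf.write.insertInto("ltdb_tutorial")

    // Verify the row count after ingestion
    spark.sql("SELECT COUNT(*) FROM ltdb_tutorial").show()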
"}]},"apps":[],"jobName":"paragraph_1593153154840_-1390793852","id":"20191113-084356_929059768","dateCreated":"2020-06-26T15:32:34+0900","status":"READY","errorMessage":"","progressUpdateIntervalMs":500,"$$hashKey":"object:186"},{"text":"%spark\n\ncsvDf.write.insertInto(\"ltdb_tutorial\")","user":"anonymous","dateUpdated":"2020-06-26T16:13:46+0900","config":{"editorSetting":{"language":"scala","editOnDblClick":false,"completionKey":"TAB","completionSupport":true},"colWidth":12,"editorMode":"ace/mode/scala","fontSize":9,"results":{},"enabled":true},"settings":{"params":{},"forms":{}},"results":{"code":"SUCCESS","msg":[]},"apps":[],"jobName":"paragraph_1593153154841_435389862","id":"20200625-144903_179421886","dateCreated":"2020-06-26T15:32:34+0900","status":"FINISHED","progressUpdateIntervalMs":500,"$$hashKey":"object:187","dateFinished":"2020-06-26T16:14:03+0900","dateStarted":"2020-06-26T16:13:46+0900"},{"text":"%spark\n\nspark.sql(\"SELECT COUNT(*) FROM ltdb_tutorial\").show()","user":"anonymous","dateUpdated":"2020-06-26T16:14:13+0900","config":{"colWidth":12,"fontSize":9,"enabled":true,"results":{},"editorSetting":{"language":"scala","editOnDblClick":false,"completionKey":"TAB","completionSupport":true},"editorMode":"ace/mode/scala"},"settings":{"params":{},"forms":{}},"results":{"code":"SUCCESS","msg":[{"type":"TEXT","data":"+--------+\n|count(1)|\n+--------+\n|30979632|\n+--------+\n\n"}]},"apps":[],"jobName":"paragraph_1593154627895_1865512900","id":"20200626-155707_1859894629","dateCreated":"2020-06-26T15:57:07+0900","dateStarted":"2020-06-26T16:14:13+0900","dateFinished":"2020-06-26T16:14:14+0900","status":"FINISHED","progressUpdateIntervalMs":500,"$$hashKey":"object:188"},{"text":"%md\n\n### 4. Lightning DB에 질의하기","user":"anonymous","dateUpdated":"2020-06-26T15:32:34+0900","config":{"tableHide":false,"editorSetting":{"language":"markdown","editOnDblClick":true,"completionKey":"TAB","completionSupport":false},"colWidth":12,"editorMode":"ace/mode/markdown","fontSize":9,"editorHide":true,"results":{},"enabled":true},"settings":{"params":{},"forms":{}},"results":{"code":"SUCCESS","msg":[{"type":"HTML","data":"
4. Querying Lightning DB
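The paragraphs below run the queries through the %sql interpreter; the same queries can also be issued from Scala, for example (a sketch against the ltdb_tutorial view defined above):

    // Point lookup on the partition columns (event_time, city)
    spark.sql(
      "SELECT * FROM ltdb_tutorial WHERE event_time = '202001010005' AND city = 'Tyler' LIMIT 20"
    ).show()

    // Aggregation grouped by country
    spark.sql(
      "SELECT country, COUNT(*) AS people_count FROM ltdb_tutorial " +
      "WHERE event_time = '202001010005' GROUP BY country ORDER BY people_count DESC"
    ).show()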
"}]},"apps":[],"jobName":"paragraph_1593153154841_1818552071","id":"20200625-181004_1978882286","dateCreated":"2020-06-26T15:32:34+0900","status":"READY","errorMessage":"","progressUpdateIntervalMs":500,"$$hashKey":"object:189"},{"text":"%sql\n\nSELECT COUNT(*) FROM ltdb_tutorial","user":"anonymous","dateUpdated":"2020-06-26T15:32:34+0900","config":{"editorSetting":{"language":"sql","editOnDblClick":false,"completionKey":"TAB","completionSupport":true},"colWidth":12,"editorMode":"ace/mode/sql","fontSize":9,"results":{"0":{"graph":{"mode":"table","height":134,"optionOpen":false,"setting":{"table":{"tableGridState":{},"tableColumnTypeState":{"names":{"count(1)":"string"},"updated":false},"tableOptionSpecHash":"[{\"name\":\"useFilter\",\"valueType\":\"boolean\",\"defaultValue\":false,\"widget\":\"checkbox\",\"description\":\"Enable filter for columns\"},{\"name\":\"showPagination\",\"valueType\":\"boolean\",\"defaultValue\":false,\"widget\":\"checkbox\",\"description\":\"Enable pagination for better navigation\"},{\"name\":\"showAggregationFooter\",\"valueType\":\"boolean\",\"defaultValue\":false,\"widget\":\"checkbox\",\"description\":\"Enable a footer for displaying aggregated values\"}]","tableOptionValue":{"useFilter":false,"showPagination":false,"showAggregationFooter":false},"updated":false,"initialized":false}},"commonSetting":{}}}},"enabled":true},"settings":{"params":{},"forms":{}},"results":{"code":"SUCCESS","msg":[{"type":"TABLE","data":"count(1)\n30979632\n"},{"type":"TEXT","data":""}]},"apps":[],"jobName":"paragraph_1593153154841_1617955310","id":"20191118-060008_627652660","dateCreated":"2020-06-26T15:32:34+0900","status":"READY","errorMessage":"","progressUpdateIntervalMs":500,"$$hashKey":"object:190"},{"text":"%sql\n\nSELECT * FROM ltdb_tutorial WHERE event_time = '202001010005' AND CITY = 'Tyler' LIMIT 20","user":"anonymous","dateUpdated":"2020-06-26T15:32:34+0900","config":{"editorSetting":{"language":"sql","editOnDblClick":false,"completionKey":"TAB","completionSupport":true},"colWidth":12,"editorMode":"ace/mode/sql","fontSize":9,"results":{"0":{"graph":{"mode":"table","height":300,"optionOpen":false,"setting":{"table":{"tableGridState":{},"tableColumnTypeState":{"names":{"event_time":"string","name":"string","sur_name":"string","sex":"string","city":"string","latitude":"string","longitude":"string","country":"string"},"updated":false},"tableOptionSpecHash":"[{\"name\":\"useFilter\",\"valueType\":\"boolean\",\"defaultValue\":false,\"widget\":\"checkbox\",\"description\":\"Enable filter for columns\"},{\"name\":\"showPagination\",\"valueType\":\"boolean\",\"defaultValue\":false,\"widget\":\"checkbox\",\"description\":\"Enable pagination for better navigation\"},{\"name\":\"showAggregationFooter\",\"valueType\":\"boolean\",\"defaultValue\":false,\"widget\":\"checkbox\",\"description\":\"Enable a footer for displaying aggregated values\"}]","tableOptionValue":{"useFilter":false,"showPagination":false,"showAggregationFooter":false},"updated":false,"initialized":false}},"commonSetting":{}}}},"enabled":true},"settings":{"params":{},"forms":{}},"results":{"code":"SUCCESS","msg":[{"type":"TABLE","data":"event_time\tname\tsur_name\tsex\tcity\tlatitude\tlongitude\tcountry\n202001010005\tTheodora\tPINIZZOTTO\tgirl\tTyler\t32.3184\t-95.3065\tUnited States\n202001010005\tDoyle\tSAUVAGE\tboy\tTyler\t32.3184\t-95.3065\tUnited States\n202001010005\tKailee\tKNICKELBEIN\tgirl\tTyler\t32.3184\t-95.3065\tUnited States\n202001010005\tMarcelino\tKENNEMUR\tboy\tTyler\t32.3184\t-95.3065\tUnited 
States\n202001010005\tCletus\tHOLLENBACH\tboy\tTyler\t32.3184\t-95.3065\tUnited States\n202001010005\tLovell\tSTROGEN\tboy\tTyler\t32.3184\t-95.3065\tUnited States\n202001010005\tWillie\tFANTONE\tgirl\tTyler\t32.3184\t-95.3065\tUnited States\n202001010005\tLollie\tRUOTOLO\tgirl\tTyler\t32.3184\t-95.3065\tUnited States\n202001010005\tNan\tTHARPE\tgirl\tTyler\t32.3184\t-95.3065\tUnited States\n202001010005\tAlaina\tHAFFENDEN\tgirl\tTyler\t32.3184\t-95.3065\tUnited States\n202001010005\tMickey\tELHAG\tgirl\tTyler\t32.3184\t-95.3065\tUnited States\n202001010005\tWorth\tPROCOPIO\tboy\tTyler\t32.3184\t-95.3065\tUnited States\n202001010005\tLeila\tMAKER\tgirl\tTyler\t32.3184\t-95.3065\tUnited States\n202001010005\tVon\tKINKLE\tboy\tTyler\t32.3184\t-95.3065\tUnited States\n202001010005\tTracy\tBERGERT\tgirl\tTyler\t32.3184\t-95.3065\tUnited States\n202001010005\tLes\tPICOS\tboy\tTyler\t32.3184\t-95.3065\tUnited States\n202001010005\tNora\tSNOUFFER\tgirl\tTyler\t32.3184\t-95.3065\tUnited States\n202001010005\tSolomon\tKLEBS\tboy\tTyler\t32.3184\t-95.3065\tUnited States\n202001010005\tSonya\tDUNNUM\tgirl\tTyler\t32.3184\t-95.3065\tUnited States\n202001010005\tMaegan\tBEISNER\tgirl\tTyler\t32.3184\t-95.3065\tUnited States\n"},{"type":"TEXT","data":""}]},"apps":[],"jobName":"paragraph_1593153154842_-1876719550","id":"20191113-090030_1825956406","dateCreated":"2020-06-26T15:32:34+0900","status":"READY","errorMessage":"","progressUpdateIntervalMs":500,"$$hashKey":"object:191"},{"text":"%sql\n\nSELECT COUNTRY, COUNT(*) AS PEOPLE_COUNT FROM ltdb_tutorial WHERE EVENT_TIME = '202001010005' GROUP BY COUNTRY ORDER BY PEOPLE_COUNT DESC LIMIT 1000","user":"anonymous","dateUpdated":"2020-06-26T15:32:34+0900","config":{"tableHide":false,"editorSetting":{"language":"sql","editOnDblClick":false,"completionKey":"TAB","completionSupport":true},"colWidth":12,"editorMode":"ace/mode/sql","fontSize":9,"editorHide":false,"results":{"0":{"graph":{"mode":"multiBarChart","height":300,"optionOpen":false,"setting":{"table":{"tableGridState":{},"tableColumnTypeState":{"names":{"COUNTRY":"string","PEOPLE_COUNT":"string"},"updated":false},"tableOptionSpecHash":"[{\"name\":\"useFilter\",\"valueType\":\"boolean\",\"defaultValue\":false,\"widget\":\"checkbox\",\"description\":\"Enable filter for columns\"},{\"name\":\"showPagination\",\"valueType\":\"boolean\",\"defaultValue\":false,\"widget\":\"checkbox\",\"description\":\"Enable pagination for better navigation\"},{\"name\":\"showAggregationFooter\",\"valueType\":\"boolean\",\"defaultValue\":false,\"widget\":\"checkbox\",\"description\":\"Enable a footer for displaying aggregated values\"}]","tableOptionValue":{"useFilter":false,"showPagination":false,"showAggregationFooter":false},"updated":false,"initialized":false},"multiBarChart":{"rotate":{"degree":"-45"},"xLabelStatus":"default","stacked":false}},"commonSetting":{},"keys":[{"name":"COUNTRY","index":0,"aggr":"sum"}],"groups":[],"values":[{"name":"PEOPLE_COUNT","index":1,"aggr":"sum"}]},"helium":{}}},"enabled":true},"settings":{"params":{},"forms":{}},"results":{"code":"SUCCESS","msg":[{"type":"TABLE","data":"COUNTRY\tPEOPLE_COUNT\nUnited States\t149720\nRussia\t17394\nChina\t12281\nBrazil\t12086\nCanada\t7690\nUnited Kingdom\t7142\nAustralia\t6676\nSlovenia\t6602\nIndia\t6503\nMexico\t5793\nArgentina\t4788\nSerbia\t4400\nUganda\t3897\nPhilippines\t3679\nLatvia\t3489\nIndonesia\t3343\nKazakhstan\t3063\nTurkey\t2822\nCongo 
(Kinshasa)\t2747\nPeru\t2713\nChile\t2635\nThailand\t2575\nJapan\t2362\nNigeria\t2248\nBolivia\t2231\nAzerbaijan\t2224\nVietnam\t2220\nSouth Africa\t2196\nColombia\t2179\nMacedonia\t2112\nMalta\t2106\nIran\t2035\nTanzania\t1944\nAlgeria\t1869\nKenya\t1824\nFrance\t1814\nGermany\t1781\nItaly\t1733\nVenezuela\t1633\nLithuania\t1627\nUkraine\t1599\nAngola\t1503\nSpain\t1484\nNew Zealand\t1474\nEgypt\t1331\nPakistan\t1297\nEcuador\t1294\nRomania\t1289\nAfghanistan\t1272\nParaguay\t1208\nLibya\t1200\nKosovo\t1172\nMoldova\t1156\nBurma\t1135\nEthiopia\t1109\nZambia\t1092\nNorway\t1079\nSweden\t1060\nSudan\t1037\nMalaysia\t1035\nIreland\t1024\nUruguay\t1020\nMozambique\t1014\nDominican Republic\t1007\nSwitzerland\t996\nBurkina Faso\t989\nSaudi Arabia\t971\nNamibia\t958\nMali\t955\nCameroon\t955\nGreece\t944\nKorea, South\t940\nKorea, North\t933\nSomalia\t930\nMalawi\t913\nUzbekistan\t889\nFaroe Islands\t883\nGuinea\t866\nIraq\t859\nMadagascar\t855\nTunisia\t844\nBulgaria\t840\nAlbania\t838\nTaiwan\t836\nChad\t819\nYemen\t812\nCambodia\t807\nPortugal\t796\nHungary\t789\nCuba\t784\nCôte D’Ivoire\t781\nGuatemala\t775\nBotswana\t772\nMongolia\t766\nPoland\t765\nFinland\t759\nPapua New Guinea\t733\nMontenegro\t717\nGreenland\t695\nMauritania\t694\nMaldives\t693\nZimbabwe\t677\nMorocco\t647\nSyria\t645\nHonduras\t645\nCabo Verde\t642\nCentral African Republic\t638\nCroatia\t636\nNicaragua\t632\nBhutan\t628\nCongo (Brazzaville)\t614\nLaos\t601\nGabon\t564\nPanama\t557\nBurundi\t557\nNepal\t552\nSouth Sudan\t546\nGhana\t537\nOman\t523\nEstonia\t511\nLiberia\t501\nGeorgia\t496\nNiger\t486\nBangladesh\t480\nKyrgyzstan\t476\nJamaica\t460\nNetherlands\t460\nBelarus\t453\nSenegal\t453\nSri Lanka\t440\nTimor-Leste\t435\nEl Salvador\t430\nTrinidad And Tobago\t427\nJordan\t420\nTurkmenistan\t412\nGuyana\t402\nCosta Rica\t393\nSuriname\t392\nArmenia\t392\nCzechia\t390\nSamoa\t380\nDenmark\t375\nLuxembourg\t365\nBenin\t360\nLiechtenstein\t360\nRwanda\t352\nAustria\t322\nGuinea-Bissau\t320\nHaiti\t309\nIceland\t307\nLesotho\t300\nBelgium\t296\nLebanon\t284\nSolomon Islands\t278\nTogo\t277\nQatar\t275\nSlovakia\t275\nSao Tome And Principe\t272\nEquatorial Guinea\t268\nEritrea\t260\nUnited Arab Emirates\t244\nTajikistan\t236\nAndorra\t222\nSwaziland\t216\nFrench Guiana\t214\nBelize\t212\nGambia, The\t210\nCyprus\t208\nVanuatu\t193\nDjibouti\t191\nSierra Leone\t191\nIsrael\t182\nBosnia And Herzegovina\t176\nMicronesia, Federated States Of\t152\nSan Marino\t118\nFiji\t114\nSaint Helena, Ascension, And Tristan Da Cunha\t109\nNew Caledonia\t106\nGuadeloupe\t100\nPalau\t92\nKuwait\t84\nTonga\t73\nBahamas, The\t66\nGuam\t63\nReunion\t60\nMauritius\t59\nWallis And Futuna\t56\nSaint Pierre And Miquelon\t45\nAmerican Samoa\t44\nNorthern Mariana Islands\t39\nMayotte\t37\nDominica\t37\nSaint Kitts And Nevis\t37\nVirgin Islands, British\t37\nKiribati\t36\nTurks And Caicos Islands\t36\nMacau\t36\nAnguilla\t35\nIsle Of Man\t35\nNorfolk Island\t34\nCayman Islands\t34\nMonaco\t34\nMontserrat\t33\nSaint Barthelemy\t33\nTuvalu\t33\nSaint Vincent And The Grenadines\t32\nSint Maarten\t32\nBahrain\t31\nSaint Martin\t31\nSvalbard\t31\nComoros\t30\nBermuda\t30\nWest Bank\t29\nSeychelles\t29\nSaint Lucia\t28\nMartinique\t28\nFalkland Islands (Islas Malvinas)\t28\nBrunei\t27\nChristmas Island\t27\nBarbados\t27\nAntigua And Barbuda\t27\nCuraçao\t27\nMarshall Islands\t27\nAruba\t27\nNiue\t26\nGrenada\t25\nPitcairn Islands\t25\nJersey\t25\nGuernsey\t24\nSouth Georgia And South Sandwich Islands\t23\nGibraltar\t23\nSingapore\t23\nFrench 
Polynesia\t23\nHong Kong\t23\nCook Islands\t22\n"},{"type":"TEXT","data":""}]},"apps":[],"jobName":"paragraph_1593153154843_-1185459007","id":"20191118-064532_280573780","dateCreated":"2020-06-26T15:32:34+0900","status":"READY","errorMessage":"","progressUpdateIntervalMs":500,"$$hashKey":"object:192"},{"text":"%sql\n\nSELECT CITY, COUNT(*) AS PEOPLE_COUNT FROM ltdb_tutorial WHERE EVENT_TIME = '202001010005' AND COUNTRY = 'United States' GROUP BY CITY ORDER BY PEOPLE_COUNT DESC LIMIT 1000","user":"anonymous","dateUpdated":"2020-06-26T15:32:34+0900","config":{"editorSetting":{"language":"sql","editOnDblClick":false,"completionKey":"TAB","completionSupport":true},"colWidth":12,"editorMode":"ace/mode/sql","fontSize":9,"results":{"0":{"graph":{"mode":"multiBarChart","height":300,"optionOpen":false,"setting":{"table":{"tableGridState":{},"tableColumnTypeState":{"names":{"CITY":"string","PEOPLE_COUNT":"string"},"updated":false},"tableOptionSpecHash":"[{\"name\":\"useFilter\",\"valueType\":\"boolean\",\"defaultValue\":false,\"widget\":\"checkbox\",\"description\":\"Enable filter for columns\"},{\"name\":\"showPagination\",\"valueType\":\"boolean\",\"defaultValue\":false,\"widget\":\"checkbox\",\"description\":\"Enable pagination for better navigation\"},{\"name\":\"showAggregationFooter\",\"valueType\":\"boolean\",\"defaultValue\":false,\"widget\":\"checkbox\",\"description\":\"Enable a footer for displaying aggregated values\"}]","tableOptionValue":{"useFilter":false,"showPagination":false,"showAggregationFooter":false},"updated":false,"initialized":false},"multiBarChart":{"rotate":{"degree":"-45"},"xLabelStatus":"default"}},"commonSetting":{},"keys":[{"name":"CITY","index":0,"aggr":"sum"}],"groups":[],"values":[{"name":"PEOPLE_COUNT","index":1,"aggr":"sum"}]},"helium":{}}},"enabled":true},"settings":{"params":{},"forms":{}},"results":{"code":"SUCCESS","msg":[{"type":"TABLE","data":"CITY\tPEOPLE_COUNT\nFranklin\t337\nSpringfield\t294\nWashington\t286\nClinton\t284\nJackson\t268\nGreenville\t267\nMonroe\t259\nSalem\t250\nColumbia\t244\nMarion\t243\nLebanon\t239\nRichmond\t230\nAuburn\t214\nJacksonville\t213\nMount Vernon\t207\nTroy\t203\nLexington\t203\nMonticello\t195\nFlorence\t191\nMiddletown\t189\nBurlington\t187\nMadison\t186\nPortland\t184\nIndependence\t177\nLancaster\t177\nColumbus\t177\nGlendale\t174\nAshland\t174\nBellevue\t173\nCovington\t172\nNewport\t169\nCleveland\t169\nHenderson\t168\nLafayette\t167\nManchester\t167\nGreenwood\t167\nNewark\t166\nPrinceton\t166\nFarmington\t165\nDayton\t165\nClayton\t162\nOxford\t161\nPlymouth\t159\nBristol\t159\nDover\t156\nCanton\t156\nRochester\t156\nAlexandria\t153\nUnion City\t153\nMilton\t151\nNewton\t148\nAthens\t148\nBedford\t143\nHighland Park\t141\nLa Grange\t140\nWilmington\t140\nQuincy\t138\nMount Pleasant\t138\nDecatur\t135\nCambridge\t135\nGarden City\t134\nAlbany\t133\nGreenfield\t133\nDanville\t133\nHarrison\t132\nFairfield\t128\nFairview\t128\nMarysville\t127\nSelma\t127\nCenterville\t126\nCharleston\t126\nGeorgetown\t125\nAurora\t125\nMontgomery\t125\nHillsboro\t123\nUnion\t121\nMarshall\t120\nLincoln\t120\nBrentwood\t119\nJasper\t118\nFremont\t118\nWarren\t117\nLakewood\t117\nSaint Joseph\t116\nAberdeen\t116\nChester\t116\nSidney\t116\nFayetteville\t115\nHighland\t115\nEureka\t113\nParis\t113\nNorwalk\t112\nSweetwater\t111\nWatertown\t111\nNiles\t111\nHuron\t110\nCamden\t110\nWyoming\t110\nMaryville\t108\nNew 
Castle\t108\nCommerce\t108\nLawrence\t106\nWellington\t106\nPortage\t105\nHarrisburg\t105\nGrafton\t105\nManhattan\t104\nAugusta\t104\nMaywood\t104\nGainesville\t104\nSmithfield\t103\nCarbondale\t103\nOrange\t103\nCarrollton\t101\nMilan\t100\nLouisville\t100\nWestfield\t100\nWoodbury\t100\nClarksville\t99\nSaint Marys\t99\nSmyrna\t98\nGlencoe\t98\nByron\t97\nLansing\t96\nBrookhaven\t96\nWaynesboro\t95\nSavannah\t95\nArlington\t95\nWarrenton\t94\nSeminole\t94\nPlainfield\t93\nOttawa\t93\nChillicothe\t93\nWhitehall\t93\nBatesville\t92\nLawrenceburg\t92\nBrunswick\t92\nLowell\t92\nSheridan\t92\nTrenton\t92\nLive Oak\t91\nAnderson\t91\nHartford\t91\nWoodstock\t91\nLogan\t91\nMilford\t91\nBloomington\t90\nWinfield\t90\nGeneva\t90\nHolly Springs\t90\nWestminster\t89\nSanford\t89\nPark City\t89\nLake City\t89\nLewisburg\t89\nPortsmouth\t89\nRiverdale\t89\nHastings\t89\nSyracuse\t88\nRichfield\t88\nLong Beach\t88\nMansfield\t88\nWindsor\t88\nFrankfort\t88\nHudson\t87\nMount Carmel\t87\nKingston\t87\nBowling Green\t86\nOakdale\t86\nYork\t86\nLaurel\t86\nTaylor\t86\nMedina\t86\nDublin\t85\nDallas\t85\nCottage Grove\t85\nCaldwell\t85\nNew Albany\t85\nRiverside\t85\nOntario\t85\nEdgewater\t85\nDouglas\t85\nGonzales\t84\nRoseville\t84\nBerkeley\t83\nShorewood\t83\nHamilton\t83\nEnglewood\t83\nNorthfield\t83\nAltoona\t83\nWinchester\t83\nJohnstown\t83\nFreeport\t83\nConcord\t82\nBenton\t82\nAvon\t82\nShelbyville\t82\nRussellville\t82\nLeesburg\t81\nWildwood\t81\nBridgeport\t81\nPerry\t80\nAlma\t80\nGermantown\t79\nSunnyvale\t79\nForest Park\t79\nPleasantville\t79\nChelsea\t78\nEphrata\t78\nBelton\t78\nCrowley\t78\nPiedmont\t78\nOakland\t77\nRoswell\t77\nTitusville\t76\nCary\t76\nStaunton\t76\nNorwich\t76\nLiberty\t76\nGlenwood\t76\nBrandon\t75\nHempstead\t75\nWorthington\t75\nNorwood\t74\nPacific\t74\nCollinsville\t74\nKansas City\t74\nAttica\t74\nCorcoran\t73\nPleasant Hill\t73\nWebster\t73\nHeath\t73\nDe Soto\t73\nOak Grove\t73\nSummerville\t72\nMidland\t72\nElizabethtown\t72\nBatavia\t72\nPulaski\t72\nClearfield\t71\nBastrop\t71\nCudahy\t71\nChatham\t71\nRoselle\t71\nSanger\t70\nShelby\t70\nFruitland\t70\nBath\t70\nBirmingham\t70\nConway\t70\nMarshfield\t70\nRedmond\t70\nOrange City\t70\nAlpine\t69\nPelham\t69\nOswego\t69\nFairfax\t69\nCrestwood\t69\nAlbertville\t69\nGladstone\t69\nPalmyra\t68\nMurray\t68\nHuntsville\t68\nProvidence\t68\nWharton\t68\nVictoria\t68\nCarlsbad\t67\nBlaine\t67\nDeer Park\t67\nJefferson\t67\nGuadalupe\t67\nOak Park\t67\nKearney\t67\nRiverton\t67\nCampbell\t67\nFitchburg\t67\nLitchfield\t67\nAustin\t67\nAntioch\t67\nTecumseh\t67\nBowie\t67\nCreve Coeur\t67\nHelena\t67\nHawthorne\t66\nOzark\t66\nLas Vegas\t66\nGrove City\t66\nRocky Mount\t66\nSomerville\t66\nGrandview\t66\nNorton\t66\nWilliamsburg\t66\nBelmont\t66\nShawnee\t66\nRobinson\t65\nGardner\t65\nCarthage\t65\nCorinth\t65\nWest Haven\t65\nClovis\t65\nSaginaw\t65\nOak Ridge\t65\nPleasanton\t65\nHollywood\t65\nWhite Oak\t65\nRome\t65\nO'Fallon\t65\nTyrone\t65\nRensselaer\t65\nSomerset\t65\nBrighton\t65\nLawrenceville\t65\nEmporia\t65\nDuluth\t65\nOtsego\t65\nClaremont\t65\nBrookings\t65\nRichland\t64\nBeaver Dam\t64\nRidgefield\t64\nRamsey\t64\nRogers\t64\nSuperior\t64\nLewisville\t64\nMineola\t64\nPasadena\t64\nArtesia\t64\nPayson\t64\nMountain Home\t64\nIndianola\t64\nReading\t64\nAda\t64\nSaint Cloud\t64\nSaint Clair\t63\nPeoria\t63\nSterling\t63\nBeachwood\t63\nHumboldt\t63\nEvanston\t63\nLumberton\t63\nWaynesville\t63\nMaplewood\t63\nMenominee\t63\nLockport\t63\nOnalaska\t63\nBay City\t63\nLoveland\t63\nFranklin 
Park\t63\nSpring Hill\t63\nPontiac\t63\nNorthampton\t63\nTorrington\t63\nRoanoke\t63\nMiami\t62\nDenison\t62\nVandalia\t62\nLake Forest\t62\nBoonville\t62\nSaint Louis\t62\nJohnson City\t62\nMount Airy\t62\nWalker\t62\nHomewood\t62\nColorado City\t62\nNew Brighton\t62\nBangor\t62\nGeneseo\t62\nAlbion\t62\nWarwick\t62\nBrookfield\t61\nHampton\t61\nSocorro\t61\nSaint Francis\t61\nLynwood\t61\nBeaumont\t61\nHutchinson\t61\nAlliance\t61\nNewcastle\t61\nCharlotte\t61\nLondon\t61\nSparta\t61\nBoone\t61\nTaylorsville\t61\nWaterville\t61\nCentral\t61\nHurricane\t60\nAndover\t60\nAnthony\t60\nErie\t60\nArcadia\t60\nSherwood\t60\nPleasant Grove\t60\nBaldwin\t60\nStillwater\t60\nEdgewood\t60\nCrete\t60\nMooresville\t60\nBluffton\t60\nMendota\t60\nSan Marcos\t60\nAnna\t60\nFerndale\t60\nLincoln Park\t60\nBrownsville\t60\nJamestown\t60\nMorris\t60\nNew London\t60\nWheeling\t60\nBridgeton\t59\nSanta Clara\t59\nMadisonville\t59\nMorristown\t59\nBreckenridge\t59\nBeverly Hills\t59\nFrederick\t59\nCollege Park\t59\nFulton\t59\nCentralia\t59\nSaint Albans\t59\nSunbury\t59\nGoshen\t59\nLodi\t59\nSpringdale\t59\nPhiladelphia\t59\nRed Oak\t59\nLewiston\t59\nHillsborough\t59\nWoodland\t59\nMorrisville\t59\nGretna\t59\nFredericksburg\t59\nAtlanta\t59\nGulfport\t59\nPowell\t59\nHammond\t58\nSturgis\t58\nLewistown\t58\nNevada\t58\nOregon\t58\nElsmere\t58\nHighland Heights\t58\nBloomingdale\t58\nSpring Valley\t58\nSeaside\t58\nMcFarland\t58\nWest Point\t58\nSaint Charles\t58\nPoint Pleasant\t57\nEaston\t57\nLivingston\t57\nLindenhurst\t57\nJonesboro\t57\nTexarkana\t57\nMiddleton\t57\nAddison\t57\nApple Valley\t57\nVilla Park\t57\nFairmont\t57\nKenmore\t57\nKnoxville\t57\nWaterford\t57\nClarkston\t57\nMedford\t57\nRockford\t56\nDixon\t56\nEaton\t56\nBarrington\t56\nDerby\t56\nMesquite\t56\nWayne\t56\nCameron\t56\nBurbank\t56\nEverett\t56\nOdessa\t56\nGreensburg\t56\nPlano\t56\nNorfolk\t56\nMartinsville\t55\nGraham\t55\nInverness\t55\nLakeland\t55\nCarlisle\t55\nElgin\t55\nBuffalo\t55\nThomasville\t55\nRed Bank\t55\nBrooklyn\t54\nJennings\t54\nGrand Rapids\t54\nAbbeville\t54\nWilliamston\t54\nMcMinnville\t54\nSummit\t54\nNorth Bend\t54\nNashville\t54\nButler\t54\nPittsburg\t54\nWeston\t54\nNewberry\t54\nSanta Fe\t54\nElmwood Park\t54\nRidgeland\t54\nRichmond Heights\t54\nBryan\t53\nWilson\t53\nMason\t53\nDickinson\t53\nDelano\t53\nNew Haven\t53\nMidlothian\t53\nPineville\t53\nSaratoga Springs\t53\nPark Ridge\t53\nCelina\t53\nHendersonville\t53\nCornelius\t53\nRipon\t52\nEl Dorado\t52\nBerlin\t52\nHuntington\t52\nHobart\t52\nKent\t52\nPatterson\t52\nForsyth\t52\nPeru\t52\nMonmouth\t52\nUniversity Park\t51\nEvansville\t51\nHillsdale\t51\nMillersville\t51\nJefferson City\t51\nBerea\t51\nShawano\t51\nExeter\t51\nCortland\t50\nWeatherford\t50\nMission\t50\nTarpon Springs\t50\nNorthport\t50\nWaterloo\t50\nKeene\t50\nVienna\t50\nBartlett\t50\nProspect Park\t50\nMcKees Rocks\t49\nEagle\t49\nAbilene\t49\nChesapeake Beach\t49\nBourbonnais\t49\nNolanville\t49\nMeridian\t49\nDes Moines\t49\nMarietta\t49\nSalisbury\t48\nSanta Barbara\t48\nCumberland\t48\nShelton\t48\nBelen\t48\nSeal Beach\t48\nChandler\t47\nNiceville\t47\nKingsville\t47\nOrono\t47\nAlton\t47\nUrbana\t47\nHellertown\t46\nCotati\t46\nOneonta\t46\nArchbald\t46\nOcean City\t46\nLake Worth\t46\nAlamo Heights\t46\nWheat Ridge\t46\nMorro Bay\t46\nCouncil Bluffs\t46\nGlenolden\t46\nSouth Plainfield\t46\nGreer\t46\nCoshocton\t46\nRosemount\t46\nJunction City\t46\nGreeley\t45\nLongview\t45\nWinthrop Harbor\t45\nPekin\t45\nHudson Falls\t45\nBisbee\t45\nArkansas 
City\t45\nFlowood\t45\nPalm Springs\t45\nJersey City\t45\nLufkin\t44\nEl Monte\t44\nSioux Center\t44\nNorthville\t44\nFort Mitchell\t44\nWillard\t44\nBeeville\t44\nFraser\t44\nNewburgh\t44\nSutherlin\t44\nMerced\t44\nPosen\t44\nMuleshoe\t44\nSalamanca\t44\nPeekskill\t44\nCherry Hills Village\t44\nApache Junction\t44\nKenosha\t44\nLauderdale-by-the-Sea\t44\nMalibu\t43\nMcHenry\t43\nHampstead\t43\nWestlake\t43\nChino Hills\t43\nUpper Arlington\t43\nHartland\t43\nWatchung\t43\nOlathe\t43\nWestmont\t43\nTuscumbia\t43\nTallulah\t43\nWesternport\t43\nToppenish\t43\nRoselle Park\t43\nWilliams\t43\nEast Rochester\t43\nBirdsboro\t43\nBenton Harbor\t43\nKingsland\t43\nWest Lafayette\t43\nValdosta\t43\nSouth Pasadena\t43\nGreensboro\t43\nBlytheville\t43\nLittleton\t43\nBroadway\t42\nCenter Line\t42\nWest Saint Paul\t42\nIndian Trail\t42\nFarmville\t42\nRolling Meadows\t42\nPurcell\t42\nPlacerville\t42\nRaytown\t42\nHilliard\t42\nNorthbrook\t42\nCahokia\t42\nMint Hill\t42\nConneaut\t42\nNorth Platte\t42\nSahuarita\t42\nLa Habra Heights\t42\nRoanoke Rapids\t42\nBig Spring\t42\nCoronado\t42\nWake Forest\t42\nKey Biscayne\t42\nWylie\t42\nLa Habra\t42\nMonrovia\t42\nFall River\t42\nGolden\t42\nJeannette\t42\nLa Salle\t41\nMount Sterling\t41\nSouthlake\t41\nRancho Mirage\t41\nStow\t41\nAshville\t41\nUpper Saddle River\t41\nJohnston\t41\nEl Reno\t41\nOak Brook\t41\nMilwaukee\t41\nSaint Petersburg\t41\nCallaway\t41\nHurst\t41\nDouglasville\t41\nGlen Rock\t41\nLovejoy\t41\nCushing\t41\nIvins\t41\nLincolnwood\t41\nHoward\t41\nGilmer\t41\nBorger\t41\nDemarest\t41\nWestwego\t41\nHazel Crest\t41\nEmmett\t41\nFairbanks\t41\nWentzville\t41\nSonora\t41\nWalterboro\t41\nSault Sainte Marie\t41\nFlorissant\t41\nCairo\t41\nRancho Cordova\t41\nLeadville\t41\nMoss Point\t41\nBuena Park\t41\nFort Pierce\t41\nTorrance\t41\nBonita Springs\t41\nEast Hills\t41\nRalston\t41\nParsons\t41\nValatie\t41\nRye Brook\t41\nRhinelander\t41\nSheboygan Falls\t41\nNappanee\t40\nEcorse\t40\nHope\t40\nArchdale\t40\nVidalia\t40\nBound Brook\t40\nOak Island\t40\nAdel\t40\nTuskegee\t40\nCedarhurst\t40\nSouth El Monte\t40\nHesperia\t40\nPearl\t40\nHope Mills\t40\nSapulpa\t40\nRound Rock\t40\nHavre de Grace\t40\nEagle Point\t40\nSausalito\t40\nDesert Hot Springs\t40\nKeystone Heights\t40\nAbingdon\t40\nNorth Miami\t40\nPark Forest\t40\nCastle Pines\t40\nDuquesne\t40\nBernardsville\t40\nAlpena\t40\nMills River\t40\nNorth Lauderdale\t40\nPhilipsburg\t40\nBedford Heights\t40\nSecaucus\t40\nRadcliff\t40\nSharonville\t40\nCoon Rapids\t40\nSaco\t40\nKaser\t40\nPalmview\t40\nBellefonte\t40\nVinton\t40\nAzle\t40\nWest Haverstraw\t40\nBethlehem\t40\nMoberly\t40\nNanticoke\t40\nAtherton\t40\nLincolnshire\t40\nEastvale\t40\nNewport News\t40\nHuntington Woods\t40\nLamar\t40\nMokena\t40\nCrawfordsville\t40\nMidwest City\t40\nSaint Pete Beach\t40\nRutland\t40\nWest Plains\t40\nPinellas Park\t40\nHaines City\t40\nMaple Valley\t40\nChurch Hill\t40\nMequon\t40\nRockdale\t39\nIngleside\t39\nCalhoun\t39\nLindenwold\t39\nParma\t39\nMelbourne\t39\nKennewick\t39\nHoneoye Falls\t39\nRock Island\t39\nBridgeville\t39\nMechanicsburg\t39\nShow Low\t39\nHickory Hills\t39\nGowanda\t39\nBerne\t39\nKankakee\t39\nAmbridge\t39\nSuisun\t39\nBrockport\t39\nSioux Falls\t39\nMatthews\t39\nIssaquah\t39\nAnchorage\t39\nPort Orange\t39\nBrooklyn Center\t39\nPlantation\t39\nLemoore\t39\nKronenwetter\t39\nDunwoody\t39\nProvo\t39\nBellefontaine\t39\nUhrichsville\t39\nCity of Milford (balance)\t39\nEast Alton\t39\nGrosse Pointe\t39\nMachesney Park\t39\nNormandy 
Park\t39\nClifton\t39\nZeeland\t39\nEldridge\t39\nAlabaster\t39\nCoolidge\t39\nPhenix City\t39\nPahokee\t39\nLaurinburg\t39\nHughson\t39\nMenomonee Falls\t39\nAntigo\t39\nFairport\t39\nWorland\t39\nBraidwood\t39\nHayden\t39\nVan Wert\t39\nNorth Oaks\t39\nMounds View\t39\nOrland Park\t39\nLa Crosse\t39\nChula Vista\t39\nBroadview Heights\t39\nPort Townsend\t39\nBlue Ash\t39\nDeSoto\t39\nMorrilton\t39\nEasley\t39\nLake Wales\t39\nBeaufort\t39\nEustis\t39\nOradell\t39\nBerwyn\t39\nNew Lenox\t39\nLaPorte\t39\nPoplar Bluff\t39\nOrmond Beach\t39\nLakemoor\t39\nSignal Mountain\t39\nNew Baltimore\t39\nBrady\t39\nFox Point\t39\nStanton\t39\nStroudsburg\t39\nDaytona Beach\t38\nWashougal\t38\nAsheville\t38\nGrapevine\t38\nButte\t38\nChardon\t38\nHarlan\t38\nFort Payne\t38\nInternational Falls\t38\nLompoc\t38\nShakopee\t38\nSterling Heights\t38\nCedar Park\t38\nOkmulgee\t38\nLindale\t38\nEast Saint Louis\t38\nHoboken\t38\nWallington\t38\nBriarcliff Manor\t38\nMill Valley\t38\nTrophy Club\t38\nShaker Heights\t38\nOgden\t38\nGuymon\t38\nRancho Cucamonga\t38\nWoodridge\t38\nPhoenix\t38\nLacy-Lakeview\t38\nSalt Lake City\t38\nChino Valley\t38\nNaugatuck\t38\nSaint Paul Park\t38\nYazoo City\t38\nHawaiian Gardens\t38\nDoraville\t38\nAddis\t38\nRockport\t38\nWinterville\t38\nAtchison\t38\nSan Bernardino\t38\nStokesdale\t38\nBattlefield\t38\nAkron\t38\nSouth Charleston\t38\nTega Cay\t38\nKannapolis\t38\nCampbellsville\t38\nFort Myers Beach\t38\nPort Saint Lucie\t38\nChehalis\t38\nUniversity Place\t38\nRochelle\t38\nWebb City\t38\nDunn\t38\nSouth Gate\t38\nCanal Fulton\t38\nWorth\t38\nShoreline\t38\nUnion Grove\t38\nLakeport\t38\nMontpelier\t38\nPalm Beach Gardens\t38\nNewport Beach\t38\nDumont\t38\nWynne\t38\nHemet\t38\nBogalusa\t38\nZimmerman\t38\nSchiller Park\t38\nBelpre\t38\nMinnetonka\t38\nMarina\t38\nNorth Haledon\t38\nMahtomedi\t38\nMillington\t38\nPickerington\t38\nVista\t38\nGreendale\t38\nPurcellville\t38\nHoughton\t38\nSeguin\t38\nNew Bremen\t38\nSilsbee\t38\nAlbemarle\t38\nWalnut Creek\t38\nCoeur d'Alene\t38\nPell City\t38\nCollingdale\t38\nGoodrich\t38\nHolmen\t38\nParagould\t38\nHuntersville\t38\nElko\t38\nOak Lawn\t38\nEau Claire\t38\nSwarthmore\t38\nMcKinney\t38\nGrantsville\t38\nGreenbrier\t38\nMilliken\t38\nYoungtown\t38\nWoodhaven\t38\nHales Corners\t38\nBuena Vista\t38\nLarchmont\t38\nMaitland\t37\nKutztown\t37\nAbsecon\t37\nDeerfield Beach\t37\nThief River Falls\t37\nNew Martinsville\t37\nBrainerd\t37\nForest Lake\t37\nDiamondhead\t37\nSolvay\t37\nHarvard\t37\nOpelousas\t37\nSeward\t37\nWahpeton\t37\nWaycross\t37\nRiver Grove\t37\nLilburn\t37\nWest Covina\t37\nKissimmee\t37\nElburn\t37\nGenoa\t37\nChanute\t37\nPullman\t37\nLynchburg\t37\nShreveport\t37\nChicopee\t37\nPanama City Beach\t37\nGloucester City\t37\nSnowflake\t37\nSwissvale\t37\nMoncks Corner\t37\nFountain Hills\t37\nKettering\t37\nBasalt\t37\nNatchitoches\t37\nMaple Grove\t37\nMount Healthy\t37\nNorth Wilkesboro\t37\nHackettstown\t37\nScottsdale\t37\n"},{"type":"TEXT","data":""}]},"apps":[],"jobName":"paragraph_1593153154843_215231840","id":"20191113-111055_1491832496","dateCreated":"2020-06-26T15:32:34+0900","status":"READY","errorMessage":"","progressUpdateIntervalMs":500,"$$hashKey":"object:193"},{"text":"%sql\n\nSELECT NAME, COUNT(*) AS PEOPLE_COUNT FROM ltdb_tutorial WHERE EVENT_TIME >= '202001010005' AND EVENT_TIME <= '202001010035' AND COUNTRY = 'Russia' GROUP BY NAME ORDER BY PEOPLE_COUNT DESC LIMIT 
1000","user":"anonymous","dateUpdated":"2020-06-26T15:32:34+0900","config":{"tableHide":false,"editorSetting":{"language":"sql","editOnDblClick":false,"completionKey":"TAB","completionSupport":true},"colWidth":12,"editorMode":"ace/mode/sql","fontSize":9,"editorHide":false,"results":{"0":{"graph":{"mode":"multiBarChart","height":300,"optionOpen":false,"setting":{"table":{"tableGridState":{},"tableColumnTypeState":{"names":{"NAME":"string","PEOPLE_COUNT":"string"},"updated":false},"tableOptionSpecHash":"[{\"name\":\"useFilter\",\"valueType\":\"boolean\",\"defaultValue\":false,\"widget\":\"checkbox\",\"description\":\"Enable filter for columns\"},{\"name\":\"showPagination\",\"valueType\":\"boolean\",\"defaultValue\":false,\"widget\":\"checkbox\",\"description\":\"Enable pagination for better navigation\"},{\"name\":\"showAggregationFooter\",\"valueType\":\"boolean\",\"defaultValue\":false,\"widget\":\"checkbox\",\"description\":\"Enable a footer for displaying aggregated values\"}]","tableOptionValue":{"useFilter":false,"showPagination":false,"showAggregationFooter":false},"updated":false,"initialized":false},"multiBarChart":{"rotate":{"degree":"-45"},"xLabelStatus":"default"}},"commonSetting":{},"keys":[{"name":"NAME","index":0,"aggr":"sum"}],"groups":[],"values":[{"name":"PEOPLE_COUNT","index":1,"aggr":"sum"}]},"helium":{}}},"enabled":true},"settings":{"params":{},"forms":{}},"results":{"code":"SUCCESS","msg":[{"type":"TABLE","data":"NAME\tPEOPLE_COUNT\nJessie\t133\nLee\t130\nJames\t123\nFrancis\t122\nRobert\t121\nDana\t120\nGuadalupe\t120\nJoe\t120\nDavid\t119\nJohn\t119\nJean\t117\nGeorge\t115\nBonnie\t114\nSidney\t113\nFrances\t112\nCharlie\t112\nMargaret\t111\nJimmie\t111\nLeslie\t111\nLonnie\t111\nWilliam\t110\nCharles\t110\nBillie\t110\nJoseph\t109\nWillie\t108\nJohnnie\t108\nTommie\t106\nElizabeth\t106\nNoel\t105\nLynn\t104\nRichard\t104\nCarol\t104\nFrank\t103\nIra\t103\nMarion\t102\nCecil\t101\nMary\t100\nDale\t100\nConnie\t100\nJamie\t99\nHenry\t98\nBennie\t98\nMichael\t97\nTracy\t97\nShelby\t96\nNancy\t96\nSandy\t95\nThomas\t95\nKelly\t95\nRuby\t95\nHelen\t94\nShirley\t94\nChristian\t93\nDonnie\t93\nGene\t92\nEddie\t92\nClare\t92\nCora\t91\nSammie\t91\nJesse\t90\nRose\t90\nJerry\t89\nKyle\t89\nCarmen\t89\nHazel\t89\nGail\t89\nBobbie\t88\nTerry\t88\nBeverly\t88\nWalter\t88\nClaire\t88\nVivian\t88\nClinton\t87\nIvy\t87\nSylvia\t87\nClara\t87\nLeo\t87\nFreddie\t86\nBilly\t86\nJewel\t85\nPearl\t85\nJackie\t85\nOllie\t85\nRay\t84\nLouis\t84\nMadeline\t84\nRuth\t83\nSolomon\t83\nSarah\t83\nMaxwell\t82\nLois\t82\nFrankie\t82\nMarshall\t82\nDaniel\t82\nSusan\t82\nAnna\t82\nCaroline\t82\nEsther\t82\nAlbert\t81\nEva\t81\nAlma\t81\nRoy\t81\nLinda\t81\nHoward\t81\nChristopher\t81\nOra\t81\nAnnie\t81\nFranklin\t81\nBarbara\t80\nCecilia\t80\nBetty\t80\nAnnette\t80\nLaverne\t79\nHarrison\t79\nHarley\t79\nCleo\t79\nClifford\t79\nDennis\t79\nMatthew\t79\nJoy\t79\nLuke\t79\nGuy\t79\nTaylor\t79\nAugustine\t79\nLester\t79\nAmelia\t78\nJack\t78\nArthur\t78\nEdith\t78\nAlexander\t78\nMarie\t78\nReed\t77\nAurora\t77\nCourtney\t77\nCecelia\t77\nMerle\t77\nAlonzo\t77\nEunice\t77\nPatsy\t77\nLillian\t77\nRoland\t77\nEarl\t77\nArtie\t77\nRene\t77\nMitchell\t77\nPablo\t77\nAlva\t77\nNaomi\t77\nPerry\t77\nDorothy\t76\nAvery\t76\nKenneth\t76\nHector\t76\nPhillip\t76\nElmer\t76\nFelix\t76\nCornelius\t76\nJason\t76\nDonna\t76\nKathryn\t75\nAudrey\t75\nAdrian\t75\nPatricia\t75\nLouie\t75\nGrace\t75\nMartha\t75\nVictoria\t75\nBobby\t75\nAlice\t75\nPedro\t75\nJulia\t74\nLarry\t74\nCarrie\t74\nElla\t74\nCarter\t74\
nPat\t74\nMorgan\t74\nFred\t74\nGabriel\t74\nJoshua\t74\nRoss\t74\nBen\t74\nRobin\t74\nClaude\t74\nVincent\t74\nSamuel\t74\nAubrey\t74\nLucia\t74\nEmily\t74\nEdna\t74\nRegina\t73\nLindsey\t73\nAnthony\t73\nMelvin\t73\nJeanette\t73\nAmanda\t73\nAmy\t73\nGlenn\t73\nJasper\t73\nMercedes\t73\nMolly\t73\nTimothy\t73\nRobbie\t73\nChris\t73\nJulius\t73\nJonathan\t73\nBennett\t72\nSantos\t72\nMickey\t72\nKeith\t72\nNina\t72\nBenjamin\t72\nJulie\t72\nElise\t72\nCassie\t72\nDean\t72\nFrederick\t72\nAdrienne\t72\nJune\t72\nAndrew\t72\nHugo\t72\nMason\t72\nMartin\t71\nLawrence\t71\nLogan\t71\nRosa\t71\nWarren\t71\nBryant\t71\nDonald\t71\nNora\t71\nVirginia\t71\nTony\t71\nEugene\t71\nClyde\t71\nCarlton\t71\nNeil\t71\nVirgil\t71\nFlorence\t71\nAndy\t71\nLaura\t70\nAllie\t70\nMaxie\t70\nClarence\t70\nJosephine\t70\nSara\t70\nBernard\t70\nAlphonso\t70\nGarrett\t70\nDeborah\t70\nMarcella\t70\nColeman\t70\nRodney\t70\nAlexis\t70\nNathaniel\t70\nFelipe\t70\nCarl\t70\nJoel\t70\nJunior\t70\nCarey\t70\nGertrude\t70\nAugust\t70\nFaith\t70\nEdward\t70\nTed\t69\nMattie\t69\nJohanna\t69\nRachel\t69\nMicheal\t69\nIsiah\t69\nStuart\t69\nHarold\t69\nDouglas\t69\nNadine\t69\nCeleste\t69\nJeremiah\t69\nBessie\t69\nBernice\t69\nLou\t69\nJacob\t69\nMarvin\t69\nCurtis\t69\nSydney\t68\nPeter\t68\nKatharine\t68\nEmma\t68\nRiley\t68\nJudith\t68\nJane\t68\nAlvin\t68\nHarry\t68\nGloria\t68\nWanda\t68\nTom\t68\nLily\t68\nFredrick\t68\nLea\t68\nVance\t68\nVernon\t68\nAaron\t68\nPaul\t68\nDee\t68\nKerry\t68\nPauline\t68\nKatherine\t68\nJimmy\t68\nDuncan\t68\nVictor\t67\nSandra\t67\nFloyd\t67\nJerome\t67\nJessica\t67\nMildred\t67\nElbert\t67\nIrene\t67\nJess\t67\nJoan\t67\nFay\t67\nAndrea\t67\nAnderson\t67\nThaddeus\t67\nLouise\t67\nJewell\t67\nCarlos\t67\nGordon\t67\nLevi\t67\nLeah\t67\nAngelo\t67\nAbraham\t67\nSaul\t67\nMelissa\t67\nWillard\t66\nJohnny\t66\nGracie\t66\nAngel\t66\nCatherine\t66\nCamille\t66\nJim\t66\nMonica\t66\nKathleen\t66\nMeredith\t66\nEdgar\t66\nLionel\t66\nMollie\t66\nHope\t66\nLucille\t66\nAnton\t66\nDallas\t66\nElisa\t66\nParker\t66\nLena\t66\nErvin\t66\nEleanor\t66\nEliza\t66\nMargie\t66\nJustin\t66\nDiana\t66\nGayle\t66\nJulian\t66\nAlan\t66\nClaudie\t66\nEmerson\t66\nEzra\t65\nSophia\t65\nSeth\t65\nAlex\t65\nAllen\t65\nClifton\t65\nTheodore\t65\nPatrick\t65\nWinston\t65\nSteven\t65\nClaudia\t65\nRoscoe\t65\nJanet\t65\nMax\t65\nLibby\t65\nJacqueline\t65\nGeorgia\t65\nJohnie\t65\nDoris\t65\nCorrine\t65\nAnne\t65\nStephanie\t65\nHugh\t64\nLeila\t64\nTheo\t64\nJoyce\t64\nAmos\t64\nBeatrice\t64\nLoren\t64\nChristine\t64\nOlivia\t64\nMathew\t64\nBenny\t64\nOrlando\t64\nAron\t64\nRussell\t64\nManuel\t64\nMorris\t64\nFelicia\t64\nNathan\t64\nAntonio\t64\nHannah\t64\nBruno\t64\nAntoinette\t64\nDexter\t64\nErnest\t64\nKaren\t64\nJoanna\t64\nBlair\t64\nDaisy\t64\nGlen\t64\nJonas\t64\nJesus\t64\nEric\t64\nCrystal\t64\nRonald\t63\nMatt\t63\nDanny\t63\nMarcus\t63\nRoberta\t63\nBradley\t63\nMaryann\t63\nElias\t63\nHallie\t63\nAngelina\t63\nSalvatore\t63\nPhilip\t63\nJuan\t63\nSonia\t63\nGraham\t63\nAbel\t63\nKendall\t63\nIssac\t63\nAlberta\t63\nEmmett\t63\nArmando\t63\nReginald\t63\nLamar\t63\nMarcia\t63\nNoah\t63\nCarson\t63\nRaul\t62\nAlden\t62\nOtis\t62\nAnn\t62\nEdmund\t62\nGrady\t62\nLeonard\t62\nOmar\t62\nMaria\t62\nLola\t62\nKathrine\t62\nRafael\t62\nRaymond\t62\nRachael\t62\nIsaac\t62\nMike\t62\nMaurice\t62\nPaula\t62\nCyrus\t62\nByron\t62\nGrant\t62\nRalph\t62\nOliver\t61\nEthel\t61\nGerald\t61\nElsie\t61\nDomingo\t61\nGilbert\t61\nMiguel\t61\nLucy\t61\nAlton\t61\nHelena\t61\nCalvin\t61\nLincoln\t61\nOdell\t61
\nMadeleine\t61\nStephen\t61\nSanford\t61\nAileen\t61\nNicholas\t61\nSteve\t61\nAngie\t61\nLyle\t61\nBruce\t61\nMinnie\t61\nLila\t61\nCoy\t61\nLela\t61\nJudy\t61\nJose\t61\nTravis\t61\nAngela\t61\nShannon\t61\nStacey\t61\nRudy\t61\nAna\t61\nJustine\t61\nDominick\t60\nDon\t60\nNellie\t60\nAshley\t60\nSadie\t60\nWilson\t60\nEileen\t60\nBridget\t60\nLauren\t60\nEdmond\t60\nCallie\t60\nEvangeline\t60\nGale\t60\nGerry\t60\nCliff\t60\nErwin\t60\nBertha\t60\nLoretta\t60\nJosie\t60\nYvonne\t60\nCary\t60\nDavis\t60\nOlga\t60\nLewis\t60\nChester\t60\nKarl\t60\nTina\t60\nIrvin\t60\nHorace\t60\nWilford\t60\nCorinne\t60\nSam\t60\nDixie\t60\nMiles\t60\nBenito\t59\nIvan\t59\nMyles\t59\nDena\t59\nIsrael\t59\nPete\t59\nJody\t59\nHilda\t59\nScott\t59\nLacy\t59\nWallace\t59\nAntonia\t59\nJake\t59\nWendell\t59\nUlysses\t59\nValeria\t59\nHerman\t59\nCarolyn\t59\nEmanuel\t59\nElisabeth\t59\nMargarita\t59\nWesley\t59\nEverett\t59\nOwen\t59\nBrooks\t59\nEvan\t59\nAustin\t59\nEli\t59\nHal\t59\nCharlotte\t59\nTroy\t59\nAlfred\t59\nJay\t59\nRex\t59\nGenevieve\t59\nElvin\t59\nWhitney\t59\nLorraine\t58\nDwight\t58\nHans\t58\nWilbur\t58\nConstance\t58\nSilas\t58\nArnold\t58\nFrancisco\t58\nLeland\t58\nValerie\t58\nCynthia\t58\nJenny\t58\nJefferson\t58\nForrest\t58\nJordan\t58\nCarroll\t58\nAndres\t58\nAdam\t58\nMiriam\t58\nAva\t58\nRosalie\t58\nAlfredo\t58\nMalinda\t58\nEvelyn\t58\nGladys\t58\nDamon\t58\nDarrell\t58\nVan\t58\nSherman\t58\nEllis\t58\nBill\t58\nSimon\t57\nBrady\t57\nJaime\t57\nMyron\t57\nThad\t57\nViolet\t57\nBernadette\t57\nLuis\t57\nMaggie\t57\nChristina\t57\nReuben\t57\nElisha\t57\nDelilah\t57\nConrad\t57\nDan\t57\nIrma\t57\nRebecca\t57\nLillie\t57\nClark\t57\nMalcolm\t57\nGreta\t57\nTomas\t57\nTheresa\t57\nRita\t57\nCameron\t57\nEdwin\t57\nLora\t57\nStella\t57\nPreston\t57\nSebastian\t57\nReid\t57\nCelia\t57\nStacy\t57\nRena\t57\nArchie\t57\nRosanna\t56\nLeon\t56\nRocco\t56\nBlanche\t56\nMae\t56\nMarietta\t56\nClint\t56\nElsa\t56\nStanley\t56\nArlie\t56\nGwendolyn\t56\nVaughn\t56\nRae\t56\nSally\t56\nWill\t56\nBryan\t56\nLorena\t56\nForest\t56\nWilma\t56\nDonovan\t56\nEstelle\t56\nHerbert\t56\nDalton\t56\nHubert\t56\nNick\t56\nIgnacio\t56\nElaine\t56\nWade\t56\nSusie\t56\nRamon\t56\nPercy\t56\nHunter\t56\nClayton\t56\nIrving\t56\nKirk\t56\nIda\t55\nBurton\t55\nSpencer\t55\nAlberto\t55\nLuther\t55\nEduardo\t55\nIsabel\t55\nMoses\t55\nReynaldo\t55\nEugenia\t55\nBert\t55\nNolan\t55\nAllison\t55\nLesley\t55\nKay\t55\nDewayne\t55\nNorma\t55\nSterling\t55\nGary\t55\nCraig\t55\nBlaine\t55\nDelia\t55\nRamona\t55\nDominic\t55\nDenis\t55\nMagdalena\t55\nAlthea\t55\nMark\t55\nNicolas\t55\nLon\t55\nKent\t55\nMitchel\t55\nRoosevelt\t55\nEllen\t55\nAlicia\t55\nCleveland\t55\nJeanne\t55\nGarnett\t55\nSantiago\t54\nElvira\t54\nSammy\t54\nDona\t54\nGoldie\t54\nMario\t54\nFletcher\t54\nAllan\t54\nBob\t54\nKate\t54\nAnita\t54\nMadison\t54\nBasil\t54\nDora\t54\nNickolas\t54\nSuzanne\t54\nElliott\t54\nArturo\t54\nKatie\t54\nJan\t54\nWayne\t54\nGarland\t54\nJoaquin\t54\nMyrna\t54\nRufus\t54\nOtto\t54\nLorenzo\t54\nJennifer\t54\nCasey\t54\nLydia\t54\nDarwin\t53\nDewey\t53\nElvis\t53\nClement\t53\nTerrence\t53\nMurray\t53\nCorey\t53\nAda\t53\nRaphael\t53\nGeneva\t53\nColumbus\t53\nLowell\t53\nLemuel\t53\nJackson\t53\nLavern\t53\nClay\t53\nVera\t53\nGeorgina\t53\nWilfred\t53\nFreeman\t53\nNeal\t53\nRosetta\t53\nBetsy\t53\nFlora\t53\nLeroy\t53\nOscar\t53\nJuliet\t53\nRoger\t52\nIris\t52\nLenora\t52\nRowena\t52\nEssie\t52\nEmilie\t52\nFidel\t52\nDuane\t52\nHomer\t52\nOpal\t52\nTeresa\t52\nElva\t52\nAdolfo\t52\nAlfreda\t52\nKell
ey\t52\nNed\t52\nOrville\t52\nLindsay\t52\nSallie\t52\nSue\t52\nLenore\t52\nJoesph\t52\nLupe\t52\nWeldon\t52\nJanie\t52\nDaphne\t52\nHouston\t51\nMarlene\t51\nJeff\t51\nPamela\t51\nJeannette\t51\nLucius\t51\nElena\t51\nVeronica\t51\nFoster\t51\nElliot\t51\nChauncey\t51\nRoderick\t51\nRoman\t51\nRandy\t51\nSalvador\t51\nAugustus\t51\nNatalie\t51\nEmilia\t51\nMarian\t51\nCarlo\t51\nAnnetta\t51\nAlfonso\t51\nZoe\t51\nPierre\t51\nHattie\t51\nLucretia\t51\nBarry\t51\nJanice\t51\nLula\t50\nGretchen\t50\nKatheryn\t50\nDave\t50\nOcie\t50\nBarton\t50\nJuana\t50\nDolores\t50\nBradford\t50\nHarlan\t50\nMilton\t50\nAlyce\t50\nLoyd\t50\nMerrill\t50\nGregorio\t50\nDolly\t50\nSheldon\t50\nTommy\t50\nJennie\t50\nArlene\t50\nWilmer\t50\nPhil\t50\nEarnest\t50\nMinerva\t50\nQuincy\t50\nDiane\t50\nEmil\t50\nJuliana\t50\nNeva\t49\nEve\t49\nEldon\t49\nNettie\t49\nLilly\t49\nLisa\t49\nVernie\t49\nDorothea\t49\nSusanna\t49\nToby\t49\nGalen\t49\nRuben\t49\nDelbert\t49\nFern\t49\nNelson\t49\nHerschel\t49\nArmand\t49\nNorman\t49\nKevin\t49\nRhoda\t49\nMillard\t49\nIvory\t49\nShawn\t49\nJanette\t49\nRudolph\t49\nDawn\t49\nRoyce\t49\nEnrique\t49\nPriscilla\t49\nAlec\t49\nTherese\t49\nMarlin\t49\nMandy\t49\nLucinda\t49\nAngelita\t48\nAddie\t48\nPhoebe\t48\nIsaiah\t48\nLaurel\t48\nElijah\t48\nKirby\t48\nCorine\t48\nLamont\t48\nRosemary\t48\nOdie\t48\nPhyllis\t48\nJoanne\t48\nAdolph\t48\nMyra\t48\nAl\t48\nNorris\t48\nCarmela\t48\nPeggy\t48\nBuford\t48\nMarjorie\t48\nReba\t48\nLloyd\t48\nMillie\t48\nErnesto\t48\nMona\t48\nFreda\t48\nMaureen\t48\nWinnie\t48\nBernie\t48\nDick\t48\nLauretta\t48\nFernando\t48\nJuliette\t48\nAlphonse\t48\nSylvester\t47\nLeona\t47\nBuck\t47\nCory\t47\nHarriet\t47\nJon\t47\nErin\t47\nDenise\t47\nLorene\t47\nHarvey\t47\nParis\t47\nRussel\t47\nRoyal\t47\nAbram\t47\nKim\t47\nClarissa\t47\nLinwood\t47\nGilberto\t47\nEzekiel\t47\nRenee\t47\nToni\t47\nElwood\t47\nDottie\t47\nLawson\t47\nLottie\t47\nPrince\t47\nVada\t47\nEmory\t47\nJulianne\t47\nBoyd\t47\nWyatt\t47\nNewton\t47\nEmilio\t46\nRandolph\t46\nKermit\t46\nLennie\t46\nWendy\t46\nKaty\t46\nBeryl\t46\nYvette\t46\nPierce\t46\nDessie\t46\nGeraldine\t46\nCathryn\t46\nCruz\t46\nSerena\t46\nBailey\t46\nBeth\t46\nLafayette\t46\nRosie\t46\nLyman\t46\nPearlie\t46\nVern\t46\nDollie\t46\nLorna\t46\nVicente\t45\nEmery\t45\nLouisa\t45\nDenver\t45\nMarilyn\t45\nNapoleon\t45\nAsa\t45\nCamilla\t45\nMyrtle\t45\nIona\t45\nTeddy\t45\nShelton\t45\nColleen\t45\nDevin\t45\nAbbie\t45\nDrew\t45\nFreida\t45\nJanelle\t45\nRogelio\t45\nFrederic\t45\nWilhelmina\t45\nJeanie\t45\nCharley\t45\nBurt\t45\nAdeline\t45\nHiram\t45\nFlorine\t45\nThelma\t45\n"},{"type":"TEXT","data":""}]},"apps":[],"jobName":"paragraph_1593153154844_-1477153754","id":"20191118-071740_398829111","dateCreated":"2020-06-26T15:32:34+0900","status":"READY","errorMessage":"","progressUpdateIntervalMs":500,"$$hashKey":"object:194"},{"text":"%sql\n\nSELECT DISTINCT CITY, COUNT(*) AS CNT, LATITUDE AS LAT, LONGITUDE AS LON FROM ltdb_tutorial GROUP BY CITY, LAT, LON ORDER BY CNT DESC LIMIT 
100\n","user":"anonymous","dateUpdated":"2020-06-26T15:55:30+0900","config":{"editorSetting":{"language":"sql","editOnDblClick":false,"completionKey":"TAB","completionSupport":true},"colWidth":12,"editorMode":"ace/mode/sql","fontSize":9,"results":{"0":{"graph":{"mode":"zeppelin-leaflet","height":675.141,"optionOpen":true,"setting":{"table":{"tableGridState":{},"tableColumnTypeState":{"names":{"CITY":"string","CNT":"string","LAT":"string","LON":"string"},"updated":false},"tableOptionSpecHash":"[{\"name\":\"useFilter\",\"valueType\":\"boolean\",\"defaultValue\":false,\"widget\":\"checkbox\",\"description\":\"Enable filter for columns\"},{\"name\":\"showPagination\",\"valueType\":\"boolean\",\"defaultValue\":false,\"widget\":\"checkbox\",\"description\":\"Enable pagination for better navigation\"},{\"name\":\"showAggregationFooter\",\"valueType\":\"boolean\",\"defaultValue\":false,\"widget\":\"checkbox\",\"description\":\"Enable a footer for displaying aggregated values\"}]","tableOptionValue":{"useFilter":false,"showPagination":false,"showAggregationFooter":false},"updated":false,"initialized":false},"multiBarChart":{"rotate":{"degree":"-45"},"xLabelStatus":"default"},"zeppelin-leaflet":{"latitude":{"name":"LAT","index":2,"aggr":"sum"},"popup":{"name":"CITY","index":0,"aggr":"sum"},"tooltip":{"name":"CITY","index":0,"aggr":"sum"},"longitude":{"name":"LON","index":3,"aggr":"sum"}}},"keys":[{"name":"count(1)","index":0,"aggr":"sum"}],"groups":[],"values":[],"commonSetting":{}},"helium":{}},"1":{"graph":{"mode":"table","height":116,"optionOpen":false}}},"enabled":true},"settings":{"params":{},"forms":{}},"results":{"code":"SUCCESS","msg":[{"type":"TABLE","data":"CITY\tCNT\tLAT\tLON\nGreenville\t2593\t33.385\t-91.0514\nKirklareli\t2580\t41.743\t27.226\nYork\t2573\t34.9968\t-81.2348\nCamocim\t2572\t-2.9\t-40.85\nNarrogin\t2569\t-32.9329\t117.1666\nSwords\t2566\t53.4597\t-6.2181\nGhardaia\t2563\t32.49\t3.67\nRochester\t2558\t43.168\t-77.6162\nSakiai\t2553\t54.9534\t23.0478\nInhambane\t2552\t-23.858\t35.3398\nSaint Petersburg\t2550\t27.793\t-82.6652\nKayes\t2549\t-4.18\t13.28\nAl Hasakah\t2549\t36.4833\t40.75\nSiglan\t2547\t59.0337\t152.4166\nKaysville\t2547\t41.029\t-111.9456\nSahuarita\t2547\t31.9328\t-110.9654\nGreen Bay\t2545\t44.515\t-87.9896\nBondo\t2545\t3.81\t23.67\nZelezniki\t2544\t46.2333\t14.1667\nSalyan\t2542\t28.35\t82.1833\nFort-Liberte\t2542\t19.6656\t-71.8448\nNew Rochelle\t2538\t40.9305\t-73.7836\nBriceni\t2538\t48.3629\t27.0779\nStokesdale\t2536\t36.2316\t-79.9834\nKrong Kep\t2536\t10.4829\t104.3167\nBinjai\t2535\t3.6204\t98.5001\nOtsego\t2535\t45.266\t-93.6199\nKalkara\t2534\t35.8892\t14.5328\nMontana\t2534\t43.414\t23.237\nCastro\t2532\t-24.79\t-50.01\nMazsalaca\t2531\t57.8619\t25.0547\nBuffalo Grove\t2531\t42.1673\t-87.9616\nSalluit\t2531\t62.1826\t-75.6595\nSultan\t2530\t47.8702\t-121.8041\nHameenlinna\t2529\t60.997\t24.472\nPasso Fundo\t2529\t-28.25\t-52.42\nKorogwe\t2529\t-5.0896\t38.54\nMagdalena\t2528\t30.6166\t-111.05\nGlobe\t2528\t33.3869\t-110.7514\nKundiawa\t2528\t-6.023\t144.96\nVaranasi\t2527\t25.33\t83.0\nPuerto Montt\t2527\t-41.47\t-72.93\nZheleznogorsk\t2527\t52.3548\t35.4044\nPort 
Orange\t2527\t29.1084\t-81.0136\nNazareth\t2527\t32.704\t35.2955\nJiangmen\t2525\t22.5804\t113.08\nCleveland\t2525\t33.744\t-90.7285\nQoqon\t2525\t40.5404\t70.94\nDerby\t2525\t52.9333\t-1.5\nMaxixe\t2524\t-23.866\t35.3886\nGoulimine\t2524\t28.98\t-10.07\nIfakara\t2524\t-8.1296\t36.68\nDhanbad\t2523\t23.8004\t86.42\nZanzibar\t2523\t-6.16\t39.2\nZurich\t2523\t47.38\t8.55\nHuron\t2522\t36.204\t-120.0961\nIrkutsk\t2522\t52.32\t104.245\nNakhon Nayok\t2522\t14.2\t101.216\nRolesville\t2520\t35.9251\t-78.4664\nKutahya\t2520\t39.42\t29.93\nSagastyr\t2520\t73.3779\t126.5924\nMaiquetia\t2520\t10.6004\t-66.97\nShilka\t2520\t51.8706\t116.0306\nRustavi\t2520\t41.5704\t45.05\nNaxxar\t2519\t35.9136\t14.4436\nAyr\t2519\t55.4504\t-4.6167\nFianarantsoa\t2519\t-21.4333\t47.0833\nAnqing\t2519\t30.5\t117.05\nMesquite\t2518\t32.7622\t-96.5889\nSeagoville\t2518\t32.653\t-96.5455\nDemir Hisar\t2518\t41.221\t21.203\nLibertyville\t2518\t42.287\t-87.967\nEast Hills\t2518\t40.7958\t-73.6292\nTexas City\t2518\t29.4128\t-94.9658\nUhrichsville\t2518\t40.4005\t-81.3516\nDogbo\t2518\t6.8167\t1.7833\nOmsk\t2517\t54.99\t73.4\nNeiafu\t2517\t-18.6496\t-173.9833\nQyzylorda\t2517\t44.8\t65.465\nShaowu\t2517\t27.3004\t117.5\nFremont\t2516\t37.5265\t-121.9852\nPrinceton\t2516\t37.3688\t-81.0961\nJupiter\t2516\t26.9199\t-80.1127\nTarawa\t2515\t1.3382\t173.0176\nTangshan\t2515\t39.6243\t118.1944\nSokode\t2515\t8.9905\t1.15\nShively\t2514\t38.197\t-85.8136\nBama\t2514\t11.5204\t13.69\nGarden Grove\t2513\t33.7787\t-117.9601\nCangamba\t2513\t-13.6996\t19.86\nErseke\t2513\t40.3333\t20.6833\nDumbarton\t2513\t55.95\t-4.5667\nPerpignan\t2513\t42.7\t2.9\nEcorse\t2513\t42.2489\t-83.1399\nKinnelon\t2512\t40.9847\t-74.3862\nLemont\t2512\t41.6698\t-87.983\nOrangeburg\t2512\t33.4928\t-80.8671\nLuan\t2512\t31.7503\t116.48\nBratsk\t2512\t56.157\t101.615\nSault Sainte Marie\t2511\t46.4817\t-84.3723\n"},{"type":"TEXT","data":""}]},"apps":[],"jobName":"paragraph_1593153154845_-94083907","id":"20191113-090111_1840793847","dateCreated":"2020-06-26T15:32:34+0900","dateStarted":"2020-06-26T15:55:30+0900","dateFinished":"2020-06-26T15:55:31+0900","status":"FINISHED","progressUpdateIntervalMs":500,"$$hashKey":"object:195"},{"text":"%md\n\n위 leaflet plugin 설치는 [이 곳](https://github.com/volumeint/helium-volume-leaflet)을 참고해주세요.","user":"anonymous","dateUpdated":"2020-06-26T16:14:37+0900","config":{"colWidth":12,"fontSize":9,"enabled":true,"results":{},"editorSetting":{"language":"markdown","editOnDblClick":true,"completionKey":"TAB","completionSupport":false},"editorMode":"ace/mode/markdown","editorHide":true,"tableHide":false},"settings":{"params":{},"forms":{}},"results":{"code":"SUCCESS","msg":[{"type":"HTML","data":"
<p>For installation of the leaflet plugin used above, please refer to <a href="https://github.com/volumeint/helium-volume-leaflet">this page</a>.</p>
"}]},"apps":[],"jobName":"paragraph_1593154769203_-205048469","id":"20200626-155929_531544520","dateCreated":"2020-06-26T15:59:29+0900","dateStarted":"2020-06-26T16:14:37+0900","dateFinished":"2020-06-26T16:14:37+0900","status":"FINISHED","progressUpdateIntervalMs":500,"$$hashKey":"object:196"},{"text":"%md\n\n### 5. Aggr. Pushdown\n\n* Aggregation Pushdown 기능을 사용하여 일부 연산을 redis에서 처리할 수 있습니다.\n* 2020년 6월 현재 Aggregation Pushdown은 MIN(), MAX(), COUNT(*), AVG(), SUM(), DISTINCT를 지원하고 있습니다.\n\n![](https://docs.lightningdb.io/images/tutorial-aggr-pushdown.png)\n(데이터의 양과 노드의 수에 따라 차이가 날 수 있습니다.)","user":"anonymous","dateUpdated":"2020-06-26T16:29:05+0900","config":{"tableHide":false,"editorSetting":{"language":"markdown","editOnDblClick":true,"completionKey":"TAB","completionSupport":false},"colWidth":12,"editorMode":"ace/mode/markdown","fontSize":9,"editorHide":true,"results":{},"enabled":true},"settings":{"params":{},"forms":{}},"results":{"code":"SUCCESS","msg":[{"type":"HTML","data":"
<h3>5. Aggr. Pushdown</h3>
<ul>
<li>With the Aggregation Pushdown feature, some aggregation operations can be processed inside Redis.</li>
<li>As of June 2020, Aggregation Pushdown supports MIN(), MAX(), COUNT(*), AVG(), SUM(), and DISTINCT.</li>
</ul>
<p><img src="https://docs.lightningdb.io/images/tutorial-aggr-pushdown.png" /></p>
<p>(Results may differ depending on the amount of data and the number of nodes.)</p>
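The supported functions above map directly onto the two Spark settings used throughout this section. The sketch below, written as a Zeppelin %spark paragraph, assumes the r2Df DataFrame defined in the earlier paragraphs; the spark.r2.* settings and the latitude/longitude columns come from the paragraphs that follow, while the city column in the countDistinct call is an assumed name used only for illustration.

```scala
// Zeppelin %spark paragraph (sketch): enable Aggregation Pushdown and exercise the
// aggregate functions listed above on the r2Df DataFrame from the earlier paragraphs.
// "city" is an assumed column name, used only to illustrate DISTINCT support.
import org.apache.spark.sql.functions.{avg, count, countDistinct, lit, max, min, sum}

spark.sqlContext.setConf("spark.r2.aggregation.pushdown", "true")
spark.sqlContext.setConf("spark.r2.binary.aggscan", "true")

val summary = r2Df.agg(
  count(lit(1)).as("row_count"),          // COUNT(*)
  countDistinct("city").as("city_count"), // DISTINCT (assumed column)
  min("latitude").as("min_lat"),
  max("latitude").as("max_lat"),
  avg("longitude").as("avg_lon"),
  sum("longitude").as("sum_lon")
).first
```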
"}]},"apps":[],"jobName":"paragraph_1593153154846_1401736339","id":"20200626-095252_1928232283","dateCreated":"2020-06-26T15:32:34+0900","status":"FINISHED","progressUpdateIntervalMs":500,"$$hashKey":"object:197","dateFinished":"2020-06-26T16:29:05+0900","dateStarted":"2020-06-26T16:29:05+0900"},{"text":"%spark\n\n// Aggregation Pushdown이 꺼진 상태\nspark.sqlContext.setConf(\"spark.r2.aggregation.pushdown\", \"false\")\nspark.sqlContext.setConf(\"spark.r2.binary.aggscan\", \"false\")\n\nval minLatLonVal = r2Df.agg(\n min(\"latitude\").as(\"latitude\"),\n min(\"longitude\").as(\"longitude\")\n ).first\n\nval maxLatLonVal = r2Df.agg(\n max(\"latitude\").as(\"latitude\"),\n max(\"longitude\").as(\"longitude\")\n ).first","user":"anonymous","dateUpdated":"2020-06-26T15:32:34+0900","config":{"editorSetting":{"language":"scala","editOnDblClick":false,"completionKey":"TAB","completionSupport":true},"colWidth":12,"editorMode":"ace/mode/scala","fontSize":9,"results":{},"enabled":true},"settings":{"params":{},"forms":{}},"results":{"code":"SUCCESS","msg":[{"type":"TEXT","data":"minLatLonVal: org.apache.spark.sql.Row = [-54.9333,-179.59]\nmaxLatLonVal: org.apache.spark.sql.Row = [82.4833,179.3833]\n"}]},"apps":[],"jobName":"paragraph_1593153154846_1509121235","id":"20200626-150749_1837830139","dateCreated":"2020-06-26T15:32:34+0900","status":"READY","errorMessage":"","progressUpdateIntervalMs":500,"$$hashKey":"object:198"},{"text":"%md\n\n![](https://docs.lightningdb.io/images/tutorial-minmax_PrunedFilteredScan.png)\n\n* Aggregation Pushdown이 꺼진 상태에서는 PrunedFilteredScan 명령어를 사용하여 데이터를 가져온 후 처리합니다.\n","user":"anonymous","dateUpdated":"2020-06-26T16:00:19+0900","config":{"editorSetting":{"language":"scala","editOnDblClick":false,"completionKey":"TAB","completionSupport":true},"colWidth":12,"editorMode":"ace/mode/scala","fontSize":9,"results":{},"enabled":true,"editorHide":false,"tableHide":false},"settings":{"params":{},"forms":{}},"results":{"code":"SUCCESS","msg":[{"type":"HTML","data":"
<p><img src="https://docs.lightningdb.io/images/tutorial-minmax_PrunedFilteredScan.png" /></p>
<ul>
<li>When Aggregation Pushdown is turned off, the PrunedFilteredScan operation is used: the data is fetched first and then processed in Spark.</li>
</ul>
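One rough way to cross-check which scan path a particular run took is to print the query plans for the aggregation. This is a sketch rather than part of the original tutorial: it assumes the same r2Df DataFrame as the surrounding paragraphs, and the exact operator names printed by explain() depend on the r2 data source implementation, so the screenshot above remains the authoritative illustration.

```scala
// Sketch: with Aggregation Pushdown disabled, the aggregation runs in Spark after a
// full scan of the Redis data. explain(true) prints the logical and physical plans,
// which helps confirm which scan strategy was used; operator names may vary by version.
import org.apache.spark.sql.functions.{max, min}

spark.sqlContext.setConf("spark.r2.aggregation.pushdown", "false")
spark.sqlContext.setConf("spark.r2.binary.aggscan", "false")

val minMaxDf = r2Df.agg(
  min("latitude").as("min_lat"),
  max("longitude").as("max_lon")
)

minMaxDf.explain(true) // parsed, analyzed, optimized and physical plans
```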
"}]},"apps":[],"jobName":"paragraph_1593153154847_-939942431","id":"20200626-150929_129213327","dateCreated":"2020-06-26T15:32:34+0900","dateStarted":"2020-06-26T16:00:19+0900","dateFinished":"2020-06-26T16:00:19+0900","status":"FINISHED","progressUpdateIntervalMs":500,"$$hashKey":"object:199"},{"text":"%spark\n\n// Aggregation Pushdown이 켜진 상태\nspark.sqlContext.setConf(\"spark.r2.aggregation.pushdown\", \"true\")\nspark.sqlContext.setConf(\"spark.r2.binary.aggscan\", \"true\")\n\nval minLatLonVal = r2Df.agg(\n min(\"latitude\").as(\"latitude\"),\n min(\"longitude\").as(\"longitude\")\n ).first\n\nval maxLatLonVal = r2Df.agg(\n max(\"latitude\").as(\"latitude\"),\n max(\"longitude\").as(\"longitude\")\n ).first","user":"anonymous","dateUpdated":"2020-06-26T15:32:34+0900","config":{"editorSetting":{"language":"scala","editOnDblClick":false,"completionKey":"TAB","completionSupport":true},"colWidth":12,"editorMode":"ace/mode/scala","fontSize":9,"results":{},"enabled":true},"settings":{"params":{},"forms":{}},"results":{"code":"SUCCESS","msg":[{"type":"TEXT","data":"minLatLonVal: org.apache.spark.sql.Row = [-54.9333,-179.59]\nmaxLatLonVal: org.apache.spark.sql.Row = [82.4833,179.3833]\n"}]},"apps":[],"jobName":"paragraph_1593153154847_1335041034","id":"20200626-091512_1557491774","dateCreated":"2020-06-26T15:32:34+0900","status":"READY","errorMessage":"","progressUpdateIntervalMs":500,"$$hashKey":"object:200"},{"text":"%md\n\n![](https://docs.lightningdb.io/images/tutorial-minmax_AggregateMultiBinary.png)\n\n* Aggregation Pushdown이 켜진 상태에서는 AggregateMultiBinary 명령어를 사용하여 데이터를 처리한 뒤 가져옵니다.\n","user":"anonymous","dateUpdated":"2020-06-26T16:00:21+0900","config":{"editorSetting":{"language":"markdown","editOnDblClick":true,"completionKey":"TAB","completionSupport":false},"colWidth":12,"editorMode":"ace/mode/markdown","fontSize":9,"results":{},"enabled":true,"editorHide":true,"tableHide":false},"settings":{"params":{},"forms":{}},"results":{"code":"SUCCESS","msg":[{"type":"HTML","data":"
<p><img src="https://docs.lightningdb.io/images/tutorial-minmax_AggregateMultiBinary.png" /></p>
<ul>
<li>When Aggregation Pushdown is turned on, the AggregateMultiBinary operation is used: the data is processed first and then the results are fetched.</li>
</ul>
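The two modes can also be compared side by side with a rough wall-clock measurement. The following is a minimal sketch assuming the same spark session and r2Df DataFrame as the surrounding paragraphs; System.nanoTime timing is coarse, and, as noted above, the gap depends on the amount of data and the number of nodes.

```scala
// Sketch: run the same min/max aggregation with Aggregation Pushdown off and on,
// and compare the elapsed wall-clock time. Timing here is only a rough indication.
import org.apache.spark.sql.functions.{max, min}

def timeMinMax(pushdown: Boolean): (Double, org.apache.spark.sql.Row) = {
  val flag = pushdown.toString
  spark.sqlContext.setConf("spark.r2.aggregation.pushdown", flag)
  spark.sqlContext.setConf("spark.r2.binary.aggscan", flag)

  val start = System.nanoTime
  val row = r2Df.agg(
    min("latitude").as("min_latitude"),
    min("longitude").as("min_longitude"),
    max("latitude").as("max_latitude"),
    max("longitude").as("max_longitude")
  ).first
  val seconds = (System.nanoTime - start) / 1e9
  (seconds, row)
}

val (offSeconds, _) = timeMinMax(pushdown = false)
val (onSeconds, _)  = timeMinMax(pushdown = true)
println(f"pushdown off: $offSeconds%.2f s, pushdown on: $onSeconds%.2f s")
```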
"}]},"apps":[],"jobName":"paragraph_1593153154848_-420016758","id":"20200626-152000_903538781","dateCreated":"2020-06-26T15:32:34+0900","dateStarted":"2020-06-26T16:00:21+0900","dateFinished":"2020-06-26T16:00:21+0900","status":"FINISHED","progressUpdateIntervalMs":500,"$$hashKey":"object:201"},{"text":"%md\n","user":"anonymous","dateUpdated":"2020-06-26T16:10:18+0900","config":{"colWidth":12,"fontSize":9,"enabled":true,"results":{},"editorSetting":{"language":"scala","editOnDblClick":false,"completionKey":"TAB","completionSupport":true},"editorMode":"ace/mode/scala"},"settings":{"params":{},"forms":{}},"apps":[],"jobName":"paragraph_1593155418887_-805429034","id":"20200626-161018_974837908","dateCreated":"2020-06-26T16:10:18+0900","status":"READY","progressUpdateIntervalMs":500,"$$hashKey":"object:203"}],"name":"LightningDB_tutorial_ko","id":"2FDTU6Q15","noteParams":{},"noteForms":{},"angularObjects":{"md:shared_process":[],"sh:shared_process":[],"spark:shared_process":[]},"config":{"isZeppelinNotebookCronEnable":false,"looknfeel":"default","personalizedMode":"false"},"info":{}} \ No newline at end of file diff --git a/search/lunr.js b/search/lunr.js new file mode 100644 index 0000000..aca0a16 --- /dev/null +++ b/search/lunr.js @@ -0,0 +1,3475 @@ +/** + * lunr - http://lunrjs.com - A bit like Solr, but much smaller and not as bright - 2.3.9 + * Copyright (C) 2020 Oliver Nightingale + * @license MIT + */ + +;(function(){ + +/** + * A convenience function for configuring and constructing + * a new lunr Index. + * + * A lunr.Builder instance is created and the pipeline setup + * with a trimmer, stop word filter and stemmer. + * + * This builder object is yielded to the configuration function + * that is passed as a parameter, allowing the list of fields + * and other builder parameters to be customised. + * + * All documents _must_ be added within the passed config function. + * + * @example + * var idx = lunr(function () { + * this.field('title') + * this.field('body') + * this.ref('id') + * + * documents.forEach(function (doc) { + * this.add(doc) + * }, this) + * }) + * + * @see {@link lunr.Builder} + * @see {@link lunr.Pipeline} + * @see {@link lunr.trimmer} + * @see {@link lunr.stopWordFilter} + * @see {@link lunr.stemmer} + * @namespace {function} lunr + */ +var lunr = function (config) { + var builder = new lunr.Builder + + builder.pipeline.add( + lunr.trimmer, + lunr.stopWordFilter, + lunr.stemmer + ) + + builder.searchPipeline.add( + lunr.stemmer + ) + + config.call(builder, builder) + return builder.build() +} + +lunr.version = "2.3.9" +/*! + * lunr.utils + * Copyright (C) 2020 Oliver Nightingale + */ + +/** + * A namespace containing utils for the rest of the lunr library + * @namespace lunr.utils + */ +lunr.utils = {} + +/** + * Print a warning message to the console. + * + * @param {String} message The message to be printed. + * @memberOf lunr.utils + * @function + */ +lunr.utils.warn = (function (global) { + /* eslint-disable no-console */ + return function (message) { + if (global.console && console.warn) { + console.warn(message) + } + } + /* eslint-enable no-console */ +})(this) + +/** + * Convert an object to a string. + * + * In the case of `null` and `undefined` the function returns + * the empty string, in all other cases the result of calling + * `toString` on the passed object is returned. + * + * @param {Any} obj The object to convert to a string. + * @return {String} string representation of the passed object. 
+ * @memberOf lunr.utils + */ +lunr.utils.asString = function (obj) { + if (obj === void 0 || obj === null) { + return "" + } else { + return obj.toString() + } +} + +/** + * Clones an object. + * + * Will create a copy of an existing object such that any mutations + * on the copy cannot affect the original. + * + * Only shallow objects are supported, passing a nested object to this + * function will cause a TypeError. + * + * Objects with primitives, and arrays of primitives are supported. + * + * @param {Object} obj The object to clone. + * @return {Object} a clone of the passed object. + * @throws {TypeError} when a nested object is passed. + * @memberOf Utils + */ +lunr.utils.clone = function (obj) { + if (obj === null || obj === undefined) { + return obj + } + + var clone = Object.create(null), + keys = Object.keys(obj) + + for (var i = 0; i < keys.length; i++) { + var key = keys[i], + val = obj[key] + + if (Array.isArray(val)) { + clone[key] = val.slice() + continue + } + + if (typeof val === 'string' || + typeof val === 'number' || + typeof val === 'boolean') { + clone[key] = val + continue + } + + throw new TypeError("clone is not deep and does not support nested objects") + } + + return clone +} +lunr.FieldRef = function (docRef, fieldName, stringValue) { + this.docRef = docRef + this.fieldName = fieldName + this._stringValue = stringValue +} + +lunr.FieldRef.joiner = "/" + +lunr.FieldRef.fromString = function (s) { + var n = s.indexOf(lunr.FieldRef.joiner) + + if (n === -1) { + throw "malformed field ref string" + } + + var fieldRef = s.slice(0, n), + docRef = s.slice(n + 1) + + return new lunr.FieldRef (docRef, fieldRef, s) +} + +lunr.FieldRef.prototype.toString = function () { + if (this._stringValue == undefined) { + this._stringValue = this.fieldName + lunr.FieldRef.joiner + this.docRef + } + + return this._stringValue +} +/*! + * lunr.Set + * Copyright (C) 2020 Oliver Nightingale + */ + +/** + * A lunr set. + * + * @constructor + */ +lunr.Set = function (elements) { + this.elements = Object.create(null) + + if (elements) { + this.length = elements.length + + for (var i = 0; i < this.length; i++) { + this.elements[elements[i]] = true + } + } else { + this.length = 0 + } +} + +/** + * A complete set that contains all elements. + * + * @static + * @readonly + * @type {lunr.Set} + */ +lunr.Set.complete = { + intersect: function (other) { + return other + }, + + union: function () { + return this + }, + + contains: function () { + return true + } +} + +/** + * An empty set that contains no elements. + * + * @static + * @readonly + * @type {lunr.Set} + */ +lunr.Set.empty = { + intersect: function () { + return this + }, + + union: function (other) { + return other + }, + + contains: function () { + return false + } +} + +/** + * Returns true if this set contains the specified object. + * + * @param {object} object - Object whose presence in this set is to be tested. + * @returns {boolean} - True if this set contains the specified object. + */ +lunr.Set.prototype.contains = function (object) { + return !!this.elements[object] +} + +/** + * Returns a new set containing only the elements that are present in both + * this set and the specified set. + * + * @param {lunr.Set} other - set to intersect with this set. + * @returns {lunr.Set} a new set that is the intersection of this and the specified set. 
+ */ + +lunr.Set.prototype.intersect = function (other) { + var a, b, elements, intersection = [] + + if (other === lunr.Set.complete) { + return this + } + + if (other === lunr.Set.empty) { + return other + } + + if (this.length < other.length) { + a = this + b = other + } else { + a = other + b = this + } + + elements = Object.keys(a.elements) + + for (var i = 0; i < elements.length; i++) { + var element = elements[i] + if (element in b.elements) { + intersection.push(element) + } + } + + return new lunr.Set (intersection) +} + +/** + * Returns a new set combining the elements of this and the specified set. + * + * @param {lunr.Set} other - set to union with this set. + * @return {lunr.Set} a new set that is the union of this and the specified set. + */ + +lunr.Set.prototype.union = function (other) { + if (other === lunr.Set.complete) { + return lunr.Set.complete + } + + if (other === lunr.Set.empty) { + return this + } + + return new lunr.Set(Object.keys(this.elements).concat(Object.keys(other.elements))) +} +/** + * A function to calculate the inverse document frequency for + * a posting. This is shared between the builder and the index + * + * @private + * @param {object} posting - The posting for a given term + * @param {number} documentCount - The total number of documents. + */ +lunr.idf = function (posting, documentCount) { + var documentsWithTerm = 0 + + for (var fieldName in posting) { + if (fieldName == '_index') continue // Ignore the term index, its not a field + documentsWithTerm += Object.keys(posting[fieldName]).length + } + + var x = (documentCount - documentsWithTerm + 0.5) / (documentsWithTerm + 0.5) + + return Math.log(1 + Math.abs(x)) +} + +/** + * A token wraps a string representation of a token + * as it is passed through the text processing pipeline. + * + * @constructor + * @param {string} [str=''] - The string token being wrapped. + * @param {object} [metadata={}] - Metadata associated with this token. + */ +lunr.Token = function (str, metadata) { + this.str = str || "" + this.metadata = metadata || {} +} + +/** + * Returns the token string that is being wrapped by this object. + * + * @returns {string} + */ +lunr.Token.prototype.toString = function () { + return this.str +} + +/** + * A token update function is used when updating or optionally + * when cloning a token. + * + * @callback lunr.Token~updateFunction + * @param {string} str - The string representation of the token. + * @param {Object} metadata - All metadata associated with this token. + */ + +/** + * Applies the given function to the wrapped string token. + * + * @example + * token.update(function (str, metadata) { + * return str.toUpperCase() + * }) + * + * @param {lunr.Token~updateFunction} fn - A function to apply to the token string. + * @returns {lunr.Token} + */ +lunr.Token.prototype.update = function (fn) { + this.str = fn(this.str, this.metadata) + return this +} + +/** + * Creates a clone of this token. Optionally a function can be + * applied to the cloned token. + * + * @param {lunr.Token~updateFunction} [fn] - An optional function to apply to the cloned token. + * @returns {lunr.Token} + */ +lunr.Token.prototype.clone = function (fn) { + fn = fn || function (s) { return s } + return new lunr.Token (fn(this.str, this.metadata), this.metadata) +} +/*! + * lunr.tokenizer + * Copyright (C) 2020 Oliver Nightingale + */ + +/** + * A function for splitting a string into tokens ready to be inserted into + * the search index. 
Uses `lunr.tokenizer.separator` to split strings, change + * the value of this property to change how strings are split into tokens. + * + * This tokenizer will convert its parameter to a string by calling `toString` and + * then will split this string on the character in `lunr.tokenizer.separator`. + * Arrays will have their elements converted to strings and wrapped in a lunr.Token. + * + * Optional metadata can be passed to the tokenizer, this metadata will be cloned and + * added as metadata to every token that is created from the object to be tokenized. + * + * @static + * @param {?(string|object|object[])} obj - The object to convert into tokens + * @param {?object} metadata - Optional metadata to associate with every token + * @returns {lunr.Token[]} + * @see {@link lunr.Pipeline} + */ +lunr.tokenizer = function (obj, metadata) { + if (obj == null || obj == undefined) { + return [] + } + + if (Array.isArray(obj)) { + return obj.map(function (t) { + return new lunr.Token( + lunr.utils.asString(t).toLowerCase(), + lunr.utils.clone(metadata) + ) + }) + } + + var str = obj.toString().toLowerCase(), + len = str.length, + tokens = [] + + for (var sliceEnd = 0, sliceStart = 0; sliceEnd <= len; sliceEnd++) { + var char = str.charAt(sliceEnd), + sliceLength = sliceEnd - sliceStart + + if ((char.match(lunr.tokenizer.separator) || sliceEnd == len)) { + + if (sliceLength > 0) { + var tokenMetadata = lunr.utils.clone(metadata) || {} + tokenMetadata["position"] = [sliceStart, sliceLength] + tokenMetadata["index"] = tokens.length + + tokens.push( + new lunr.Token ( + str.slice(sliceStart, sliceEnd), + tokenMetadata + ) + ) + } + + sliceStart = sliceEnd + 1 + } + + } + + return tokens +} + +/** + * The separator used to split a string into tokens. Override this property to change the behaviour of + * `lunr.tokenizer` behaviour when tokenizing strings. By default this splits on whitespace and hyphens. + * + * @static + * @see lunr.tokenizer + */ +lunr.tokenizer.separator = /[\s\-]+/ +/*! + * lunr.Pipeline + * Copyright (C) 2020 Oliver Nightingale + */ + +/** + * lunr.Pipelines maintain an ordered list of functions to be applied to all + * tokens in documents entering the search index and queries being ran against + * the index. + * + * An instance of lunr.Index created with the lunr shortcut will contain a + * pipeline with a stop word filter and an English language stemmer. Extra + * functions can be added before or after either of these functions or these + * default functions can be removed. + * + * When run the pipeline will call each function in turn, passing a token, the + * index of that token in the original list of all tokens and finally a list of + * all the original tokens. + * + * The output of functions in the pipeline will be passed to the next function + * in the pipeline. To exclude a token from entering the index the function + * should return undefined, the rest of the pipeline will not be called with + * this token. + * + * For serialisation of pipelines to work, all functions used in an instance of + * a pipeline should be registered with lunr.Pipeline. Registered functions can + * then be loaded. If trying to load a serialised pipeline that uses functions + * that are not registered an error will be thrown. + * + * If not planning on serialising the pipeline then registering pipeline functions + * is not necessary. 
+ * + * @constructor + */ +lunr.Pipeline = function () { + this._stack = [] +} + +lunr.Pipeline.registeredFunctions = Object.create(null) + +/** + * A pipeline function maps lunr.Token to lunr.Token. A lunr.Token contains the token + * string as well as all known metadata. A pipeline function can mutate the token string + * or mutate (or add) metadata for a given token. + * + * A pipeline function can indicate that the passed token should be discarded by returning + * null, undefined or an empty string. This token will not be passed to any downstream pipeline + * functions and will not be added to the index. + * + * Multiple tokens can be returned by returning an array of tokens. Each token will be passed + * to any downstream pipeline functions and all will returned tokens will be added to the index. + * + * Any number of pipeline functions may be chained together using a lunr.Pipeline. + * + * @interface lunr.PipelineFunction + * @param {lunr.Token} token - A token from the document being processed. + * @param {number} i - The index of this token in the complete list of tokens for this document/field. + * @param {lunr.Token[]} tokens - All tokens for this document/field. + * @returns {(?lunr.Token|lunr.Token[])} + */ + +/** + * Register a function with the pipeline. + * + * Functions that are used in the pipeline should be registered if the pipeline + * needs to be serialised, or a serialised pipeline needs to be loaded. + * + * Registering a function does not add it to a pipeline, functions must still be + * added to instances of the pipeline for them to be used when running a pipeline. + * + * @param {lunr.PipelineFunction} fn - The function to check for. + * @param {String} label - The label to register this function with + */ +lunr.Pipeline.registerFunction = function (fn, label) { + if (label in this.registeredFunctions) { + lunr.utils.warn('Overwriting existing registered function: ' + label) + } + + fn.label = label + lunr.Pipeline.registeredFunctions[fn.label] = fn +} + +/** + * Warns if the function is not registered as a Pipeline function. + * + * @param {lunr.PipelineFunction} fn - The function to check for. + * @private + */ +lunr.Pipeline.warnIfFunctionNotRegistered = function (fn) { + var isRegistered = fn.label && (fn.label in this.registeredFunctions) + + if (!isRegistered) { + lunr.utils.warn('Function is not registered with pipeline. This may cause problems when serialising the index.\n', fn) + } +} + +/** + * Loads a previously serialised pipeline. + * + * All functions to be loaded must already be registered with lunr.Pipeline. + * If any function from the serialised data has not been registered then an + * error will be thrown. + * + * @param {Object} serialised - The serialised pipeline to load. + * @returns {lunr.Pipeline} + */ +lunr.Pipeline.load = function (serialised) { + var pipeline = new lunr.Pipeline + + serialised.forEach(function (fnName) { + var fn = lunr.Pipeline.registeredFunctions[fnName] + + if (fn) { + pipeline.add(fn) + } else { + throw new Error('Cannot load unregistered function: ' + fnName) + } + }) + + return pipeline +} + +/** + * Adds new functions to the end of the pipeline. + * + * Logs a warning if the function has not been registered. + * + * @param {lunr.PipelineFunction[]} functions - Any number of functions to add to the pipeline. 
+ */ +lunr.Pipeline.prototype.add = function () { + var fns = Array.prototype.slice.call(arguments) + + fns.forEach(function (fn) { + lunr.Pipeline.warnIfFunctionNotRegistered(fn) + this._stack.push(fn) + }, this) +} + +/** + * Adds a single function after a function that already exists in the + * pipeline. + * + * Logs a warning if the function has not been registered. + * + * @param {lunr.PipelineFunction} existingFn - A function that already exists in the pipeline. + * @param {lunr.PipelineFunction} newFn - The new function to add to the pipeline. + */ +lunr.Pipeline.prototype.after = function (existingFn, newFn) { + lunr.Pipeline.warnIfFunctionNotRegistered(newFn) + + var pos = this._stack.indexOf(existingFn) + if (pos == -1) { + throw new Error('Cannot find existingFn') + } + + pos = pos + 1 + this._stack.splice(pos, 0, newFn) +} + +/** + * Adds a single function before a function that already exists in the + * pipeline. + * + * Logs a warning if the function has not been registered. + * + * @param {lunr.PipelineFunction} existingFn - A function that already exists in the pipeline. + * @param {lunr.PipelineFunction} newFn - The new function to add to the pipeline. + */ +lunr.Pipeline.prototype.before = function (existingFn, newFn) { + lunr.Pipeline.warnIfFunctionNotRegistered(newFn) + + var pos = this._stack.indexOf(existingFn) + if (pos == -1) { + throw new Error('Cannot find existingFn') + } + + this._stack.splice(pos, 0, newFn) +} + +/** + * Removes a function from the pipeline. + * + * @param {lunr.PipelineFunction} fn The function to remove from the pipeline. + */ +lunr.Pipeline.prototype.remove = function (fn) { + var pos = this._stack.indexOf(fn) + if (pos == -1) { + return + } + + this._stack.splice(pos, 1) +} + +/** + * Runs the current list of functions that make up the pipeline against the + * passed tokens. + * + * @param {Array} tokens The tokens to run through the pipeline. + * @returns {Array} + */ +lunr.Pipeline.prototype.run = function (tokens) { + var stackLength = this._stack.length + + for (var i = 0; i < stackLength; i++) { + var fn = this._stack[i] + var memo = [] + + for (var j = 0; j < tokens.length; j++) { + var result = fn(tokens[j], j, tokens) + + if (result === null || result === void 0 || result === '') continue + + if (Array.isArray(result)) { + for (var k = 0; k < result.length; k++) { + memo.push(result[k]) + } + } else { + memo.push(result) + } + } + + tokens = memo + } + + return tokens +} + +/** + * Convenience method for passing a string through a pipeline and getting + * strings out. This method takes care of wrapping the passed string in a + * token and mapping the resulting tokens back to strings. + * + * @param {string} str - The string to pass through the pipeline. + * @param {?object} metadata - Optional metadata to associate with the token + * passed to the pipeline. + * @returns {string[]} + */ +lunr.Pipeline.prototype.runString = function (str, metadata) { + var token = new lunr.Token (str, metadata) + + return this.run([token]).map(function (t) { + return t.toString() + }) +} + +/** + * Resets the pipeline by removing any existing processors. + * + */ +lunr.Pipeline.prototype.reset = function () { + this._stack = [] +} + +/** + * Returns a representation of the pipeline ready for serialisation. + * + * Logs a warning if the function has not been registered. 
+ * + * @returns {Array} + */ +lunr.Pipeline.prototype.toJSON = function () { + return this._stack.map(function (fn) { + lunr.Pipeline.warnIfFunctionNotRegistered(fn) + + return fn.label + }) +} +/*! + * lunr.Vector + * Copyright (C) 2020 Oliver Nightingale + */ + +/** + * A vector is used to construct the vector space of documents and queries. These + * vectors support operations to determine the similarity between two documents or + * a document and a query. + * + * Normally no parameters are required for initializing a vector, but in the case of + * loading a previously dumped vector the raw elements can be provided to the constructor. + * + * For performance reasons vectors are implemented with a flat array, where an elements + * index is immediately followed by its value. E.g. [index, value, index, value]. This + * allows the underlying array to be as sparse as possible and still offer decent + * performance when being used for vector calculations. + * + * @constructor + * @param {Number[]} [elements] - The flat list of element index and element value pairs. + */ +lunr.Vector = function (elements) { + this._magnitude = 0 + this.elements = elements || [] +} + + +/** + * Calculates the position within the vector to insert a given index. + * + * This is used internally by insert and upsert. If there are duplicate indexes then + * the position is returned as if the value for that index were to be updated, but it + * is the callers responsibility to check whether there is a duplicate at that index + * + * @param {Number} insertIdx - The index at which the element should be inserted. + * @returns {Number} + */ +lunr.Vector.prototype.positionForIndex = function (index) { + // For an empty vector the tuple can be inserted at the beginning + if (this.elements.length == 0) { + return 0 + } + + var start = 0, + end = this.elements.length / 2, + sliceLength = end - start, + pivotPoint = Math.floor(sliceLength / 2), + pivotIndex = this.elements[pivotPoint * 2] + + while (sliceLength > 1) { + if (pivotIndex < index) { + start = pivotPoint + } + + if (pivotIndex > index) { + end = pivotPoint + } + + if (pivotIndex == index) { + break + } + + sliceLength = end - start + pivotPoint = start + Math.floor(sliceLength / 2) + pivotIndex = this.elements[pivotPoint * 2] + } + + if (pivotIndex == index) { + return pivotPoint * 2 + } + + if (pivotIndex > index) { + return pivotPoint * 2 + } + + if (pivotIndex < index) { + return (pivotPoint + 1) * 2 + } +} + +/** + * Inserts an element at an index within the vector. + * + * Does not allow duplicates, will throw an error if there is already an entry + * for this index. + * + * @param {Number} insertIdx - The index at which the element should be inserted. + * @param {Number} val - The value to be inserted into the vector. + */ +lunr.Vector.prototype.insert = function (insertIdx, val) { + this.upsert(insertIdx, val, function () { + throw "duplicate index" + }) +} + +/** + * Inserts or updates an existing index within the vector. + * + * @param {Number} insertIdx - The index at which the element should be inserted. + * @param {Number} val - The value to be inserted into the vector. 
+ * @param {function} fn - A function that is called for updates, the existing value and the + * requested value are passed as arguments + */ +lunr.Vector.prototype.upsert = function (insertIdx, val, fn) { + this._magnitude = 0 + var position = this.positionForIndex(insertIdx) + + if (this.elements[position] == insertIdx) { + this.elements[position + 1] = fn(this.elements[position + 1], val) + } else { + this.elements.splice(position, 0, insertIdx, val) + } +} + +/** + * Calculates the magnitude of this vector. + * + * @returns {Number} + */ +lunr.Vector.prototype.magnitude = function () { + if (this._magnitude) return this._magnitude + + var sumOfSquares = 0, + elementsLength = this.elements.length + + for (var i = 1; i < elementsLength; i += 2) { + var val = this.elements[i] + sumOfSquares += val * val + } + + return this._magnitude = Math.sqrt(sumOfSquares) +} + +/** + * Calculates the dot product of this vector and another vector. + * + * @param {lunr.Vector} otherVector - The vector to compute the dot product with. + * @returns {Number} + */ +lunr.Vector.prototype.dot = function (otherVector) { + var dotProduct = 0, + a = this.elements, b = otherVector.elements, + aLen = a.length, bLen = b.length, + aVal = 0, bVal = 0, + i = 0, j = 0 + + while (i < aLen && j < bLen) { + aVal = a[i], bVal = b[j] + if (aVal < bVal) { + i += 2 + } else if (aVal > bVal) { + j += 2 + } else if (aVal == bVal) { + dotProduct += a[i + 1] * b[j + 1] + i += 2 + j += 2 + } + } + + return dotProduct +} + +/** + * Calculates the similarity between this vector and another vector. + * + * @param {lunr.Vector} otherVector - The other vector to calculate the + * similarity with. + * @returns {Number} + */ +lunr.Vector.prototype.similarity = function (otherVector) { + return this.dot(otherVector) / this.magnitude() || 0 +} + +/** + * Converts the vector to an array of the elements within the vector. + * + * @returns {Number[]} + */ +lunr.Vector.prototype.toArray = function () { + var output = new Array (this.elements.length / 2) + + for (var i = 1, j = 0; i < this.elements.length; i += 2, j++) { + output[j] = this.elements[i] + } + + return output +} + +/** + * A JSON serializable representation of the vector. + * + * @returns {Number[]} + */ +lunr.Vector.prototype.toJSON = function () { + return this.elements +} +/* eslint-disable */ +/*! 
+ * lunr.stemmer + * Copyright (C) 2020 Oliver Nightingale + * Includes code from - http://tartarus.org/~martin/PorterStemmer/js.txt + */ + +/** + * lunr.stemmer is an english language stemmer, this is a JavaScript + * implementation of the PorterStemmer taken from http://tartarus.org/~martin + * + * @static + * @implements {lunr.PipelineFunction} + * @param {lunr.Token} token - The string to stem + * @returns {lunr.Token} + * @see {@link lunr.Pipeline} + * @function + */ +lunr.stemmer = (function(){ + var step2list = { + "ational" : "ate", + "tional" : "tion", + "enci" : "ence", + "anci" : "ance", + "izer" : "ize", + "bli" : "ble", + "alli" : "al", + "entli" : "ent", + "eli" : "e", + "ousli" : "ous", + "ization" : "ize", + "ation" : "ate", + "ator" : "ate", + "alism" : "al", + "iveness" : "ive", + "fulness" : "ful", + "ousness" : "ous", + "aliti" : "al", + "iviti" : "ive", + "biliti" : "ble", + "logi" : "log" + }, + + step3list = { + "icate" : "ic", + "ative" : "", + "alize" : "al", + "iciti" : "ic", + "ical" : "ic", + "ful" : "", + "ness" : "" + }, + + c = "[^aeiou]", // consonant + v = "[aeiouy]", // vowel + C = c + "[^aeiouy]*", // consonant sequence + V = v + "[aeiou]*", // vowel sequence + + mgr0 = "^(" + C + ")?" + V + C, // [C]VC... is m>0 + meq1 = "^(" + C + ")?" + V + C + "(" + V + ")?$", // [C]VC[V] is m=1 + mgr1 = "^(" + C + ")?" + V + C + V + C, // [C]VCVC... is m>1 + s_v = "^(" + C + ")?" + v; // vowel in stem + + var re_mgr0 = new RegExp(mgr0); + var re_mgr1 = new RegExp(mgr1); + var re_meq1 = new RegExp(meq1); + var re_s_v = new RegExp(s_v); + + var re_1a = /^(.+?)(ss|i)es$/; + var re2_1a = /^(.+?)([^s])s$/; + var re_1b = /^(.+?)eed$/; + var re2_1b = /^(.+?)(ed|ing)$/; + var re_1b_2 = /.$/; + var re2_1b_2 = /(at|bl|iz)$/; + var re3_1b_2 = new RegExp("([^aeiouylsz])\\1$"); + var re4_1b_2 = new RegExp("^" + C + v + "[^aeiouwxy]$"); + + var re_1c = /^(.+?[^aeiou])y$/; + var re_2 = /^(.+?)(ational|tional|enci|anci|izer|bli|alli|entli|eli|ousli|ization|ation|ator|alism|iveness|fulness|ousness|aliti|iviti|biliti|logi)$/; + + var re_3 = /^(.+?)(icate|ative|alize|iciti|ical|ful|ness)$/; + + var re_4 = /^(.+?)(al|ance|ence|er|ic|able|ible|ant|ement|ment|ent|ou|ism|ate|iti|ous|ive|ize)$/; + var re2_4 = /^(.+?)(s|t)(ion)$/; + + var re_5 = /^(.+?)e$/; + var re_5_1 = /ll$/; + var re3_5 = new RegExp("^" + C + v + "[^aeiouwxy]$"); + + var porterStemmer = function porterStemmer(w) { + var stem, + suffix, + firstch, + re, + re2, + re3, + re4; + + if (w.length < 3) { return w; } + + firstch = w.substr(0,1); + if (firstch == "y") { + w = firstch.toUpperCase() + w.substr(1); + } + + // Step 1a + re = re_1a + re2 = re2_1a; + + if (re.test(w)) { w = w.replace(re,"$1$2"); } + else if (re2.test(w)) { w = w.replace(re2,"$1$2"); } + + // Step 1b + re = re_1b; + re2 = re2_1b; + if (re.test(w)) { + var fp = re.exec(w); + re = re_mgr0; + if (re.test(fp[1])) { + re = re_1b_2; + w = w.replace(re,""); + } + } else if (re2.test(w)) { + var fp = re2.exec(w); + stem = fp[1]; + re2 = re_s_v; + if (re2.test(stem)) { + w = stem; + re2 = re2_1b_2; + re3 = re3_1b_2; + re4 = re4_1b_2; + if (re2.test(w)) { w = w + "e"; } + else if (re3.test(w)) { re = re_1b_2; w = w.replace(re,""); } + else if (re4.test(w)) { w = w + "e"; } + } + } + + // Step 1c - replace suffix y or Y by i if preceded by a non-vowel which is not the first letter of the word (so cry -> cri, by -> by, say -> say) + re = re_1c; + if (re.test(w)) { + var fp = re.exec(w); + stem = fp[1]; + w = stem + "i"; + } + + // Step 2 + re = re_2; + if 
(re.test(w)) { + var fp = re.exec(w); + stem = fp[1]; + suffix = fp[2]; + re = re_mgr0; + if (re.test(stem)) { + w = stem + step2list[suffix]; + } + } + + // Step 3 + re = re_3; + if (re.test(w)) { + var fp = re.exec(w); + stem = fp[1]; + suffix = fp[2]; + re = re_mgr0; + if (re.test(stem)) { + w = stem + step3list[suffix]; + } + } + + // Step 4 + re = re_4; + re2 = re2_4; + if (re.test(w)) { + var fp = re.exec(w); + stem = fp[1]; + re = re_mgr1; + if (re.test(stem)) { + w = stem; + } + } else if (re2.test(w)) { + var fp = re2.exec(w); + stem = fp[1] + fp[2]; + re2 = re_mgr1; + if (re2.test(stem)) { + w = stem; + } + } + + // Step 5 + re = re_5; + if (re.test(w)) { + var fp = re.exec(w); + stem = fp[1]; + re = re_mgr1; + re2 = re_meq1; + re3 = re3_5; + if (re.test(stem) || (re2.test(stem) && !(re3.test(stem)))) { + w = stem; + } + } + + re = re_5_1; + re2 = re_mgr1; + if (re.test(w) && re2.test(w)) { + re = re_1b_2; + w = w.replace(re,""); + } + + // and turn initial Y back to y + + if (firstch == "y") { + w = firstch.toLowerCase() + w.substr(1); + } + + return w; + }; + + return function (token) { + return token.update(porterStemmer); + } +})(); + +lunr.Pipeline.registerFunction(lunr.stemmer, 'stemmer') +/*! + * lunr.stopWordFilter + * Copyright (C) 2020 Oliver Nightingale + */ + +/** + * lunr.generateStopWordFilter builds a stopWordFilter function from the provided + * list of stop words. + * + * The built in lunr.stopWordFilter is built using this generator and can be used + * to generate custom stopWordFilters for applications or non English languages. + * + * @function + * @param {Array} token The token to pass through the filter + * @returns {lunr.PipelineFunction} + * @see lunr.Pipeline + * @see lunr.stopWordFilter + */ +lunr.generateStopWordFilter = function (stopWords) { + var words = stopWords.reduce(function (memo, stopWord) { + memo[stopWord] = stopWord + return memo + }, {}) + + return function (token) { + if (token && words[token.toString()] !== token.toString()) return token + } +} + +/** + * lunr.stopWordFilter is an English language stop word list filter, any words + * contained in the list will not be passed through the filter. + * + * This is intended to be used in the Pipeline. If the token does not pass the + * filter then undefined will be returned. + * + * @function + * @implements {lunr.PipelineFunction} + * @params {lunr.Token} token - A token to check for being a stop word. 
+ * @returns {lunr.Token} + * @see {@link lunr.Pipeline} + */ +lunr.stopWordFilter = lunr.generateStopWordFilter([ + 'a', + 'able', + 'about', + 'across', + 'after', + 'all', + 'almost', + 'also', + 'am', + 'among', + 'an', + 'and', + 'any', + 'are', + 'as', + 'at', + 'be', + 'because', + 'been', + 'but', + 'by', + 'can', + 'cannot', + 'could', + 'dear', + 'did', + 'do', + 'does', + 'either', + 'else', + 'ever', + 'every', + 'for', + 'from', + 'get', + 'got', + 'had', + 'has', + 'have', + 'he', + 'her', + 'hers', + 'him', + 'his', + 'how', + 'however', + 'i', + 'if', + 'in', + 'into', + 'is', + 'it', + 'its', + 'just', + 'least', + 'let', + 'like', + 'likely', + 'may', + 'me', + 'might', + 'most', + 'must', + 'my', + 'neither', + 'no', + 'nor', + 'not', + 'of', + 'off', + 'often', + 'on', + 'only', + 'or', + 'other', + 'our', + 'own', + 'rather', + 'said', + 'say', + 'says', + 'she', + 'should', + 'since', + 'so', + 'some', + 'than', + 'that', + 'the', + 'their', + 'them', + 'then', + 'there', + 'these', + 'they', + 'this', + 'tis', + 'to', + 'too', + 'twas', + 'us', + 'wants', + 'was', + 'we', + 'were', + 'what', + 'when', + 'where', + 'which', + 'while', + 'who', + 'whom', + 'why', + 'will', + 'with', + 'would', + 'yet', + 'you', + 'your' +]) + +lunr.Pipeline.registerFunction(lunr.stopWordFilter, 'stopWordFilter') +/*! + * lunr.trimmer + * Copyright (C) 2020 Oliver Nightingale + */ + +/** + * lunr.trimmer is a pipeline function for trimming non word + * characters from the beginning and end of tokens before they + * enter the index. + * + * This implementation may not work correctly for non latin + * characters and should either be removed or adapted for use + * with languages with non-latin characters. + * + * @static + * @implements {lunr.PipelineFunction} + * @param {lunr.Token} token The token to pass through the filter + * @returns {lunr.Token} + * @see lunr.Pipeline + */ +lunr.trimmer = function (token) { + return token.update(function (s) { + return s.replace(/^\W+/, '').replace(/\W+$/, '') + }) +} + +lunr.Pipeline.registerFunction(lunr.trimmer, 'trimmer') +/*! + * lunr.TokenSet + * Copyright (C) 2020 Oliver Nightingale + */ + +/** + * A token set is used to store the unique list of all tokens + * within an index. Token sets are also used to represent an + * incoming query to the index, this query token set and index + * token set are then intersected to find which tokens to look + * up in the inverted index. + * + * A token set can hold multiple tokens, as in the case of the + * index token set, or it can hold a single token as in the + * case of a simple query token set. + * + * Additionally token sets are used to perform wildcard matching. + * Leading, contained and trailing wildcards are supported, and + * from this edit distance matching can also be provided. + * + * Token sets are implemented as a minimal finite state automata, + * where both common prefixes and suffixes are shared between tokens. + * This helps to reduce the space used for storing the token set. + * + * @constructor + */ +lunr.TokenSet = function () { + this.final = false + this.edges = {} + this.id = lunr.TokenSet._nextId + lunr.TokenSet._nextId += 1 +} + +/** + * Keeps track of the next, auto increment, identifier to assign + * to a new tokenSet. + * + * TokenSets require a unique identifier to be correctly minimised. + * + * @private + */ +lunr.TokenSet._nextId = 1 + +/** + * Creates a TokenSet instance from the given sorted array of words. 
+ * + * @param {String[]} arr - A sorted array of strings to create the set from. + * @returns {lunr.TokenSet} + * @throws Will throw an error if the input array is not sorted. + */ +lunr.TokenSet.fromArray = function (arr) { + var builder = new lunr.TokenSet.Builder + + for (var i = 0, len = arr.length; i < len; i++) { + builder.insert(arr[i]) + } + + builder.finish() + return builder.root +} + +/** + * Creates a token set from a query clause. + * + * @private + * @param {Object} clause - A single clause from lunr.Query. + * @param {string} clause.term - The query clause term. + * @param {number} [clause.editDistance] - The optional edit distance for the term. + * @returns {lunr.TokenSet} + */ +lunr.TokenSet.fromClause = function (clause) { + if ('editDistance' in clause) { + return lunr.TokenSet.fromFuzzyString(clause.term, clause.editDistance) + } else { + return lunr.TokenSet.fromString(clause.term) + } +} + +/** + * Creates a token set representing a single string with a specified + * edit distance. + * + * Insertions, deletions, substitutions and transpositions are each + * treated as an edit distance of 1. + * + * Increasing the allowed edit distance will have a dramatic impact + * on the performance of both creating and intersecting these TokenSets. + * It is advised to keep the edit distance less than 3. + * + * @param {string} str - The string to create the token set from. + * @param {number} editDistance - The allowed edit distance to match. + * @returns {lunr.Vector} + */ +lunr.TokenSet.fromFuzzyString = function (str, editDistance) { + var root = new lunr.TokenSet + + var stack = [{ + node: root, + editsRemaining: editDistance, + str: str + }] + + while (stack.length) { + var frame = stack.pop() + + // no edit + if (frame.str.length > 0) { + var char = frame.str.charAt(0), + noEditNode + + if (char in frame.node.edges) { + noEditNode = frame.node.edges[char] + } else { + noEditNode = new lunr.TokenSet + frame.node.edges[char] = noEditNode + } + + if (frame.str.length == 1) { + noEditNode.final = true + } + + stack.push({ + node: noEditNode, + editsRemaining: frame.editsRemaining, + str: frame.str.slice(1) + }) + } + + if (frame.editsRemaining == 0) { + continue + } + + // insertion + if ("*" in frame.node.edges) { + var insertionNode = frame.node.edges["*"] + } else { + var insertionNode = new lunr.TokenSet + frame.node.edges["*"] = insertionNode + } + + if (frame.str.length == 0) { + insertionNode.final = true + } + + stack.push({ + node: insertionNode, + editsRemaining: frame.editsRemaining - 1, + str: frame.str + }) + + // deletion + // can only do a deletion if we have enough edits remaining + // and if there are characters left to delete in the string + if (frame.str.length > 1) { + stack.push({ + node: frame.node, + editsRemaining: frame.editsRemaining - 1, + str: frame.str.slice(1) + }) + } + + // deletion + // just removing the last character from the str + if (frame.str.length == 1) { + frame.node.final = true + } + + // substitution + // can only do a substitution if we have enough edits remaining + // and if there are characters left to substitute + if (frame.str.length >= 1) { + if ("*" in frame.node.edges) { + var substitutionNode = frame.node.edges["*"] + } else { + var substitutionNode = new lunr.TokenSet + frame.node.edges["*"] = substitutionNode + } + + if (frame.str.length == 1) { + substitutionNode.final = true + } + + stack.push({ + node: substitutionNode, + editsRemaining: frame.editsRemaining - 1, + str: frame.str.slice(1) + }) + } + + // transposition + 
// can only do a transposition if there are edits remaining + // and there are enough characters to transpose + if (frame.str.length > 1) { + var charA = frame.str.charAt(0), + charB = frame.str.charAt(1), + transposeNode + + if (charB in frame.node.edges) { + transposeNode = frame.node.edges[charB] + } else { + transposeNode = new lunr.TokenSet + frame.node.edges[charB] = transposeNode + } + + if (frame.str.length == 1) { + transposeNode.final = true + } + + stack.push({ + node: transposeNode, + editsRemaining: frame.editsRemaining - 1, + str: charA + frame.str.slice(2) + }) + } + } + + return root +} + +/** + * Creates a TokenSet from a string. + * + * The string may contain one or more wildcard characters (*) + * that will allow wildcard matching when intersecting with + * another TokenSet. + * + * @param {string} str - The string to create a TokenSet from. + * @returns {lunr.TokenSet} + */ +lunr.TokenSet.fromString = function (str) { + var node = new lunr.TokenSet, + root = node + + /* + * Iterates through all characters within the passed string + * appending a node for each character. + * + * When a wildcard character is found then a self + * referencing edge is introduced to continually match + * any number of any characters. + */ + for (var i = 0, len = str.length; i < len; i++) { + var char = str[i], + final = (i == len - 1) + + if (char == "*") { + node.edges[char] = node + node.final = final + + } else { + var next = new lunr.TokenSet + next.final = final + + node.edges[char] = next + node = next + } + } + + return root +} + +/** + * Converts this TokenSet into an array of strings + * contained within the TokenSet. + * + * This is not intended to be used on a TokenSet that + * contains wildcards, in these cases the results are + * undefined and are likely to cause an infinite loop. + * + * @returns {string[]} + */ +lunr.TokenSet.prototype.toArray = function () { + var words = [] + + var stack = [{ + prefix: "", + node: this + }] + + while (stack.length) { + var frame = stack.pop(), + edges = Object.keys(frame.node.edges), + len = edges.length + + if (frame.node.final) { + /* In Safari, at this point the prefix is sometimes corrupted, see: + * https://github.com/olivernn/lunr.js/issues/279 Calling any + * String.prototype method forces Safari to "cast" this string to what + * it's supposed to be, fixing the bug. */ + frame.prefix.charAt(0) + words.push(frame.prefix) + } + + for (var i = 0; i < len; i++) { + var edge = edges[i] + + stack.push({ + prefix: frame.prefix.concat(edge), + node: frame.node.edges[edge] + }) + } + } + + return words +} + +/** + * Generates a string representation of a TokenSet. + * + * This is intended to allow TokenSets to be used as keys + * in objects, largely to aid the construction and minimisation + * of a TokenSet. As such it is not designed to be a human + * friendly representation of the TokenSet. + * + * @returns {string} + */ +lunr.TokenSet.prototype.toString = function () { + // NOTE: Using Object.keys here as this.edges is very likely + // to enter 'hash-mode' with many keys being added + // + // avoiding a for-in loop here as it leads to the function + // being de-optimised (at least in V8). From some simple + // benchmarks the performance is comparable, but allowing + // V8 to optimize may mean easy performance wins in the future. + + if (this._str) { + return this._str + } + + var str = this.final ? 
'1' : '0', + labels = Object.keys(this.edges).sort(), + len = labels.length + + for (var i = 0; i < len; i++) { + var label = labels[i], + node = this.edges[label] + + str = str + label + node.id + } + + return str +} + +/** + * Returns a new TokenSet that is the intersection of + * this TokenSet and the passed TokenSet. + * + * This intersection will take into account any wildcards + * contained within the TokenSet. + * + * @param {lunr.TokenSet} b - An other TokenSet to intersect with. + * @returns {lunr.TokenSet} + */ +lunr.TokenSet.prototype.intersect = function (b) { + var output = new lunr.TokenSet, + frame = undefined + + var stack = [{ + qNode: b, + output: output, + node: this + }] + + while (stack.length) { + frame = stack.pop() + + // NOTE: As with the #toString method, we are using + // Object.keys and a for loop instead of a for-in loop + // as both of these objects enter 'hash' mode, causing + // the function to be de-optimised in V8 + var qEdges = Object.keys(frame.qNode.edges), + qLen = qEdges.length, + nEdges = Object.keys(frame.node.edges), + nLen = nEdges.length + + for (var q = 0; q < qLen; q++) { + var qEdge = qEdges[q] + + for (var n = 0; n < nLen; n++) { + var nEdge = nEdges[n] + + if (nEdge == qEdge || qEdge == '*') { + var node = frame.node.edges[nEdge], + qNode = frame.qNode.edges[qEdge], + final = node.final && qNode.final, + next = undefined + + if (nEdge in frame.output.edges) { + // an edge already exists for this character + // no need to create a new node, just set the finality + // bit unless this node is already final + next = frame.output.edges[nEdge] + next.final = next.final || final + + } else { + // no edge exists yet, must create one + // set the finality bit and insert it + // into the output + next = new lunr.TokenSet + next.final = final + frame.output.edges[nEdge] = next + } + + stack.push({ + qNode: qNode, + output: next, + node: node + }) + } + } + } + } + + return output +} +lunr.TokenSet.Builder = function () { + this.previousWord = "" + this.root = new lunr.TokenSet + this.uncheckedNodes = [] + this.minimizedNodes = {} +} + +lunr.TokenSet.Builder.prototype.insert = function (word) { + var node, + commonPrefix = 0 + + if (word < this.previousWord) { + throw new Error ("Out of order word insertion") + } + + for (var i = 0; i < word.length && i < this.previousWord.length; i++) { + if (word[i] != this.previousWord[i]) break + commonPrefix++ + } + + this.minimize(commonPrefix) + + if (this.uncheckedNodes.length == 0) { + node = this.root + } else { + node = this.uncheckedNodes[this.uncheckedNodes.length - 1].child + } + + for (var i = commonPrefix; i < word.length; i++) { + var nextNode = new lunr.TokenSet, + char = word[i] + + node.edges[char] = nextNode + + this.uncheckedNodes.push({ + parent: node, + char: char, + child: nextNode + }) + + node = nextNode + } + + node.final = true + this.previousWord = word +} + +lunr.TokenSet.Builder.prototype.finish = function () { + this.minimize(0) +} + +lunr.TokenSet.Builder.prototype.minimize = function (downTo) { + for (var i = this.uncheckedNodes.length - 1; i >= downTo; i--) { + var node = this.uncheckedNodes[i], + childKey = node.child.toString() + + if (childKey in this.minimizedNodes) { + node.parent.edges[node.char] = this.minimizedNodes[childKey] + } else { + // Cache the key for this node since + // we know it can't change anymore + node.child._str = childKey + + this.minimizedNodes[childKey] = node.child + } + + this.uncheckedNodes.pop() + } +} +/*! 
+ * lunr.Index + * Copyright (C) 2020 Oliver Nightingale + */ + +/** + * An index contains the built index of all documents and provides a query interface + * to the index. + * + * Usually instances of lunr.Index will not be created using this constructor, instead + * lunr.Builder should be used to construct new indexes, or lunr.Index.load should be + * used to load previously built and serialized indexes. + * + * @constructor + * @param {Object} attrs - The attributes of the built search index. + * @param {Object} attrs.invertedIndex - An index of term/field to document reference. + * @param {Object} attrs.fieldVectors - Field vectors + * @param {lunr.TokenSet} attrs.tokenSet - An set of all corpus tokens. + * @param {string[]} attrs.fields - The names of indexed document fields. + * @param {lunr.Pipeline} attrs.pipeline - The pipeline to use for search terms. + */ +lunr.Index = function (attrs) { + this.invertedIndex = attrs.invertedIndex + this.fieldVectors = attrs.fieldVectors + this.tokenSet = attrs.tokenSet + this.fields = attrs.fields + this.pipeline = attrs.pipeline +} + +/** + * A result contains details of a document matching a search query. + * @typedef {Object} lunr.Index~Result + * @property {string} ref - The reference of the document this result represents. + * @property {number} score - A number between 0 and 1 representing how similar this document is to the query. + * @property {lunr.MatchData} matchData - Contains metadata about this match including which term(s) caused the match. + */ + +/** + * Although lunr provides the ability to create queries using lunr.Query, it also provides a simple + * query language which itself is parsed into an instance of lunr.Query. + * + * For programmatically building queries it is advised to directly use lunr.Query, the query language + * is best used for human entered text rather than program generated text. + * + * At its simplest queries can just be a single term, e.g. `hello`, multiple terms are also supported + * and will be combined with OR, e.g `hello world` will match documents that contain either 'hello' + * or 'world', though those that contain both will rank higher in the results. + * + * Wildcards can be included in terms to match one or more unspecified characters, these wildcards can + * be inserted anywhere within the term, and more than one wildcard can exist in a single term. Adding + * wildcards will increase the number of documents that will be found but can also have a negative + * impact on query performance, especially with wildcards at the beginning of a term. + * + * Terms can be restricted to specific fields, e.g. `title:hello`, only documents with the term + * hello in the title field will match this query. Using a field not present in the index will lead + * to an error being thrown. + * + * Modifiers can also be added to terms, lunr supports edit distance and boost modifiers on terms. A term + * boost will make documents matching that term score higher, e.g. `foo^5`. Edit distance is also supported + * to provide fuzzy matching, e.g. 'hello~2' will match documents with hello with an edit distance of 2. + * Avoid large values for edit distance to improve query performance. + * + * Each term also supports a presence modifier. By default a term's presence in document is optional, however + * this can be changed to either required or prohibited. For a term's presence to be required in a document the + * term should be prefixed with a '+', e.g. 
`+foo bar` is a search for documents that must contain 'foo' and + * optionally contain 'bar'. Conversely a leading '-' sets the terms presence to prohibited, i.e. it must not + * appear in a document, e.g. `-foo bar` is a search for documents that do not contain 'foo' but may contain 'bar'. + * + * To escape special characters the backslash character '\' can be used, this allows searches to include + * characters that would normally be considered modifiers, e.g. `foo\~2` will search for a term "foo~2" instead + * of attempting to apply a boost of 2 to the search term "foo". + * + * @typedef {string} lunr.Index~QueryString + * @example Simple single term query + * hello + * @example Multiple term query + * hello world + * @example term scoped to a field + * title:hello + * @example term with a boost of 10 + * hello^10 + * @example term with an edit distance of 2 + * hello~2 + * @example terms with presence modifiers + * -foo +bar baz + */ + +/** + * Performs a search against the index using lunr query syntax. + * + * Results will be returned sorted by their score, the most relevant results + * will be returned first. For details on how the score is calculated, please see + * the {@link https://lunrjs.com/guides/searching.html#scoring|guide}. + * + * For more programmatic querying use lunr.Index#query. + * + * @param {lunr.Index~QueryString} queryString - A string containing a lunr query. + * @throws {lunr.QueryParseError} If the passed query string cannot be parsed. + * @returns {lunr.Index~Result[]} + */ +lunr.Index.prototype.search = function (queryString) { + return this.query(function (query) { + var parser = new lunr.QueryParser(queryString, query) + parser.parse() + }) +} + +/** + * A query builder callback provides a query object to be used to express + * the query to perform on the index. + * + * @callback lunr.Index~queryBuilder + * @param {lunr.Query} query - The query object to build up. + * @this lunr.Query + */ + +/** + * Performs a query against the index using the yielded lunr.Query object. + * + * If performing programmatic queries against the index, this method is preferred + * over lunr.Index#search so as to avoid the additional query parsing overhead. + * + * A query object is yielded to the supplied function which should be used to + * express the query to be run against the index. + * + * Note that although this function takes a callback parameter it is _not_ an + * asynchronous operation, the callback is just yielded a query object to be + * customized. + * + * @param {lunr.Index~queryBuilder} fn - A function that is used to build the query. + * @returns {lunr.Index~Result[]} + */ +lunr.Index.prototype.query = function (fn) { + // for each query clause + // * process terms + // * expand terms from token set + // * find matching documents and metadata + // * get document vectors + // * score documents + + var query = new lunr.Query(this.fields), + matchingFields = Object.create(null), + queryVectors = Object.create(null), + termFieldCache = Object.create(null), + requiredMatches = Object.create(null), + prohibitedMatches = Object.create(null) + + /* + * To support field level boosts a query vector is created per + * field. An empty vector is eagerly created to support negated + * queries. 
+ */ + for (var i = 0; i < this.fields.length; i++) { + queryVectors[this.fields[i]] = new lunr.Vector + } + + fn.call(query, query) + + for (var i = 0; i < query.clauses.length; i++) { + /* + * Unless the pipeline has been disabled for this term, which is + * the case for terms with wildcards, we need to pass the clause + * term through the search pipeline. A pipeline returns an array + * of processed terms. Pipeline functions may expand the passed + * term, which means we may end up performing multiple index lookups + * for a single query term. + */ + var clause = query.clauses[i], + terms = null, + clauseMatches = lunr.Set.empty + + if (clause.usePipeline) { + terms = this.pipeline.runString(clause.term, { + fields: clause.fields + }) + } else { + terms = [clause.term] + } + + for (var m = 0; m < terms.length; m++) { + var term = terms[m] + + /* + * Each term returned from the pipeline needs to use the same query + * clause object, e.g. the same boost and or edit distance. The + * simplest way to do this is to re-use the clause object but mutate + * its term property. + */ + clause.term = term + + /* + * From the term in the clause we create a token set which will then + * be used to intersect the indexes token set to get a list of terms + * to lookup in the inverted index + */ + var termTokenSet = lunr.TokenSet.fromClause(clause), + expandedTerms = this.tokenSet.intersect(termTokenSet).toArray() + + /* + * If a term marked as required does not exist in the tokenSet it is + * impossible for the search to return any matches. We set all the field + * scoped required matches set to empty and stop examining any further + * clauses. + */ + if (expandedTerms.length === 0 && clause.presence === lunr.Query.presence.REQUIRED) { + for (var k = 0; k < clause.fields.length; k++) { + var field = clause.fields[k] + requiredMatches[field] = lunr.Set.empty + } + + break + } + + for (var j = 0; j < expandedTerms.length; j++) { + /* + * For each term get the posting and termIndex, this is required for + * building the query vector. + */ + var expandedTerm = expandedTerms[j], + posting = this.invertedIndex[expandedTerm], + termIndex = posting._index + + for (var k = 0; k < clause.fields.length; k++) { + /* + * For each field that this query term is scoped by (by default + * all fields are in scope) we need to get all the document refs + * that have this term in that field. + * + * The posting is the entry in the invertedIndex for the matching + * term from above. + */ + var field = clause.fields[k], + fieldPosting = posting[field], + matchingDocumentRefs = Object.keys(fieldPosting), + termField = expandedTerm + "/" + field, + matchingDocumentsSet = new lunr.Set(matchingDocumentRefs) + + /* + * if the presence of this term is required ensure that the matching + * documents are added to the set of required matches for this clause. + * + */ + if (clause.presence == lunr.Query.presence.REQUIRED) { + clauseMatches = clauseMatches.union(matchingDocumentsSet) + + if (requiredMatches[field] === undefined) { + requiredMatches[field] = lunr.Set.complete + } + } + + /* + * if the presence of this term is prohibited ensure that the matching + * documents are added to the set of prohibited matches for this field, + * creating that set if it does not yet exist. 
+ */ + if (clause.presence == lunr.Query.presence.PROHIBITED) { + if (prohibitedMatches[field] === undefined) { + prohibitedMatches[field] = lunr.Set.empty + } + + prohibitedMatches[field] = prohibitedMatches[field].union(matchingDocumentsSet) + + /* + * Prohibited matches should not be part of the query vector used for + * similarity scoring and no metadata should be extracted so we continue + * to the next field + */ + continue + } + + /* + * The query field vector is populated using the termIndex found for + * the term and a unit value with the appropriate boost applied. + * Using upsert because there could already be an entry in the vector + * for the term we are working with. In that case we just add the scores + * together. + */ + queryVectors[field].upsert(termIndex, clause.boost, function (a, b) { return a + b }) + + /** + * If we've already seen this term, field combo then we've already collected + * the matching documents and metadata, no need to go through all that again + */ + if (termFieldCache[termField]) { + continue + } + + for (var l = 0; l < matchingDocumentRefs.length; l++) { + /* + * All metadata for this term/field/document triple + * are then extracted and collected into an instance + * of lunr.MatchData ready to be returned in the query + * results + */ + var matchingDocumentRef = matchingDocumentRefs[l], + matchingFieldRef = new lunr.FieldRef (matchingDocumentRef, field), + metadata = fieldPosting[matchingDocumentRef], + fieldMatch + + if ((fieldMatch = matchingFields[matchingFieldRef]) === undefined) { + matchingFields[matchingFieldRef] = new lunr.MatchData (expandedTerm, field, metadata) + } else { + fieldMatch.add(expandedTerm, field, metadata) + } + + } + + termFieldCache[termField] = true + } + } + } + + /** + * If the presence was required we need to update the requiredMatches field sets. + * We do this after all fields for the term have collected their matches because + * the clause terms presence is required in _any_ of the fields not _all_ of the + * fields. + */ + if (clause.presence === lunr.Query.presence.REQUIRED) { + for (var k = 0; k < clause.fields.length; k++) { + var field = clause.fields[k] + requiredMatches[field] = requiredMatches[field].intersect(clauseMatches) + } + } + } + + /** + * Need to combine the field scoped required and prohibited + * matching documents into a global set of required and prohibited + * matches + */ + var allRequiredMatches = lunr.Set.complete, + allProhibitedMatches = lunr.Set.empty + + for (var i = 0; i < this.fields.length; i++) { + var field = this.fields[i] + + if (requiredMatches[field]) { + allRequiredMatches = allRequiredMatches.intersect(requiredMatches[field]) + } + + if (prohibitedMatches[field]) { + allProhibitedMatches = allProhibitedMatches.union(prohibitedMatches[field]) + } + } + + var matchingFieldRefs = Object.keys(matchingFields), + results = [], + matches = Object.create(null) + + /* + * If the query is negated (contains only prohibited terms) + * we need to get _all_ fieldRefs currently existing in the + * index. This is only done when we know that the query is + * entirely prohibited terms to avoid any cost of getting all + * fieldRefs unnecessarily. + * + * Additionally, blank MatchData must be created to correctly + * populate the results. 
+ */ + if (query.isNegated()) { + matchingFieldRefs = Object.keys(this.fieldVectors) + + for (var i = 0; i < matchingFieldRefs.length; i++) { + var matchingFieldRef = matchingFieldRefs[i] + var fieldRef = lunr.FieldRef.fromString(matchingFieldRef) + matchingFields[matchingFieldRef] = new lunr.MatchData + } + } + + for (var i = 0; i < matchingFieldRefs.length; i++) { + /* + * Currently we have document fields that match the query, but we + * need to return documents. The matchData and scores are combined + * from multiple fields belonging to the same document. + * + * Scores are calculated by field, using the query vectors created + * above, and combined into a final document score using addition. + */ + var fieldRef = lunr.FieldRef.fromString(matchingFieldRefs[i]), + docRef = fieldRef.docRef + + if (!allRequiredMatches.contains(docRef)) { + continue + } + + if (allProhibitedMatches.contains(docRef)) { + continue + } + + var fieldVector = this.fieldVectors[fieldRef], + score = queryVectors[fieldRef.fieldName].similarity(fieldVector), + docMatch + + if ((docMatch = matches[docRef]) !== undefined) { + docMatch.score += score + docMatch.matchData.combine(matchingFields[fieldRef]) + } else { + var match = { + ref: docRef, + score: score, + matchData: matchingFields[fieldRef] + } + matches[docRef] = match + results.push(match) + } + } + + /* + * Sort the results objects by score, highest first. + */ + return results.sort(function (a, b) { + return b.score - a.score + }) +} + +/** + * Prepares the index for JSON serialization. + * + * The schema for this JSON blob will be described in a + * separate JSON schema file. + * + * @returns {Object} + */ +lunr.Index.prototype.toJSON = function () { + var invertedIndex = Object.keys(this.invertedIndex) + .sort() + .map(function (term) { + return [term, this.invertedIndex[term]] + }, this) + + var fieldVectors = Object.keys(this.fieldVectors) + .map(function (ref) { + return [ref, this.fieldVectors[ref].toJSON()] + }, this) + + return { + version: lunr.version, + fields: this.fields, + fieldVectors: fieldVectors, + invertedIndex: invertedIndex, + pipeline: this.pipeline.toJSON() + } +} + +/** + * Loads a previously serialized lunr.Index + * + * @param {Object} serializedIndex - A previously serialized lunr.Index + * @returns {lunr.Index} + */ +lunr.Index.load = function (serializedIndex) { + var attrs = {}, + fieldVectors = {}, + serializedVectors = serializedIndex.fieldVectors, + invertedIndex = Object.create(null), + serializedInvertedIndex = serializedIndex.invertedIndex, + tokenSetBuilder = new lunr.TokenSet.Builder, + pipeline = lunr.Pipeline.load(serializedIndex.pipeline) + + if (serializedIndex.version != lunr.version) { + lunr.utils.warn("Version mismatch when loading serialised index. Current version of lunr '" + lunr.version + "' does not match serialized index '" + serializedIndex.version + "'") + } + + for (var i = 0; i < serializedVectors.length; i++) { + var tuple = serializedVectors[i], + ref = tuple[0], + elements = tuple[1] + + fieldVectors[ref] = new lunr.Vector(elements) + } + + for (var i = 0; i < serializedInvertedIndex.length; i++) { + var tuple = serializedInvertedIndex[i], + term = tuple[0], + posting = tuple[1] + + tokenSetBuilder.insert(term) + invertedIndex[term] = posting + } + + tokenSetBuilder.finish() + + attrs.fields = serializedIndex.fields + + attrs.fieldVectors = fieldVectors + attrs.invertedIndex = invertedIndex + attrs.tokenSet = tokenSetBuilder.root + attrs.pipeline = pipeline + + return new lunr.Index(attrs) +} +/*! 
+ * lunr.Builder + * Copyright (C) 2020 Oliver Nightingale + */ + +/** + * lunr.Builder performs indexing on a set of documents and + * returns instances of lunr.Index ready for querying. + * + * All configuration of the index is done via the builder, the + * fields to index, the document reference, the text processing + * pipeline and document scoring parameters are all set on the + * builder before indexing. + * + * @constructor + * @property {string} _ref - Internal reference to the document reference field. + * @property {string[]} _fields - Internal reference to the document fields to index. + * @property {object} invertedIndex - The inverted index maps terms to document fields. + * @property {object} documentTermFrequencies - Keeps track of document term frequencies. + * @property {object} documentLengths - Keeps track of the length of documents added to the index. + * @property {lunr.tokenizer} tokenizer - Function for splitting strings into tokens for indexing. + * @property {lunr.Pipeline} pipeline - The pipeline performs text processing on tokens before indexing. + * @property {lunr.Pipeline} searchPipeline - A pipeline for processing search terms before querying the index. + * @property {number} documentCount - Keeps track of the total number of documents indexed. + * @property {number} _b - A parameter to control field length normalization, setting this to 0 disabled normalization, 1 fully normalizes field lengths, the default value is 0.75. + * @property {number} _k1 - A parameter to control how quickly an increase in term frequency results in term frequency saturation, the default value is 1.2. + * @property {number} termIndex - A counter incremented for each unique term, used to identify a terms position in the vector space. + * @property {array} metadataWhitelist - A list of metadata keys that have been whitelisted for entry in the index. + */ +lunr.Builder = function () { + this._ref = "id" + this._fields = Object.create(null) + this._documents = Object.create(null) + this.invertedIndex = Object.create(null) + this.fieldTermFrequencies = {} + this.fieldLengths = {} + this.tokenizer = lunr.tokenizer + this.pipeline = new lunr.Pipeline + this.searchPipeline = new lunr.Pipeline + this.documentCount = 0 + this._b = 0.75 + this._k1 = 1.2 + this.termIndex = 0 + this.metadataWhitelist = [] +} + +/** + * Sets the document field used as the document reference. Every document must have this field. + * The type of this field in the document should be a string, if it is not a string it will be + * coerced into a string by calling toString. + * + * The default ref is 'id'. + * + * The ref should _not_ be changed during indexing, it should be set before any documents are + * added to the index. Changing it during indexing can lead to inconsistent results. + * + * @param {string} ref - The name of the reference field in the document. + */ +lunr.Builder.prototype.ref = function (ref) { + this._ref = ref +} + +/** + * A function that is used to extract a field from a document. + * + * Lunr expects a field to be at the top level of a document, if however the field + * is deeply nested within a document an extractor function can be used to extract + * the right field for indexing. + * + * @callback fieldExtractor + * @param {object} doc - The document being added to the index. + * @returns {?(string|object|object[])} obj - The object that will be indexed for this field. 
+ * @example Extracting a nested field + * function (doc) { return doc.nested.field } + */ + +/** + * Adds a field to the list of document fields that will be indexed. Every document being + * indexed should have this field. Null values for this field in indexed documents will + * not cause errors but will limit the chance of that document being retrieved by searches. + * + * All fields should be added before adding documents to the index. Adding fields after + * a document has been indexed will have no effect on already indexed documents. + * + * Fields can be boosted at build time. This allows terms within that field to have more + * importance when ranking search results. Use a field boost to specify that matches within + * one field are more important than other fields. + * + * @param {string} fieldName - The name of a field to index in all documents. + * @param {object} attributes - Optional attributes associated with this field. + * @param {number} [attributes.boost=1] - Boost applied to all terms within this field. + * @param {fieldExtractor} [attributes.extractor] - Function to extract a field from a document. + * @throws {RangeError} fieldName cannot contain unsupported characters '/' + */ +lunr.Builder.prototype.field = function (fieldName, attributes) { + if (/\//.test(fieldName)) { + throw new RangeError ("Field '" + fieldName + "' contains illegal character '/'") + } + + this._fields[fieldName] = attributes || {} +} + +/** + * A parameter to tune the amount of field length normalisation that is applied when + * calculating relevance scores. A value of 0 will completely disable any normalisation + * and a value of 1 will fully normalise field lengths. The default is 0.75. Values of b + * will be clamped to the range 0 - 1. + * + * @param {number} number - The value to set for this tuning parameter. + */ +lunr.Builder.prototype.b = function (number) { + if (number < 0) { + this._b = 0 + } else if (number > 1) { + this._b = 1 + } else { + this._b = number + } +} + +/** + * A parameter that controls the speed at which a rise in term frequency results in term + * frequency saturation. The default value is 1.2. Setting this to a higher value will give + * slower saturation levels, a lower value will result in quicker saturation. + * + * @param {number} number - The value to set for this tuning parameter. + */ +lunr.Builder.prototype.k1 = function (number) { + this._k1 = number +} + +/** + * Adds a document to the index. + * + * Before adding fields to the index the index should have been fully setup, with the document + * ref and all fields to index already having been specified. + * + * The document must have a field name as specified by the ref (by default this is 'id') and + * it should have all fields defined for indexing, though null or undefined values will not + * cause errors. + * + * Entire documents can be boosted at build time. Applying a boost to a document indicates that + * this document should rank higher in search results than other documents. + * + * @param {object} doc - The document to add to the index. + * @param {object} attributes - Optional attributes associated with this document. + * @param {number} [attributes.boost=1] - Boost applied to all terms within this document. 
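+ *
+ * As a rough illustration (the 'id', 'title' and 'body' field names and the document values
+ * below are assumed for the example, not taken from this bundle), the ref and fields are set
+ * first and a boosted document is then added; the lunr() convenience wrapper, when used,
+ * typically wires up the text-processing pipeline as well.
+ * @example Adding a document with a build-time boost (illustrative sketch)
+ *   var builder = new lunr.Builder
+ *   builder.ref('id')
+ *   builder.field('title')
+ *   builder.field('body')
+ *   builder.add({ id: '1', title: 'hello world', body: 'foo bar' }, { boost: 2 })
+ *   var idx = builder.build()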
+ */ +lunr.Builder.prototype.add = function (doc, attributes) { + var docRef = doc[this._ref], + fields = Object.keys(this._fields) + + this._documents[docRef] = attributes || {} + this.documentCount += 1 + + for (var i = 0; i < fields.length; i++) { + var fieldName = fields[i], + extractor = this._fields[fieldName].extractor, + field = extractor ? extractor(doc) : doc[fieldName], + tokens = this.tokenizer(field, { + fields: [fieldName] + }), + terms = this.pipeline.run(tokens), + fieldRef = new lunr.FieldRef (docRef, fieldName), + fieldTerms = Object.create(null) + + this.fieldTermFrequencies[fieldRef] = fieldTerms + this.fieldLengths[fieldRef] = 0 + + // store the length of this field for this document + this.fieldLengths[fieldRef] += terms.length + + // calculate term frequencies for this field + for (var j = 0; j < terms.length; j++) { + var term = terms[j] + + if (fieldTerms[term] == undefined) { + fieldTerms[term] = 0 + } + + fieldTerms[term] += 1 + + // add to inverted index + // create an initial posting if one doesn't exist + if (this.invertedIndex[term] == undefined) { + var posting = Object.create(null) + posting["_index"] = this.termIndex + this.termIndex += 1 + + for (var k = 0; k < fields.length; k++) { + posting[fields[k]] = Object.create(null) + } + + this.invertedIndex[term] = posting + } + + // add an entry for this term/fieldName/docRef to the invertedIndex + if (this.invertedIndex[term][fieldName][docRef] == undefined) { + this.invertedIndex[term][fieldName][docRef] = Object.create(null) + } + + // store all whitelisted metadata about this token in the + // inverted index + for (var l = 0; l < this.metadataWhitelist.length; l++) { + var metadataKey = this.metadataWhitelist[l], + metadata = term.metadata[metadataKey] + + if (this.invertedIndex[term][fieldName][docRef][metadataKey] == undefined) { + this.invertedIndex[term][fieldName][docRef][metadataKey] = [] + } + + this.invertedIndex[term][fieldName][docRef][metadataKey].push(metadata) + } + } + + } +} + +/** + * Calculates the average document length for this index + * + * @private + */ +lunr.Builder.prototype.calculateAverageFieldLengths = function () { + + var fieldRefs = Object.keys(this.fieldLengths), + numberOfFields = fieldRefs.length, + accumulator = {}, + documentsWithField = {} + + for (var i = 0; i < numberOfFields; i++) { + var fieldRef = lunr.FieldRef.fromString(fieldRefs[i]), + field = fieldRef.fieldName + + documentsWithField[field] || (documentsWithField[field] = 0) + documentsWithField[field] += 1 + + accumulator[field] || (accumulator[field] = 0) + accumulator[field] += this.fieldLengths[fieldRef] + } + + var fields = Object.keys(this._fields) + + for (var i = 0; i < fields.length; i++) { + var fieldName = fields[i] + accumulator[fieldName] = accumulator[fieldName] / documentsWithField[fieldName] + } + + this.averageFieldLength = accumulator +} + +/** + * Builds a vector space model of every document using lunr.Vector + * + * @private + */ +lunr.Builder.prototype.createFieldVectors = function () { + var fieldVectors = {}, + fieldRefs = Object.keys(this.fieldTermFrequencies), + fieldRefsLength = fieldRefs.length, + termIdfCache = Object.create(null) + + for (var i = 0; i < fieldRefsLength; i++) { + var fieldRef = lunr.FieldRef.fromString(fieldRefs[i]), + fieldName = fieldRef.fieldName, + fieldLength = this.fieldLengths[fieldRef], + fieldVector = new lunr.Vector, + termFrequencies = this.fieldTermFrequencies[fieldRef], + terms = Object.keys(termFrequencies), + termsLength = terms.length + + + var 
fieldBoost = this._fields[fieldName].boost || 1, + docBoost = this._documents[fieldRef.docRef].boost || 1 + + for (var j = 0; j < termsLength; j++) { + var term = terms[j], + tf = termFrequencies[term], + termIndex = this.invertedIndex[term]._index, + idf, score, scoreWithPrecision + + if (termIdfCache[term] === undefined) { + idf = lunr.idf(this.invertedIndex[term], this.documentCount) + termIdfCache[term] = idf + } else { + idf = termIdfCache[term] + } + + score = idf * ((this._k1 + 1) * tf) / (this._k1 * (1 - this._b + this._b * (fieldLength / this.averageFieldLength[fieldName])) + tf) + score *= fieldBoost + score *= docBoost + scoreWithPrecision = Math.round(score * 1000) / 1000 + // Converts 1.23456789 to 1.234. + // Reducing the precision so that the vectors take up less + // space when serialised. Doing it now so that they behave + // the same before and after serialisation. Also, this is + // the fastest approach to reducing a number's precision in + // JavaScript. + + fieldVector.insert(termIndex, scoreWithPrecision) + } + + fieldVectors[fieldRef] = fieldVector + } + + this.fieldVectors = fieldVectors +} + +/** + * Creates a token set of all tokens in the index using lunr.TokenSet + * + * @private + */ +lunr.Builder.prototype.createTokenSet = function () { + this.tokenSet = lunr.TokenSet.fromArray( + Object.keys(this.invertedIndex).sort() + ) +} + +/** + * Builds the index, creating an instance of lunr.Index. + * + * This completes the indexing process and should only be called + * once all documents have been added to the index. + * + * @returns {lunr.Index} + */ +lunr.Builder.prototype.build = function () { + this.calculateAverageFieldLengths() + this.createFieldVectors() + this.createTokenSet() + + return new lunr.Index({ + invertedIndex: this.invertedIndex, + fieldVectors: this.fieldVectors, + tokenSet: this.tokenSet, + fields: Object.keys(this._fields), + pipeline: this.searchPipeline + }) +} + +/** + * Applies a plugin to the index builder. + * + * A plugin is a function that is called with the index builder as its context. + * Plugins can be used to customise or extend the behaviour of the index + * in some way. A plugin is just a function, that encapsulated the custom + * behaviour that should be applied when building the index. + * + * The plugin function will be called with the index builder as its argument, additional + * arguments can also be passed when calling use. The function will be called + * with the index builder as its context. + * + * @param {Function} plugin The plugin to apply. + */ +lunr.Builder.prototype.use = function (fn) { + var args = Array.prototype.slice.call(arguments, 1) + args.unshift(this) + fn.apply(this, args) +} +/** + * Contains and collects metadata about a matching document. + * A single instance of lunr.MatchData is returned as part of every + * lunr.Index~Result. + * + * @constructor + * @param {string} term - The term this match data is associated with + * @param {string} field - The field in which the term was found + * @param {object} metadata - The metadata recorded about this term in this field + * @property {object} metadata - A cloned collection of metadata associated with this document. + * @see {@link lunr.Index~Result} + */ +lunr.MatchData = function (term, field, metadata) { + var clonedMetadata = Object.create(null), + metadataKeys = Object.keys(metadata || {}) + + // Cloning the metadata to prevent the original + // being mutated during match data combination. 
+ // Metadata is kept in an array within the inverted + // index so cloning the data can be done with + // Array#slice + for (var i = 0; i < metadataKeys.length; i++) { + var key = metadataKeys[i] + clonedMetadata[key] = metadata[key].slice() + } + + this.metadata = Object.create(null) + + if (term !== undefined) { + this.metadata[term] = Object.create(null) + this.metadata[term][field] = clonedMetadata + } +} + +/** + * An instance of lunr.MatchData will be created for every term that matches a + * document. However only one instance is required in a lunr.Index~Result. This + * method combines metadata from another instance of lunr.MatchData with this + * objects metadata. + * + * @param {lunr.MatchData} otherMatchData - Another instance of match data to merge with this one. + * @see {@link lunr.Index~Result} + */ +lunr.MatchData.prototype.combine = function (otherMatchData) { + var terms = Object.keys(otherMatchData.metadata) + + for (var i = 0; i < terms.length; i++) { + var term = terms[i], + fields = Object.keys(otherMatchData.metadata[term]) + + if (this.metadata[term] == undefined) { + this.metadata[term] = Object.create(null) + } + + for (var j = 0; j < fields.length; j++) { + var field = fields[j], + keys = Object.keys(otherMatchData.metadata[term][field]) + + if (this.metadata[term][field] == undefined) { + this.metadata[term][field] = Object.create(null) + } + + for (var k = 0; k < keys.length; k++) { + var key = keys[k] + + if (this.metadata[term][field][key] == undefined) { + this.metadata[term][field][key] = otherMatchData.metadata[term][field][key] + } else { + this.metadata[term][field][key] = this.metadata[term][field][key].concat(otherMatchData.metadata[term][field][key]) + } + + } + } + } +} + +/** + * Add metadata for a term/field pair to this instance of match data. + * + * @param {string} term - The term this match data is associated with + * @param {string} field - The field in which the term was found + * @param {object} metadata - The metadata recorded about this term in this field + */ +lunr.MatchData.prototype.add = function (term, field, metadata) { + if (!(term in this.metadata)) { + this.metadata[term] = Object.create(null) + this.metadata[term][field] = metadata + return + } + + if (!(field in this.metadata[term])) { + this.metadata[term][field] = metadata + return + } + + var metadataKeys = Object.keys(metadata) + + for (var i = 0; i < metadataKeys.length; i++) { + var key = metadataKeys[i] + + if (key in this.metadata[term][field]) { + this.metadata[term][field][key] = this.metadata[term][field][key].concat(metadata[key]) + } else { + this.metadata[term][field][key] = metadata[key] + } + } +} +/** + * A lunr.Query provides a programmatic way of defining queries to be performed + * against a {@link lunr.Index}. + * + * Prefer constructing a lunr.Query using the {@link lunr.Index#query} method + * so the query object is pre-initialized with the right index fields. + * + * @constructor + * @property {lunr.Query~Clause[]} clauses - An array of query clauses. + * @property {string[]} allFields - An array of all available fields in a lunr.Index. + */ +lunr.Query = function (allFields) { + this.clauses = [] + this.allFields = allFields +} + +/** + * Constants for indicating what kind of automatic wildcard insertion will be used when constructing a query clause. + * + * This allows wildcards to be added to the beginning and end of a term without having to manually do any string + * concatenation. 
+ * + * The wildcard constants can be bitwise combined to select both leading and trailing wildcards. + * + * @constant + * @default + * @property {number} wildcard.NONE - The term will have no wildcards inserted, this is the default behaviour + * @property {number} wildcard.LEADING - Prepend the term with a wildcard, unless a leading wildcard already exists + * @property {number} wildcard.TRAILING - Append a wildcard to the term, unless a trailing wildcard already exists + * @see lunr.Query~Clause + * @see lunr.Query#clause + * @see lunr.Query#term + * @example query term with trailing wildcard + * query.term('foo', { wildcard: lunr.Query.wildcard.TRAILING }) + * @example query term with leading and trailing wildcard + * query.term('foo', { + * wildcard: lunr.Query.wildcard.LEADING | lunr.Query.wildcard.TRAILING + * }) + */ + +lunr.Query.wildcard = new String ("*") +lunr.Query.wildcard.NONE = 0 +lunr.Query.wildcard.LEADING = 1 +lunr.Query.wildcard.TRAILING = 2 + +/** + * Constants for indicating what kind of presence a term must have in matching documents. + * + * @constant + * @enum {number} + * @see lunr.Query~Clause + * @see lunr.Query#clause + * @see lunr.Query#term + * @example query term with required presence + * query.term('foo', { presence: lunr.Query.presence.REQUIRED }) + */ +lunr.Query.presence = { + /** + * Term's presence in a document is optional, this is the default value. + */ + OPTIONAL: 1, + + /** + * Term's presence in a document is required, documents that do not contain + * this term will not be returned. + */ + REQUIRED: 2, + + /** + * Term's presence in a document is prohibited, documents that do contain + * this term will not be returned. + */ + PROHIBITED: 3 +} + +/** + * A single clause in a {@link lunr.Query} contains a term and details on how to + * match that term against a {@link lunr.Index}. + * + * @typedef {Object} lunr.Query~Clause + * @property {string[]} fields - The fields in an index this clause should be matched against. + * @property {number} [boost=1] - Any boost that should be applied when matching this clause. + * @property {number} [editDistance] - Whether the term should have fuzzy matching applied, and how fuzzy the match should be. + * @property {boolean} [usePipeline] - Whether the term should be passed through the search pipeline. + * @property {number} [wildcard=lunr.Query.wildcard.NONE] - Whether the term should have wildcards appended or prepended. + * @property {number} [presence=lunr.Query.presence.OPTIONAL] - The terms presence in any matching documents. + */ + +/** + * Adds a {@link lunr.Query~Clause} to this query. + * + * Unless the clause contains the fields to be matched all fields will be matched. In addition + * a default boost of 1 is applied to the clause. + * + * @param {lunr.Query~Clause} clause - The clause to add to this query. 
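+ *
+ * As a sketch (the term and the 'title' field below are assumed for illustration, not taken
+ * from this bundle), a fully specified clause passed to this method could look like:
+ * @example Adding a clause with explicit options (illustrative sketch)
+ *   query.clause({
+ *     term: 'hello',
+ *     fields: ['title'],
+ *     boost: 5,
+ *     usePipeline: true,
+ *     presence: lunr.Query.presence.REQUIRED
+ *   })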
+ * @see lunr.Query~Clause + * @returns {lunr.Query} + */ +lunr.Query.prototype.clause = function (clause) { + if (!('fields' in clause)) { + clause.fields = this.allFields + } + + if (!('boost' in clause)) { + clause.boost = 1 + } + + if (!('usePipeline' in clause)) { + clause.usePipeline = true + } + + if (!('wildcard' in clause)) { + clause.wildcard = lunr.Query.wildcard.NONE + } + + if ((clause.wildcard & lunr.Query.wildcard.LEADING) && (clause.term.charAt(0) != lunr.Query.wildcard)) { + clause.term = "*" + clause.term + } + + if ((clause.wildcard & lunr.Query.wildcard.TRAILING) && (clause.term.slice(-1) != lunr.Query.wildcard)) { + clause.term = "" + clause.term + "*" + } + + if (!('presence' in clause)) { + clause.presence = lunr.Query.presence.OPTIONAL + } + + this.clauses.push(clause) + + return this +} + +/** + * A negated query is one in which every clause has a presence of + * prohibited. These queries require some special processing to return + * the expected results. + * + * @returns boolean + */ +lunr.Query.prototype.isNegated = function () { + for (var i = 0; i < this.clauses.length; i++) { + if (this.clauses[i].presence != lunr.Query.presence.PROHIBITED) { + return false + } + } + + return true +} + +/** + * Adds a term to the current query, under the covers this will create a {@link lunr.Query~Clause} + * to the list of clauses that make up this query. + * + * The term is used as is, i.e. no tokenization will be performed by this method. Instead conversion + * to a token or token-like string should be done before calling this method. + * + * The term will be converted to a string by calling `toString`. Multiple terms can be passed as an + * array, each term in the array will share the same options. + * + * @param {object|object[]} term - The term(s) to add to the query. + * @param {object} [options] - Any additional properties to add to the query clause. 
+ * @returns {lunr.Query} + * @see lunr.Query#clause + * @see lunr.Query~Clause + * @example adding a single term to a query + * query.term("foo") + * @example adding a single term to a query and specifying search fields, term boost and automatic trailing wildcard + * query.term("foo", { + * fields: ["title"], + * boost: 10, + * wildcard: lunr.Query.wildcard.TRAILING + * }) + * @example using lunr.tokenizer to convert a string to tokens before using them as terms + * query.term(lunr.tokenizer("foo bar")) + */ +lunr.Query.prototype.term = function (term, options) { + if (Array.isArray(term)) { + term.forEach(function (t) { this.term(t, lunr.utils.clone(options)) }, this) + return this + } + + var clause = options || {} + clause.term = term.toString() + + this.clause(clause) + + return this +} +lunr.QueryParseError = function (message, start, end) { + this.name = "QueryParseError" + this.message = message + this.start = start + this.end = end +} + +lunr.QueryParseError.prototype = new Error +lunr.QueryLexer = function (str) { + this.lexemes = [] + this.str = str + this.length = str.length + this.pos = 0 + this.start = 0 + this.escapeCharPositions = [] +} + +lunr.QueryLexer.prototype.run = function () { + var state = lunr.QueryLexer.lexText + + while (state) { + state = state(this) + } +} + +lunr.QueryLexer.prototype.sliceString = function () { + var subSlices = [], + sliceStart = this.start, + sliceEnd = this.pos + + for (var i = 0; i < this.escapeCharPositions.length; i++) { + sliceEnd = this.escapeCharPositions[i] + subSlices.push(this.str.slice(sliceStart, sliceEnd)) + sliceStart = sliceEnd + 1 + } + + subSlices.push(this.str.slice(sliceStart, this.pos)) + this.escapeCharPositions.length = 0 + + return subSlices.join('') +} + +lunr.QueryLexer.prototype.emit = function (type) { + this.lexemes.push({ + type: type, + str: this.sliceString(), + start: this.start, + end: this.pos + }) + + this.start = this.pos +} + +lunr.QueryLexer.prototype.escapeCharacter = function () { + this.escapeCharPositions.push(this.pos - 1) + this.pos += 1 +} + +lunr.QueryLexer.prototype.next = function () { + if (this.pos >= this.length) { + return lunr.QueryLexer.EOS + } + + var char = this.str.charAt(this.pos) + this.pos += 1 + return char +} + +lunr.QueryLexer.prototype.width = function () { + return this.pos - this.start +} + +lunr.QueryLexer.prototype.ignore = function () { + if (this.start == this.pos) { + this.pos += 1 + } + + this.start = this.pos +} + +lunr.QueryLexer.prototype.backup = function () { + this.pos -= 1 +} + +lunr.QueryLexer.prototype.acceptDigitRun = function () { + var char, charCode + + do { + char = this.next() + charCode = char.charCodeAt(0) + } while (charCode > 47 && charCode < 58) + + if (char != lunr.QueryLexer.EOS) { + this.backup() + } +} + +lunr.QueryLexer.prototype.more = function () { + return this.pos < this.length +} + +lunr.QueryLexer.EOS = 'EOS' +lunr.QueryLexer.FIELD = 'FIELD' +lunr.QueryLexer.TERM = 'TERM' +lunr.QueryLexer.EDIT_DISTANCE = 'EDIT_DISTANCE' +lunr.QueryLexer.BOOST = 'BOOST' +lunr.QueryLexer.PRESENCE = 'PRESENCE' + +lunr.QueryLexer.lexField = function (lexer) { + lexer.backup() + lexer.emit(lunr.QueryLexer.FIELD) + lexer.ignore() + return lunr.QueryLexer.lexText +} + +lunr.QueryLexer.lexTerm = function (lexer) { + if (lexer.width() > 1) { + lexer.backup() + lexer.emit(lunr.QueryLexer.TERM) + } + + lexer.ignore() + + if (lexer.more()) { + return lunr.QueryLexer.lexText + } +} + +lunr.QueryLexer.lexEditDistance = function (lexer) { + lexer.ignore() + 
lexer.acceptDigitRun() + lexer.emit(lunr.QueryLexer.EDIT_DISTANCE) + return lunr.QueryLexer.lexText +} + +lunr.QueryLexer.lexBoost = function (lexer) { + lexer.ignore() + lexer.acceptDigitRun() + lexer.emit(lunr.QueryLexer.BOOST) + return lunr.QueryLexer.lexText +} + +lunr.QueryLexer.lexEOS = function (lexer) { + if (lexer.width() > 0) { + lexer.emit(lunr.QueryLexer.TERM) + } +} + +// This matches the separator used when tokenising fields +// within a document. These should match otherwise it is +// not possible to search for some tokens within a document. +// +// It is possible for the user to change the separator on the +// tokenizer so it _might_ clash with any other of the special +// characters already used within the search string, e.g. :. +// +// This means that it is possible to change the separator in +// such a way that makes some words unsearchable using a search +// string. +lunr.QueryLexer.termSeparator = lunr.tokenizer.separator + +lunr.QueryLexer.lexText = function (lexer) { + while (true) { + var char = lexer.next() + + if (char == lunr.QueryLexer.EOS) { + return lunr.QueryLexer.lexEOS + } + + // Escape character is '\' + if (char.charCodeAt(0) == 92) { + lexer.escapeCharacter() + continue + } + + if (char == ":") { + return lunr.QueryLexer.lexField + } + + if (char == "~") { + lexer.backup() + if (lexer.width() > 0) { + lexer.emit(lunr.QueryLexer.TERM) + } + return lunr.QueryLexer.lexEditDistance + } + + if (char == "^") { + lexer.backup() + if (lexer.width() > 0) { + lexer.emit(lunr.QueryLexer.TERM) + } + return lunr.QueryLexer.lexBoost + } + + // "+" indicates term presence is required + // checking for length to ensure that only + // leading "+" are considered + if (char == "+" && lexer.width() === 1) { + lexer.emit(lunr.QueryLexer.PRESENCE) + return lunr.QueryLexer.lexText + } + + // "-" indicates term presence is prohibited + // checking for length to ensure that only + // leading "-" are considered + if (char == "-" && lexer.width() === 1) { + lexer.emit(lunr.QueryLexer.PRESENCE) + return lunr.QueryLexer.lexText + } + + if (char.match(lunr.QueryLexer.termSeparator)) { + return lunr.QueryLexer.lexTerm + } + } +} + +lunr.QueryParser = function (str, query) { + this.lexer = new lunr.QueryLexer (str) + this.query = query + this.currentClause = {} + this.lexemeIdx = 0 +} + +lunr.QueryParser.prototype.parse = function () { + this.lexer.run() + this.lexemes = this.lexer.lexemes + + var state = lunr.QueryParser.parseClause + + while (state) { + state = state(this) + } + + return this.query +} + +lunr.QueryParser.prototype.peekLexeme = function () { + return this.lexemes[this.lexemeIdx] +} + +lunr.QueryParser.prototype.consumeLexeme = function () { + var lexeme = this.peekLexeme() + this.lexemeIdx += 1 + return lexeme +} + +lunr.QueryParser.prototype.nextClause = function () { + var completedClause = this.currentClause + this.query.clause(completedClause) + this.currentClause = {} +} + +lunr.QueryParser.parseClause = function (parser) { + var lexeme = parser.peekLexeme() + + if (lexeme == undefined) { + return + } + + switch (lexeme.type) { + case lunr.QueryLexer.PRESENCE: + return lunr.QueryParser.parsePresence + case lunr.QueryLexer.FIELD: + return lunr.QueryParser.parseField + case lunr.QueryLexer.TERM: + return lunr.QueryParser.parseTerm + default: + var errorMessage = "expected either a field or a term, found " + lexeme.type + + if (lexeme.str.length >= 1) { + errorMessage += " with value '" + lexeme.str + "'" + } + + throw new lunr.QueryParseError (errorMessage, 
lexeme.start, lexeme.end) + } +} + +lunr.QueryParser.parsePresence = function (parser) { + var lexeme = parser.consumeLexeme() + + if (lexeme == undefined) { + return + } + + switch (lexeme.str) { + case "-": + parser.currentClause.presence = lunr.Query.presence.PROHIBITED + break + case "+": + parser.currentClause.presence = lunr.Query.presence.REQUIRED + break + default: + var errorMessage = "unrecognised presence operator'" + lexeme.str + "'" + throw new lunr.QueryParseError (errorMessage, lexeme.start, lexeme.end) + } + + var nextLexeme = parser.peekLexeme() + + if (nextLexeme == undefined) { + var errorMessage = "expecting term or field, found nothing" + throw new lunr.QueryParseError (errorMessage, lexeme.start, lexeme.end) + } + + switch (nextLexeme.type) { + case lunr.QueryLexer.FIELD: + return lunr.QueryParser.parseField + case lunr.QueryLexer.TERM: + return lunr.QueryParser.parseTerm + default: + var errorMessage = "expecting term or field, found '" + nextLexeme.type + "'" + throw new lunr.QueryParseError (errorMessage, nextLexeme.start, nextLexeme.end) + } +} + +lunr.QueryParser.parseField = function (parser) { + var lexeme = parser.consumeLexeme() + + if (lexeme == undefined) { + return + } + + if (parser.query.allFields.indexOf(lexeme.str) == -1) { + var possibleFields = parser.query.allFields.map(function (f) { return "'" + f + "'" }).join(', '), + errorMessage = "unrecognised field '" + lexeme.str + "', possible fields: " + possibleFields + + throw new lunr.QueryParseError (errorMessage, lexeme.start, lexeme.end) + } + + parser.currentClause.fields = [lexeme.str] + + var nextLexeme = parser.peekLexeme() + + if (nextLexeme == undefined) { + var errorMessage = "expecting term, found nothing" + throw new lunr.QueryParseError (errorMessage, lexeme.start, lexeme.end) + } + + switch (nextLexeme.type) { + case lunr.QueryLexer.TERM: + return lunr.QueryParser.parseTerm + default: + var errorMessage = "expecting term, found '" + nextLexeme.type + "'" + throw new lunr.QueryParseError (errorMessage, nextLexeme.start, nextLexeme.end) + } +} + +lunr.QueryParser.parseTerm = function (parser) { + var lexeme = parser.consumeLexeme() + + if (lexeme == undefined) { + return + } + + parser.currentClause.term = lexeme.str.toLowerCase() + + if (lexeme.str.indexOf("*") != -1) { + parser.currentClause.usePipeline = false + } + + var nextLexeme = parser.peekLexeme() + + if (nextLexeme == undefined) { + parser.nextClause() + return + } + + switch (nextLexeme.type) { + case lunr.QueryLexer.TERM: + parser.nextClause() + return lunr.QueryParser.parseTerm + case lunr.QueryLexer.FIELD: + parser.nextClause() + return lunr.QueryParser.parseField + case lunr.QueryLexer.EDIT_DISTANCE: + return lunr.QueryParser.parseEditDistance + case lunr.QueryLexer.BOOST: + return lunr.QueryParser.parseBoost + case lunr.QueryLexer.PRESENCE: + parser.nextClause() + return lunr.QueryParser.parsePresence + default: + var errorMessage = "Unexpected lexeme type '" + nextLexeme.type + "'" + throw new lunr.QueryParseError (errorMessage, nextLexeme.start, nextLexeme.end) + } +} + +lunr.QueryParser.parseEditDistance = function (parser) { + var lexeme = parser.consumeLexeme() + + if (lexeme == undefined) { + return + } + + var editDistance = parseInt(lexeme.str, 10) + + if (isNaN(editDistance)) { + var errorMessage = "edit distance must be numeric" + throw new lunr.QueryParseError (errorMessage, lexeme.start, lexeme.end) + } + + parser.currentClause.editDistance = editDistance + + var nextLexeme = parser.peekLexeme() + + if 
(nextLexeme == undefined) { + parser.nextClause() + return + } + + switch (nextLexeme.type) { + case lunr.QueryLexer.TERM: + parser.nextClause() + return lunr.QueryParser.parseTerm + case lunr.QueryLexer.FIELD: + parser.nextClause() + return lunr.QueryParser.parseField + case lunr.QueryLexer.EDIT_DISTANCE: + return lunr.QueryParser.parseEditDistance + case lunr.QueryLexer.BOOST: + return lunr.QueryParser.parseBoost + case lunr.QueryLexer.PRESENCE: + parser.nextClause() + return lunr.QueryParser.parsePresence + default: + var errorMessage = "Unexpected lexeme type '" + nextLexeme.type + "'" + throw new lunr.QueryParseError (errorMessage, nextLexeme.start, nextLexeme.end) + } +} + +lunr.QueryParser.parseBoost = function (parser) { + var lexeme = parser.consumeLexeme() + + if (lexeme == undefined) { + return + } + + var boost = parseInt(lexeme.str, 10) + + if (isNaN(boost)) { + var errorMessage = "boost must be numeric" + throw new lunr.QueryParseError (errorMessage, lexeme.start, lexeme.end) + } + + parser.currentClause.boost = boost + + var nextLexeme = parser.peekLexeme() + + if (nextLexeme == undefined) { + parser.nextClause() + return + } + + switch (nextLexeme.type) { + case lunr.QueryLexer.TERM: + parser.nextClause() + return lunr.QueryParser.parseTerm + case lunr.QueryLexer.FIELD: + parser.nextClause() + return lunr.QueryParser.parseField + case lunr.QueryLexer.EDIT_DISTANCE: + return lunr.QueryParser.parseEditDistance + case lunr.QueryLexer.BOOST: + return lunr.QueryParser.parseBoost + case lunr.QueryLexer.PRESENCE: + parser.nextClause() + return lunr.QueryParser.parsePresence + default: + var errorMessage = "Unexpected lexeme type '" + nextLexeme.type + "'" + throw new lunr.QueryParseError (errorMessage, nextLexeme.start, nextLexeme.end) + } +} + + /** + * export the module via AMD, CommonJS or as a browser global + * Export code from https://github.com/umdjs/umd/blob/master/returnExports.js + */ + ;(function (root, factory) { + if (typeof define === 'function' && define.amd) { + // AMD. Register as an anonymous module. + define(factory) + } else if (typeof exports === 'object') { + /** + * Node. Does not work with strict CommonJS, but + * only CommonJS-like environments that support module.exports, + * like Node. + */ + module.exports = factory() + } else { + // Browser globals (root is window) + root.lunr = factory() + } + }(this, function () { + /** + * Just return a value to define the module export. + * This example returns an object, but the module + * can return a function as the exported value. + */ + return lunr + })) +})(); diff --git a/search/main.js b/search/main.js new file mode 100644 index 0000000..a5e469d --- /dev/null +++ b/search/main.js @@ -0,0 +1,109 @@ +function getSearchTermFromLocation() { + var sPageURL = window.location.search.substring(1); + var sURLVariables = sPageURL.split('&'); + for (var i = 0; i < sURLVariables.length; i++) { + var sParameterName = sURLVariables[i].split('='); + if (sParameterName[0] == 'q') { + return decodeURIComponent(sParameterName[1].replace(/\+/g, '%20')); + } + } +} + +function joinUrl (base, path) { + if (path.substring(0, 1) === "/") { + // path starts with `/`. Thus it is absolute. 
+ return path; + } + if (base.substring(base.length-1) === "/") { + // base ends with `/` + return base + path; + } + return base + "/" + path; +} + +function escapeHtml (value) { + return value.replace(/&/g, '&amp;') + .replace(/"/g, '&quot;') + .replace(/</g, '&lt;') + .replace(/>/g, '&gt;'); +} + +function formatResult (location, title, summary) { + return '<article><h3><a href="' + joinUrl(base_url, location) + '">' + escapeHtml(title) + '</a></h3><p>' + escapeHtml(summary) + '</p></article>'; +} + +function displayResults (results) { + var search_results = document.getElementById("mkdocs-search-results"); + while (search_results.firstChild) { + search_results.removeChild(search_results.firstChild); + } + if (results.length > 0){ + for (var i=0; i < results.length; i++){ + var result = results[i]; + var html = formatResult(result.location, result.title, result.summary); + search_results.insertAdjacentHTML('beforeend', html); + } + } else { + var noResultsText = search_results.getAttribute('data-no-results-text'); + if (!noResultsText) { + noResultsText = "No results found"; + } + search_results.insertAdjacentHTML('beforeend', '<p>' + noResultsText + '</p>
'); + } +} + +function doSearch () { + var query = document.getElementById('mkdocs-search-query').value; + if (query.length > min_search_length) { + if (!window.Worker) { + displayResults(search(query)); + } else { + searchWorker.postMessage({query: query}); + } + } else { + // Clear results for short queries + displayResults([]); + } +} + +function initSearch () { + var search_input = document.getElementById('mkdocs-search-query'); + if (search_input) { + search_input.addEventListener("keyup", doSearch); + } + var term = getSearchTermFromLocation(); + if (term) { + search_input.value = term; + doSearch(); + } +} + +function onWorkerMessage (e) { + if (e.data.allowSearch) { + initSearch(); + } else if (e.data.results) { + var results = e.data.results; + displayResults(results); + } else if (e.data.config) { + min_search_length = e.data.config.min_search_length-1; + } +} + +if (!window.Worker) { + console.log('Web Worker API not supported'); + // load index in main thread + $.getScript(joinUrl(base_url, "search/worker.js")).done(function () { + console.log('Loaded worker'); + init(); + window.postMessage = function (msg) { + onWorkerMessage({data: msg}); + }; + }).fail(function (jqxhr, settings, exception) { + console.error('Could not load worker.js'); + }); +} else { + // Wrap search in a web worker + var searchWorker = new Worker(joinUrl(base_url, "search/worker.js")); + searchWorker.postMessage({init: true}); + searchWorker.onmessage = onWorkerMessage; +} diff --git a/search/search_index.json b/search/search_index.json new file mode 100644 index 0000000..8e61279 --- /dev/null +++ b/search/search_index.json @@ -0,0 +1 @@ +{"config":{"indexing":"full","lang":["en"],"min_search_length":3,"prebuild_index":false,"separator":"[\\s\\-]+"},"docs":[{"location":"","text":"1. LightningDB is \u00b6 A distributed in-memory DBMS for real-time big data analytics Realtime ingestion and analytics for large scale data Advantages in random small data access based on DRAM/SSD resident KV Store Optimized for time series data and geospatial data 2. Architecture \u00b6 Spark with Redis/Rocksdb key-value stores No I/O bottleneck due to redis in DRAM and RocksDB in SSDs due to the small-sized key/value I/O and DRAM/SSDs\u2019 short latency (~200us) Filter predicates push down to redis and only associated partitions are chosen to be scanned 3. Features \u00b6 Ingestion performance (500,000 records/sec/node) Extreme partitioning (up-to 2 billion partitions for a single node) Real-time query performance by using fine-grained partitions and filter acceleration (vector processing by exploiting XEON SIMD instructions) Column-store / row-store support DRAM - SSD - HDD Tiering High compression ratio and compression speed (Gzip level compression ratio w/ LZ4 level speed) Low Write Amplification for SSD lifetime Asynchronous replication with low latency and high performance Node-based scale-out(Adding new nodes and scale out without data rebalancing )","title":"Overview"},{"location":"#1-lightningdb-is","text":"A distributed in-memory DBMS for real-time big data analytics Realtime ingestion and analytics for large scale data Advantages in random small data access based on DRAM/SSD resident KV Store Optimized for time series data and geospatial data","title":"1. 
LightningDB is"},{"location":"#2-architecture","text":"Spark with Redis/Rocksdb key-value stores No I/O bottleneck due to redis in DRAM and RocksDB in SSDs due to the small-sized key/value I/O and DRAM/SSDs\u2019 short latency (~200us) Filter predicates push down to redis and only associated partitions are chosen to be scanned","title":"2. Architecture"},{"location":"#3-features","text":"Ingestion performance (500,000 records/sec/node) Extreme partitioning (up-to 2 billion partitions for a single node) Real-time query performance by using fine-grained partitions and filter acceleration (vector processing by exploiting XEON SIMD instructions) Column-store / row-store support DRAM - SSD - HDD Tiering High compression ratio and compression speed (Gzip level compression ratio w/ LZ4 level speed) Low Write Amplification for SSD lifetime Asynchronous replication with low latency and high performance Node-based scale-out(Adding new nodes and scale out without data rebalancing )","title":"3. Features"},{"location":"awards-recognition/","text":"Awards and Recognition \u00b6 2023 \u00b6 SK TECH SUMMIT 2023 IMAGE\ub97c \uc81c\ub300\ub85c \uc774\ud574\ud558\ub294 \u2018AI\u2019\ub294 \uc5c6\ub098\uc694? (Vision-Language Model\uc744 \ud65c\uc6a9\ud55c SKT\ub9cc\uc758 Vision Data Asset \uad6c\ucd95\uae30) \u00b6 Speaker: Sungho Kim, Jiwon Ryu(SK Telecom) 2022 \u00b6 NVIDIA GTC 22 Vision data warehouse and accelerating the analytics for massive vision data \u00b6 Speaker: Sungho Kim(SK Telecom), Allen Xu(NVIDIA) 2021 \u00b6 NVIDIA GTC 21 Deep-Learning Data-Pipeline Optimization for Network Data Analysis in SK Telecom by Employing Spark Rapids for Custom Data Source \u00b6 Speaker: Dooyoung Hwan(SK Telecom), Thomas Graves(NVIDIA) 2020 \u00b6 Spark AI Summit 2020 Vectorized Deep Learning Acceleration from Preprocessing to Inference and Training on Apache Spark in SK Telecom \u00b6 Speaker: Hongchan Roh(SK Telecom) 2019 \u00b6 Spark AI Summit Europe 2019 Spark AI Usecase in Telco: Network Quality Analysis and Prediction with Geospatial Visualization \u00b6 Speaker: Hongchan Roh, Dooyoung Hwang(SK Telecom)","title":"Awards and Recognition"},{"location":"awards-recognition/#awards-and-recognition","text":"","title":"Awards and Recognition"},{"location":"awards-recognition/#2023","text":"","title":"2023"},{"location":"awards-recognition/#sk-tech-summit-2023-image-ai-vision-language-model-skt-vision-data-asset","text":"Speaker: Sungho Kim, Jiwon Ryu(SK Telecom)","title":" SK TECH SUMMIT 2023 IMAGE\ub97c \uc81c\ub300\ub85c \uc774\ud574\ud558\ub294 \u2018AI\u2019\ub294 \uc5c6\ub098\uc694? 
(Vision-Language Model\uc744 \ud65c\uc6a9\ud55c SKT\ub9cc\uc758 Vision Data Asset \uad6c\ucd95\uae30)"},{"location":"awards-recognition/#2022","text":"","title":"2022"},{"location":"awards-recognition/#nvidia-gtc-22-vision-data-warehouse-and-accelerating-the-analytics-for-massive-vision-data","text":"Speaker: Sungho Kim(SK Telecom), Allen Xu(NVIDIA)","title":" NVIDIA GTC 22 Vision data warehouse and accelerating the analytics for massive vision data"},{"location":"awards-recognition/#2021","text":"","title":"2021"},{"location":"awards-recognition/#nvidia-gtc-21-deep-learning-data-pipeline-optimization-for-network-data-analysis-in-sk-telecom-by-employing-spark-rapids-for-custom-data-source","text":"Speaker: Dooyoung Hwan(SK Telecom), Thomas Graves(NVIDIA)","title":" NVIDIA GTC 21 Deep-Learning Data-Pipeline Optimization for Network Data Analysis in SK Telecom by Employing Spark Rapids for Custom Data Source"},{"location":"awards-recognition/#2020","text":"","title":"2020"},{"location":"awards-recognition/#spark-ai-summit-2020-vectorized-deep-learning-acceleration-from-preprocessing-to-inference-and-training-on-apache-spark-in-sk-telecom","text":"Speaker: Hongchan Roh(SK Telecom)","title":" Spark AI Summit 2020 Vectorized Deep Learning Acceleration from Preprocessing to Inference and Training on Apache Spark in SK Telecom"},{"location":"awards-recognition/#2019","text":"","title":"2019"},{"location":"awards-recognition/#spark-ai-summit-europe-2019-spark-ai-usecase-in-telco-network-quality-analysis-and-prediction-with-geospatial-visualization","text":"Speaker: Hongchan Roh, Dooyoung Hwang(SK Telecom)","title":" Spark AI Summit Europe 2019 Spark AI Usecase in Telco: Network Quality Analysis and Prediction\u00a0with Geospatial Visualization"},{"location":"build-lightningdb-on-k8s/","text":"Build 'LightningDB' (Admin Only) \u00b6 1. LightningDB Source Code(Private Repository) \u00b6 $ git clone https://github.com/mnms/LightningDB 2. Build \u00b6 - v1 \u00b6 Branch: release/flashbase_v1.4.3 Commands: $ ./build.sh compile $ cd nvkvs $ docker build . -t harbor.k8s.lightningdb/ltdb/nvkvs:v1.4.3 $ docker push harbor.k8s.lightningdb/ltdb/nvkvs:v1.4.3 - v2 \u00b6 Branch: release/flashbase_v2.0.0 Commands: $ ./build.sh compile debug $ cd nvkvs $ docker build . 
-t harbor.k8s.lightningdb/ltdb/nvkvs:v2.0.0 $ docker push harbor.k8s.lightningdb/ltdb/nvkvs:v2.0.0 - v2 CXL-CMS \u00b6 Branch: cms-integration Prerequisite(install daxctl): $ yum install -y kmod-devel rubygem-asciidoctor.noarch iniparser-devel.x86_64 meson.noarch // json-c (version: json-c-0.14-20200419) $ git clone https://github.com/json-c/json-c.git $ cd json-c $ git checkout json-c-0.14-20200419 -b json-c-0.14-20200419 $ mkdir json-c-build $ cd json-c-build/ $ cmake ../ $ make -j48 //Required Min Version: v75 (latest version: v78) $ git clone https://github.com/pmem/ndctl $ git checkout v75 -b v75 $ meson setup build; $ meson compile -C build; $ meson install -C build; Commands: $ ./build.sh compile debug // dax-ctl \uc744 \uc774\ubbf8\uc9c0 base \uacbd\ub85c\uc5d0 \uc124\uce58 \ud544\uc694 // \ucef4\ud30c\uc77c \uc791\uc5c5 \ub514\ub809\ud1a0\ub9ac\uac00 \"/root/ldb/LightningDB_v2_cxl/nvkvs/debug/output\"\ub77c \uac00\uc815 // ndctl github \ucef4\ud30c\uc77c \ub514\ub809\ud1a0\ub9ac\ub85c \uc774\ub3d9 $ cd ndctl $ rm -rf build $ meson -Drootprefix=/root/ldb/LightningDB_v2_cxl/nvkvs/debug/output -Dlibdir=/root/ldb/LightningDB_v2_cxl/nvkvs/debug/output/lib build -Dprefix=/root/ldb/LightningDB_v2_cxl/nvkvs/debug/output $ meson compile -C build; $ meson install -C build; $ cd nvkvs $ docker build . -t harbor.k8s.lightningdb/ltdb/nvkvs:v2-cms-integration $ docker push harbor.k8s.lightningdb/ltdb/nvkvs:v2-cms-integration Tip How to use maximum cores to compile (e.g. max cpu core:56) In 'build.sh', use cmake --build . --target install -- -j56 and mvn clean install -DskipTests -P $RELEASE_MODE $MAVEN_OPTS -T 56 Build 'ltdb-http API Server' (Admin Only) \u00b6 1. ltdb-http Source Code(Private Repository) \u00b6 $ git clone https://github.com/mnms/ltdb-http 2. Build \u00b6 - v1 \u00b6 Branch: develop Commands: $ mvn clean package -DskipTests -P release-k8s,dist-k8s,tgz -Dsite-spec=k8s -Dk8s.namespace=metavision $ cd target-k8s $ tar xzvf ltdb-http-1.0-k8s-xxx_xxx.tar.gz $ cd ltdb-http $ docker build . -t harbor.k8s.lightningdb/ltdb/ltdb-http:develop $ docker push harbor.k8s.lightningdb/ltdb/ltdb-http:develop - v2 / v2 CXL-CMS \u00b6 Branch: develop-v2 Commands: $ mvn clean package -DskipTests -P release-k8s,dist-k8s,tgz -Dsite-spec=k8s -Dk8s.namespace=metavision $ cd target-k8s $ tar xzvf ltdb-http-1.0-k8s-xxx_xxx.tar.gz $ cd ltdb-http $ docker build . -t harbor.k8s.lightningdb/ltdb/ltdb-http:develop-v2 $ docker push harbor.k8s.lightningdb/ltdb/ltdb-http:develop-v2 Build 'Thunderquery API Server' (Admin Only) \u00b6 1. Thunderquery Source Code(Private Repository) \u00b6 $ git clone https://github.com/mnms/thunderquery_api $ git clone https://github.com/mnms/thunderquery-cli 2. Build \u00b6 Branch: develop Prerequisite(install musl-gcc): $ yum install -y kmod-devel rubygem-asciidoctor.noarch iniparser-devel.x86_64 meson.noarch $ vi /etc/yum.repos.d/cert-forensics-tools.repo [cert-forensics-tools] name=Cert Forensics Tools Repository baseurl=https://forensics.cert.org/centos/cert/8/x86_64/ enabled=1 gpgcheck=1 gpgkey=https://forensics.cert.org/forensics.asc $ yum clean all $ yum makecache $ yum install musl-gcc.x86_64 Register public key to github $ cat ~/.ssh/id_rsa.pub Command: $ vi ~/.cargo/config.toml [net] git-fetch-with-cli = true $ cd thunderquery_api $ cargo install --path . --target=x86_64-unknown-linux-musl $ cd thunderquery-cli $ cargo install --path . 
--target=x86_64-unknown-linux-musl $ cd thunderquery_api ## Copy the thunderquery-cli binary to the api directory ## $ cp ../thunderquery-cli/target/x86_64-unknown-linux-musl/release/thunderquery-cli target/x86_64-unknown-linux-musl/release $ docker build . -t harbor.k8s.lightningdb/ltdb/thunderquery_api:develop $ docker push harbor.k8s.lightningdb/ltdb/thunderquery_api:develop","title":"Build LightningDB(Admin Only)"},{"location":"build-lightningdb-on-k8s/#build-lightningdb-admin-only","text":"","title":"Build 'LightningDB' (Admin Only)"},{"location":"build-lightningdb-on-k8s/#1-lightningdb-source-codeprivate-repository","text":"$ git clone https://github.com/mnms/LightningDB","title":"1. LightningDB Source Code(Private Repository)"},{"location":"build-lightningdb-on-k8s/#2-build","text":"","title":"2. Build"},{"location":"build-lightningdb-on-k8s/#-v1","text":"Branch: release/flashbase_v1.4.3 Commands: $ ./build.sh compile $ cd nvkvs $ docker build . -t harbor.k8s.lightningdb/ltdb/nvkvs:v1.4.3 $ docker push harbor.k8s.lightningdb/ltdb/nvkvs:v1.4.3","title":"- v1"},{"location":"build-lightningdb-on-k8s/#-v2","text":"Branch: release/flashbase_v2.0.0 Commands: $ ./build.sh compile debug $ cd nvkvs $ docker build . -t harbor.k8s.lightningdb/ltdb/nvkvs:v2.0.0 $ docker push harbor.k8s.lightningdb/ltdb/nvkvs:v2.0.0","title":"- v2"},{"location":"build-lightningdb-on-k8s/#-v2-cxl-cms","text":"Branch: cms-integration Prerequisite(install daxctl): $ yum install -y kmod-devel rubygem-asciidoctor.noarch iniparser-devel.x86_64 meson.noarch // json-c (version: json-c-0.14-20200419) $ git clone https://github.com/json-c/json-c.git $ cd json-c $ git checkout json-c-0.14-20200419 -b json-c-0.14-20200419 $ mkdir json-c-build $ cd json-c-build/ $ cmake ../ $ make -j48 //Required Min Version: v75 (latest version: v78) $ git clone https://github.com/pmem/ndctl $ git checkout v75 -b v75 $ meson setup build; $ meson compile -C build; $ meson install -C build; Commands: $ ./build.sh compile debug // dax-ctl needs to be installed into the image base path // assume the compile working directory is \"/root/ldb/LightningDB_v2_cxl/nvkvs/debug/output\" // move to the ndctl github compile directory $ cd ndctl $ rm -rf build $ meson -Drootprefix=/root/ldb/LightningDB_v2_cxl/nvkvs/debug/output -Dlibdir=/root/ldb/LightningDB_v2_cxl/nvkvs/debug/output/lib build -Dprefix=/root/ldb/LightningDB_v2_cxl/nvkvs/debug/output $ meson compile -C build; $ meson install -C build; $ cd nvkvs $ docker build . -t harbor.k8s.lightningdb/ltdb/nvkvs:v2-cms-integration $ docker push harbor.k8s.lightningdb/ltdb/nvkvs:v2-cms-integration Tip How to use maximum cores to compile (e.g. max cpu core:56) In 'build.sh', use cmake --build . --target install -- -j56 and mvn clean install -DskipTests -P $RELEASE_MODE $MAVEN_OPTS -T 56","title":"- v2 CXL-CMS"},{"location":"build-lightningdb-on-k8s/#build-ltdb-http-api-server-admin-only","text":"","title":"Build 'ltdb-http API Server' (Admin Only)"},{"location":"build-lightningdb-on-k8s/#1-ltdb-http-source-codeprivate-repository","text":"$ git clone https://github.com/mnms/ltdb-http","title":"1. ltdb-http Source Code(Private Repository)"},{"location":"build-lightningdb-on-k8s/#2-build_1","text":"","title":"2.
Build"},{"location":"build-lightningdb-on-k8s/#-v1_1","text":"Branch: develop Commands: $ mvn clean package -DskipTests -P release-k8s,dist-k8s,tgz -Dsite-spec=k8s -Dk8s.namespace=metavision $ cd target-k8s $ tar xzvf ltdb-http-1.0-k8s-xxx_xxx.tar.gz $ cd ltdb-http $ docker build . -t harbor.k8s.lightningdb/ltdb/ltdb-http:develop $ docker push harbor.k8s.lightningdb/ltdb/ltdb-http:develop","title":"- v1"},{"location":"build-lightningdb-on-k8s/#-v2-v2-cxl-cms","text":"Branch: develop-v2 Commands: $ mvn clean package -DskipTests -P release-k8s,dist-k8s,tgz -Dsite-spec=k8s -Dk8s.namespace=metavision $ cd target-k8s $ tar xzvf ltdb-http-1.0-k8s-xxx_xxx.tar.gz $ cd ltdb-http $ docker build . -t harbor.k8s.lightningdb/ltdb/ltdb-http:develop-v2 $ docker push harbor.k8s.lightningdb/ltdb/ltdb-http:develop-v2","title":"- v2 / v2 CXL-CMS"},{"location":"build-lightningdb-on-k8s/#build-thunderquery-api-server-admin-only","text":"","title":"Build 'Thunderquery API Server' (Admin Only)"},{"location":"build-lightningdb-on-k8s/#1-thunderquery-source-codeprivate-repository","text":"$ git clone https://github.com/mnms/thunderquery_api $ git clone https://github.com/mnms/thunderquery-cli","title":"1. Thunderquery Source Code(Private Repository)"},{"location":"build-lightningdb-on-k8s/#2-build_2","text":"Branch: develop Prerequisite(install musl-gcc): $ yum install -y kmod-devel rubygem-asciidoctor.noarch iniparser-devel.x86_64 meson.noarch $ vi /etc/yum.repos.d/cert-forensics-tools.repo [cert-forensics-tools] name=Cert Forensics Tools Repository baseurl=https://forensics.cert.org/centos/cert/8/x86_64/ enabled=1 gpgcheck=1 gpgkey=https://forensics.cert.org/forensics.asc $ yum clean all $ yum makecache $ yum install musl-gcc.x86_64 Register public key to github $ cat ~/.ssh/id_rsa.pub Command: $ vi ~/.cargo/config.toml [net] git-fetch-with-cli = true $ cd thunderquery_api $ cargo install --path . --target=x86_64-unknown-linux-musl $ cd thunderquery-cli $ cargo install --path . --target=x86_64-unknown-linux-musl $ cd thunderquery_api ## thunderquery-cli binary \ub97c api \ub514\ub809\ud1a0\ub9ac\ub85c \ubcf5\uc0ac ## $ cp ../thunderquery-cli/target/x86_64-unknown-linux-musl/release/thunderquery-cli target/x86_64-unknown-linux-musl/release $ docker build . -t harbor.k8s.lightningdb/ltdb/thunderquery_api:develop $ docker push harbor.k8s.lightningdb/ltdb/thunderquery_api:develop","title":"2. Build"},{"location":"cli-cli/","text":"1. ping \u00b6 You can use ping command to check the status of the nodes. Options All nodes cli ping --all A single node cli ping {hostname} {port} Examples matthew@lightningdb:21> cli ping --all alive redis 12/12 matthew@lightningdb:21> cli ping myServer 20101 PONG 2. config \u00b6 You can read or write the configuration values of the current cluster. Options Read All nodes cli config get {feature name} --all A sing node cli config get -h {hostname} -p {port} Write All nodes cli config set {feature name} {value} --all A sing node cli config set {feature name} {value} -h {hostname} -p {port} Examples Read and write the configuration value of all nodes. 
matthew@lightningdb:21> cli config get maxmemory --all +--------+----------------------+--------+ | TYPE | ADDR | RESULT | +--------+----------------------+--------+ | Master | 192.168.111.41:20100 | 300mb | | Master | 192.168.111.41:20101 | 300mb | | Master | 192.168.111.41:20102 | 300mb | | Master | 192.168.111.44:20100 | 300mb | | Master | 192.168.111.44:20101 | 300mb | | Master | 192.168.111.44:20102 | 300mb | | Slave | 192.168.111.41:20150 | 300mb | | Slave | 192.168.111.41:20151 | 300mb | | Slave | 192.168.111.41:20152 | 300mb | | Slave | 192.168.111.44:20150 | 300mb | | Slave | 192.168.111.44:20151 | 300mb | | Slave | 192.168.111.44:20152 | 300mb | +--------+----------------------+--------+ matthew@lightningdb:21> cli config set maxmemory 500mb --all success 12/12 matthew@lightningdb:21> cli config get maxmemory --all +--------+----------------------+--------+ | TYPE | ADDR | RESULT | +--------+----------------------+--------+ | Master | 192.168.111.41:20100 | 500mb | | Master | 192.168.111.41:20101 | 500mb | | Master | 192.168.111.41:20102 | 500mb | | Master | 192.168.111.44:20100 | 500mb | | Master | 192.168.111.44:20101 | 500mb | | Master | 192.168.111.44:20102 | 500mb | | Slave | 192.168.111.41:20150 | 500mb | | Slave | 192.168.111.41:20151 | 500mb | | Slave | 192.168.111.41:20152 | 500mb | | Slave | 192.168.111.44:20150 | 500mb | | Slave | 192.168.111.44:20151 | 500mb | | Slave | 192.168.111.44:20152 | 500mb | +--------+----------------------+--------+ Read and write the configuration value of a single node. matthew@lightningdb:21> cli config get maxmemory -h myServer -p 20101 500mb matthew@lightningdb:21> cli config set maxmemory 300mb -h myServer -p 20101 OK matthew@lightningdb:21> cli config get maxmemory -h myServer -p 20101 300mb matthew@lightningdb:21> 3. cluster info \u00b6 You can get the information and stats of the current cluster. matthew@lightningdb:21> cli cluster info cluster_state:ok cluster_slots_assigned:16384 cluster_slots_ok:16384 cluster_slots_pfail:0 cluster_slots_fail:0 cluster_known_nodes:12 cluster_size:6 cluster_current_epoch:14 cluster_my_epoch:6 cluster_stats_messages_ping_sent:953859 cluster_stats_messages_pong_sent:917798 cluster_stats_messages_meet_sent:10 cluster_stats_messages_sent:1871667 cluster_stats_messages_ping_received:917795 cluster_stats_messages_pong_received:951370 cluster_stats_messages_meet_received:3 cluster_stats_messages_received:1869168 4. cluster nodes \u00b6 You can get the distribution and status of each node. 
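The listing below appears to be the standard Redis CLUSTER NODES view, so the same node map can presumably be pulled from any reachable node with plain redis-cli (a sketch using an example master address from this cluster):
$ redis-cli -h 192.168.111.41 -p 20100 cluster nodes    # full node map, like the output below
$ redis-cli -h 192.168.111.41 -p 20100 cluster nodes | grep -c ' master '    # quick count of master entries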
matthew@lightningdb:21> cli cluster nodes 4b8fe9d135670daabe19437e3b840b1c770ffa2f 192.168.111.44:20151 slave 985a2215d2acb3f1612751a13e0d7466d874cfe5 0 1604891127367 10 connected 4dd5dff5008ccd89cf18faef736fe6492eb34d05 192.168.111.41:20152 slave 9bff873f9f5f84cd3b78288524230b5cd1c6190f 0 1604891128000 8 connected 15b3c06c1edeb5d2eeb6c0f35c9f27cf616acd11 192.168.111.44:20101 myself,slave 4b6bc980b33dd1eecc87babfb5762eda9e7921e7 0 1604891118000 13 connected 8a800fbf3518e1a0e6b332516455ef4aa6bb3be9 192.168.111.41:20100 master - 0 1604891130372 1 connected 0-2730 9bff873f9f5f84cd3b78288524230b5cd1c6190f 192.168.111.44:20102 master - 0 1604891126000 6 connected 8193-10923 60f88a9db445997112cf8947931988152767878f 192.168.111.44:20152 slave 974c0540741d89c7569b63345faa852361043e8b 0 1604891122000 11 connected 985a2215d2acb3f1612751a13e0d7466d874cfe5 192.168.111.41:20101 master - 0 1604891125365 5 connected 2731-5461 85de73ca2aa668a79fe5636ec74e68dee6f9b36a 192.168.111.44:20100 master - 0 1604891129371 4 connected 13654-16383 974c0540741d89c7569b63345faa852361043e8b 192.168.111.41:20102 master - 0 1604891124363 2 connected 5462-8192 9c6aef212b6d68d2a0298c1902629e1fdc95f943 192.168.111.41:20150 slave 85de73ca2aa668a79fe5636ec74e68dee6f9b36a 0 1604891128370 4 connected 474303b3b9e9f7b84b157ecf52ce11e153a28716 192.168.111.44:20150 slave 8a800fbf3518e1a0e6b332516455ef4aa6bb3be9 0 1604891126366 13 connected 4b6bc980b33dd1eecc87babfb5762eda9e7921e7 192.168.111.41:20151 master - 0 1604891131375 14 connected 10924-13653 5. cluster slots \u00b6 You can get the slot information. matthew@lightningdb:21> cli cluster slots +-------+-------+----------------+--------+----------------+----------+ | start | end | m_ip | m_port | s_ip_0 | s_port_0 | +-------+-------+----------------+--------+----------------+----------+ | 0 | 2730 | 192.168.111.41 | 20100 | 192.168.111.44 | 20150 | | 2731 | 5461 | 192.168.111.41 | 20101 | 192.168.111.44 | 20151 | | 5462 | 8192 | 192.168.111.41 | 20102 | 192.168.111.44 | 20152 | | 8193 | 10923 | 192.168.111.44 | 20102 | 192.168.111.41 | 20152 | | 10924 | 13653 | 192.168.111.41 | 20151 | 192.168.111.44 | 20101 | | 13654 | 16383 | 192.168.111.44 | 20100 | 192.168.111.41 | 20150 | +-------+-------+----------------+--------+----------------+----------+","title":"Redis3 cli (LightningDB v1.x)"},{"location":"cli-cli/#1-ping","text":"You can use ping command to check the status of the nodes. Options All nodes cli ping --all A single node cli ping {hostname} {port} Examples matthew@lightningdb:21> cli ping --all alive redis 12/12 matthew@lightningdb:21> cli ping myServer 20101 PONG","title":"1. ping"},{"location":"cli-cli/#2-config","text":"You can read or write the configuration values of the current cluster. Options Read All nodes cli config get {feature name} --all A sing node cli config get -h {hostname} -p {port} Write All nodes cli config set {feature name} {value} --all A sing node cli config set {feature name} {value} -h {hostname} -p {port} Examples Read and write the configuration value of all nodes. 
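If ltcli is not at hand, the cluster-wide write in the next example can be approximated with a small shell loop, as a sketch only; the two addresses are examples taken from the table below, and every master/slave address of the cluster would have to be listed:
$ for addr in 192.168.111.41:20100 192.168.111.44:20100; do redis-cli -h "${addr%:*}" -p "${addr#*:}" config set maxmemory 500mb; done    # host is the part before the colon, port the part after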
matthew@lightningdb:21> cli config get maxmemory --all +--------+----------------------+--------+ | TYPE | ADDR | RESULT | +--------+----------------------+--------+ | Master | 192.168.111.41:20100 | 300mb | | Master | 192.168.111.41:20101 | 300mb | | Master | 192.168.111.41:20102 | 300mb | | Master | 192.168.111.44:20100 | 300mb | | Master | 192.168.111.44:20101 | 300mb | | Master | 192.168.111.44:20102 | 300mb | | Slave | 192.168.111.41:20150 | 300mb | | Slave | 192.168.111.41:20151 | 300mb | | Slave | 192.168.111.41:20152 | 300mb | | Slave | 192.168.111.44:20150 | 300mb | | Slave | 192.168.111.44:20151 | 300mb | | Slave | 192.168.111.44:20152 | 300mb | +--------+----------------------+--------+ matthew@lightningdb:21> cli config set maxmemory 500mb --all success 12/12 matthew@lightningdb:21> cli config get maxmemory --all +--------+----------------------+--------+ | TYPE | ADDR | RESULT | +--------+----------------------+--------+ | Master | 192.168.111.41:20100 | 500mb | | Master | 192.168.111.41:20101 | 500mb | | Master | 192.168.111.41:20102 | 500mb | | Master | 192.168.111.44:20100 | 500mb | | Master | 192.168.111.44:20101 | 500mb | | Master | 192.168.111.44:20102 | 500mb | | Slave | 192.168.111.41:20150 | 500mb | | Slave | 192.168.111.41:20151 | 500mb | | Slave | 192.168.111.41:20152 | 500mb | | Slave | 192.168.111.44:20150 | 500mb | | Slave | 192.168.111.44:20151 | 500mb | | Slave | 192.168.111.44:20152 | 500mb | +--------+----------------------+--------+ Read and write the configuration value of a single node. matthew@lightningdb:21> cli config get maxmemory -h myServer -p 20101 500mb matthew@lightningdb:21> cli config set maxmemory 300mb -h myServer -p 20101 OK matthew@lightningdb:21> cli config get maxmemory -h myServer -p 20101 300mb matthew@lightningdb:21>","title":"2. config"},{"location":"cli-cli/#3-cluster-info","text":"You can get the information and stats of the current cluster. matthew@lightningdb:21> cli cluster info cluster_state:ok cluster_slots_assigned:16384 cluster_slots_ok:16384 cluster_slots_pfail:0 cluster_slots_fail:0 cluster_known_nodes:12 cluster_size:6 cluster_current_epoch:14 cluster_my_epoch:6 cluster_stats_messages_ping_sent:953859 cluster_stats_messages_pong_sent:917798 cluster_stats_messages_meet_sent:10 cluster_stats_messages_sent:1871667 cluster_stats_messages_ping_received:917795 cluster_stats_messages_pong_received:951370 cluster_stats_messages_meet_received:3 cluster_stats_messages_received:1869168","title":"3. cluster info"},{"location":"cli-cli/#4-cluster-nodes","text":"You can get the distribution and status of each node. 
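To check which master a single replica follows without reading the full node map, the standard INFO replication fields can be queried per node, a sketch assuming one of the example slave addresses from this cluster:
$ redis-cli -h 192.168.111.44 -p 20150 info replication | grep -E 'role|master_host|master_port'    # role of this node and the master it replicates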
matthew@lightningdb:21> cli cluster nodes 4b8fe9d135670daabe19437e3b840b1c770ffa2f 192.168.111.44:20151 slave 985a2215d2acb3f1612751a13e0d7466d874cfe5 0 1604891127367 10 connected 4dd5dff5008ccd89cf18faef736fe6492eb34d05 192.168.111.41:20152 slave 9bff873f9f5f84cd3b78288524230b5cd1c6190f 0 1604891128000 8 connected 15b3c06c1edeb5d2eeb6c0f35c9f27cf616acd11 192.168.111.44:20101 myself,slave 4b6bc980b33dd1eecc87babfb5762eda9e7921e7 0 1604891118000 13 connected 8a800fbf3518e1a0e6b332516455ef4aa6bb3be9 192.168.111.41:20100 master - 0 1604891130372 1 connected 0-2730 9bff873f9f5f84cd3b78288524230b5cd1c6190f 192.168.111.44:20102 master - 0 1604891126000 6 connected 8193-10923 60f88a9db445997112cf8947931988152767878f 192.168.111.44:20152 slave 974c0540741d89c7569b63345faa852361043e8b 0 1604891122000 11 connected 985a2215d2acb3f1612751a13e0d7466d874cfe5 192.168.111.41:20101 master - 0 1604891125365 5 connected 2731-5461 85de73ca2aa668a79fe5636ec74e68dee6f9b36a 192.168.111.44:20100 master - 0 1604891129371 4 connected 13654-16383 974c0540741d89c7569b63345faa852361043e8b 192.168.111.41:20102 master - 0 1604891124363 2 connected 5462-8192 9c6aef212b6d68d2a0298c1902629e1fdc95f943 192.168.111.41:20150 slave 85de73ca2aa668a79fe5636ec74e68dee6f9b36a 0 1604891128370 4 connected 474303b3b9e9f7b84b157ecf52ce11e153a28716 192.168.111.44:20150 slave 8a800fbf3518e1a0e6b332516455ef4aa6bb3be9 0 1604891126366 13 connected 4b6bc980b33dd1eecc87babfb5762eda9e7921e7 192.168.111.41:20151 master - 0 1604891131375 14 connected 10924-13653","title":"4. cluster nodes"},{"location":"cli-cli/#5-cluster-slots","text":"You can get the slot information. matthew@lightningdb:21> cli cluster slots +-------+-------+----------------+--------+----------------+----------+ | start | end | m_ip | m_port | s_ip_0 | s_port_0 | +-------+-------+----------------+--------+----------------+----------+ | 0 | 2730 | 192.168.111.41 | 20100 | 192.168.111.44 | 20150 | | 2731 | 5461 | 192.168.111.41 | 20101 | 192.168.111.44 | 20151 | | 5462 | 8192 | 192.168.111.41 | 20102 | 192.168.111.44 | 20152 | | 8193 | 10923 | 192.168.111.44 | 20102 | 192.168.111.41 | 20152 | | 10924 | 13653 | 192.168.111.41 | 20151 | 192.168.111.44 | 20101 | | 13654 | 16383 | 192.168.111.44 | 20100 | 192.168.111.41 | 20150 | +-------+-------+----------------+--------+----------------+----------+","title":"5. cluster slots"},{"location":"cli-cli2/","text":"Note By default, we support all of the features provided in LightningDB v1.x, and we only point you to the ones that have been added and changed. 1. 
createTable \u00b6 Command \"TABLE.META.WRITE\" \"createTable\" \"catalog name\" \"namespace name\" \"table name\" \"schema binary\" Examples 127.0.0.1:7389> help \"TABLE.META.WRITE\" \"createTable\" TABLE.META.WRITE createTable catalog.namespace.table arrow::schema summary: Create a new table since: 2.0.0 group: table.meta 127.0.0.1:7389> \"TABLE.META.WRITE\" \"createTable\" \"cat_1.test.table\" \"\\x10\\x00\\x00\\x00\\x00\\x00\\n\\x00\\x0e\\x00\\x06\\x00\\r\\x00\\b\\x00\\n\\x00\\x00\\x00\\x00\\x00\\x04\\x00\\x10\\x00\\x00\\x00\\x00\\x01\\n\\x00\\x0c\\x00\\x00\\x00\\b\\x00\\x04\\x00\\n\\x00\\x00\\x00\\b\\x00\\x00\\x00\\xc4\\x01\\x00\\x00\\t\\x00\\x00\\x00\\x80\\x01\\x00\\x00D\\x01\\x00\\x00\\x18\\x01\\x00\\x00\\xec\\x00\\x00\\x00\\xc0\\x00\\x00\\x00\\x98\\x00\\x00\\x00h\\x00\\x00\\x00@\\x00\\x00\\x00\\x04\\x00\\x00\\x00\\xac\\xfe\\xff\\xff\\b\\x00\\x00\\x00\\x18\\x00\\x00\\x00\\x0e\\x00\\x00\\x00127.0.0.1:7389\\x00\\x00\\x13\\x00\\x00\\x00properties.location\\x00\\xe4\\xfe\\xff\\xff\\b\\x00\\x00\\x00\\x0c\\x00\\x00\\x00\\x03\\x00\\x00\\x00job\\x00\\x0b\\x00\\x00\\x00partition.1\\x00\\b\\xff\\xff\\xff\\b\\x00\\x00\\x00\\x0c\\x00\\x00\\x00\\x01\\x00\\x00\\x001\\x00\\x00\\x00\\x10\\x00\\x00\\x00internal.version\\x00\\x00\\x00\\x004\\xff\\xff\\xff\\b\\x00\\x00\\x00\\x0c\\x00\\x00\\x00\\x03\\x00\\x00\\x00age\\x00\\x0b\\x00\\x00\\x00partition.0\\x00X\\xff\\xff\\xff\\b\\x00\\x00\\x00\\x0c\\x00\\x00\\x00\\x01\\x00\\x00\\x002\\x00\\x00\\x00\\x0e\\x00\\x00\\x00partition.size\\x00\\x00\\x80\\xff\\xff\\xff\\b\\x00\\x00\\x00\\x0c\\x00\\x00\\x00\\x03\\x00\\x00\\x00512\\x00\\x0c\\x00\\x00\\x00cva.capacity\\x00\\x00\\x00\\x00\\xa8\\xff\\xff\\xff\\b\\x00\\x00\\x00\\x0c\\x00\\x00\\x00\\x02\\x00\\x00\\x0024\\x00\\x00\\x0e\\x00\\x00\\x00properties.ttl\\x00\\x00\\xd0\\xff\\xff\\xff\\b\\x00\\x00\\x00\\x10\\x00\\x00\\x00\\x04\\x00\\x00\\x002560\\x00\\x00\\x00\\x00\\x11\\x00\\x00\\x00rowgroup.capacity\\x00\\x00\\x00\\b\\x00\\x0c\\x00\\b\\x00\\x04\\x00\\b\\x00\\x00\\x00\\b\\x00\\x00\\x00\\x18\\x00\\x00\\x00\\x0e\\x00\\x00\\x00127.0.0.1:7379\\x00\\x00\\x14\\x00\\x00\\x00properties.metastore\\x00\\x00\\x00\\x00\\x03\\x00\\x00\\x00\\x88\\x00\\x00\\x004\\x00\\x00\\x00\\x04\\x00\\x00\\x00\\x96\\xff\\xff\\xff\\x14\\x00\\x00\\x00\\x14\\x00\\x00\\x00\\x14\\x00\\x00\\x00\\x00\\x00\\x05\\x01\\x10\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x84\\xff\\xff\\xff\\x03\\x00\\x00\\x00job\\x00\\xc2\\xff\\xff\\xff\\x14\\x00\\x00\\x00\\x14\\x00\\x00\\x00\\x1c\\x00\\x00\\x00\\x00\\x00\\x02\\x01 \\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\b\\x00\\x0c\\x00\\b\\x00\\a\\x00\\b\\x00\\x00\\x00\\x00\\x00\\x00\\x01 \\x00\\x00\\x00\\x03\\x00\\x00\\x00age\\x00\\x00\\x00\\x12\\x00\\x18\\x00\\x14\\x00\\x13\\x00\\x12\\x00\\x0c\\x00\\x00\\x00\\b\\x00\\x04\\x00\\x12\\x00\\x00\\x00\\x14\\x00\\x00\\x00\\x14\\x00\\x00\\x00\\x18\\x00\\x00\\x00\\x00\\x00\\x05\\x01\\x14\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x04\\x00\\x04\\x00\\x04\\x00\\x00\\x00\\x04\\x00\\x00\\x00name\\x00\\x00\\x00\\x00\" 2. 
truncateTable \u00b6 Command \"TABLE.META.WRITE\" \"truncateTable\" \"{catalog name}.{namespace name}.{table name}\" Examples 127.0.0.1:7389> help \"TABLE.META.WRITE\" \"truncateTable\" TABLE.META.WRITE truncateTable catalog.namespace.table summary: Truncate the table(Remove all data in the table) since: 2.0.0 group: table.meta 127.0.0.1:7389> 127.0.0.1:7389> TABLE.DATA.READ partitions \"cat_1.test.table\" \"*\" 1) \"21\\x1eSales Manager\" 2) \"22\\x1eTutor\" 3) \"23\\x1eBanker\" 4) \"23\\x1eProfessor\" 5) \"23\\x1eSales Manager\" 6) \"24\\x1eStudent\" 7) \"26\\x1eStudent\" 8) \"27\\x1eSales Manager\" 9) \"29\\x1eBanker\" 10) \"29\\x1eProfessor\" 11) \"32\\x1eProfessor\" 12) \"32\\x1eSales Manager\" 13) \"33\\x1eProfessor\" 14) \"36\\x1eProfessor\" 15) \"41\\x1eBanker\" 16) \"43\\x1eSales Manager\" 17) \"45\\x1eBanker\" 18) \"47\\x1eBanker\" 19) \"48\\x1eCEO\" 127.0.0.1:7389> TABLE.META.WRITE truncateTable \"cat_1.test.table\" \"OK\" 127.0.0.1:7389> TABLE.DATA.READ partitions \"cat_1.test.table\" \"*\" (empty list or set) 3. dropTable \u00b6 Command \"TABLE.META.WRITE\" \"dropTable\" \"{catalog name}.{namespace name}.{table name}\" Examples 127.0.0.1:7389> help \"TABLE.META.WRITE\" \"dropTable\" TABLE.META.WRITE dropTable catalog.namespace.table summary: Drop the table(Remove all data and the schema) since: 2.0.0 group: table.meta 127.0.0.1:7389> 127.0.0.1:7389> TABLE.META.READ showTables 1) \"cat_1.test.table\" 2) \"version: 1\" 127.0.0.1:7389> TABLE.META.WRITE dropTable \"cat_1.test.table\" \"OK\" 127.0.0.1:7389> TABLE.META.READ showTables (empty list or set) 4. dropAllTables \u00b6 Command \"TABLE.META.WRITE\" \"dropAllTables\" Examples 127.0.0.1:7389> help \"TABLE.META.WRITE\" \"dropAllTables\" TABLE.META.WRITE dropAllTables - summary: Drop all tables since: 2.0.0 group: table.meta 127.0.0.1:7389> 127.0.0.1:7389> TABLE.META.READ showTables 1) \"cat_1.test.table\" 2) \"version: 1\" 127.0.0.1:7389> TABLE.META.WRITE dropAllTables 1 tables are deleted. 5. setTableTtl \u00b6 Command \"TABLE.META.WRITE\" \"settablettl\" \"{catalog name}.{namespace name}.{table name}\" \"{ttl time(unit: msec)}\" Example 127.0.0.1:7389> help \"TABLE.META.WRITE\" \"seTtableTtl\" TABLE.META.WRITE setTableTtl catalog.namespace.table ttl(msec) summary: Set the ttl of the table since: 2.0.0 group: table.meta 127.0.0.1:7389> TABLE.META.WRITE setTableTtl \"cat_1.test.table\" 30000 OK 6. showTables \u00b6 Command \"TABLE.META.READ\" \"showTables\" Examples 127.0.0.1:7389> help TABLE.META.READ showTables TABLE.META.READ showTables - summary: Get the list of tables with their own version since: 2.0.0 group: table.meta 127.0.0.1:7389> 127.0.0.1:7389> TABLE.META.READ showTables 1) \"cat_1.test.table\" 2) \"version: 1\" 7. describeTable \u00b6 Command \"TABLE.META.READ\" \"describeTable\" \"table name\" Examples 127.0.0.1:7389> help TABLE.META.READ describeTables TABLE.META.READ describeTables catalog.namespace.table summary: Get all columns and partitions of the table since: 2.0.0 group: table.meta 127.0.0.1:7389> 127.0.0.1:7389> TABLE.META.READ showTables 1) \"cat_1.test.table\" 2) \"version: 1\" 127.0.0.1:7389> TABLE.META.READ describeTables \"cat_1.test.table\" 1) \"name: string\" 2) \"age: int32\" 3) \"job: string\" 4) \"[ partitions: age job ]\" 8. 
getTableTtl \u00b6 Command \"TABLE.META.READ\" gettablettl \"{catalog name}.{namespace name}.{table name}\" Examples 127.0.0.1:7389> help TABLE.META.READ getTableTtl TABLE.META.READ getTableTtl catalog.namespace.table summary: Get the ttl of the table since: 2.0.0 group: table.meta 127.0.0.1:7389> TABLE.META.READ getTableTtl * 1) \"cat_1.test.network_table\" 2) \"86400000\" 3) \"cat_1.test.table\" 4) \"86400000\" 127.0.0.1:7389> TABLE.META.READ getTableTtl cat_1.* 1) \"cat_1.test.network_table\" 2) \"86400000\" 3) \"cat_1.test.table\" 4) \"86400000\" 127.0.0.1:7389> TABLE.META.READ getTableTtl *.network_table 1) \"cat_1.test.network_table\" 2) \"86400000\" 127.0.0.1:7389> TABLE.META.READ getTableTtl cat_1.test.network_table 1) \"cat_1.test.network_table\" 2) \"86400000\" 127.0.0.1:7389> 9. getPartitionTtl \u00b6 Command \"TABLE.META.READ\" getPartitionTtl \"{catalog name}.{namespace name}.{table name}\" \"partition string with regular expression\" Examples 127.0.0.1:7389> help TABLE.META.READ getPartitionTtl TABLE.META.READ getPartitionTtl partition-string summary: Get the ttl of the partition in the table since: 2.0.0 group: table.meta 127.0.0.1:7389> TABLE.META.READ getPartitionTtl \"cat_1.test.table\" \"*\" 1) \"21\\x1eSales Manager\" 2) \"86350123\" 3) \"22\\x1eTutor\" 4) \"86350139\" 5) \"23\\x1eBanker\" 6) \"86350126\" 7) \"23\\x1eProfessor\" 8) \"86350125\" 9) \"23\\x1eSales Manager\" 10) \"86350137\" 11) \"24\\x1eStudent\" 12) \"86350121\" 13) \"26\\x1eStudent\" 14) \"86350124\" 15) \"27\\x1eSales Manager\" 16) \"86350132\" 17) \"29\\x1eBanker\" 18) \"86350124\" 19) \"29\\x1eProfessor\" 20) \"86350125\" 21) \"32\\x1eProfessor\" 22) \"86350127\" 23) \"32\\x1eSales Manager\" 24) \"86350123\" 25) \"33\\x1eProfessor\" 26) \"86350120\" 27) \"36\\x1eProfessor\" 28) \"86350134\" 29) \"40\\x1eBanker\" 30) \"86350119\" 31) \"41\\x1eBanker\" 32) \"86350120\" 33) \"43\\x1eSales Manager\" 34) \"86350133\" 35) \"45\\x1eBanker\" 36) \"86350128\" 37) \"47\\x1eBanker\" 38) \"86350124\" 39) \"48\\x1eCEO\" 40) \"86350138\" 127.0.0.1:7389> TABLE.META.READ getPartitionTtl \"cat_1.test.table\" \"23*\" 1) \"23\\x1eBanker\" 2) \"86343642\" 3) \"23\\x1eProfessor\" 4) \"86343641\" 5) \"23\\x1eSales Manager\" 6) \"86343653\" 127.0.0.1:7389> TABLE.META.READ getPartitionTtl \"cat_1.test.table\" \"*CEO\" 1) \"48\\x1eCEO\" 2) \"86336153\" 127.0.0.1:7389> TABLE.META.READ getPartitionTtl \"cat_1.test.table\" \"45\\x1eBanker\" 1) \"45\\x1eBanker\" 2) \"86324848\" 127.0.0.1:7389> 10. insert \u00b6 - Command - \"TABLE.DATA.WRITE\" \"Insert\" \"{catalog name}.{namespace name}.{table name}\" \"table version\" \"partition string\" \"binaries... 
...\" - Examples 127.0.0.1:7389> help \"TABLE.DATA.WRITE\" \"Insert\" TABLE.DATA.WRITE insert catalog.namespace.table table-version partition-string data summary: Insert a new data(row) since: 2.0.0 group: table.data 1636425657.602951 [0 127.0.0.1:53881] \"TABLE.DATA.WRITE\" \"Insert\" \"cat_1.test.table\" \"1\" \"40\\x1eBanker\" \"Jeannie\" \"(\\x00\\x00\\x00\" \"Banker\" 1636425657.604043 [0 127.0.0.1:53881] \"TABLE.DATA.WRITE\" \"Insert\" \"cat_1.test.table\" \"1\" \"33\\x1eProfessor\" \"Ardith\" \"!\\x00\\x00\\x00\" \"Professor\" 1636425657.604529 [0 127.0.0.1:53881] \"TABLE.DATA.WRITE\" \"Insert\" \"cat_1.test.table\" \"1\" \"41\\x1eBanker\" \"Elena\" \")\\x00\\x00\\x00\" \"Banker\" 1636425657.605351 [0 127.0.0.1:53881] \"TABLE.DATA.WRITE\" \"Insert\" \"cat_1.test.table\" \"1\" \"24\\x1eStudent\" \"Corliss\" \"\\x18\\x00\\x00\\x00\" \"Student\" 1636425657.607351 [0 127.0.0.1:53881] \"TABLE.DATA.WRITE\" \"Insert\" \"cat_1.test.table\" \"1\" \"41\\x1eBanker\" \"Kiyoko\" \")\\x00\\x00\\x00\" \"Banker\" 1636425657.608057 [0 127.0.0.1:53881] \"TABLE.DATA.WRITE\" \"Insert\" \"cat_1.test.table\" \"1\" \"21\\x1eSales Manager\" \"Hilton\" \"\\x15\\x00\\x00\\x00\" \"Sales Manager\" 1636425657.608455 [0 127.0.0.1:53881] \"TABLE.DATA.WRITE\" \"Insert\" \"cat_1.test.table\" \"1\" \"32\\x1eSales Manager\" \"Becky\" \" \\x00\\x00\\x00\" \"Sales Manager\" 1636425657.609218 [0 127.0.0.1:53881] \"TABLE.DATA.WRITE\" \"Insert\" \"cat_1.test.table\" \"1\" \"29\\x1eBanker\" \"Wendie\" \"\\x1d\\x00\\x00\\x00\" \"Banker\" 1636425657.609940 [0 127.0.0.1:53881] \"TABLE.DATA.WRITE\" \"Insert\" \"cat_1.test.table\" \"1\" \"26\\x1eStudent\" \"Carolina\" \"\\x1a\\x00\\x00\\x00\" \"Student\" 1636425657.610284 [0 127.0.0.1:53881] \"TABLE.DATA.WRITE\" \"Insert\" \"cat_1.test.table\" \"1\" \"47\\x1eBanker\" \"Laquita\" \"/\\x00\\x00\\x00\" \"Banker\" 1636425657.610638 [0 127.0.0.1:53881] \"TABLE.DATA.WRITE\" \"Insert\" \"cat_1.test.table\" \"1\" \"23\\x1eProfessor\" \"Stephani\" \"\\x17\\x00\\x00\\x00\" \"Professor\" 1636425657.610964 [0 127.0.0.1:53881] \"TABLE.DATA.WRITE\" \"Insert\" \"cat_1.test.table\" \"1\" \"29\\x1eProfessor\" \"Emile\" \"\\x1d\\x00\\x00\\x00\" \"Professor\" 1636425657.612257 [0 127.0.0.1:53881] \"TABLE.DATA.WRITE\" \"Insert\" \"cat_1.test.table\" \"1\" \"23\\x1eBanker\" \"Cherri\" \"\\x17\\x00\\x00\\x00\" \"Banker\" 1636425657.612630 [0 127.0.0.1:53881] \"TABLE.DATA.WRITE\" \"Insert\" \"cat_1.test.table\" \"1\" \"47\\x1eBanker\" \"Raleigh\" \"/\\x00\\x00\\x00\" \"Banker\" 1636425657.612943 [0 127.0.0.1:53881] \"TABLE.DATA.WRITE\" \"Insert\" \"cat_1.test.table\" \"1\" \"32\\x1eProfessor\" \"Hollis\" \" \\x00\\x00\\x00\" \"Professor\" 1636425657.614136 [0 127.0.0.1:53881] \"TABLE.DATA.WRITE\" \"Insert\" \"cat_1.test.table\" \"1\" \"45\\x1eBanker\" \"Brigette\" \"-\\x00\\x00\\x00\" \"Banker\" 1636425657.615558 [0 127.0.0.1:53881] \"TABLE.DATA.WRITE\" \"Insert\" \"cat_1.test.table\" \"1\" \"21\\x1eSales Manager\" \"Damian\" \"\\x15\\x00\\x00\\x00\" \"Sales Manager\" 1636425657.617321 [0 127.0.0.1:53881] \"TABLE.DATA.WRITE\" \"Insert\" \"cat_1.test.table\" \"1\" \"27\\x1eSales Manager\" \"Star\" \"\\x1b\\x00\\x00\\x00\" \"Sales Manager\" 1636425657.618819 [0 127.0.0.1:53881] \"TABLE.DATA.WRITE\" \"Insert\" \"cat_1.test.table\" \"1\" \"43\\x1eSales Manager\" \"Elba\" \"+\\x00\\x00\\x00\" \"Sales Manager\" 1636425657.619621 [0 127.0.0.1:53881] \"TABLE.DATA.WRITE\" \"Insert\" \"cat_1.test.table\" \"1\" \"36\\x1eProfessor\" \"Lourie\" \"$\\x00\\x00\\x00\" \"Professor\" 1636425657.622977 [0 
127.0.0.1:53881] \"TABLE.DATA.WRITE\" \"Insert\" \"cat_1.test.table\" \"1\" \"23\\x1eSales Manager\" \"\\xea\\xb0\\x80\\xeb\\x82\\x98\\xeb\\x82\\x98\\xeb\\x82\\x98\\xea\\xb0\\x80\\xeb\\x82\\x98\\xeb\\x82\\x98\" \"\\x17\\x00\\x00\\x00\" \"Sales Manager\" 1636425657.623555 [0 127.0.0.1:53881] \"TABLE.DATA.WRITE\" \"Insert\" \"cat_1.test.table\" \"1\" \"48\\x1eCEO\" \"Elon\" \"0\\x00\\x00\\x00\" \"CEO\" 1636425657.624359 [0 127.0.0.1:53881] \"TABLE.DATA.WRITE\" \"Insert\" \"cat_1.test.table\" \"1\" \"22\\x1eTutor\" \"Kijung\" \"\\x16\\x00\\x00\\x00\" \"Tutor\" 11. partitions \u00b6 a. Query with a pattern \u00b6 Commnad \"TABLE.DATA.READ\" \"partitions\" \"{catalog name}.{namespace name}.{table name}\" \"pattern(normaly '*')\" Examples 127.0.0.1:7389> help TABLE.DATA.READ partitions TABLE.DATA.READ partitions catalog.namespace.table pattern partition-filter(optional) summary: Get the list of partitions with the pattern and filter since: 2.0.0 group: table.data 127.0.0.1:7389> 127.0.0.1:7389> TABLE.DATA.READ partitions \"cat_1.test.table\" \"*\" 1) \"21\\x1eSales Manager\" 2) \"22\\x1eTutor\" 3) \"23\\x1eBanker\" 4) \"23\\x1eProfessor\" 5) \"23\\x1eSales Manager\" 6) \"24\\x1eStudent\" 7) \"26\\x1eStudent\" 8) \"27\\x1eSales Manager\" 9) \"29\\x1eBanker\" 10) \"29\\x1eProfessor\" 11) \"32\\x1eProfessor\" 12) \"32\\x1eSales Manager\" 13) \"33\\x1eProfessor\" 14) \"36\\x1eProfessor\" 15) \"40\\x1eBanker\" 16) \"41\\x1eBanker\" 17) \"43\\x1eSales Manager\" 18) \"45\\x1eBanker\" 19) \"47\\x1eBanker\" 20) \"48\\x1eCEO\" 127.0.0.1:7389> TABLE.DATA.READ partitions \"cat_1.test.table\" \"29*\" 1) \"29\\x1eBanker\" 2) \"29\\x1eProfessor\" 127.0.0.1:7389> TABLE.DATA.READ partitions \"cat_1.test.table\" \"*Professor\" 1) \"23\\x1eProfessor\" 2) \"29\\x1eProfessor\" 3) \"32\\x1eProfessor\" 4) \"33\\x1eProfessor\" 5) \"36\\x1eProfessor\" b. Query with a pattern and filters \u00b6 Command \"TABLE.DATA.READ\" \"partitions\" \"catalog name\" \"namespace name\" \"table name\" \"pattern(normaly '*')\" \"partition filter\" Examples 127.0.0.1:7389> TABLE.DATA.READ partitions \"cat_1.test.table\" \"*\" \"age\\x1e30\\x1eLTE\" 1) \"21\\x1eSales Manager\" 2) \"22\\x1eTutor\" 3) \"23\\x1eBanker\" 4) \"23\\x1eProfessor\" 5) \"23\\x1eSales Manager\" 6) \"24\\x1eStudent\" 7) \"26\\x1eStudent\" 8) \"27\\x1eSales Manager\" 9) \"29\\x1eBanker\" 10) \"29\\x1eProfessor\" 127.0.0.1:7389> TABLE.DATA.READ partitions \"cat_1.test.table\" \"*\" \"age\\x1e32\\x1eEQ\" 1) \"32\\x1eProfessor\" 2) \"32\\x1eSales Manager\" 127.0.0.1:7389> TABLE.DATA.READ partitions \"cat_1.test.table\" \"*\" \"age\\x1e32\\x1eLT\\x1ejob\\x1eCEO\\x1eLTE\\x1eAND\" 1) \"23\\x1eBanker\" 2) \"29\\x1eBanker\" 127.0.0.1:7389> TABLE.DATA.READ partitions \"cat_1.test.table\" \"*\" \"age\\x1e32\\x1eLT\\x1ejob\\x1eCEO\\x1eGTE\\x1eAND\" 1) \"21\\x1eSales Manager\" 2) \"22\\x1eTutor\" 3) \"23\\x1eProfessor\" 4) \"23\\x1eSales Manager\" 5) \"24\\x1eStudent\" 6) \"26\\x1eStudent\" 7) \"27\\x1eSales Manager\" 8) \"29\\x1eProfessor\" 127.0.0.1:7389> TABLE.DATA.READ partitions \"cat_1.test.table\" \"*\" \"age\\x1e32\\x1eGT\\x1ejob\\x1eCEO\\x1eGTE\\x1eAND\" 1) \"33\\x1eProfessor\" 2) \"36\\x1eProfessor\" 3) \"43\\x1eSales Manager\" 4) \"48\\x1eCEO\" 12. 
select \u00b6 Command \"TABLE.DATA.READ\" \"select\" \"catalog name\" \"namespace name\" \"table name\" \"pattern(normaly '*')\" \"partition filter\" \"data filter\" Examples 127.0.0.1:7389> help TABLE.DATA.READ select TABLE.DATA.READ select catalog.namespace.table projection partition-filter data-filter summary: Get the data with the pattern and filter since: 2.0.0 group: table.data 127.0.0.1:7389> TABLE.DATA.READ select xxx .... 13. getPartitionRowCount \u00b6 Command \"TABLE.DATA.READ\" \"getPartitionRowCount\" \"{catalog name}.{namespace name}.{table name}\" \"partition string with regular expression\" Examples 127.0.0.1:7389> help TABLE.DATA.READ getPartitionRowCount TABLE.DATA.READ getPartitionRowCount catalog.namespace.table partition-string summary: Get the count of the rows in the partition since: 2.0.0 group: table.data 127.0.0.1:7389> TABLE.DATA.READ getPartitionRowCount \"cat_1.test.table\" * 1) \"21\\x1eSales Manager\" 2) \"2\" 3) \"22\\x1eTutor\" 4) \"1\" 5) \"23\\x1eBanker\" 6) \"1\" 7) \"23\\x1eProfessor\" 8) \"1\" 9) \"23\\x1eSales Manager\" 10) \"1\" 11) \"24\\x1eStudent\" 12) \"1\" 13) \"26\\x1eStudent\" 14) \"1\" 15) \"27\\x1eSales Manager\" 16) \"1\" 17) \"29\\x1eBanker\" 18) \"1\" 19) \"29\\x1eProfessor\" 20) \"1\" 21) \"32\\x1eProfessor\" 22) \"1\" 23) \"32\\x1eSales Manager\" 24) \"1\" 25) \"33\\x1eProfessor\" 26) \"1\" 27) \"36\\x1eProfessor\" 28) \"1\" 29) \"40\\x1eBanker\" 30) \"1\" 31) \"41\\x1eBanker\" 32) \"2\" 33) \"43\\x1eSales Manager\" 34) \"1\" 35) \"45\\x1eBanker\" 36) \"1\" 37) \"47\\x1eBanker\" 38) \"2\" 39) \"48\\x1eCEO\" 40) \"1\" 127.0.0.1:7389> TABLE.DATA.READ getPartitionRowCount \"cat_1.test.table\" \"23*\" 1) \"23\\x1eBanker\" 2) \"1\" 3) \"23\\x1eProfessor\" 4) \"1\" 5) \"23\\x1eSales Manager\" 6) \"1\" 127.0.0.1:7389> TABLE.DATA.READ getPartitionRowCount \"cat_1.test.table\" \"*Professor\" 1) \"23\\x1eProfessor\" 2) \"1\" 3) \"29\\x1eProfessor\" 4) \"1\" 5) \"32\\x1eProfessor\" 6) \"1\" 7) \"33\\x1eProfessor\" 8) \"1\" 9) \"36\\x1eProfessor\" 10) \"1\" 127.0.0.1:7389> TABLE.DATA.READ getPartitionRowCount \"cat_1.test.table\" \"45\\x1eBanker\" 1) \"45\\x1eBanker\" 2) \"1\" 14. getPartitionRowGroup \u00b6 Command \"TABLE.DATA.READ\" \"getPartitionRowGroup\" \"{catalog name}.{namespace name}.{table name}\" \"partition string\" Examples 127.0.0.1:7389> help TABLE.DATA.READ getPartitionRowGroup TABLE.DATA.READ getPartitionRowGroup catalog.namespace.table partition-string summary: Get the count of the rows in the each row-group of the partition since: 2.0.0 group: table.data 127.0.0.1:7389> TABLE.DATA.READ getPartitionRowGroup \"cat_1.test.table\" \"21\\x1eSales Manager\" 1) \"0\" 2) \"1\" 3) \"1\" 4) \"2\" 127.0.0.1:7389> 15. 
getTableRowCount \u00b6 Command \"TABLE.DATA.READ\" \"gettablerowcount\" \"{catalog name}.{namespace name}.{table name} with regular expression\" Examples 127.0.0.1:7389> help TABLE.DATA.READ getTableRowCount TABLE.DATA.READ getTableRowCount - summary: Get the row count of each table since: 2.0.0 group: table.data 127.0.0.1:7389> TABLE.DATA.READ getTableRowCount * 1) \"cat_1.test.network_table\" 2) \"33229\" 3) \"cat_1.test.table\" 4) \"23\" 127.0.0.1:7389>","title":"Redis5 cli (LightningDB v2.x)"},{"location":"cli-cli2/#1-createtable","text":"Command \"TABLE.META.WRITE\" \"createTable\" \"catalog name\" \"namespace name\" \"table name\" \"schema binary\" Examples 127.0.0.1:7389> help \"TABLE.META.WRITE\" \"createTable\" TABLE.META.WRITE createTable catalog.namespace.table arrow::schema summary: Create a new table since: 2.0.0 group: table.meta 127.0.0.1:7389> \"TABLE.META.WRITE\" \"createTable\" \"cat_1.test.table\" \"\\x10\\x00\\x00\\x00\\x00\\x00\\n\\x00\\x0e\\x00\\x06\\x00\\r\\x00\\b\\x00\\n\\x00\\x00\\x00\\x00\\x00\\x04\\x00\\x10\\x00\\x00\\x00\\x00\\x01\\n\\x00\\x0c\\x00\\x00\\x00\\b\\x00\\x04\\x00\\n\\x00\\x00\\x00\\b\\x00\\x00\\x00\\xc4\\x01\\x00\\x00\\t\\x00\\x00\\x00\\x80\\x01\\x00\\x00D\\x01\\x00\\x00\\x18\\x01\\x00\\x00\\xec\\x00\\x00\\x00\\xc0\\x00\\x00\\x00\\x98\\x00\\x00\\x00h\\x00\\x00\\x00@\\x00\\x00\\x00\\x04\\x00\\x00\\x00\\xac\\xfe\\xff\\xff\\b\\x00\\x00\\x00\\x18\\x00\\x00\\x00\\x0e\\x00\\x00\\x00127.0.0.1:7389\\x00\\x00\\x13\\x00\\x00\\x00properties.location\\x00\\xe4\\xfe\\xff\\xff\\b\\x00\\x00\\x00\\x0c\\x00\\x00\\x00\\x03\\x00\\x00\\x00job\\x00\\x0b\\x00\\x00\\x00partition.1\\x00\\b\\xff\\xff\\xff\\b\\x00\\x00\\x00\\x0c\\x00\\x00\\x00\\x01\\x00\\x00\\x001\\x00\\x00\\x00\\x10\\x00\\x00\\x00internal.version\\x00\\x00\\x00\\x004\\xff\\xff\\xff\\b\\x00\\x00\\x00\\x0c\\x00\\x00\\x00\\x03\\x00\\x00\\x00age\\x00\\x0b\\x00\\x00\\x00partition.0\\x00X\\xff\\xff\\xff\\b\\x00\\x00\\x00\\x0c\\x00\\x00\\x00\\x01\\x00\\x00\\x002\\x00\\x00\\x00\\x0e\\x00\\x00\\x00partition.size\\x00\\x00\\x80\\xff\\xff\\xff\\b\\x00\\x00\\x00\\x0c\\x00\\x00\\x00\\x03\\x00\\x00\\x00512\\x00\\x0c\\x00\\x00\\x00cva.capacity\\x00\\x00\\x00\\x00\\xa8\\xff\\xff\\xff\\b\\x00\\x00\\x00\\x0c\\x00\\x00\\x00\\x02\\x00\\x00\\x0024\\x00\\x00\\x0e\\x00\\x00\\x00properties.ttl\\x00\\x00\\xd0\\xff\\xff\\xff\\b\\x00\\x00\\x00\\x10\\x00\\x00\\x00\\x04\\x00\\x00\\x002560\\x00\\x00\\x00\\x00\\x11\\x00\\x00\\x00rowgroup.capacity\\x00\\x00\\x00\\b\\x00\\x0c\\x00\\b\\x00\\x04\\x00\\b\\x00\\x00\\x00\\b\\x00\\x00\\x00\\x18\\x00\\x00\\x00\\x0e\\x00\\x00\\x00127.0.0.1:7379\\x00\\x00\\x14\\x00\\x00\\x00properties.metastore\\x00\\x00\\x00\\x00\\x03\\x00\\x00\\x00\\x88\\x00\\x00\\x004\\x00\\x00\\x00\\x04\\x00\\x00\\x00\\x96\\xff\\xff\\xff\\x14\\x00\\x00\\x00\\x14\\x00\\x00\\x00\\x14\\x00\\x00\\x00\\x00\\x00\\x05\\x01\\x10\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x84\\xff\\xff\\xff\\x03\\x00\\x00\\x00job\\x00\\xc2\\xff\\xff\\xff\\x14\\x00\\x00\\x00\\x14\\x00\\x00\\x00\\x1c\\x00\\x00\\x00\\x00\\x00\\x02\\x01 \\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\b\\x00\\x0c\\x00\\b\\x00\\a\\x00\\b\\x00\\x00\\x00\\x00\\x00\\x00\\x01 
\\x00\\x00\\x00\\x03\\x00\\x00\\x00age\\x00\\x00\\x00\\x12\\x00\\x18\\x00\\x14\\x00\\x13\\x00\\x12\\x00\\x0c\\x00\\x00\\x00\\b\\x00\\x04\\x00\\x12\\x00\\x00\\x00\\x14\\x00\\x00\\x00\\x14\\x00\\x00\\x00\\x18\\x00\\x00\\x00\\x00\\x00\\x05\\x01\\x14\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x04\\x00\\x04\\x00\\x04\\x00\\x00\\x00\\x04\\x00\\x00\\x00name\\x00\\x00\\x00\\x00\"","title":"1. createTable"},{"location":"cli-cli2/#2-truncatetable","text":"Command \"TABLE.META.WRITE\" \"truncateTable\" \"{catalog name}.{namespace name}.{table name}\" Examples 127.0.0.1:7389> help \"TABLE.META.WRITE\" \"truncateTable\" TABLE.META.WRITE truncateTable catalog.namespace.table summary: Truncate the table(Remove all data in the table) since: 2.0.0 group: table.meta 127.0.0.1:7389> 127.0.0.1:7389> TABLE.DATA.READ partitions \"cat_1.test.table\" \"*\" 1) \"21\\x1eSales Manager\" 2) \"22\\x1eTutor\" 3) \"23\\x1eBanker\" 4) \"23\\x1eProfessor\" 5) \"23\\x1eSales Manager\" 6) \"24\\x1eStudent\" 7) \"26\\x1eStudent\" 8) \"27\\x1eSales Manager\" 9) \"29\\x1eBanker\" 10) \"29\\x1eProfessor\" 11) \"32\\x1eProfessor\" 12) \"32\\x1eSales Manager\" 13) \"33\\x1eProfessor\" 14) \"36\\x1eProfessor\" 15) \"41\\x1eBanker\" 16) \"43\\x1eSales Manager\" 17) \"45\\x1eBanker\" 18) \"47\\x1eBanker\" 19) \"48\\x1eCEO\" 127.0.0.1:7389> TABLE.META.WRITE truncateTable \"cat_1.test.table\" \"OK\" 127.0.0.1:7389> TABLE.DATA.READ partitions \"cat_1.test.table\" \"*\" (empty list or set)","title":"2. truncateTable"},{"location":"cli-cli2/#3-droptable","text":"Command \"TABLE.META.WRITE\" \"dropTable\" \"{catalog name}.{namespace name}.{table name}\" Examples 127.0.0.1:7389> help \"TABLE.META.WRITE\" \"dropTable\" TABLE.META.WRITE dropTable catalog.namespace.table summary: Drop the table(Remove all data and the schema) since: 2.0.0 group: table.meta 127.0.0.1:7389> 127.0.0.1:7389> TABLE.META.READ showTables 1) \"cat_1.test.table\" 2) \"version: 1\" 127.0.0.1:7389> TABLE.META.WRITE dropTable \"cat_1.test.table\" \"OK\" 127.0.0.1:7389> TABLE.META.READ showTables (empty list or set)","title":"3. dropTable"},{"location":"cli-cli2/#4-dropalltables","text":"Command \"TABLE.META.WRITE\" \"dropAllTables\" Examples 127.0.0.1:7389> help \"TABLE.META.WRITE\" \"dropAllTables\" TABLE.META.WRITE dropAllTables - summary: Drop all tables since: 2.0.0 group: table.meta 127.0.0.1:7389> 127.0.0.1:7389> TABLE.META.READ showTables 1) \"cat_1.test.table\" 2) \"version: 1\" 127.0.0.1:7389> TABLE.META.WRITE dropAllTables 1 tables are deleted.","title":"4. dropAllTables"},{"location":"cli-cli2/#5-settablettl","text":"Command \"TABLE.META.WRITE\" \"settablettl\" \"{catalog name}.{namespace name}.{table name}\" \"{ttl time(unit: msec)}\" Example 127.0.0.1:7389> help \"TABLE.META.WRITE\" \"seTtableTtl\" TABLE.META.WRITE setTableTtl catalog.namespace.table ttl(msec) summary: Set the ttl of the table since: 2.0.0 group: table.meta 127.0.0.1:7389> TABLE.META.WRITE setTableTtl \"cat_1.test.table\" 30000 OK","title":"5. setTableTtl"},{"location":"cli-cli2/#6-showtables","text":"Command \"TABLE.META.READ\" \"showTables\" Examples 127.0.0.1:7389> help TABLE.META.READ showTables TABLE.META.READ showTables - summary: Get the list of tables with their own version since: 2.0.0 group: table.meta 127.0.0.1:7389> 127.0.0.1:7389> TABLE.META.READ showTables 1) \"cat_1.test.table\" 2) \"version: 1\"","title":"6. 
showTables"},{"location":"cli-cli2/#7-describetable","text":"Command \"TABLE.META.READ\" \"describeTable\" \"table name\" Examples 127.0.0.1:7389> help TABLE.META.READ describeTables TABLE.META.READ describeTables catalog.namespace.table summary: Get all columns and partitions of the table since: 2.0.0 group: table.meta 127.0.0.1:7389> 127.0.0.1:7389> TABLE.META.READ showTables 1) \"cat_1.test.table\" 2) \"version: 1\" 127.0.0.1:7389> TABLE.META.READ describeTables \"cat_1.test.table\" 1) \"name: string\" 2) \"age: int32\" 3) \"job: string\" 4) \"[ partitions: age job ]\"","title":"7. describeTable"},{"location":"cli-cli2/#8-gettablettl","text":"Command \"TABLE.META.READ\" gettablettl \"{catalog name}.{namespace name}.{table name}\" Examples 127.0.0.1:7389> help TABLE.META.READ getTableTtl TABLE.META.READ getTableTtl catalog.namespace.table summary: Get the ttl of the table since: 2.0.0 group: table.meta 127.0.0.1:7389> TABLE.META.READ getTableTtl * 1) \"cat_1.test.network_table\" 2) \"86400000\" 3) \"cat_1.test.table\" 4) \"86400000\" 127.0.0.1:7389> TABLE.META.READ getTableTtl cat_1.* 1) \"cat_1.test.network_table\" 2) \"86400000\" 3) \"cat_1.test.table\" 4) \"86400000\" 127.0.0.1:7389> TABLE.META.READ getTableTtl *.network_table 1) \"cat_1.test.network_table\" 2) \"86400000\" 127.0.0.1:7389> TABLE.META.READ getTableTtl cat_1.test.network_table 1) \"cat_1.test.network_table\" 2) \"86400000\" 127.0.0.1:7389>","title":"8. getTableTtl"},{"location":"cli-cli2/#9-getpartitionttl","text":"Command \"TABLE.META.READ\" getPartitionTtl \"{catalog name}.{namespace name}.{table name}\" \"partition string with regular expression\" Examples 127.0.0.1:7389> help TABLE.META.READ getPartitionTtl TABLE.META.READ getPartitionTtl partition-string summary: Get the ttl of the partition in the table since: 2.0.0 group: table.meta 127.0.0.1:7389> TABLE.META.READ getPartitionTtl \"cat_1.test.table\" \"*\" 1) \"21\\x1eSales Manager\" 2) \"86350123\" 3) \"22\\x1eTutor\" 4) \"86350139\" 5) \"23\\x1eBanker\" 6) \"86350126\" 7) \"23\\x1eProfessor\" 8) \"86350125\" 9) \"23\\x1eSales Manager\" 10) \"86350137\" 11) \"24\\x1eStudent\" 12) \"86350121\" 13) \"26\\x1eStudent\" 14) \"86350124\" 15) \"27\\x1eSales Manager\" 16) \"86350132\" 17) \"29\\x1eBanker\" 18) \"86350124\" 19) \"29\\x1eProfessor\" 20) \"86350125\" 21) \"32\\x1eProfessor\" 22) \"86350127\" 23) \"32\\x1eSales Manager\" 24) \"86350123\" 25) \"33\\x1eProfessor\" 26) \"86350120\" 27) \"36\\x1eProfessor\" 28) \"86350134\" 29) \"40\\x1eBanker\" 30) \"86350119\" 31) \"41\\x1eBanker\" 32) \"86350120\" 33) \"43\\x1eSales Manager\" 34) \"86350133\" 35) \"45\\x1eBanker\" 36) \"86350128\" 37) \"47\\x1eBanker\" 38) \"86350124\" 39) \"48\\x1eCEO\" 40) \"86350138\" 127.0.0.1:7389> TABLE.META.READ getPartitionTtl \"cat_1.test.table\" \"23*\" 1) \"23\\x1eBanker\" 2) \"86343642\" 3) \"23\\x1eProfessor\" 4) \"86343641\" 5) \"23\\x1eSales Manager\" 6) \"86343653\" 127.0.0.1:7389> TABLE.META.READ getPartitionTtl \"cat_1.test.table\" \"*CEO\" 1) \"48\\x1eCEO\" 2) \"86336153\" 127.0.0.1:7389> TABLE.META.READ getPartitionTtl \"cat_1.test.table\" \"45\\x1eBanker\" 1) \"45\\x1eBanker\" 2) \"86324848\" 127.0.0.1:7389>","title":"9. getPartitionTtl"},{"location":"cli-cli2/#10-insert","text":"- Command - \"TABLE.DATA.WRITE\" \"Insert\" \"{catalog name}.{namespace name}.{table name}\" \"table version\" \"partition string\" \"binaries... 
...\" - Examples 127.0.0.1:7389> help \"TABLE.DATA.WRITE\" \"Insert\" TABLE.DATA.WRITE insert catalog.namespace.table table-version partition-string data summary: Insert a new data(row) since: 2.0.0 group: table.data 1636425657.602951 [0 127.0.0.1:53881] \"TABLE.DATA.WRITE\" \"Insert\" \"cat_1.test.table\" \"1\" \"40\\x1eBanker\" \"Jeannie\" \"(\\x00\\x00\\x00\" \"Banker\" 1636425657.604043 [0 127.0.0.1:53881] \"TABLE.DATA.WRITE\" \"Insert\" \"cat_1.test.table\" \"1\" \"33\\x1eProfessor\" \"Ardith\" \"!\\x00\\x00\\x00\" \"Professor\" 1636425657.604529 [0 127.0.0.1:53881] \"TABLE.DATA.WRITE\" \"Insert\" \"cat_1.test.table\" \"1\" \"41\\x1eBanker\" \"Elena\" \")\\x00\\x00\\x00\" \"Banker\" 1636425657.605351 [0 127.0.0.1:53881] \"TABLE.DATA.WRITE\" \"Insert\" \"cat_1.test.table\" \"1\" \"24\\x1eStudent\" \"Corliss\" \"\\x18\\x00\\x00\\x00\" \"Student\" 1636425657.607351 [0 127.0.0.1:53881] \"TABLE.DATA.WRITE\" \"Insert\" \"cat_1.test.table\" \"1\" \"41\\x1eBanker\" \"Kiyoko\" \")\\x00\\x00\\x00\" \"Banker\" 1636425657.608057 [0 127.0.0.1:53881] \"TABLE.DATA.WRITE\" \"Insert\" \"cat_1.test.table\" \"1\" \"21\\x1eSales Manager\" \"Hilton\" \"\\x15\\x00\\x00\\x00\" \"Sales Manager\" 1636425657.608455 [0 127.0.0.1:53881] \"TABLE.DATA.WRITE\" \"Insert\" \"cat_1.test.table\" \"1\" \"32\\x1eSales Manager\" \"Becky\" \" \\x00\\x00\\x00\" \"Sales Manager\" 1636425657.609218 [0 127.0.0.1:53881] \"TABLE.DATA.WRITE\" \"Insert\" \"cat_1.test.table\" \"1\" \"29\\x1eBanker\" \"Wendie\" \"\\x1d\\x00\\x00\\x00\" \"Banker\" 1636425657.609940 [0 127.0.0.1:53881] \"TABLE.DATA.WRITE\" \"Insert\" \"cat_1.test.table\" \"1\" \"26\\x1eStudent\" \"Carolina\" \"\\x1a\\x00\\x00\\x00\" \"Student\" 1636425657.610284 [0 127.0.0.1:53881] \"TABLE.DATA.WRITE\" \"Insert\" \"cat_1.test.table\" \"1\" \"47\\x1eBanker\" \"Laquita\" \"/\\x00\\x00\\x00\" \"Banker\" 1636425657.610638 [0 127.0.0.1:53881] \"TABLE.DATA.WRITE\" \"Insert\" \"cat_1.test.table\" \"1\" \"23\\x1eProfessor\" \"Stephani\" \"\\x17\\x00\\x00\\x00\" \"Professor\" 1636425657.610964 [0 127.0.0.1:53881] \"TABLE.DATA.WRITE\" \"Insert\" \"cat_1.test.table\" \"1\" \"29\\x1eProfessor\" \"Emile\" \"\\x1d\\x00\\x00\\x00\" \"Professor\" 1636425657.612257 [0 127.0.0.1:53881] \"TABLE.DATA.WRITE\" \"Insert\" \"cat_1.test.table\" \"1\" \"23\\x1eBanker\" \"Cherri\" \"\\x17\\x00\\x00\\x00\" \"Banker\" 1636425657.612630 [0 127.0.0.1:53881] \"TABLE.DATA.WRITE\" \"Insert\" \"cat_1.test.table\" \"1\" \"47\\x1eBanker\" \"Raleigh\" \"/\\x00\\x00\\x00\" \"Banker\" 1636425657.612943 [0 127.0.0.1:53881] \"TABLE.DATA.WRITE\" \"Insert\" \"cat_1.test.table\" \"1\" \"32\\x1eProfessor\" \"Hollis\" \" \\x00\\x00\\x00\" \"Professor\" 1636425657.614136 [0 127.0.0.1:53881] \"TABLE.DATA.WRITE\" \"Insert\" \"cat_1.test.table\" \"1\" \"45\\x1eBanker\" \"Brigette\" \"-\\x00\\x00\\x00\" \"Banker\" 1636425657.615558 [0 127.0.0.1:53881] \"TABLE.DATA.WRITE\" \"Insert\" \"cat_1.test.table\" \"1\" \"21\\x1eSales Manager\" \"Damian\" \"\\x15\\x00\\x00\\x00\" \"Sales Manager\" 1636425657.617321 [0 127.0.0.1:53881] \"TABLE.DATA.WRITE\" \"Insert\" \"cat_1.test.table\" \"1\" \"27\\x1eSales Manager\" \"Star\" \"\\x1b\\x00\\x00\\x00\" \"Sales Manager\" 1636425657.618819 [0 127.0.0.1:53881] \"TABLE.DATA.WRITE\" \"Insert\" \"cat_1.test.table\" \"1\" \"43\\x1eSales Manager\" \"Elba\" \"+\\x00\\x00\\x00\" \"Sales Manager\" 1636425657.619621 [0 127.0.0.1:53881] \"TABLE.DATA.WRITE\" \"Insert\" \"cat_1.test.table\" \"1\" \"36\\x1eProfessor\" \"Lourie\" \"$\\x00\\x00\\x00\" \"Professor\" 1636425657.622977 [0 
127.0.0.1:53881] \"TABLE.DATA.WRITE\" \"Insert\" \"cat_1.test.table\" \"1\" \"23\\x1eSales Manager\" \"\\xea\\xb0\\x80\\xeb\\x82\\x98\\xeb\\x82\\x98\\xeb\\x82\\x98\\xea\\xb0\\x80\\xeb\\x82\\x98\\xeb\\x82\\x98\" \"\\x17\\x00\\x00\\x00\" \"Sales Manager\" 1636425657.623555 [0 127.0.0.1:53881] \"TABLE.DATA.WRITE\" \"Insert\" \"cat_1.test.table\" \"1\" \"48\\x1eCEO\" \"Elon\" \"0\\x00\\x00\\x00\" \"CEO\" 1636425657.624359 [0 127.0.0.1:53881] \"TABLE.DATA.WRITE\" \"Insert\" \"cat_1.test.table\" \"1\" \"22\\x1eTutor\" \"Kijung\" \"\\x16\\x00\\x00\\x00\" \"Tutor\"","title":"10. insert"},{"location":"cli-cli2/#11-partitions","text":"","title":"11. partitions"},{"location":"cli-cli2/#a-query-with-a-pattern","text":"Commnad \"TABLE.DATA.READ\" \"partitions\" \"{catalog name}.{namespace name}.{table name}\" \"pattern(normaly '*')\" Examples 127.0.0.1:7389> help TABLE.DATA.READ partitions TABLE.DATA.READ partitions catalog.namespace.table pattern partition-filter(optional) summary: Get the list of partitions with the pattern and filter since: 2.0.0 group: table.data 127.0.0.1:7389> 127.0.0.1:7389> TABLE.DATA.READ partitions \"cat_1.test.table\" \"*\" 1) \"21\\x1eSales Manager\" 2) \"22\\x1eTutor\" 3) \"23\\x1eBanker\" 4) \"23\\x1eProfessor\" 5) \"23\\x1eSales Manager\" 6) \"24\\x1eStudent\" 7) \"26\\x1eStudent\" 8) \"27\\x1eSales Manager\" 9) \"29\\x1eBanker\" 10) \"29\\x1eProfessor\" 11) \"32\\x1eProfessor\" 12) \"32\\x1eSales Manager\" 13) \"33\\x1eProfessor\" 14) \"36\\x1eProfessor\" 15) \"40\\x1eBanker\" 16) \"41\\x1eBanker\" 17) \"43\\x1eSales Manager\" 18) \"45\\x1eBanker\" 19) \"47\\x1eBanker\" 20) \"48\\x1eCEO\" 127.0.0.1:7389> TABLE.DATA.READ partitions \"cat_1.test.table\" \"29*\" 1) \"29\\x1eBanker\" 2) \"29\\x1eProfessor\" 127.0.0.1:7389> TABLE.DATA.READ partitions \"cat_1.test.table\" \"*Professor\" 1) \"23\\x1eProfessor\" 2) \"29\\x1eProfessor\" 3) \"32\\x1eProfessor\" 4) \"33\\x1eProfessor\" 5) \"36\\x1eProfessor\"","title":"a. Query with a pattern"},{"location":"cli-cli2/#b-query-with-a-pattern-and-filters","text":"Command \"TABLE.DATA.READ\" \"partitions\" \"catalog name\" \"namespace name\" \"table name\" \"pattern(normaly '*')\" \"partition filter\" Examples 127.0.0.1:7389> TABLE.DATA.READ partitions \"cat_1.test.table\" \"*\" \"age\\x1e30\\x1eLTE\" 1) \"21\\x1eSales Manager\" 2) \"22\\x1eTutor\" 3) \"23\\x1eBanker\" 4) \"23\\x1eProfessor\" 5) \"23\\x1eSales Manager\" 6) \"24\\x1eStudent\" 7) \"26\\x1eStudent\" 8) \"27\\x1eSales Manager\" 9) \"29\\x1eBanker\" 10) \"29\\x1eProfessor\" 127.0.0.1:7389> TABLE.DATA.READ partitions \"cat_1.test.table\" \"*\" \"age\\x1e32\\x1eEQ\" 1) \"32\\x1eProfessor\" 2) \"32\\x1eSales Manager\" 127.0.0.1:7389> TABLE.DATA.READ partitions \"cat_1.test.table\" \"*\" \"age\\x1e32\\x1eLT\\x1ejob\\x1eCEO\\x1eLTE\\x1eAND\" 1) \"23\\x1eBanker\" 2) \"29\\x1eBanker\" 127.0.0.1:7389> TABLE.DATA.READ partitions \"cat_1.test.table\" \"*\" \"age\\x1e32\\x1eLT\\x1ejob\\x1eCEO\\x1eGTE\\x1eAND\" 1) \"21\\x1eSales Manager\" 2) \"22\\x1eTutor\" 3) \"23\\x1eProfessor\" 4) \"23\\x1eSales Manager\" 5) \"24\\x1eStudent\" 6) \"26\\x1eStudent\" 7) \"27\\x1eSales Manager\" 8) \"29\\x1eProfessor\" 127.0.0.1:7389> TABLE.DATA.READ partitions \"cat_1.test.table\" \"*\" \"age\\x1e32\\x1eGT\\x1ejob\\x1eCEO\\x1eGTE\\x1eAND\" 1) \"33\\x1eProfessor\" 2) \"36\\x1eProfessor\" 3) \"43\\x1eSales Manager\" 4) \"48\\x1eCEO\"","title":"b. 
Query with a pattern and filters"},{"location":"cli-cli2/#12-select","text":"Command \"TABLE.DATA.READ\" \"select\" \"catalog name\" \"namespace name\" \"table name\" \"pattern(normaly '*')\" \"partition filter\" \"data filter\" Examples 127.0.0.1:7389> help TABLE.DATA.READ select TABLE.DATA.READ select catalog.namespace.table projection partition-filter data-filter summary: Get the data with the pattern and filter since: 2.0.0 group: table.data 127.0.0.1:7389> TABLE.DATA.READ select xxx ....","title":"12. select"},{"location":"cli-cli2/#13-getpartitionrowcount","text":"Command \"TABLE.DATA.READ\" \"getPartitionRowCount\" \"{catalog name}.{namespace name}.{table name}\" \"partition string with regular expression\" Examples 127.0.0.1:7389> help TABLE.DATA.READ getPartitionRowCount TABLE.DATA.READ getPartitionRowCount catalog.namespace.table partition-string summary: Get the count of the rows in the partition since: 2.0.0 group: table.data 127.0.0.1:7389> TABLE.DATA.READ getPartitionRowCount \"cat_1.test.table\" * 1) \"21\\x1eSales Manager\" 2) \"2\" 3) \"22\\x1eTutor\" 4) \"1\" 5) \"23\\x1eBanker\" 6) \"1\" 7) \"23\\x1eProfessor\" 8) \"1\" 9) \"23\\x1eSales Manager\" 10) \"1\" 11) \"24\\x1eStudent\" 12) \"1\" 13) \"26\\x1eStudent\" 14) \"1\" 15) \"27\\x1eSales Manager\" 16) \"1\" 17) \"29\\x1eBanker\" 18) \"1\" 19) \"29\\x1eProfessor\" 20) \"1\" 21) \"32\\x1eProfessor\" 22) \"1\" 23) \"32\\x1eSales Manager\" 24) \"1\" 25) \"33\\x1eProfessor\" 26) \"1\" 27) \"36\\x1eProfessor\" 28) \"1\" 29) \"40\\x1eBanker\" 30) \"1\" 31) \"41\\x1eBanker\" 32) \"2\" 33) \"43\\x1eSales Manager\" 34) \"1\" 35) \"45\\x1eBanker\" 36) \"1\" 37) \"47\\x1eBanker\" 38) \"2\" 39) \"48\\x1eCEO\" 40) \"1\" 127.0.0.1:7389> TABLE.DATA.READ getPartitionRowCount \"cat_1.test.table\" \"23*\" 1) \"23\\x1eBanker\" 2) \"1\" 3) \"23\\x1eProfessor\" 4) \"1\" 5) \"23\\x1eSales Manager\" 6) \"1\" 127.0.0.1:7389> TABLE.DATA.READ getPartitionRowCount \"cat_1.test.table\" \"*Professor\" 1) \"23\\x1eProfessor\" 2) \"1\" 3) \"29\\x1eProfessor\" 4) \"1\" 5) \"32\\x1eProfessor\" 6) \"1\" 7) \"33\\x1eProfessor\" 8) \"1\" 9) \"36\\x1eProfessor\" 10) \"1\" 127.0.0.1:7389> TABLE.DATA.READ getPartitionRowCount \"cat_1.test.table\" \"45\\x1eBanker\" 1) \"45\\x1eBanker\" 2) \"1\"","title":"13. getPartitionRowCount"},{"location":"cli-cli2/#14-getpartitionrowgroup","text":"Command \"TABLE.DATA.READ\" \"getPartitionRowGroup\" \"{catalog name}.{namespace name}.{table name}\" \"partition string\" Examples 127.0.0.1:7389> help TABLE.DATA.READ getPartitionRowGroup TABLE.DATA.READ getPartitionRowGroup catalog.namespace.table partition-string summary: Get the count of the rows in the each row-group of the partition since: 2.0.0 group: table.data 127.0.0.1:7389> TABLE.DATA.READ getPartitionRowGroup \"cat_1.test.table\" \"21\\x1eSales Manager\" 1) \"0\" 2) \"1\" 3) \"1\" 4) \"2\" 127.0.0.1:7389>","title":"14. getPartitionRowGroup"},{"location":"cli-cli2/#15-gettablerowcount","text":"Command \"TABLE.DATA.READ\" \"gettablerowcount\" \"{catalog name}.{namespace name}.{table name} with regular expression\" Examples 127.0.0.1:7389> help TABLE.DATA.READ getTableRowCount TABLE.DATA.READ getTableRowCount - summary: Get the row count of each table since: 2.0.0 group: table.data 127.0.0.1:7389> TABLE.DATA.READ getTableRowCount * 1) \"cat_1.test.network_table\" 2) \"33229\" 3) \"cat_1.test.table\" 4) \"23\" 127.0.0.1:7389>","title":"15. 
getTableRowCount"},{"location":"cli-cluster/","text":"Note Command Line Interface(CLI) of LightningDB supports not only deploy and start command but also many commands to access and manipulate data in LightningDB. If you want to see the list of cluster commands, use the cluster command without any option. ec2-user@lightningdb:1> cluster NAME ltcli cluster - This is cluster command SYNOPSIS ltcli cluster COMMAND DESCRIPTION This is cluster command COMMANDS COMMAND is one of the following: add_slave Add slaves to cluster additionally clean Clean cluster configure create Create cluster ls Check cluster list rebalance Rebalance restart Restart redist cluster rowcount Query and show cluster row count start Start cluster stop Stop cluster use Change selected cluster 1. Deploy and Start \u00b6 (1) Cluster configure redis-{port}.conf is generated with using redis-{master/slave}.conf.template and redis.properties files. matthew@lightningdb:21> cluster configure Check status of hosts... OK sync conf +----------------+--------+ | HOST | STATUS | +----------------+--------+ | 192.168.111.44 | OK | | 192.168.111.41 | OK | +----------------+--------+ OK (2) Cluster start Backup logs of the previous master/slave nodes All log files of previous master/slave nodes in ${SR2_HOME}/logs/redis/ 1 will be moved to ${SR2_HOME}/logs/redis/backup/ . Generate directories to save data Save aof and rdb files of redis-server and RocksDB files in ${SR2_REDIS_DATA} Start redis-server process Start master and slave redis-server with ${SR2_HOME}/conf/redis/redis-{port}.conf file Log files will be saved in ${SR2_HOME}/logs/redis/ ec2-user@lightningdb:1> cluster start Check status of hosts... OK Check cluster exist... - 127.0.0.1 OK Backup redis master log in each MASTER hosts... - 127.0.0.1 Generate redis configuration files for master hosts sync conf +-----------+--------+ | HOST | STATUS | +-----------+--------+ | 127.0.0.1 | OK | +-----------+--------+ Starting master nodes : 127.0.0.1 : 18100|18101|18102|18103|18104 ... Wait until all redis process up... cur: 5 / total: 5 Complete all redis process up Errors ErrorCode 11 Redis-server(master) process with the same port is already running. To resolve this error, use cluster stop or kill {pid of the process} . $ cluster start ... ... [ErrorCode 11] Fail to start... Must be checked running MASTER redis processes! We estimate that redis process is . ErrorCode 12 Redis-server(slave) process with the same port is already running. To resolve this error, use cluster stop or kill {pid of the process} . $ cluster start ... [ErrorCode 12] Fail to start... Must be checked running SLAVE redis processes! We estimate that redis process is . Conf file not exist Conf file is not found. To resove this error, use cluster configure and then cluster start . $ cluster start ... FileNotExistError: ${SR2_HOME}/conf/redis/redis-{port}.conf Max try error \u200b For detail information, please check the log files. $ cluster start ... ClusterRedisError: Fail to start redis: max try exceed Recommendation Command: 'monitor' (3) Cluster create After checking the information of the cluster, create a cluster of LightningDB. Case 1) When redis-server processes are running, create a cluster only. ec2-user@lightningdb:1>cluster create Check status of hosts... 
OK >>> Creating cluster +-----------+-------+--------+ | HOST | PORT | TYPE | +-----------+-------+--------+ | 127.0.0.1 | 18100 | MASTER | | 127.0.0.1 | 18101 | MASTER | | 127.0.0.1 | 18102 | MASTER | | 127.0.0.1 | 18103 | MASTER | | 127.0.0.1 | 18104 | MASTER | +-----------+-------+--------+ replicas: 0 Do you want to proceed with the create according to the above information? (y/n) y Cluster meet... - 127.0.0.1:18100 - 127.0.0.1:18103 - 127.0.0.1:18104 - 127.0.0.1:18101 - 127.0.0.1:18102 Adding slots... - 127.0.0.1:18100, 3280 - 127.0.0.1:18103, 3276 - 127.0.0.1:18104, 3276 - 127.0.0.1:18101, 3276 - 127.0.0.1:18102, 3276 Check cluster state and asign slot... Ok create cluster complete. Case 2) When redis-server processes are not running, create a cluster after launching redis-server processes with cluster start command. ec2-user@lightningdb:4>cluster create Check status of hosts... OK Backup redis master log in each MASTER hosts... - 127.0.0.1 create redis data directory in each MASTER hosts - 127.0.0.1 sync conf +-----------+--------+ | HOST | STATUS | +-----------+--------+ | 127.0.0.1 | OK | +-----------+--------+ OK Starting master nodes : 127.0.0.1 : 18100|18101|18102|18103|18104 ... Wait until all redis process up... cur: 5 / total: 5 Complete all redis process up >>> Creating cluster +-----------+-------+--------+ | HOST | PORT | TYPE | +-----------+-------+--------+ | 127.0.0.1 | 18100 | MASTER | | 127.0.0.1 | 18101 | MASTER | | 127.0.0.1 | 18102 | MASTER | | 127.0.0.1 | 18103 | MASTER | | 127.0.0.1 | 18104 | MASTER | +-----------+-------+--------+ replicas: 0 Do you want to proceed with the create according to the above information? (y/n) y Cluster meet... - 127.0.0.1:18103 - 127.0.0.1:18104 - 127.0.0.1:18101 - 127.0.0.1:18102 - 127.0.0.1:18100 Adding slots... - 127.0.0.1:18103, 3280 - 127.0.0.1:18104, 3276 - 127.0.0.1:18101, 3276 - 127.0.0.1:18102, 3276 - 127.0.0.1:18100, 3276 Check cluster state and asign slot... Ok create cluster complete. Errors When redis servers are not running, this error(Errno 111) will occur. To solve this error, use cluster start command previously. ec2-user@lightningdb:1>cluster create Check status of hosts... OK >>> Creating cluster +-----------+-------+--------+ | HOST | PORT | TYPE | +-----------+-------+--------+ | 127.0.0.1 | 18100 | MASTER | | 127.0.0.1 | 18101 | MASTER | | 127.0.0.1 | 18102 | MASTER | | 127.0.0.1 | 18103 | MASTER | | 127.0.0.1 | 18104 | MASTER | +-----------+-------+--------+ replicas: 0 Do you want to proceed with the create according to the above information? (y/n) y 127.0.0.1:18100 - [Errno 111] Connection refused (4) Cluster stop \u200bGracefully kill all redis-servers(master/slave) with SIGINT \u200b\u200b ec2-user@lightningdb:1> cluster stop Check status of hosts... OK Stopping master cluster of redis... cur: 5 / total: 5 cur: 0 / total: 5 Complete all redis process down Options Force to kill all redis-servers(master/slave) with SIGKILL --force (5) Cluster clean Remove conf files for redis-server and all data(aof, rdb, RocksDB) of LightningDB ec2-user@lightningdb:1> cluster clean Removing redis generated master configuration files - 127.0.0.1 Removing flash db directory, appendonly and dump.rdb files in master - 127.0.0.1 Removing master node configuration - 127.0.0.1 (6) Cluster restart\u200b Process cluster stop and then cluster start .\u200b\u200b Options Force to kill all redis-servers(master/slave) with SIGKILL and then start again. --force-stop Remove all data(aof, rdb, RocksDB, conf files) before starting again. 
--reset Process cluster create . This command should be called with --reset . --cluster (7) Update version You can update LightningDB by using the 'deploy' command. > c 1 // alias of 'cluster use 1' > deploy (Watch out) Cluster 1 is already deployed. Do you want to deploy again? (y/n) [n] y Select installer Select installer [ INSTALLER LIST ] (1) lightningdb.release.master.5a6a38.bin (2) lightningdb.trial.master.dbcb9e-dirty.bin (3) lightningdb.trial.master.dbcb9e.bin Please enter the number, file path or URL of the installer you want to use. you can also add a file in list by copy to '$FBPATH/releases/' 1 OK, lightningdb.release.master.5a6a38.bin Restore Do you want to restore conf? (y/n) y If the current settings will be reused, type 'y'. Check all settings finally Backup path of cluster: ${base-directory}/backup/cluster_${cluster-id}_bak_${time-stamp} Backup path of conf files: $FBAPTH/conf_backup/cluster_${cluster-id}_conf_bak_${time-stamp} +-----------------+---------------------------------------------------+ | NAME | VALUE | +-----------------+---------------------------------------------------+ | installer | lightningdb.release.master.5a6a38.bin | | nodes | nodeA | | | nodeB | | | nodeC | | | nodeD | | master ports | 18100 | | slave ports | 18150-18151 | | ssd count | 3 | | redis data path | ~/sata_ssd/ssd_ | | redis db path | ~/sata_ssd/ssd_ | | flash db path | ~/sata_ssd/ssd_ | +-----------------+---------------------------------------------------+ Do you want to proceed with the deploy accroding to the above information? (y/n) y Check status of hosts... +-----------+--------+ | HOST | STATUS | +-----------+--------+ | nodeA | OK | | nodeB | OK | | nodeC | OK | | nodeD | OK | +-----------+--------+ Checking for cluster exist... +------+--------+ | HOST | STATUS | +------+--------+ Backup conf of cluster 1... OK, cluster_1_conf_bak_ Backup info of cluster 1 at nodeA... OK, cluster_1_bak_ Backup info of cluster 1 at nodeB... OK, cluster_1_bak_ Backup info of cluster 1 at nodeC... OK, cluster_1_bak_ Backup info of cluster 1 at nodeD... OK, cluster_1_bak_ Transfer installer and execute... - nodeA - nodeB - nodeC - nodeD Sync conf... Complete to deploy cluster 1. Cluster 1 selected. Restart > cluster restart After the restart, the new version will be applied. 2. Monitor \u00b6 (1) Cluster use Change the cluster to use LTCLI. Use cluster use or c commands. Examples ec2-user@lightningdb:2> cluster use 1 Cluster '1' selected. ec2-user@lightningdb:1> c 2 Cluster '2' selected. (2) Cluster ls List the deployed clusters. Examples ec2-user@lightningdb:2> cluster ls [1, 2] (3) Cluster rowcount Check the count of records that are stored in the cluster. Examples ec2-user@lightningdb:1> cluster rowcount 0 (4) Cluster tree User can check the status of master nodes and slaves and show which master and slave nodes are linked. Examples ec2-user@lightningdb:9> cluster tree 127.0.0.1:18900(connected) |__ 127.0.0.1:18950(connected) 127.0.0.1:18901(connected) |__ 127.0.0.1:18951(connected) 127.0.0.1:18902(connected) |__ 127.0.0.1:18952(connected) 127.0.0.1:18903(connected) |__ 127.0.0.1:18953(connected) 127.0.0.1:18904(connected) |__ 127.0.0.1:18954(connected) 127.0.0.1:18905(connected) |__ 127.0.0.1:18955(connected) 127.0.0.1:18906(connected) |__ 127.0.0.1:18956(connected) (5) Cluster distribution The distribution of Master/Slave nodes are displayed with their hostnames(IP addresses). 
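As a quick cross-check outside LTCLI, the same per-host master/slave counts can be derived directly from the Redis cluster topology. This is only a sketch, not an LTCLI command: it assumes redis-cli is installed and that one node of the cluster being inspected listens on 127.0.0.1:18900, as in the cluster tree example above.
$ redis-cli -p 18900 cluster nodes | awk '{ split($2, addr, \":\"); role = ($3 ~ /master/) ? \"MASTER\" : \"SLAVE\"; count[addr[1] \" \" role]++ } END { for (k in count) print k, count[k] }'
Each output line is 'host role count', which should agree with the table printed by cluster distribution in the Examples below.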
Examples matthew@lightningdb:21> cluster distribution +-----------------------+--------+-------+ | HOST | MASTER | SLAVE | +-----------------------+--------+-------+ | fbg04(192.168.111.41) | 4 | 2 | | fbg05(192.168.111.44) | 2 | 4 | | TOTAL | 6 | 6 | +-----------------------+--------+-------+ 3. Failover \u00b6 (1) Cluster failover_list failovered masters Nodes that were initialized as slaves by the cluster but have now become masters through failover. no-slave masters Masters without slaves. You need to replicate the failbacked slaves to these nodes. no-slot masters Masters that have not yet been added to the cluster or that own no slots. failbacked slaves Nodes that were initialized as masters but have now become slaves through failback. Examples matthew@lightningdb:21> cluster failover_list 1) failovered masters: 192.168.111.44:20152 192.168.111.44:20153 192.168.111.44:20156 2) no-slave masters: 192.168.111.44:20100 192.168.111.41:20101 3) no-slot masters: 192.168.111.44:20152 4) failbacked slaves: 192.168.111.41:20102 192.168.111.41:20105 (2) Cluster do_replicate You can add a node as the slave of a master node with cluster do_replicate {slave's IP}:{slave's Port} {master's IP}:{master's Port} . The IP addresses of masters or slaves can be replaced with their hostnames. Examples matthew@lightningdb:21> cluster tree 192.168.111.44:20101(connected) |__ 192.168.111.44:20151(connected) 192.168.111.44:20102(connected) |__ 192.168.111.44:20152(connected) 192.168.111.44:20150(connected) |__ 192.168.111.44:20100(connected) matthew@lightningdb:21> cluster do_replicate 192.168.111.44:20100 192.168.111.44:20101 Start to replicate... OK matthew@lightningdb:21> cluster tree 192.168.111.44:20101(connected) |__ 192.168.111.44:20100(connected) |__ 192.168.111.44:20151(connected) 192.168.111.44:20102(connected) |__ 192.168.111.44:20152(connected) 192.168.111.44:20150(connected) with hostnames, matthew@lightningdb:21> cluster do_replicate fbg05:20100 fbg05:20101 Start to replicate... OK (3) Cluster find_noaddr & cluster forget_noaddr You can find and remove 'noaddr' nodes in the current cluster. 'noaddr' nodes are no longer valid nodes. Examples matthew@lightningdb:21> cluster find_noaddr +------------------------------------------+ | UUID | +------------------------------------------+ | 40675af73cd8fa1272a20fe9536ad19c398b5bca | +------------------------------------------+ matthew@lightningdb:21> cluster forget_noaddr \"27\" nodes have forgot \"40675af73cd8fa1272a20fe9536ad19c398b5bca\" matthew@lightningdb:21> cluster find_noaddr +------+ | UUID | +------+ (4) Cluster failover If a master node is killed, its slave node will automatically be promoted after 'cluster-node-time' 2 . Users can promote the slave node immediately by using the 'cluster failover' command. Examples Step 1) Check the status of the cluster In this case, the '127.0.0.1:18902' node is killed. ec2-user@lightningdb:9> cluster tree 127.0.0.1:18900(connected) |__ 127.0.0.1:18950(connected) 127.0.0.1:18901(connected) |__ 127.0.0.1:18951(connected) 127.0.0.1:18902(disconnected) <--- Killed!
|__ 127.0.0.1:18952(connected) 127.0.0.1:18903(connected) |__ 127.0.0.1:18953(connected) 127.0.0.1:18904(connected) |__ 127.0.0.1:18954(connected) 127.0.0.1:18905(connected) |__ 127.0.0.1:18955(connected) 127.0.0.1:18906(connected) |__ 127.0.0.1:18956(connected) Step 2) Do failover with 'cluster failover' command ec2-user@lightningdb:9> cluster failover failover 127.0.0.1:18952 for 127.0.0.1:18902 OK ec2-user@lightningdb:9> cluster tree 127.0.0.1:18900(connected) |__ 127.0.0.1:18950(connected) 127.0.0.1:18901(connected) |__ 127.0.0.1:18951(connected) 127.0.0.1:18902(disconnected) <--- Killed! 127.0.0.1:18903(connected) |__ 127.0.0.1:18953(connected) 127.0.0.1:18904(connected) |__ 127.0.0.1:18954(connected) 127.0.0.1:18905(connected) |__ 127.0.0.1:18955(connected) 127.0.0.1:18906(connected) |__ 127.0.0.1:18956(connected) 127.0.0.1:18952(connected) <--- Promoted to master! (5) Cluster failback With the 'cluster failback' command, the killed node is restarted and added to the cluster as a slave node. Examples ec2-user@lightningdb:9> cluster failback run 127.0.0.1:18902 ec2-user@lightningdb:9> cluster tree 127.0.0.1:18900(connected) |__ 127.0.0.1:18950(connected) 127.0.0.1:18901(connected) |__ 127.0.0.1:18951(connected) 127.0.0.1:18903(connected) |__ 127.0.0.1:18953(connected) 127.0.0.1:18904(connected) |__ 127.0.0.1:18954(connected) 127.0.0.1:18905(connected) |__ 127.0.0.1:18955(connected) 127.0.0.1:18906(connected) |__ 127.0.0.1:18956(connected) 127.0.0.1:18952(connected) <--- Promoted to master! |__ 127.0.0.1:18902(connected) <--- Failbacked. Now this node is slave! (6) Cluster reset_distribution To initialize the node distribution, use 'reset_distribution'. Examples matthew@lightningdb:21> cluster failover_list 1) failovered masters: 192.168.111.44:20152 2) no-slave masters: 3) no-slot masters: 4) failbacked slaves: 192.168.111.41:20101 matthew@lightningdb:21> cluster reset_distribution '192.168.111.41:20101' will be master... OK matthew@lightningdb:21> cluster failover_list 1) failovered masters: 2) no-slave masters: 3) no-slot masters: 4) failbacked slaves: (7) Cluster nodes_with_dir & Cluster masters_with_dir Cluster nodes_with_dir Lists all nodes that are using the disk with the HW fault. Cluster masters_with_dir Lists all masters that are using the disk with the HW fault.
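The Examples below show how LTCLI reports these nodes. As an OS-level cross-check, the redis-server processes that still hold files under a suspect directory can also be listed directly; this is only a sketch, assuming lsof is installed and reusing the directory that appears in the Examples below, and it is not an LTCLI command.
$ lsof +D /sata_ssd/ssd_02/matthew03 2>/dev/null | grep redis | awk '{ print $1, $2 }' | sort -u
Each output line is the (truncated) process name and PID of a redis-server that is still using that path.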
Examples matthew@lightningdb:21> cluster nodes_with_dir 192.168.111.44 matthew03 +----------------+-------+------------------------------------------+ | HOST | PORT | PATH | +----------------+-------+------------------------------------------+ | 192.168.111.44 | 20102 | /sata_ssd/ssd_02/matthew03/nvkvs/matthew | | 192.168.111.44 | 20105 | /sata_ssd/ssd_02/matthew03/nvkvs/matthew | | 192.168.111.44 | 20150 | /sata_ssd/ssd_02/matthew03/nvkvs/matthew | | 192.168.111.44 | 20153 | /sata_ssd/ssd_02/matthew03/nvkvs/matthew | | 192.168.111.44 | 20156 | /sata_ssd/ssd_02/matthew03/nvkvs/matthew | +----------------+-------+------------------------------------------+ matthew@lightningdb:21> cluster masters_with_dir 192.168.111.44 matthew03 +----------------+-------+------------------------------------------+ | HOST | PORT | PATH | +----------------+-------+------------------------------------------+ | 192.168.111.44 | 20102 | /sata_ssd/ssd_02/matthew03/nvkvs/matthew | | 192.168.111.44 | 20105 | /sata_ssd/ssd_02/matthew03/nvkvs/matthew | +----------------+-------+------------------------------------------+ with hostnames, matthew@lightningdb:21> cluster nodes_with_dir fbg05 matthew02 +-------+-------+------------------------------------------+ | HOST | PORT | PATH | +-------+-------+------------------------------------------+ | fbg05 | 20101 | /sata_ssd/ssd_02/matthew02/nvkvs/matthew | | fbg05 | 20152 | /sata_ssd/ssd_02/matthew02/nvkvs/matthew | +-------+-------+------------------------------------------+ matthew@lightningdb:21> cluster masters_with_dir fbg05 matthew02 +-------+-------+------------------------------------------+ | HOST | PORT | PATH | +-------+-------+------------------------------------------+ | fbg05 | 20101 | /sata_ssd/ssd_02/matthew02/nvkvs/matthew | +-------+-------+------------------------------------------+ (8) Cluster failover_with_dir Do failover and change the master using the disk to the slave Examples matthew@lightningdb:21> cluster masters_with_dir 192.168.111.44 matthew03 +----------------+-------+------------------------------------------+ | HOST | PORT | PATH | +----------------+-------+------------------------------------------+ | 192.168.111.44 | 20102 | /sata_ssd/ssd_02/matthew03/nvkvs/matthew | | 192.168.111.44 | 20105 | /sata_ssd/ssd_02/matthew03/nvkvs/matthew | +----------------+-------+------------------------------------------+ matthew@lightningdb:21> cluster failover_list 1) failovered masters: 2) no-slave masters: 3) no-slot masters: 4) failbacked slaves: matthew@lightningdb:21> cluster failover_with_dir 192.168.111.44 matthew03 '192.168.111.41:20152' will be master... OK '192.168.111.41:20155' will be master... 
OK matthew@lightningdb:21> cluster failover_list 1) failovered masters: 192.168.111.41:20152 192.168.111.41:20155 2) no-slave masters: 3) no-slot masters: 4) failbacked slaves: 192.168.111.44:20102 192.168.111.44:20105 matthew@lightningdb:21> cluster masters_with_dir 192.168.111.44 matthew03 +------+------+------+ | HOST | PORT | PATH | +------+------+------+ with hostnames, matthew@lightningdb:21> cluster masters_with_dir fbg05 matthew01 +-------+-------+------------------------------------------+ | HOST | PORT | PATH | +-------+-------+------------------------------------------+ | fbg05 | 20151 | /sata_ssd/ssd_02/matthew01/nvkvs/matthew | +-------+-------+------------------------------------------+ matthew@lightningdb:21> cluster tree 192.168.111.44:20102(connected) |__ 192.168.111.44:20152(connected) 192.168.111.44:20150(connected) |__ 192.168.111.44:20100(connected) 192.168.111.44:20151(connected) |__ 192.168.111.44:20101(connected) matthew@lightningdb:21> cluster failover_with_dir fbg05 matthew01 '192.168.111.44:20101' will be master... OK matthew@lightningdb:21> cluster tree 192.168.111.44:20101(connected) |__ 192.168.111.44:20151(connected) 192.168.111.44:20102(connected) |__ 192.168.111.44:20152(connected) 192.168.111.44:20150(connected) |__ 192.168.111.44:20100(connected) (9) Cluster force_failover When a server need to be shutdown by HW fault or checking, change all masters in the server to slaves by failover of those slaves. Examples matthew@lightningdb:21> cluster distribution +----------------+--------+-------+ | HOST | MASTER | SLAVE | +----------------+--------+-------+ | 192.168.111.44 | 7 | 7 | | 192.168.111.41 | 7 | 7 | | TOTAL | 14 | 14 | +----------------+--------+-------+ matthew@lightningdb:21> cluster force_failover 192.168.111.41 '192.168.111.44:20150' will be master... OK '192.168.111.44:20151' will be master... OK '192.168.111.44:20152' will be master... OK '192.168.111.44:20153' will be master... OK '192.168.111.44:20154' will be master... OK '192.168.111.44:20155' will be master... OK '192.168.111.44:20156' will be master... OK matthew@lightningdb:21> cluster distribution +----------------+--------+-------+ | HOST | MASTER | SLAVE | +----------------+--------+-------+ | 192.168.111.44 | 14 | 0 | | 192.168.111.41 | 0 | 14 | | TOTAL | 14 | 14 | +----------------+--------+-------+ matthew@lightningdb:21> 4. Scale out \u00b6 (1) Cluster add_slave Warning Before using the add-slave command, ingestion to master nodes should be stopped. After replication and sync between master and slave are completed, ingestion will be available again. You can add a slave to a cluster that is configured only with the master without redundancy. Create cluster only with masters Procedure for configuring the test environment. If cluster with the only masters already exists, go to the add slave info . Proceed with the deploy. Enter 0 in replicas as shown below when deploy. ec2-user@lightningdb:2> deploy 3 Select installer [ INSTALLER LIST ] (1) lightningdb.dev.master.5a6a38.bin Please enter the number, file path or url of the installer you want to use. you can also add file in list by copy to '$FBPATH/releases/' https://flashbase.s3.ap-northeast-2.amazonaws.com/lightningdb.release.master.5a6a38.bin Downloading lightningdb.release.master.5a6a38.bin [==================================================] 100% OK, lightningdb.release.master.5a6a38.bin Please type host list separated by comma(,) [127.0.0.1] OK, ['127.0.0.1'] How many masters would you like to create on each host? 
[5] OK, 5 Please type ports separate with comma(,) and use hyphen(-) for range. [18300-18304] OK, ['18300-18304'] How many replicas would you like to create on each master? [0] OK, 0 How many ssd would you like to use? [3] OK, 3 Type prefix of db path [~/sata_ssd/ssd_] OK, ~/sata_ssd/ssd_ +--------------+---------------------------------+ | NAME | VALUE | +--------------+---------------------------------+ | installer | lightningdb.dev.master.5a6a38.bin | | hosts | 127.0.0.1 | | master ports | 18300-18304 | | ssd count | 3 | | db path | ~/sata_ssd/ssd_ | +--------------+---------------------------------+ Do you want to proceed with the deploy accroding to the above information? (y/n) y Check status of hosts... +-----------+--------+ | HOST | STATUS | +-----------+--------+ | 127.0.0.1 | OK | +-----------+--------+ OK Checking for cluster exist... +-----------+--------+ | HOST | STATUS | +-----------+--------+ | 127.0.0.1 | CLEAN | +-----------+--------+ OK Transfer installer and execute... - 127.0.0.1 Sync conf... Complete to deploy cluster 3. Cluster '3' selected. When the deploy is complete, start and create the cluster. ec2-user@lightningdb:3> cluster start Check status of hosts... OK Check cluster exist... - 127.0.0.1 OK Backup redis master log in each MASTER hosts... - 127.0.0.1 create redis data directory in each MASTER hosts - 127.0.0.1 sync conf +-----------+--------+ | HOST | STATUS | +-----------+--------+ | 127.0.0.1 | OK | +-----------+--------+ OK Starting master nodes : 127.0.0.1 : 18300|18301|18302|18303|18304 ... Wait until all redis process up... cur: 5 / total: 5 Complete all redis process up ec2-user@lightningdb:3> cluster create Check status of hosts... OK >>> Creating cluster +-----------+-------+--------+ | HOST | PORT | TYPE | +-----------+-------+--------+ | 127.0.0.1 | 18300 | MASTER | | 127.0.0.1 | 18301 | MASTER | | 127.0.0.1 | 18302 | MASTER | | 127.0.0.1 | 18303 | MASTER | | 127.0.0.1 | 18304 | MASTER | +-----------+-------+--------+ replicas: 0 Do you want to proceed with the create according to the above information? (y/n) y Cluster meet... - 127.0.0.1:18300 - 127.0.0.1:18303 - 127.0.0.1:18304 - 127.0.0.1:18301 - 127.0.0.1:18302 Adding slots... - 127.0.0.1:18300, 3280 - 127.0.0.1:18303, 3276 - 127.0.0.1:18304, 3276 - 127.0.0.1:18301, 3276 - 127.0.0.1:18302, 3276 Check cluster state and asign slot... Ok create cluster complete. ec2-user@lightningdb:3> Add slave info Open the conf file. ec2-user@lightningdb:3> conf cluster You can modify redis.properties by entering the command as shown above. 
#!/bin/bash ## Master hosts and ports export SR2_REDIS_MASTER_HOSTS=( \"127.0.0.1\" ) export SR2_REDIS_MASTER_PORTS=( $(seq 18300 18304) ) ## Slave hosts and ports (optional) [[export]] SR2_REDIS_SLAVE_HOSTS=( \"127.0.0.1\" ) [[export]] SR2_REDIS_SLAVE_PORTS=( $(seq 18600 18609) ) ## only single data directory in redis db and flash db ## Must exist below variables; 'SR2_REDIS_DATA', 'SR2_REDIS_DB_PATH' and 'SR2_FLASH_DB_PATH' [[export]] SR2_REDIS_DATA=\"/nvdrive0/nvkvs/redis\" [[export]] SR2_REDIS_DB_PATH=\"/nvdrive0/nvkvs/redis\" [[export]] SR2_FLASH_DB_PATH=\"/nvdrive0/nvkvs/flash\" ## multiple data directory in redis db and flash db export SSD_COUNT=3 [[export]] HDD_COUNT=3 export SR2_REDIS_DATA=\"~/sata_ssd/ssd_\" export SR2_REDIS_DB_PATH=\"~/sata_ssd/ssd_\" export SR2_FLASH_DB_PATH=\"~/sata_ssd/ssd_\" ####################################################### # Example : only SSD data directory [[export]] SSD_COUNT=3 [[export]] SR2_REDIS_DATA=\"/ssd_\" [[export]] SR2_REDIS_DB_PATH=\"/ssd_\" [[export]] SR2_FLASH_DB_PATH=\"/ssd_\" ####################################################### Modify SR2_REDIS_SLAVE_HOSTS and SR2_REDIS_SLAVE_PORTS as shown below. #!/bin/bash ## Master hosts and ports export SR2_REDIS_MASTER_HOSTS=( \"127.0.0.1\" ) export SR2_REDIS_MASTER_PORTS=( $(seq 18300 18304) ) ## Slave hosts and ports (optional) export SR2_REDIS_SLAVE_HOSTS=( \"127.0.0.1\" ) export SR2_REDIS_SLAVE_PORTS=( $(seq 18350 18354) ) ## only single data directory in redis db and flash db ## Must exist below variables; 'SR2_REDIS_DATA', 'SR2_REDIS_DB_PATH' and 'SR2_FLASH_DB_PATH' [[export]] SR2_REDIS_DATA=\"/nvdrive0/nvkvs/redis\" [[export]] SR2_REDIS_DB_PATH=\"/nvdrive0/nvkvs/redis\" [[export]] SR2_FLASH_DB_PATH=\"/nvdrive0/nvkvs/flash\" ## multiple data directory in redis db and flash db export SSD_COUNT=3 [[export]] HDD_COUNT=3 export SR2_REDIS_DATA=\"~/sata_ssd/ssd_\" export SR2_REDIS_DB_PATH=\"~/sata_ssd/ssd_\" export SR2_FLASH_DB_PATH=\"~/sata_ssd/ssd_\" ####################################################### # Example : only SSD data directory [[export]] SSD_COUNT=3 [[export]] SR2_REDIS_DATA=\"/ssd_\" [[export]] SR2_REDIS_DB_PATH=\"/ssd_\" [[export]] SR2_FLASH_DB_PATH=\"/ssd_\" ####################################################### Save the modification and exit. ec2-user@lightningdb:3> conf cluster Check status of hosts... OK sync conf OK Complete edit Execute cluster add-slave command ec2-user@lightningdb:3> cluster add-slave Check status of hosts... OK Check cluster exist... - 127.0.0.1 OK clean redis conf, node conf, db data of master clean redis conf, node conf, db data of slave - 127.0.0.1 Backup redis slave log in each SLAVE hosts... - 127.0.0.1 create redis data directory in each SLAVE hosts - 127.0.0.1 sync conf OK Starting slave nodes : 127.0.0.1 : 18350|18351|18352|18353|18354 ... Wait until all redis process up... cur: 10 / total: 10 Complete all redis process up replicate [M] 127.0.0.1 18300 - [S] 127.0.0.1 18350 replicate [M] 127.0.0.1 18301 - [S] 127.0.0.1 18351 replicate [M] 127.0.0.1 18302 - [S] 127.0.0.1 18352 replicate [M] 127.0.0.1 18303 - [S] 127.0.0.1 18353 replicate [M] 127.0.0.1 18304 - [S] 127.0.0.1 18354 5 / 5 meet complete. 
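At this point every new slave should be linked to its master. As a quick sanity check, sketched here on the assumption that redis-cli is available and that one of the new slaves listens on 127.0.0.1:18350 as in the example above (this is a plain Redis command, not an LTCLI one):
$ redis-cli -p 18350 info replication | grep -E 'role|master_link_status'
A healthy slave reports role:slave and master_link_status:up; the cli cluster nodes output in the next step shows the same information for the whole cluster.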
Check configuration information ec2-user@lightningdb:3> cli cluster nodes 0549ec03031213f95121ceff6c9c13800aef848c 127.0.0.1:18303 master - 0 1574132251126 3 connected 3280-6555 1b09519d37ebb1c09095158b4f1c9f318ddfc747 127.0.0.1:18352 slave a6a8013cf0032f0f36baec3162122b3d993dd2c8 0 1574132251025 6 connected c7dc4815e24054104dff61cac6b13256a84ac4ae 127.0.0.1:18353 slave 0549ec03031213f95121ceff6c9c13800aef848c 0 1574132251126 3 connected 0ab96cb79165ddca7d7134f80aea844bd49ae2e1 127.0.0.1:18351 slave 7e97f8a8799e1e28feee630b47319e6f5e1cfaa7 0 1574132250724 4 connected 7e97f8a8799e1e28feee630b47319e6f5e1cfaa7 127.0.0.1:18301 master - 0 1574132250524 4 connected 9832-13107 e67005a46984445e559a1408dd0a4b24a8c92259 127.0.0.1:18304 master - 0 1574132251126 5 connected 6556-9831 a6a8013cf0032f0f36baec3162122b3d993dd2c8 127.0.0.1:18302 master - 0 1574132251126 2 connected 13108-16383 492cdf4b1dedab5fb94e7129da2a0e05f6c46c4f 127.0.0.1:18350 slave 83b7ef98b80a05a4ee795ae6b399c8cde54ad04e 0 1574132251126 6 connected f9f7fcee9009f25618e63d2771ee2529f814c131 127.0.0.1:18354 slave e67005a46984445e559a1408dd0a4b24a8c92259 0 1574132250724 5 connected 83b7ef98b80a05a4ee795ae6b399c8cde54ad04e 127.0.0.1:18300 myself,master - 0 1574132250000 1 connected 0-3279 (2) Scale out You can scale out the current cluster with a new server. Examples Check the current distribution of masters/slaves in each server. matthew@lightningdb:21> cluster distribution +-----------------------+--------+-------+ | HOST | MASTER | SLAVE | +-----------------------+--------+-------+ | fbg04(192.168.111.41) | 3 | 3 | | TOTAL | 3 | 3 | +-----------------------+--------+-------+ Scale out with the new server. matthew@lightningdb:21> cluster scaleout Please type hosts to scaleout separated by comma(,) [127.0.0.1] fbg05 OK, ['fbg05'] Check status of hosts... OK Checking cluster exist... - fbg04 - fbg05 OK +-------+-------+--------+ | HOST | PORT | TYPE | +-------+-------+--------+ | fbg04 | 20100 | MASTER | | fbg04 | 20101 | MASTER | | fbg04 | 20102 | MASTER | | fbg05 | 20100 | MASTER | | fbg05 | 20101 | MASTER | | fbg05 | 20102 | MASTER | | fbg04 | 20150 | SLAVE | | fbg04 | 20151 | SLAVE | | fbg04 | 20152 | SLAVE | | fbg05 | 20150 | SLAVE | | fbg05 | 20151 | SLAVE | | fbg05 | 20152 | SLAVE | +-------+-------+--------+ replicas: 1 Do you want to proceed with replicate according to the above information? (y/n) y Backup redis master log in each MASTER hosts... - fbg04 - fbg05 Backup redis slave log in each SLAVE hosts... - fbg04 - fbg05 create redis data directory in each MASTER - fbg04 - fbg05 create redis data directory in each SLAVE - fbg04 - fbg05 sync conf OK Starting master nodes : fbg04 : 20100|20101|20102 ... Starting master nodes : fbg05 : 20100|20101|20102 ... Starting slave nodes : fbg04 : 20150|20151|20152 ... Starting slave nodes : fbg05 : 20150|20151|20152 ... Wait until all redis process up... alive redis 12/12 Complete all redis process up. Replicate [M] fbg04:20100 - [S] fbg05:20150 Replicate [M] fbg04:20101 - [S] fbg05:20151 Replicate [M] fbg04:20102 - [S] fbg05:20152 Replicate [M] fbg05:20100 - [S] fbg04:20150 Replicate [M] fbg05:20101 - [S] fbg04:20151 Replicate [M] fbg05:20102 - [S] fbg04:20152 6 / 6 replicate completion. 
M: 47f7f65f36fbf1eb89e29ce1fd2facd8bb646f15 192.168.111.41 20100 slots:5462-10922 (5461 slots) M: 2ee3d14c92321132e12cddb90dde8240ea6b8768 192.168.111.44 20101 slots: (0 slots) S: 0516e827969880b2322ae112e70e809b395c6d46 192.168.111.44 20151 slots: (0 slots) S: fd1466ec198951cbe7e172ae34bd5b3db66aa309 192.168.111.44 20150 slots: (0 slots) S: 28e4d04419c90c7b1bb4b067f9e15d4012d313b1 192.168.111.44 20152 slots: (0 slots) S: 56e1d3ab563b23bbf857a8f502d1c4b24ce74a3c 192.168.111.41 20151 slots: (0 slots) M: 00d9cea97499097645eecd0bddf0f4679a6f1be1 192.168.111.44 20100 slots: (0 slots) S: 9a21e798fc8d69a4b04910b9e4b87a69417d33fe 192.168.111.41 20150 slots: (0 slots) M: 6afbfe0ed8d701d269d8b2837253678d3452fb70 192.168.111.41 20102 slots:0-5461 (5462 slots) M: 7e2e3de6daebd6e144365d58db19629cfb1b87d1 192.168.111.41 20101 slots:10923-16383 (5461 slots) S: 1df738824e9d41622158a4102ba4aab355225747 192.168.111.41 20152 slots: (0 slots) M: 71334ecc4e6e1a707b0f7f6c85f0a75ece45f891 192.168.111.44 20102 slots: (0 slots) >>> Performing Cluster Check (using node 192.168.111.41:20100) [OK] All nodes agree about slots configuration. >>> Check for open slots... >>> Check slots coverage... [OK] All 16384 slots covered err_perc: 50.009156 err_perc: 50.018308 err_perc: 50.009156 >>> Rebalancing across 6 nodes. Total weight = 6 2ee3d14c92321132e12cddb90dde8240ea6b8768 balance is -2732 00d9cea97499097645eecd0bddf0f4679a6f1be1 balance is -2731 71334ecc4e6e1a707b0f7f6c85f0a75ece45f891 balance is -2731 47f7f65f36fbf1eb89e29ce1fd2facd8bb646f15 balance is 2731 7e2e3de6daebd6e144365d58db19629cfb1b87d1 balance is 2731 6afbfe0ed8d701d269d8b2837253678d3452fb70 balance is 2732 Moving 2732 slots from 6afbfe0ed8d701d269d8b2837253678d3452fb70 to 2ee3d14c92321132e12cddb90dde8240ea6b8768 ####################...####################
Moving 2731 slots from 7e2e3de6daebd6e144365d58db19629cfb1b87d1 to 00d9cea97499097645eecd0bddf0f4679a6f1be1 ####################...####################
Moving 2731 slots from 47f7f65f36fbf1eb89e29ce1fd2facd8bb646f15 to 71334ecc4e6e1a707b0f7f6c85f0a75ece45f891 ####################...#################### OK The result of scale out matthew@lightningdb:21> cluster distribution +-----------------------+--------+-------+ | HOST | MASTER | SLAVE | +-----------------------+--------+-------+ | fbg04(192.168.111.41) | 3 | 3 | | fbg05(192.168.111.44) | 3 | 3 | | TOTAL | 6 | 6 | +-----------------------+--------+-------+ If user types 'cfc 1', ${SR2_HOME} will be '~/tsr2/cluster_1/tsr2-assembly-1.0.0-SNAPSHOT'.
\u21a9 'cluster-node-time' can be set with using 'config set' command. Its default time is 1200,000 msec. \u21a9","title":"Cluster"},{"location":"cli-cluster/#1-deploy-and-start","text":"(1) Cluster configure redis-{port}.conf is generated with using redis-{master/slave}.conf.template and redis.properties files. matthew@lightningdb:21> cluster configure Check status of hosts... OK sync conf +----------------+--------+ | HOST | STATUS | +----------------+--------+ | 192.168.111.44 | OK | | 192.168.111.41 | OK | +----------------+--------+ OK (2) Cluster start Backup logs of the previous master/slave nodes All log files of previous master/slave nodes in ${SR2_HOME}/logs/redis/ 1 will be moved to ${SR2_HOME}/logs/redis/backup/ . Generate directories to save data Save aof and rdb files of redis-server and RocksDB files in ${SR2_REDIS_DATA} Start redis-server process Start master and slave redis-server with ${SR2_HOME}/conf/redis/redis-{port}.conf file Log files will be saved in ${SR2_HOME}/logs/redis/ ec2-user@lightningdb:1> cluster start Check status of hosts... OK Check cluster exist... - 127.0.0.1 OK Backup redis master log in each MASTER hosts... - 127.0.0.1 Generate redis configuration files for master hosts sync conf +-----------+--------+ | HOST | STATUS | +-----------+--------+ | 127.0.0.1 | OK | +-----------+--------+ Starting master nodes : 127.0.0.1 : 18100|18101|18102|18103|18104 ... Wait until all redis process up... cur: 5 / total: 5 Complete all redis process up Errors ErrorCode 11 Redis-server(master) process with the same port is already running. To resolve this error, use cluster stop or kill {pid of the process} . $ cluster start ... ... [ErrorCode 11] Fail to start... Must be checked running MASTER redis processes! We estimate that redis process is . ErrorCode 12 Redis-server(slave) process with the same port is already running. To resolve this error, use cluster stop or kill {pid of the process} . $ cluster start ... [ErrorCode 12] Fail to start... Must be checked running SLAVE redis processes! We estimate that redis process is . Conf file not exist Conf file is not found. To resove this error, use cluster configure and then cluster start . $ cluster start ... FileNotExistError: ${SR2_HOME}/conf/redis/redis-{port}.conf Max try error \u200b For detail information, please check the log files. $ cluster start ... ClusterRedisError: Fail to start redis: max try exceed Recommendation Command: 'monitor' (3) Cluster create After checking the information of the cluster, create a cluster of LightningDB. Case 1) When redis-server processes are running, create a cluster only. ec2-user@lightningdb:1>cluster create Check status of hosts... OK >>> Creating cluster +-----------+-------+--------+ | HOST | PORT | TYPE | +-----------+-------+--------+ | 127.0.0.1 | 18100 | MASTER | | 127.0.0.1 | 18101 | MASTER | | 127.0.0.1 | 18102 | MASTER | | 127.0.0.1 | 18103 | MASTER | | 127.0.0.1 | 18104 | MASTER | +-----------+-------+--------+ replicas: 0 Do you want to proceed with the create according to the above information? (y/n) y Cluster meet... - 127.0.0.1:18100 - 127.0.0.1:18103 - 127.0.0.1:18104 - 127.0.0.1:18101 - 127.0.0.1:18102 Adding slots... - 127.0.0.1:18100, 3280 - 127.0.0.1:18103, 3276 - 127.0.0.1:18104, 3276 - 127.0.0.1:18101, 3276 - 127.0.0.1:18102, 3276 Check cluster state and asign slot... Ok create cluster complete. Case 2) When redis-server processes are not running, create a cluster after launching redis-server processes with cluster start command. 
ec2-user@lightningdb:4>cluster create Check status of hosts... OK Backup redis master log in each MASTER hosts... - 127.0.0.1 create redis data directory in each MASTER hosts - 127.0.0.1 sync conf +-----------+--------+ | HOST | STATUS | +-----------+--------+ | 127.0.0.1 | OK | +-----------+--------+ OK Starting master nodes : 127.0.0.1 : 18100|18101|18102|18103|18104 ... Wait until all redis process up... cur: 5 / total: 5 Complete all redis process up >>> Creating cluster +-----------+-------+--------+ | HOST | PORT | TYPE | +-----------+-------+--------+ | 127.0.0.1 | 18100 | MASTER | | 127.0.0.1 | 18101 | MASTER | | 127.0.0.1 | 18102 | MASTER | | 127.0.0.1 | 18103 | MASTER | | 127.0.0.1 | 18104 | MASTER | +-----------+-------+--------+ replicas: 0 Do you want to proceed with the create according to the above information? (y/n) y Cluster meet... - 127.0.0.1:18103 - 127.0.0.1:18104 - 127.0.0.1:18101 - 127.0.0.1:18102 - 127.0.0.1:18100 Adding slots... - 127.0.0.1:18103, 3280 - 127.0.0.1:18104, 3276 - 127.0.0.1:18101, 3276 - 127.0.0.1:18102, 3276 - 127.0.0.1:18100, 3276 Check cluster state and asign slot... Ok create cluster complete. Errors When redis servers are not running, this error(Errno 111) will occur. To solve this error, use cluster start command previously. ec2-user@lightningdb:1>cluster create Check status of hosts... OK >>> Creating cluster +-----------+-------+--------+ | HOST | PORT | TYPE | +-----------+-------+--------+ | 127.0.0.1 | 18100 | MASTER | | 127.0.0.1 | 18101 | MASTER | | 127.0.0.1 | 18102 | MASTER | | 127.0.0.1 | 18103 | MASTER | | 127.0.0.1 | 18104 | MASTER | +-----------+-------+--------+ replicas: 0 Do you want to proceed with the create according to the above information? (y/n) y 127.0.0.1:18100 - [Errno 111] Connection refused (4) Cluster stop \u200bGracefully kill all redis-servers(master/slave) with SIGINT \u200b\u200b ec2-user@lightningdb:1> cluster stop Check status of hosts... OK Stopping master cluster of redis... cur: 5 / total: 5 cur: 0 / total: 5 Complete all redis process down Options Force to kill all redis-servers(master/slave) with SIGKILL --force (5) Cluster clean Remove conf files for redis-server and all data(aof, rdb, RocksDB) of LightningDB ec2-user@lightningdb:1> cluster clean Removing redis generated master configuration files - 127.0.0.1 Removing flash db directory, appendonly and dump.rdb files in master - 127.0.0.1 Removing master node configuration - 127.0.0.1 (6) Cluster restart\u200b Process cluster stop and then cluster start .\u200b\u200b Options Force to kill all redis-servers(master/slave) with SIGKILL and then start again. --force-stop Remove all data(aof, rdb, RocksDB, conf files) before starting again. --reset Process cluster create . This command should be called with --reset . --cluster (7) Update version You can update LightningDB by using the 'deploy' command. > c 1 // alias of 'cluster use 1' > deploy (Watch out) Cluster 1 is already deployed. Do you want to deploy again? (y/n) [n] y Select installer Select installer [ INSTALLER LIST ] (1) lightningdb.release.master.5a6a38.bin (2) lightningdb.trial.master.dbcb9e-dirty.bin (3) lightningdb.trial.master.dbcb9e.bin Please enter the number, file path or URL of the installer you want to use. you can also add a file in list by copy to '$FBPATH/releases/' 1 OK, lightningdb.release.master.5a6a38.bin Restore Do you want to restore conf? (y/n) y If the current settings will be reused, type 'y'. 
Check all settings finally Backup path of cluster: ${base-directory}/backup/cluster_${cluster-id}_bak_${time-stamp} Backup path of conf files: $FBAPTH/conf_backup/cluster_${cluster-id}_conf_bak_${time-stamp} +-----------------+---------------------------------------------------+ | NAME | VALUE | +-----------------+---------------------------------------------------+ | installer | lightningdb.release.master.5a6a38.bin | | nodes | nodeA | | | nodeB | | | nodeC | | | nodeD | | master ports | 18100 | | slave ports | 18150-18151 | | ssd count | 3 | | redis data path | ~/sata_ssd/ssd_ | | redis db path | ~/sata_ssd/ssd_ | | flash db path | ~/sata_ssd/ssd_ | +-----------------+---------------------------------------------------+ Do you want to proceed with the deploy accroding to the above information? (y/n) y Check status of hosts... +-----------+--------+ | HOST | STATUS | +-----------+--------+ | nodeA | OK | | nodeB | OK | | nodeC | OK | | nodeD | OK | +-----------+--------+ Checking for cluster exist... +------+--------+ | HOST | STATUS | +------+--------+ Backup conf of cluster 1... OK, cluster_1_conf_bak_ Backup info of cluster 1 at nodeA... OK, cluster_1_bak_ Backup info of cluster 1 at nodeB... OK, cluster_1_bak_ Backup info of cluster 1 at nodeC... OK, cluster_1_bak_ Backup info of cluster 1 at nodeD... OK, cluster_1_bak_ Transfer installer and execute... - nodeA - nodeB - nodeC - nodeD Sync conf... Complete to deploy cluster 1. Cluster 1 selected. Restart > cluster restart After the restart, the new version will be applied.","title":"1. Deploy and Start"},{"location":"cli-cluster/#2-monitor","text":"(1) Cluster use Change the cluster to use LTCLI. Use cluster use or c commands. Examples ec2-user@lightningdb:2> cluster use 1 Cluster '1' selected. ec2-user@lightningdb:1> c 2 Cluster '2' selected. (2) Cluster ls List the deployed clusters. Examples ec2-user@lightningdb:2> cluster ls [1, 2] (3) Cluster rowcount Check the count of records that are stored in the cluster. Examples ec2-user@lightningdb:1> cluster rowcount 0 (4) Cluster tree User can check the status of master nodes and slaves and show which master and slave nodes are linked. Examples ec2-user@lightningdb:9> cluster tree 127.0.0.1:18900(connected) |__ 127.0.0.1:18950(connected) 127.0.0.1:18901(connected) |__ 127.0.0.1:18951(connected) 127.0.0.1:18902(connected) |__ 127.0.0.1:18952(connected) 127.0.0.1:18903(connected) |__ 127.0.0.1:18953(connected) 127.0.0.1:18904(connected) |__ 127.0.0.1:18954(connected) 127.0.0.1:18905(connected) |__ 127.0.0.1:18955(connected) 127.0.0.1:18906(connected) |__ 127.0.0.1:18956(connected) (5) Cluster distribution The distribution of Master/Slave nodes are displayed with their hostnames(IP addresses). Examples matthew@lightningdb:21> cluster distribution +-----------------------+--------+-------+ | HOST | MASTER | SLAVE | +-----------------------+--------+-------+ | fbg04(192.168.111.41) | 4 | 2 | | fbg05(192.168.111.44) | 2 | 4 | | TOTAL | 6 | 6 | +-----------------------+--------+-------+","title":"2. Monitor"},{"location":"cli-cluster/#3-failover","text":"(1) Cluster failover_list failovered masters The node, that initialized as a slave by the cluster, becomes a master by failover now. no-slave masters Masters without slaves. You need to replicate the failbacked slaves to this node. no-slot masters Not yet added into the cluster or masters without slot failbacked slaves The nodes, that initialized as a master, becomes a slave by failback now. 
Examples matthew@lightningdb:21> cluster failover_list 1) failovered masters: 192.168.111.44:20152 192.168.111.44:20153 192.168.111.44:20156 2) no-slave masters: 192.168.111.44:20100 192.168.111.41:20101 3) no-slot masters: 192.168.111.44:20152 4) failbacked slaves: 192.168.111.41:20102 192.168.111.41:20105 (2) Cluster do_replicate You can add a node as the slave of a master nodes like cluster do_replicate {slave's IP}:{slave's Port} {master's IP}:{master's Port} . The IP addresses of masters or slaves can be replaced with their hostnames. Examples matthew@lightningdb:21> cluster tree 192.168.111.44:20101(connected) |__ 192.168.111.44:20151(connected) 192.168.111.44:20102(connected) |__ 192.168.111.44:20152(connected) 192.168.111.44:20150(connected) |__ 192.168.111.44:20100(connected) matthew@lightningdb:21> cluster do_replicate 192.168.111.44:20100 192.168.111.44:20101 Start to replicate... OK matthew@lightningdb:21> cluster tree 192.168.111.44:20101(connected) |__ 192.168.111.44:20100(connected) |__ 192.168.111.44:20151(connected) 192.168.111.44:20102(connected) |__ 192.168.111.44:20152(connected) 192.168.111.44:20150(connected) with hostnames, matthew@lightningdb:21> cluster do_replicate fbg05:20100 fbg05:20101 Start to replicate... OK (3) Cluster find_noaddr & cluster forget_noaddr You can find and remove 'noaddr' nodes in the current cluster. 'noaddr' nodes are no more valid nodes. Examples matthew@lightningdb:21> cluster find_noaddr +------------------------------------------+ | UUID | +------------------------------------------+ | 40675af73cd8fa1272a20fe9536ad19c398b5bca | +------------------------------------------+ matthew@lightningdb:21> cluster forget_noaddr \"27\" nodes have forgot \"40675af73cd8fa1272a20fe9536ad19c398b5bca\" matthew@lightningdb:21> cluster find_noaddr +------+ | UUID | +------+ (4) Cluster failover If a master node is killed, its slave node will automatically promote after 'cluster-node-time' 2 . User can promote the slave node immediately by using the 'cluster failover' command. Examples Step 1) Check the status of the cluster In this case, '127.0.0.1:18902' node is killed. ec2-user@lightningdb:9> cluster tree 127.0.0.1:18900(connected) |__ 127.0.0.1:18950(connected) 127.0.0.1:18901(connected) |__ 127.0.0.1:18951(connected) 127.0.0.1:18902(disconnected) <--- Killed! |__ 127.0.0.1:18952(connected) 127.0.0.1:18903(connected) |__ 127.0.0.1:18953(connected) 127.0.0.1:18904(connected) |__ 127.0.0.1:18954(connected) 127.0.0.1:18905(connected) |__ 127.0.0.1:18955(connected) 127.0.0.1:18906(connected) |__ 127.0.0.1:18956(connected) Step 2) Do failover with 'cluster failover' command ec2-user@lightningdb:9> cluster failover failover 127.0.0.1:18952 for 127.0.0.1:18902 OK ec2-user@lightningdb:9> cluster tree 127.0.0.1:18900(connected) |__ 127.0.0.1:18950(connected) 127.0.0.1:18901(connected) |__ 127.0.0.1:18951(connected) 127.0.0.1:18902(disconnected) <--- Killed! 127.0.0.1:18903(connected) |__ 127.0.0.1:18953(connected) 127.0.0.1:18904(connected) |__ 127.0.0.1:18954(connected) 127.0.0.1:18905(connected) |__ 127.0.0.1:18955(connected) 127.0.0.1:18906(connected) |__ 127.0.0.1:18956(connected) 127.0.0.1:18952(connected) <--- Promoted to master! (5) Cluster failback With 'cluster failback' command, the killed node is restarted and added to the cluster as the slave node. 
Examples ec2-user@lightningdb:9> cluster failback run 127.0.0.1:18902 ec2-user@lightningdb:9> cluster tree 127.0.0.1:18900(connected) |__ 127.0.0.1:18950(connected) 127.0.0.1:18901(connected) |__ 127.0.0.1:18951(connected) 127.0.0.1:18903(connected) |__ 127.0.0.1:18953(connected) 127.0.0.1:18904(connected) |__ 127.0.0.1:18954(connected) 127.0.0.1:18905(connected) |__ 127.0.0.1:18955(connected) 127.0.0.1:18906(connected) |__ 127.0.0.1:18956(connected) 127.0.0.1:18952(connected) <--- Promoted to master! |__ 127.0.0.1:18902(connected) <--- Failbacked. Now this node is slave! (6) Cluster reset_distribution To initialize the node distribution, use 'reset-distribution'. Examples matthew@lightningdb:21> cluster failover_list 1) failovered masters: 192.168.111.44:20152 2) no-slave masters: 3) no-slot masters: 4) failbacked slaves: 192.168.111.41:20101 matthew@lightningdb:21> cluster reset_distribution '192.168.111.41:20101' will be master... OK matthew@lightningdb:21> cluster failover_list 1) failovered masters: 2) no-slave masters: 3) no-slot masters: 4) failbacked slaves: (7) Cluster nodes_with_dir & Cluster masters_with_dir Cluster nodes_with_dir List up all nodes those are using the disk with HW fault. Cluster masters_with_dir List up all master those are using the disk with HW fault. Examples matthew@lightningdb:21> cluster nodes_with_dir 192.168.111.44 matthew03 +----------------+-------+------------------------------------------+ | HOST | PORT | PATH | +----------------+-------+------------------------------------------+ | 192.168.111.44 | 20102 | /sata_ssd/ssd_02/matthew03/nvkvs/matthew | | 192.168.111.44 | 20105 | /sata_ssd/ssd_02/matthew03/nvkvs/matthew | | 192.168.111.44 | 20150 | /sata_ssd/ssd_02/matthew03/nvkvs/matthew | | 192.168.111.44 | 20153 | /sata_ssd/ssd_02/matthew03/nvkvs/matthew | | 192.168.111.44 | 20156 | /sata_ssd/ssd_02/matthew03/nvkvs/matthew | +----------------+-------+------------------------------------------+ matthew@lightningdb:21> cluster masters_with_dir 192.168.111.44 matthew03 +----------------+-------+------------------------------------------+ | HOST | PORT | PATH | +----------------+-------+------------------------------------------+ | 192.168.111.44 | 20102 | /sata_ssd/ssd_02/matthew03/nvkvs/matthew | | 192.168.111.44 | 20105 | /sata_ssd/ssd_02/matthew03/nvkvs/matthew | +----------------+-------+------------------------------------------+ with hostnames, matthew@lightningdb:21> cluster nodes_with_dir fbg05 matthew02 +-------+-------+------------------------------------------+ | HOST | PORT | PATH | +-------+-------+------------------------------------------+ | fbg05 | 20101 | /sata_ssd/ssd_02/matthew02/nvkvs/matthew | | fbg05 | 20152 | /sata_ssd/ssd_02/matthew02/nvkvs/matthew | +-------+-------+------------------------------------------+ matthew@lightningdb:21> cluster masters_with_dir fbg05 matthew02 +-------+-------+------------------------------------------+ | HOST | PORT | PATH | +-------+-------+------------------------------------------+ | fbg05 | 20101 | /sata_ssd/ssd_02/matthew02/nvkvs/matthew | +-------+-------+------------------------------------------+ (8) Cluster failover_with_dir Do failover and change the master using the disk to the slave Examples matthew@lightningdb:21> cluster masters_with_dir 192.168.111.44 matthew03 +----------------+-------+------------------------------------------+ | HOST | PORT | PATH | +----------------+-------+------------------------------------------+ | 192.168.111.44 | 20102 | 
/sata_ssd/ssd_02/matthew03/nvkvs/matthew | | 192.168.111.44 | 20105 | /sata_ssd/ssd_02/matthew03/nvkvs/matthew | +----------------+-------+------------------------------------------+ matthew@lightningdb:21> cluster failover_list 1) failovered masters: 2) no-slave masters: 3) no-slot masters: 4) failbacked slaves: matthew@lightningdb:21> cluster failover_with_dir 192.168.111.44 matthew03 '192.168.111.41:20152' will be master... OK '192.168.111.41:20155' will be master... OK matthew@lightningdb:21> cluster failover_list 1) failovered masters: 192.168.111.41:20152 192.168.111.41:20155 2) no-slave masters: 3) no-slot masters: 4) failbacked slaves: 192.168.111.44:20102 192.168.111.44:20105 matthew@lightningdb:21> cluster masters_with_dir 192.168.111.44 matthew03 +------+------+------+ | HOST | PORT | PATH | +------+------+------+ with hostnames, matthew@lightningdb:21> cluster masters_with_dir fbg05 matthew01 +-------+-------+------------------------------------------+ | HOST | PORT | PATH | +-------+-------+------------------------------------------+ | fbg05 | 20151 | /sata_ssd/ssd_02/matthew01/nvkvs/matthew | +-------+-------+------------------------------------------+ matthew@lightningdb:21> cluster tree 192.168.111.44:20102(connected) |__ 192.168.111.44:20152(connected) 192.168.111.44:20150(connected) |__ 192.168.111.44:20100(connected) 192.168.111.44:20151(connected) |__ 192.168.111.44:20101(connected) matthew@lightningdb:21> cluster failover_with_dir fbg05 matthew01 '192.168.111.44:20101' will be master... OK matthew@lightningdb:21> cluster tree 192.168.111.44:20101(connected) |__ 192.168.111.44:20151(connected) 192.168.111.44:20102(connected) |__ 192.168.111.44:20152(connected) 192.168.111.44:20150(connected) |__ 192.168.111.44:20100(connected) (9) Cluster force_failover When a server need to be shutdown by HW fault or checking, change all masters in the server to slaves by failover of those slaves. Examples matthew@lightningdb:21> cluster distribution +----------------+--------+-------+ | HOST | MASTER | SLAVE | +----------------+--------+-------+ | 192.168.111.44 | 7 | 7 | | 192.168.111.41 | 7 | 7 | | TOTAL | 14 | 14 | +----------------+--------+-------+ matthew@lightningdb:21> cluster force_failover 192.168.111.41 '192.168.111.44:20150' will be master... OK '192.168.111.44:20151' will be master... OK '192.168.111.44:20152' will be master... OK '192.168.111.44:20153' will be master... OK '192.168.111.44:20154' will be master... OK '192.168.111.44:20155' will be master... OK '192.168.111.44:20156' will be master... OK matthew@lightningdb:21> cluster distribution +----------------+--------+-------+ | HOST | MASTER | SLAVE | +----------------+--------+-------+ | 192.168.111.44 | 14 | 0 | | 192.168.111.41 | 0 | 14 | | TOTAL | 14 | 14 | +----------------+--------+-------+ matthew@lightningdb:21>","title":"3. Failover"},{"location":"cli-cluster/#4-scale-out","text":"(1) Cluster add_slave Warning Before using the add-slave command, ingestion to master nodes should be stopped. After replication and sync between master and slave are completed, ingestion will be available again. You can add a slave to a cluster that is configured only with the master without redundancy. Create cluster only with masters Procedure for configuring the test environment. If cluster with the only masters already exists, go to the add slave info . Proceed with the deploy. Enter 0 in replicas as shown below when deploy. 
ec2-user@lightningdb:2> deploy 3 Select installer [ INSTALLER LIST ] (1) lightningdb.dev.master.5a6a38.bin Please enter the number, file path or url of the installer you want to use. you can also add file in list by copy to '$FBPATH/releases/' https://flashbase.s3.ap-northeast-2.amazonaws.com/lightningdb.release.master.5a6a38.bin Downloading lightningdb.release.master.5a6a38.bin [==================================================] 100% OK, lightningdb.release.master.5a6a38.bin Please type host list separated by comma(,) [127.0.0.1] OK, ['127.0.0.1'] How many masters would you like to create on each host? [5] OK, 5 Please type ports separate with comma(,) and use hyphen(-) for range. [18300-18304] OK, ['18300-18304'] How many replicas would you like to create on each master? [0] OK, 0 How many ssd would you like to use? [3] OK, 3 Type prefix of db path [~/sata_ssd/ssd_] OK, ~/sata_ssd/ssd_ +--------------+---------------------------------+ | NAME | VALUE | +--------------+---------------------------------+ | installer | lightningdb.dev.master.5a6a38.bin | | hosts | 127.0.0.1 | | master ports | 18300-18304 | | ssd count | 3 | | db path | ~/sata_ssd/ssd_ | +--------------+---------------------------------+ Do you want to proceed with the deploy accroding to the above information? (y/n) y Check status of hosts... +-----------+--------+ | HOST | STATUS | +-----------+--------+ | 127.0.0.1 | OK | +-----------+--------+ OK Checking for cluster exist... +-----------+--------+ | HOST | STATUS | +-----------+--------+ | 127.0.0.1 | CLEAN | +-----------+--------+ OK Transfer installer and execute... - 127.0.0.1 Sync conf... Complete to deploy cluster 3. Cluster '3' selected. When the deploy is complete, start and create the cluster. ec2-user@lightningdb:3> cluster start Check status of hosts... OK Check cluster exist... - 127.0.0.1 OK Backup redis master log in each MASTER hosts... - 127.0.0.1 create redis data directory in each MASTER hosts - 127.0.0.1 sync conf +-----------+--------+ | HOST | STATUS | +-----------+--------+ | 127.0.0.1 | OK | +-----------+--------+ OK Starting master nodes : 127.0.0.1 : 18300|18301|18302|18303|18304 ... Wait until all redis process up... cur: 5 / total: 5 Complete all redis process up ec2-user@lightningdb:3> cluster create Check status of hosts... OK >>> Creating cluster +-----------+-------+--------+ | HOST | PORT | TYPE | +-----------+-------+--------+ | 127.0.0.1 | 18300 | MASTER | | 127.0.0.1 | 18301 | MASTER | | 127.0.0.1 | 18302 | MASTER | | 127.0.0.1 | 18303 | MASTER | | 127.0.0.1 | 18304 | MASTER | +-----------+-------+--------+ replicas: 0 Do you want to proceed with the create according to the above information? (y/n) y Cluster meet... - 127.0.0.1:18300 - 127.0.0.1:18303 - 127.0.0.1:18304 - 127.0.0.1:18301 - 127.0.0.1:18302 Adding slots... - 127.0.0.1:18300, 3280 - 127.0.0.1:18303, 3276 - 127.0.0.1:18304, 3276 - 127.0.0.1:18301, 3276 - 127.0.0.1:18302, 3276 Check cluster state and asign slot... Ok create cluster complete. ec2-user@lightningdb:3> Add slave info Open the conf file. ec2-user@lightningdb:3> conf cluster You can modify redis.properties by entering the command as shown above. 
#!/bin/bash ## Master hosts and ports export SR2_REDIS_MASTER_HOSTS=( \"127.0.0.1\" ) export SR2_REDIS_MASTER_PORTS=( $(seq 18300 18304) ) ## Slave hosts and ports (optional) [[export]] SR2_REDIS_SLAVE_HOSTS=( \"127.0.0.1\" ) [[export]] SR2_REDIS_SLAVE_PORTS=( $(seq 18600 18609) ) ## only single data directory in redis db and flash db ## Must exist below variables; 'SR2_REDIS_DATA', 'SR2_REDIS_DB_PATH' and 'SR2_FLASH_DB_PATH' [[export]] SR2_REDIS_DATA=\"/nvdrive0/nvkvs/redis\" [[export]] SR2_REDIS_DB_PATH=\"/nvdrive0/nvkvs/redis\" [[export]] SR2_FLASH_DB_PATH=\"/nvdrive0/nvkvs/flash\" ## multiple data directory in redis db and flash db export SSD_COUNT=3 [[export]] HDD_COUNT=3 export SR2_REDIS_DATA=\"~/sata_ssd/ssd_\" export SR2_REDIS_DB_PATH=\"~/sata_ssd/ssd_\" export SR2_FLASH_DB_PATH=\"~/sata_ssd/ssd_\" ####################################################### # Example : only SSD data directory [[export]] SSD_COUNT=3 [[export]] SR2_REDIS_DATA=\"/ssd_\" [[export]] SR2_REDIS_DB_PATH=\"/ssd_\" [[export]] SR2_FLASH_DB_PATH=\"/ssd_\" ####################################################### Modify SR2_REDIS_SLAVE_HOSTS and SR2_REDIS_SLAVE_PORTS as shown below. #!/bin/bash ## Master hosts and ports export SR2_REDIS_MASTER_HOSTS=( \"127.0.0.1\" ) export SR2_REDIS_MASTER_PORTS=( $(seq 18300 18304) ) ## Slave hosts and ports (optional) export SR2_REDIS_SLAVE_HOSTS=( \"127.0.0.1\" ) export SR2_REDIS_SLAVE_PORTS=( $(seq 18350 18354) ) ## only single data directory in redis db and flash db ## Must exist below variables; 'SR2_REDIS_DATA', 'SR2_REDIS_DB_PATH' and 'SR2_FLASH_DB_PATH' [[export]] SR2_REDIS_DATA=\"/nvdrive0/nvkvs/redis\" [[export]] SR2_REDIS_DB_PATH=\"/nvdrive0/nvkvs/redis\" [[export]] SR2_FLASH_DB_PATH=\"/nvdrive0/nvkvs/flash\" ## multiple data directory in redis db and flash db export SSD_COUNT=3 [[export]] HDD_COUNT=3 export SR2_REDIS_DATA=\"~/sata_ssd/ssd_\" export SR2_REDIS_DB_PATH=\"~/sata_ssd/ssd_\" export SR2_FLASH_DB_PATH=\"~/sata_ssd/ssd_\" ####################################################### # Example : only SSD data directory [[export]] SSD_COUNT=3 [[export]] SR2_REDIS_DATA=\"/ssd_\" [[export]] SR2_REDIS_DB_PATH=\"/ssd_\" [[export]] SR2_FLASH_DB_PATH=\"/ssd_\" ####################################################### Save the modification and exit. ec2-user@lightningdb:3> conf cluster Check status of hosts... OK sync conf OK Complete edit Execute cluster add-slave command ec2-user@lightningdb:3> cluster add-slave Check status of hosts... OK Check cluster exist... - 127.0.0.1 OK clean redis conf, node conf, db data of master clean redis conf, node conf, db data of slave - 127.0.0.1 Backup redis slave log in each SLAVE hosts... - 127.0.0.1 create redis data directory in each SLAVE hosts - 127.0.0.1 sync conf OK Starting slave nodes : 127.0.0.1 : 18350|18351|18352|18353|18354 ... Wait until all redis process up... cur: 10 / total: 10 Complete all redis process up replicate [M] 127.0.0.1 18300 - [S] 127.0.0.1 18350 replicate [M] 127.0.0.1 18301 - [S] 127.0.0.1 18351 replicate [M] 127.0.0.1 18302 - [S] 127.0.0.1 18352 replicate [M] 127.0.0.1 18303 - [S] 127.0.0.1 18353 replicate [M] 127.0.0.1 18304 - [S] 127.0.0.1 18354 5 / 5 meet complete. 
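Besides cli cluster nodes shown in the next step, the master-slave pairing can also be checked at a glance with the cluster tree command used in the failover examples above. A minimal sketch of the expected layout for this example, assuming the masters 18300-18304 and the slaves 18350-18354 configured above: ec2-user@lightningdb:3> cluster tree 127.0.0.1:18300(connected) |__ 127.0.0.1:18350(connected) 127.0.0.1:18301(connected) |__ 127.0.0.1:18351(connected) 127.0.0.1:18302(connected) |__ 127.0.0.1:18352(connected) 127.0.0.1:18303(connected) |__ 127.0.0.1:18353(connected) 127.0.0.1:18304(connected) |__ 127.0.0.1:18354(connected) 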
Check configuration information ec2-user@lightningdb:3> cli cluster nodes 0549ec03031213f95121ceff6c9c13800aef848c 127.0.0.1:18303 master - 0 1574132251126 3 connected 3280-6555 1b09519d37ebb1c09095158b4f1c9f318ddfc747 127.0.0.1:18352 slave a6a8013cf0032f0f36baec3162122b3d993dd2c8 0 1574132251025 6 connected c7dc4815e24054104dff61cac6b13256a84ac4ae 127.0.0.1:18353 slave 0549ec03031213f95121ceff6c9c13800aef848c 0 1574132251126 3 connected 0ab96cb79165ddca7d7134f80aea844bd49ae2e1 127.0.0.1:18351 slave 7e97f8a8799e1e28feee630b47319e6f5e1cfaa7 0 1574132250724 4 connected 7e97f8a8799e1e28feee630b47319e6f5e1cfaa7 127.0.0.1:18301 master - 0 1574132250524 4 connected 9832-13107 e67005a46984445e559a1408dd0a4b24a8c92259 127.0.0.1:18304 master - 0 1574132251126 5 connected 6556-9831 a6a8013cf0032f0f36baec3162122b3d993dd2c8 127.0.0.1:18302 master - 0 1574132251126 2 connected 13108-16383 492cdf4b1dedab5fb94e7129da2a0e05f6c46c4f 127.0.0.1:18350 slave 83b7ef98b80a05a4ee795ae6b399c8cde54ad04e 0 1574132251126 6 connected f9f7fcee9009f25618e63d2771ee2529f814c131 127.0.0.1:18354 slave e67005a46984445e559a1408dd0a4b24a8c92259 0 1574132250724 5 connected 83b7ef98b80a05a4ee795ae6b399c8cde54ad04e 127.0.0.1:18300 myself,master - 0 1574132250000 1 connected 0-3279 (2) Scale out You can scale out the current cluster with a new server. Examples Check the current distribution of masters/slaves in each server. matthew@lightningdb:21> cluster distribution +-----------------------+--------+-------+ | HOST | MASTER | SLAVE | +-----------------------+--------+-------+ | fbg04(192.168.111.41) | 3 | 3 | | TOTAL | 3 | 3 | +-----------------------+--------+-------+ Scale out with the new server. matthew@lightningdb:21> cluster scaleout Please type hosts to scaleout separated by comma(,) [127.0.0.1] fbg05 OK, ['fbg05'] Check status of hosts... OK Checking cluster exist... - fbg04 - fbg05 OK +-------+-------+--------+ | HOST | PORT | TYPE | +-------+-------+--------+ | fbg04 | 20100 | MASTER | | fbg04 | 20101 | MASTER | | fbg04 | 20102 | MASTER | | fbg05 | 20100 | MASTER | | fbg05 | 20101 | MASTER | | fbg05 | 20102 | MASTER | | fbg04 | 20150 | SLAVE | | fbg04 | 20151 | SLAVE | | fbg04 | 20152 | SLAVE | | fbg05 | 20150 | SLAVE | | fbg05 | 20151 | SLAVE | | fbg05 | 20152 | SLAVE | +-------+-------+--------+ replicas: 1 Do you want to proceed with replicate according to the above information? (y/n) y Backup redis master log in each MASTER hosts... - fbg04 - fbg05 Backup redis slave log in each SLAVE hosts... - fbg04 - fbg05 create redis data directory in each MASTER - fbg04 - fbg05 create redis data directory in each SLAVE - fbg04 - fbg05 sync conf OK Starting master nodes : fbg04 : 20100|20101|20102 ... Starting master nodes : fbg05 : 20100|20101|20102 ... Starting slave nodes : fbg04 : 20150|20151|20152 ... Starting slave nodes : fbg05 : 20150|20151|20152 ... Wait until all redis process up... alive redis 12/12 Complete all redis process up. Replicate [M] fbg04:20100 - [S] fbg05:20150 Replicate [M] fbg04:20101 - [S] fbg05:20151 Replicate [M] fbg04:20102 - [S] fbg05:20152 Replicate [M] fbg05:20100 - [S] fbg04:20150 Replicate [M] fbg05:20101 - [S] fbg04:20151 Replicate [M] fbg05:20102 - [S] fbg04:20152 6 / 6 replicate completion. 
M: 47f7f65f36fbf1eb89e29ce1fd2facd8bb646f15 192.168.111.41 20100 slots:5462-10922 (5461 slots) M: 2ee3d14c92321132e12cddb90dde8240ea6b8768 192.168.111.44 20101 slots: (0 slots) S: 0516e827969880b2322ae112e70e809b395c6d46 192.168.111.44 20151 slots: (0 slots) S: fd1466ec198951cbe7e172ae34bd5b3db66aa309 192.168.111.44 20150 slots: (0 slots) S: 28e4d04419c90c7b1bb4b067f9e15d4012d313b1 192.168.111.44 20152 slots: (0 slots) S: 56e1d3ab563b23bbf857a8f502d1c4b24ce74a3c 192.168.111.41 20151 slots: (0 slots) M: 00d9cea97499097645eecd0bddf0f4679a6f1be1 192.168.111.44 20100 slots: (0 slots) S: 9a21e798fc8d69a4b04910b9e4b87a69417d33fe 192.168.111.41 20150 slots: (0 slots) M: 6afbfe0ed8d701d269d8b2837253678d3452fb70 192.168.111.41 20102 slots:0-5461 (5462 slots) M: 7e2e3de6daebd6e144365d58db19629cfb1b87d1 192.168.111.41 20101 slots:10923-16383 (5461 slots) S: 1df738824e9d41622158a4102ba4aab355225747 192.168.111.41 20152 slots: (0 slots) M: 71334ecc4e6e1a707b0f7f6c85f0a75ece45f891 192.168.111.44 20102 slots: (0 slots) >>> Performing Cluster Check (using node 192.168.111.41:20100) [OK] All nodes agree about slots configuration. >>> Check for open slots... >>> Check slots coverage... [OK] All 16384 slots covered err_perc: 50.009156 err_perc: 50.018308 err_perc: 50.009156 >>> Rebalancing across 6 nodes. Total weight = 6 2ee3d14c92321132e12cddb90dde8240ea6b8768 balance is -2732 00d9cea97499097645eecd0bddf0f4679a6f1be1 balance is -2731 71334ecc4e6e1a707b0f7f6c85f0a75ece45f891 balance is -2731 47f7f65f36fbf1eb89e29ce1fd2facd8bb646f15 balance is 2731 7e2e3de6daebd6e144365d58db19629cfb1b87d1 balance is 2731 6afbfe0ed8d701d269d8b2837253678d3452fb70 balance is 2732 Moving 2732 slots from 6afbfe0ed8d701d269d8b2837253678d3452fb70 to 2ee3d14c92321132e12cddb90dde8240ea6b8768 ########...########
Moving 2731 slots from 7e2e3de6daebd6e144365d58db19629cfb1b87d1 to 00d9cea97499097645eecd0bddf0f4679a6f1be1 ########...########
Moving 2731 slots from 47f7f65f36fbf1eb89e29ce1fd2facd8bb646f15 to 71334ecc4e6e1a707b0f7f6c85f0a75ece45f891 ########...######## OK The result of scale out matthew@lightningdb:21> cluster distribution +-----------------------+--------+-------+ | HOST | MASTER | SLAVE | +-----------------------+--------+-------+ | fbg04(192.168.111.41) | 3 | 3 | | fbg05(192.168.111.44) | 3 | 3 | | TOTAL | 6 | 6 | +-----------------------+--------+-------+ If user types 'cfc 1', ${SR2_HOME} will be '~/tsr2/cluster_1/tsr2-assembly-1.0.0-SNAPSHOT'. 
\u21a9 'cluster-node-time' can be set with the 'config set' command. Its default value is 1,200,000 msec. \u21a9","title":"4. Scale out"},{"location":"cli-conf/","text":"With the conf commands, you can configure the cluster. You can open a template file with one of the options below (cluster / master / thriftserver). After saving the template file, the configuration will be synchronized with all nodes in the current cluster. 1. cluster \u00b6 conf cluster will open the redis.properties file of the current cluster. matthew@lightningdb:21> conf cluster Check status of hosts... OK Sync conf... OK Complete edit. Example of redis.properties #!/bin/bash ## Master hosts and ports export SR2_REDIS_MASTER_HOSTS=( \"192.168.111.41\" \"192.168.111.44\" ) export SR2_REDIS_MASTER_PORTS=( $(seq 20100 20102) ) ## Slave hosts and ports (optional) export SR2_REDIS_SLAVE_HOSTS=( \"192.168.111.41\" \"192.168.111.44\" ) export SR2_REDIS_SLAVE_PORTS=( $(seq 20150 20152) ) ## multiple data directory in redis db and flash db export SSD_COUNT=3 export SR2_REDIS_DATA=\"/sata_ssd/ssd_02/matthew\" export SR2_REDIS_DB_PATH=\"/sata_ssd/ssd_02/matthew\" export SR2_FLASH_DB_PATH=\"/sata_ssd/ssd_02/matthew\" 2. master \u00b6 conf master will open the redis-master.conf.template file of the current cluster. This file will configure all redis-servers in the current cluster. matthew@lightningdb:21> conf master Check status of hosts... OK Sync conf... OK Complete edit. Example of redis-master.conf.template # In short... if you have slaves attached it is suggested that you set a lower # limit for maxmemory so that there is some free RAM on the system for slave # output buffers (but this is not needed if the policy is 'noeviction'). # # maxmemory # maxmemory should be greater than 51mb in TSR2 maxmemory 300mb 3. thrifserver \u00b6 conf thriftserver will open the thriftserver.properties file of the current thriftserver. matthew@lightningdb:21> conf thriftserver Check status of hosts... OK Sync conf... OK Complete edit. 
Example of thriftserver.properties #!/bin/bash ############################################################################### # Common variables SPARK_CONF=${SPARK_CONF:-$SPARK_HOME/conf} SPARK_BIN=${SPARK_BIN:-$SPARK_HOME/bin} SPARK_SBIN=${SPARK_SBIN:-$SPARK_HOME/sbin} SPARK_LOG=${SPARK_LOG:-$SPARK_HOME/logs} SPARK_METRICS=${SPARK_CONF}/metrics.properties SPARK_UI_PORT=${SPARK_UI_PORT:-14050} EXECUTERS=12 EXECUTER_CORES=32 HIVE_METASTORE_URL='' HIVE_HOST=${HIVE_HOST:-localhost} HIVE_PORT=${HIVE_PORT:-13000} COMMON_CLASSPATH=$(find $SR2_LIB -name 'tsr2*' -o -name 'spark-r2*' -o -name '*jedis*' -o -name 'commons*' -o -name 'jdeferred*' \\ -o -name 'geospark*' -o -name 'gt-*' | tr '\\n' ':') ############################################################################### # Driver DRIVER_MEMORY=6g DRIVER_CLASSPATH=$COMMON_CLASSPATH ############################################################################### # Execute EXECUTOR_MEMORY=2g EXECUTOR_CLASSPATH=$COMMON_CLASSPATH ############################################################################### # Thrift Server logs EVENT_LOG_ENABLED=false EVENT_LOG_DIR=/nvdrive0/thriftserver-event-logs EVENT_LOG_ROLLING_DIR=/nvdrive0/thriftserver-event-logs-rolling EVENT_LOG_SAVE_MIN=60 EXTRACTED_EVENT_LOG_SAVE_DAY=5 SPARK_LOG_SAVE_MIN=2000 ############## ######################## # Thrift Name cluster_id=$(echo $SR2_HOME | awk -F \"cluster_\" '{print $2}' | awk -F '/' '{print $1}') host=$(hostname) THRIFT_NAME=\"ThriftServer_${host}_${cluster_id}\" ######################## ############################################################################### # AGGREGATION PUSHDOWN AGG_PUSHDOWN=true ############################################################################### 4. sync \u00b6 With sync {IP address} or sync {hostname} command, you can load the configurations of all clusters from the remote server to localhost. matthew@lightningdb:21> sync fbg04 Localhost already has the information on the cluster 21. Do you want to overwrite? (y/n) [n] y Localhost already has the information on the cluster 20. Do you want to overwrite? (y/n) [n] n Importing cluster complete...","title":"Configuration"},{"location":"cli-conf/#1-cluster","text":"conf cluster will open redis.properties file of the current cluster. matthew@lightningdb:21> conf cluster Check status of hosts... OK Sync conf... OK Complete edit. Example of redis.properties #!/bin/bash ## Master hosts and ports export SR2_REDIS_MASTER_HOSTS=( \"192.168.111.41\" \"192.168.111.44\" ) export SR2_REDIS_MASTER_PORTS=( $(seq 20100 20102) ) ## Slave hosts and ports (optional) export SR2_REDIS_SLAVE_HOSTS=( \"192.168.111.41\" \"192.168.111.44\" ) export SR2_REDIS_SLAVE_PORTS=( $(seq 20150 20152) ) ## multiple data directory in redis db and flash db export SSD_COUNT=3 export SR2_REDIS_DATA=\"/sata_ssd/ssd_02/matthew\" export SR2_REDIS_DB_PATH=\"/sata_ssd/ssd_02/matthew\" export SR2_FLASH_DB_PATH=\"/sata_ssd/ssd_02/matthew\"","title":"1. cluster"},{"location":"cli-conf/#2-master","text":"conf master will open redis-master.conf.template file of the current cluster. This file will configure all redis-servers in the current cluster. matthew@lightningdb:21> conf master Check status of hosts... OK Sync conf... OK Complete edit. Example of redis-master.conf.template # In short... if you have slaves attached it is suggested that you set a lower # limit for maxmemory so that there is some free RAM on the system for slave # output buffers (but this is not needed if the policy is 'noeviction'). 
# # maxmemory # maxmemory should be greater than 51mb in TSR2 maxmemory 300mb","title":"2. master"},{"location":"cli-conf/#3-thrifserver","text":"conf thrifserver will open thriftserver.properties file of the current thriftserver. matthew@lightningdb:21> conf thriftserver Check status of hosts... OK Sync conf... OK Complete edit. Example of thriftserver.properties #!/bin/bash ############################################################################### # Common variables SPARK_CONF=${SPARK_CONF:-$SPARK_HOME/conf} SPARK_BIN=${SPARK_BIN:-$SPARK_HOME/bin} SPARK_SBIN=${SPARK_SBIN:-$SPARK_HOME/sbin} SPARK_LOG=${SPARK_LOG:-$SPARK_HOME/logs} SPARK_METRICS=${SPARK_CONF}/metrics.properties SPARK_UI_PORT=${SPARK_UI_PORT:-14050} EXECUTERS=12 EXECUTER_CORES=32 HIVE_METASTORE_URL='' HIVE_HOST=${HIVE_HOST:-localhost} HIVE_PORT=${HIVE_PORT:-13000} COMMON_CLASSPATH=$(find $SR2_LIB -name 'tsr2*' -o -name 'spark-r2*' -o -name '*jedis*' -o -name 'commons*' -o -name 'jdeferred*' \\ -o -name 'geospark*' -o -name 'gt-*' | tr '\\n' ':') ############################################################################### # Driver DRIVER_MEMORY=6g DRIVER_CLASSPATH=$COMMON_CLASSPATH ############################################################################### # Execute EXECUTOR_MEMORY=2g EXECUTOR_CLASSPATH=$COMMON_CLASSPATH ############################################################################### # Thrift Server logs EVENT_LOG_ENABLED=false EVENT_LOG_DIR=/nvdrive0/thriftserver-event-logs EVENT_LOG_ROLLING_DIR=/nvdrive0/thriftserver-event-logs-rolling EVENT_LOG_SAVE_MIN=60 EXTRACTED_EVENT_LOG_SAVE_DAY=5 SPARK_LOG_SAVE_MIN=2000 ############## ######################## # Thrift Name cluster_id=$(echo $SR2_HOME | awk -F \"cluster_\" '{print $2}' | awk -F '/' '{print $1}') host=$(hostname) THRIFT_NAME=\"ThriftServer_${host}_${cluster_id}\" ######################## ############################################################################### # AGGREGATION PUSHDOWN AGG_PUSHDOWN=true ###############################################################################","title":"3. thrifserver"},{"location":"cli-conf/#4-sync","text":"With sync {IP address} or sync {hostname} command, you can load the configurations of all clusters from the remote server to localhost. matthew@lightningdb:21> sync fbg04 Localhost already has the information on the cluster 21. Do you want to overwrite? (y/n) [n] y Localhost already has the information on the cluster 20. Do you want to overwrite? (y/n) [n] n Importing cluster complete...","title":"4. sync"},{"location":"cli-thriftserver/","text":"If you want to see the list of Thrift Server commands, use the the thriftserver command without any option. NAME ltcli thriftserver SYNOPSIS ltcli thriftserver COMMAND COMMANDS COMMAND is one of the following: beeline Connect to thriftserver command line monitor Show thriftserver log restart Thriftserver restart start Start thriftserver stop Stop thriftserver 1. Thriftserver beeline \u00b6 Connect to the thrift server ec2-user@lightningdb:1> thriftserver beeline Connecting... 
Connecting to jdbc:hive2://localhost:13000 19/11/19 04:45:18 INFO jdbc.Utils: Supplied authorities: localhost:13000 19/11/19 04:45:18 INFO jdbc.Utils: Resolved authority: localhost:13000 19/11/19 04:45:18 INFO jdbc.HiveConnection: Will try to open client transport with JDBC Uri: jdbc:hive2://localhost:13000 Connected to: Spark SQL (version 2.3.1) Driver: Hive JDBC (version 1.2.1.spark2) Transaction isolation: TRANSACTION_REPEATABLE_READ Beeline version 1.2.1.spark2 by Apache Hive 0: jdbc:hive2://localhost:13000> show tables; +-----------+------------+--------------+--+ | database | tableName | isTemporary | +-----------+------------+--------------+--+ +-----------+------------+--------------+--+ No rows selected (0.55 seconds) Default value of db url to connect is jdbc:hive2://$HIVE_HOST:$HIVE_PORT You can modify $HIVE_HOST and $HIVE_PORT by the command conf thriftserver 2. Thriftserver monitor \u00b6 You can view the logs of the thrift server in real-time. ec2-user@lightningdb:1> thriftserver monitor Press Ctrl-C for exit. 19/11/19 04:43:33 INFO storage.BlockManagerMasterEndpoint: Registering block manager ip-172-31-39-147.ap-northeast-2.compute.internal:35909 with 912.3 MB RAM, BlockManagerId(4, ip-172-31-39-147.ap-northeast-2.compute.internal, 35909, None) 19/11/19 04:43:33 INFO cluster.YarnSchedulerBackend$YarnDriverEndpoint: Registered executor NettyRpcEndpointRef(spark-client://Executor) (172.31.39.147:53604) with ID 5 19/11/19 04:43:33 INFO storage.BlockManagerMasterEndpoint: Registering block manager ... 3. Thriftserver restart \u00b6 Restart the thrift server. ec2-user@lightningdb:1> thriftserver restart no org.apache.spark.sql.hive.thriftserver.HiveThriftServer2 to stop starting org.apache.spark.sql.hive.thriftserver.HiveThriftServer2, logging to /opt/spark/logs/spark-ec2-user-org.apache.spark.sql.hive.thriftserver.HiveThriftServer2-1-ip-172-31-39-147.ap-northeast-2.compute.internal.out 4. Start thriftserver \u00b6 Run the thrift server. ec2-user@lightningdb:1> thriftserver start starting org.apache.spark.sql.hive.thriftserver.HiveThriftServer2, logging to /opt/spark/logs/spark-ec2-user-org.apache.spark.sql.hive.thriftserver.HiveThriftServer2-1-ip-172-31-39-147.ap-northeast-2.compute.internal.out You can view the logs through the command monitor . 5. Stop thriftserver \u00b6 Shut down the thrift server. ec2-user@lightningdb:1> thriftserver stop stopping org.apache.spark.sql.hive.thriftserver.HiveThriftServer2","title":"Thriftserver"},{"location":"cli-thriftserver/#1-thriftserver-beeline","text":"Connect to the thrift server ec2-user@lightningdb:1> thriftserver beeline Connecting... Connecting to jdbc:hive2://localhost:13000 19/11/19 04:45:18 INFO jdbc.Utils: Supplied authorities: localhost:13000 19/11/19 04:45:18 INFO jdbc.Utils: Resolved authority: localhost:13000 19/11/19 04:45:18 INFO jdbc.HiveConnection: Will try to open client transport with JDBC Uri: jdbc:hive2://localhost:13000 Connected to: Spark SQL (version 2.3.1) Driver: Hive JDBC (version 1.2.1.spark2) Transaction isolation: TRANSACTION_REPEATABLE_READ Beeline version 1.2.1.spark2 by Apache Hive 0: jdbc:hive2://localhost:13000> show tables; +-----------+------------+--------------+--+ | database | tableName | isTemporary | +-----------+------------+--------------+--+ +-----------+------------+--------------+--+ No rows selected (0.55 seconds) Default value of db url to connect is jdbc:hive2://$HIVE_HOST:$HIVE_PORT You can modify $HIVE_HOST and $HIVE_PORT by the command conf thriftserver","title":"1. 
Thriftserver beeline"},{"location":"cli-thriftserver/#2-thriftserver-monitor","text":"You can view the logs of the thrift server in real-time. ec2-user@lightningdb:1> thriftserver monitor Press Ctrl-C for exit. 19/11/19 04:43:33 INFO storage.BlockManagerMasterEndpoint: Registering block manager ip-172-31-39-147.ap-northeast-2.compute.internal:35909 with 912.3 MB RAM, BlockManagerId(4, ip-172-31-39-147.ap-northeast-2.compute.internal, 35909, None) 19/11/19 04:43:33 INFO cluster.YarnSchedulerBackend$YarnDriverEndpoint: Registered executor NettyRpcEndpointRef(spark-client://Executor) (172.31.39.147:53604) with ID 5 19/11/19 04:43:33 INFO storage.BlockManagerMasterEndpoint: Registering block manager ...","title":"2. Thriftserver monitor"},{"location":"cli-thriftserver/#3-thriftserver-restart","text":"Restart the thrift server. ec2-user@lightningdb:1> thriftserver restart no org.apache.spark.sql.hive.thriftserver.HiveThriftServer2 to stop starting org.apache.spark.sql.hive.thriftserver.HiveThriftServer2, logging to /opt/spark/logs/spark-ec2-user-org.apache.spark.sql.hive.thriftserver.HiveThriftServer2-1-ip-172-31-39-147.ap-northeast-2.compute.internal.out","title":"3. Thriftserver restart"},{"location":"cli-thriftserver/#4-start-thriftserver","text":"Run the thrift server. ec2-user@lightningdb:1> thriftserver start starting org.apache.spark.sql.hive.thriftserver.HiveThriftServer2, logging to /opt/spark/logs/spark-ec2-user-org.apache.spark.sql.hive.thriftserver.HiveThriftServer2-1-ip-172-31-39-147.ap-northeast-2.compute.internal.out You can view the logs through the command monitor .","title":"4. Start thriftserver"},{"location":"cli-thriftserver/#5-stop-thriftserver","text":"Shut down the thrift server. ec2-user@lightningdb:1> thriftserver stop stopping org.apache.spark.sql.hive.thriftserver.HiveThriftServer2","title":"5. Stop thriftserver"},{"location":"cli-version/","text":"1. LTCLI \u00b6 You can check the version of LTCLI tool. $ ltcli --version ltcli version 1.1.5 2. Lightning DB \u00b6 You can check the version of Lightning DB that is deployed in each cluster. $ ltcli Cluster '21' selected. matthew@lightningdb:21> cluster version - build date : 20200820-173819 - branch: release.flashbase_v1.2.3 - last commit-id: 45814d - output binary: lightningdb.release.release.flashbase_v1.2.3.45814d.bin matthew@lightningdb:21>","title":"Version"},{"location":"cli-version/#1-ltcli","text":"You can check the version of LTCLI tool. $ ltcli --version ltcli version 1.1.5","title":"1. LTCLI"},{"location":"cli-version/#2-lightning-db","text":"You can check the version of Lightning DB that is deployed in each cluster. $ ltcli Cluster '21' selected. matthew@lightningdb:21> cluster version - build date : 20200820-173819 - branch: release.flashbase_v1.2.3 - last commit-id: 45814d - output binary: lightningdb.release.release.flashbase_v1.2.3.45814d.bin matthew@lightningdb:21>","title":"2. Lightning DB"},{"location":"command-line-interface/","text":"Note Command Line Interface(CLI) of LightningDB supports not only deploy and start command but also many commands to access and manipulate data in LightningDB. 1. Cluster Commands \u00b6 If you want to see the list of cluster commands, use the cluster command without any option. 
ec2-user@lightningdb:1> cluster NAME ltcli cluster - This is cluster command SYNOPSIS ltcli cluster COMMAND DESCRIPTION This is cluster command COMMANDS COMMAND is one of the following: add_slave Add slaves to cluster additionally clean Clean cluster configure create Create cluster ls Check cluster list rebalance Rebalance restart Restart redist cluster rowcount Query and show cluster row count start Start cluster stop Stop cluster use Change selected cluster (1) Cluster configure redis-{port}.conf is generated with using redis-{master/slave}.conf.template and redis.properties files. > cluster configure (2) Cluster start Backup logs of the previous master/slave nodes All log files of previous master/slave nodes in ${SR2_HOME}/logs/redis/ 1 will be moved to ${SR2_HOME}/logs/redis/backup/ . Generate directories to save data Save aof and rdb files of redis-server and RocksDB files in ${SR2_REDIS_DATA} Start redis-server process Start master and slave redis-server with ${SR2_HOME}/conf/redis/redis-{port}.conf file Log files will be saved in ${SR2_HOME}/logs/redis/ ec2-user@lightningdb:1> cluster start Check status of hosts... OK Check cluster exist... - 127.0.0.1 OK Backup redis master log in each MASTER hosts... - 127.0.0.1 Generate redis configuration files for master hosts sync conf +-----------+--------+ | HOST | STATUS | +-----------+--------+ | 127.0.0.1 | OK | +-----------+--------+ Starting master nodes : 127.0.0.1 : 18100|18101|18102|18103|18104 ... Wait until all redis process up... cur: 5 / total: 5 Complete all redis process up Errors ErrorCode 11 Redis-server(master) process with the same port is already running. To resolve this error, use cluster stop or kill {pid of the process} . $ cluster start ... ... [ErrorCode 11] Fail to start... Must be checked running MASTER redis processes! We estimate that redis process is . ErrorCode 12 Redis-server(slave) process with the same port is already running. To resolve this error, use cluster stop or kill {pid of the process} . $ cluster start ... [ErrorCode 12] Fail to start... Must be checked running SLAVE redis processes! We estimate that redis process is . Conf file not exist Conf file is not found. To resove this error, use cluster configure and then cluster start . $ cluster start ... FileNotExistError: ${SR2_HOME}/conf/redis/redis-{port}.conf Max try error \u200b For detail information, please check the log files. $ cluster start ... ClusterRedisError: Fail to start redis: max try exceed Recommendation Command: 'monitor' (3) Cluster create After checking the information of the cluster, create a cluster of LightningDB. Case 1) When redis-server processes are running, create a cluster only. ec2-user@lightningdb:1>cluster create Check status of hosts... OK >>> Creating cluster +-----------+-------+--------+ | HOST | PORT | TYPE | +-----------+-------+--------+ | 127.0.0.1 | 18100 | MASTER | | 127.0.0.1 | 18101 | MASTER | | 127.0.0.1 | 18102 | MASTER | | 127.0.0.1 | 18103 | MASTER | | 127.0.0.1 | 18104 | MASTER | +-----------+-------+--------+ replicas: 0 Do you want to proceed with the create according to the above information? (y/n) y Cluster meet... - 127.0.0.1:18100 - 127.0.0.1:18103 - 127.0.0.1:18104 - 127.0.0.1:18101 - 127.0.0.1:18102 Adding slots... - 127.0.0.1:18100, 3280 - 127.0.0.1:18103, 3276 - 127.0.0.1:18104, 3276 - 127.0.0.1:18101, 3276 - 127.0.0.1:18102, 3276 Check cluster state and asign slot... Ok create cluster complete. 
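Once create cluster complete. is printed, the slot assignment can be verified right away with the helpers described in (11) below; a minimal check, with the output trimmed to the fields of interest and the values assumed for this five-master example: ec2-user@lightningdb:1> cli cluster info cluster_state:ok cluster_slots_assigned:16384 cluster_known_nodes:5 cluster_size:5 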
Case 2) When redis-server processes are not running, create a cluster after launching redis-server processes with cluster start command. ec2-user@lightningdb:4>cluster create Check status of hosts... OK Backup redis master log in each MASTER hosts... - 127.0.0.1 create redis data directory in each MASTER hosts - 127.0.0.1 sync conf +-----------+--------+ | HOST | STATUS | +-----------+--------+ | 127.0.0.1 | OK | +-----------+--------+ OK Starting master nodes : 127.0.0.1 : 18100|18101|18102|18103|18104 ... Wait until all redis process up... cur: 5 / total: 5 Complete all redis process up >>> Creating cluster +-----------+-------+--------+ | HOST | PORT | TYPE | +-----------+-------+--------+ | 127.0.0.1 | 18100 | MASTER | | 127.0.0.1 | 18101 | MASTER | | 127.0.0.1 | 18102 | MASTER | | 127.0.0.1 | 18103 | MASTER | | 127.0.0.1 | 18104 | MASTER | +-----------+-------+--------+ replicas: 0 Do you want to proceed with the create according to the above information? (y/n) y Cluster meet... - 127.0.0.1:18103 - 127.0.0.1:18104 - 127.0.0.1:18101 - 127.0.0.1:18102 - 127.0.0.1:18100 Adding slots... - 127.0.0.1:18103, 3280 - 127.0.0.1:18104, 3276 - 127.0.0.1:18101, 3276 - 127.0.0.1:18102, 3276 - 127.0.0.1:18100, 3276 Check cluster state and asign slot... Ok create cluster complete. Errors When redis servers are not running, this error(Errno 111) will occur. To solve this error, use cluster start command previously. ec2-user@lightningdb:1>cluster create Check status of hosts... OK >>> Creating cluster +-----------+-------+--------+ | HOST | PORT | TYPE | +-----------+-------+--------+ | 127.0.0.1 | 18100 | MASTER | | 127.0.0.1 | 18101 | MASTER | | 127.0.0.1 | 18102 | MASTER | | 127.0.0.1 | 18103 | MASTER | | 127.0.0.1 | 18104 | MASTER | +-----------+-------+--------+ replicas: 0 Do you want to proceed with the create according to the above information? (y/n) y 127.0.0.1:18100 - [Errno 111] Connection refused (4) Cluster stop \u200bGracefully kill all redis-servers(master/slave) with SIGINT \u200b\u200b ec2-user@lightningdb:1> cluster stop Check status of hosts... OK Stopping master cluster of redis... cur: 5 / total: 5 cur: 0 / total: 5 Complete all redis process down Options Force to kill all redis-servers(master/slave) with SIGKILL --force (5) Cluster clean Remove conf files for redis-server and all data(aof, rdb, RocksDB) of LightningDB ec2-user@lightningdb:1> cluster clean Removing redis generated master configuration files - 127.0.0.1 Removing flash db directory, appendonly and dump.rdb files in master - 127.0.0.1 Removing master node configuration - 127.0.0.1 (6) Cluster restart\u200b Process cluster stop and then cluster start .\u200b\u200b Options Force to kill all redis-servers(master/slave) with SIGKILL and then start again. --force-stop Remove all data(aof, rdb, RocksDB, conf files) before starting again. --reset Process cluster create . This command should be called with --reset . --cluster (7) Cluster ls List the deployed clusters. ec2-user@lightningdb:2> cluster ls [1, 2] (8) Cluster use Change the cluster to use LTCLI. Use cluster use or c commands. ec2-user@lightningdb:2> cluster use 1 Cluster '1' selected. ec2-user@lightningdb:1> c 2 Cluster '2' selected. (9) Cluster add_slave Warning Before using the add-slave command, ingestion to master nodes should be stopped. After replication and sync between master and slave are completed, ingestion will be available again. You can add a slave to a cluster that is configured only with the master without redundancy. 
Create cluster only with masters Procedure for configuring the test environment. If cluster with the only masters already exists, go to the add slave info . Proceed with the deploy. Enter 0 in replicas as shown below when deploy. ec2-user@lightningdb:2> deploy 3 Select installer [ INSTALLER LIST ] (1) lightningdb.dev.master.5a6a38.bin Please enter the number, file path or url of the installer you want to use. you can also add file in list by copy to '$FBPATH/releases/' https://flashbase.s3.ap-northeast-2.amazonaws.com/lightningdb.release.master.5a6a38.bin Downloading lightningdb.release.master.5a6a38.bin [==================================================] 100% OK, lightningdb.release.master.5a6a38.bin Please type host list separated by comma(,) [127.0.0.1] OK, ['127.0.0.1'] How many masters would you like to create on each host? [5] OK, 5 Please type ports separate with comma(,) and use hyphen(-) for range. [18300-18304] OK, ['18300-18304'] How many replicas would you like to create on each master? [0] OK, 0 How many ssd would you like to use? [3] OK, 3 Type prefix of db path [~/sata_ssd/ssd_] OK, ~/sata_ssd/ssd_ +--------------+---------------------------------+ | NAME | VALUE | +--------------+---------------------------------+ | installer | lightningdb.dev.master.5a6a38.bin | | hosts | 127.0.0.1 | | master ports | 18300-18304 | | ssd count | 3 | | db path | ~/sata_ssd/ssd_ | +--------------+---------------------------------+ Do you want to proceed with the deploy accroding to the above information? (y/n) y Check status of hosts... +-----------+--------+ | HOST | STATUS | +-----------+--------+ | 127.0.0.1 | OK | +-----------+--------+ OK Checking for cluster exist... +-----------+--------+ | HOST | STATUS | +-----------+--------+ | 127.0.0.1 | CLEAN | +-----------+--------+ OK Transfer installer and execute... - 127.0.0.1 Sync conf... Complete to deploy cluster 3. Cluster '3' selected. When the deploy is complete, start and create the cluster. ec2-user@lightningdb:3> cluster start Check status of hosts... OK Check cluster exist... - 127.0.0.1 OK Backup redis master log in each MASTER hosts... - 127.0.0.1 create redis data directory in each MASTER hosts - 127.0.0.1 sync conf +-----------+--------+ | HOST | STATUS | +-----------+--------+ | 127.0.0.1 | OK | +-----------+--------+ OK Starting master nodes : 127.0.0.1 : 18300|18301|18302|18303|18304 ... Wait until all redis process up... cur: 5 / total: 5 Complete all redis process up ec2-user@lightningdb:3> cluster create Check status of hosts... OK >>> Creating cluster +-----------+-------+--------+ | HOST | PORT | TYPE | +-----------+-------+--------+ | 127.0.0.1 | 18300 | MASTER | | 127.0.0.1 | 18301 | MASTER | | 127.0.0.1 | 18302 | MASTER | | 127.0.0.1 | 18303 | MASTER | | 127.0.0.1 | 18304 | MASTER | +-----------+-------+--------+ replicas: 0 Do you want to proceed with the create according to the above information? (y/n) y Cluster meet... - 127.0.0.1:18300 - 127.0.0.1:18303 - 127.0.0.1:18304 - 127.0.0.1:18301 - 127.0.0.1:18302 Adding slots... - 127.0.0.1:18300, 3280 - 127.0.0.1:18303, 3276 - 127.0.0.1:18304, 3276 - 127.0.0.1:18301, 3276 - 127.0.0.1:18302, 3276 Check cluster state and asign slot... Ok create cluster complete. ec2-user@lightningdb:3> Add slave info Open the conf file. ec2-user@lightningdb:3> conf cluster You can modify redis.properties by entering the command as shown above. 
#!/bin/bash ## Master hosts and ports export SR2_REDIS_MASTER_HOSTS=( \"127.0.0.1\" ) export SR2_REDIS_MASTER_PORTS=( $(seq 18300 18304) ) ## Slave hosts and ports (optional) [[export]] SR2_REDIS_SLAVE_HOSTS=( \"127.0.0.1\" ) [[export]] SR2_REDIS_SLAVE_PORTS=( $(seq 18600 18609) ) ## only single data directory in redis db and flash db ## Must exist below variables; 'SR2_REDIS_DATA', 'SR2_REDIS_DB_PATH' and 'SR2_FLASH_DB_PATH' [[export]] SR2_REDIS_DATA=\"/nvdrive0/nvkvs/redis\" [[export]] SR2_REDIS_DB_PATH=\"/nvdrive0/nvkvs/redis\" [[export]] SR2_FLASH_DB_PATH=\"/nvdrive0/nvkvs/flash\" ## multiple data directory in redis db and flash db export SSD_COUNT=3 [[export]] HDD_COUNT=3 export SR2_REDIS_DATA=\"~/sata_ssd/ssd_\" export SR2_REDIS_DB_PATH=\"~/sata_ssd/ssd_\" export SR2_FLASH_DB_PATH=\"~/sata_ssd/ssd_\" ####################################################### # Example : only SSD data directory [[export]] SSD_COUNT=3 [[export]] SR2_REDIS_DATA=\"/ssd_\" [[export]] SR2_REDIS_DB_PATH=\"/ssd_\" [[export]] SR2_FLASH_DB_PATH=\"/ssd_\" ####################################################### Modify SR2_REDIS_SLAVE_HOSTS and SR2_REDIS_SLAVE_PORTS as shown below. #!/bin/bash ## Master hosts and ports export SR2_REDIS_MASTER_HOSTS=( \"127.0.0.1\" ) export SR2_REDIS_MASTER_PORTS=( $(seq 18300 18304) ) ## Slave hosts and ports (optional) export SR2_REDIS_SLAVE_HOSTS=( \"127.0.0.1\" ) export SR2_REDIS_SLAVE_PORTS=( $(seq 18350 18354) ) ## only single data directory in redis db and flash db ## Must exist below variables; 'SR2_REDIS_DATA', 'SR2_REDIS_DB_PATH' and 'SR2_FLASH_DB_PATH' [[export]] SR2_REDIS_DATA=\"/nvdrive0/nvkvs/redis\" [[export]] SR2_REDIS_DB_PATH=\"/nvdrive0/nvkvs/redis\" [[export]] SR2_FLASH_DB_PATH=\"/nvdrive0/nvkvs/flash\" ## multiple data directory in redis db and flash db export SSD_COUNT=3 [[export]] HDD_COUNT=3 export SR2_REDIS_DATA=\"~/sata_ssd/ssd_\" export SR2_REDIS_DB_PATH=\"~/sata_ssd/ssd_\" export SR2_FLASH_DB_PATH=\"~/sata_ssd/ssd_\" ####################################################### # Example : only SSD data directory [[export]] SSD_COUNT=3 [[export]] SR2_REDIS_DATA=\"/ssd_\" [[export]] SR2_REDIS_DB_PATH=\"/ssd_\" [[export]] SR2_FLASH_DB_PATH=\"/ssd_\" ####################################################### Save the modification and exit. ec2-user@lightningdb:3> conf cluster Check status of hosts... OK sync conf OK Complete edit Execute cluster add-slave command ec2-user@lightningdb:3> cluster add-slave Check status of hosts... OK Check cluster exist... - 127.0.0.1 OK clean redis conf, node conf, db data of master clean redis conf, node conf, db data of slave - 127.0.0.1 Backup redis slave log in each SLAVE hosts... - 127.0.0.1 create redis data directory in each SLAVE hosts - 127.0.0.1 sync conf OK Starting slave nodes : 127.0.0.1 : 18350|18351|18352|18353|18354 ... Wait until all redis process up... cur: 10 / total: 10 Complete all redis process up replicate [M] 127.0.0.1 18300 - [S] 127.0.0.1 18350 replicate [M] 127.0.0.1 18301 - [S] 127.0.0.1 18351 replicate [M] 127.0.0.1 18302 - [S] 127.0.0.1 18352 replicate [M] 127.0.0.1 18303 - [S] 127.0.0.1 18353 replicate [M] 127.0.0.1 18304 - [S] 127.0.0.1 18354 5 / 5 meet complete. 
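Before resuming ingestion to the masters (see the warning at the beginning of (9)), it helps to confirm that every redis process responds. A minimal check with the cli ping helper from (11) below, where 10/10 assumes the five masters plus five slaves of this example: ec2-user@lightningdb:3> cli ping --all alive redis 10/10 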
Check configuration information ec2-user@lightningdb:3> cli cluster nodes 0549ec03031213f95121ceff6c9c13800aef848c 127.0.0.1:18303 master - 0 1574132251126 3 connected 3280-6555 1b09519d37ebb1c09095158b4f1c9f318ddfc747 127.0.0.1:18352 slave a6a8013cf0032f0f36baec3162122b3d993dd2c8 0 1574132251025 6 connected c7dc4815e24054104dff61cac6b13256a84ac4ae 127.0.0.1:18353 slave 0549ec03031213f95121ceff6c9c13800aef848c 0 1574132251126 3 connected 0ab96cb79165ddca7d7134f80aea844bd49ae2e1 127.0.0.1:18351 slave 7e97f8a8799e1e28feee630b47319e6f5e1cfaa7 0 1574132250724 4 connected 7e97f8a8799e1e28feee630b47319e6f5e1cfaa7 127.0.0.1:18301 master - 0 1574132250524 4 connected 9832-13107 e67005a46984445e559a1408dd0a4b24a8c92259 127.0.0.1:18304 master - 0 1574132251126 5 connected 6556-9831 a6a8013cf0032f0f36baec3162122b3d993dd2c8 127.0.0.1:18302 master - 0 1574132251126 2 connected 13108-16383 492cdf4b1dedab5fb94e7129da2a0e05f6c46c4f 127.0.0.1:18350 slave 83b7ef98b80a05a4ee795ae6b399c8cde54ad04e 0 1574132251126 6 connected f9f7fcee9009f25618e63d2771ee2529f814c131 127.0.0.1:18354 slave e67005a46984445e559a1408dd0a4b24a8c92259 0 1574132250724 5 connected 83b7ef98b80a05a4ee795ae6b399c8cde54ad04e 127.0.0.1:18300 myself,master - 0 1574132250000 1 connected 0-3279 (10) Cluster rowcount Check the count of records that are stored in the cluster. ec2-user@lightningdb:1> cluster rowcount 0 (11) Check the status of cluster With the following commands, you can check the status of the cluster. Send PING ec2-user@lightningdb:1> cli ping --all alive redis 10/10 If a node does not reply, the fail node will be displayed like below. +-------+-----------------+--------+ | TYPE | ADDR | RESULT | +-------+-----------------+--------+ | Slave | 127.0.0.1:18352 | FAIL | +-------+-----------------+--------+ alive redis 9/10 Check the status of the cluster ec2-user@lightningdb:1> cli cluster info cluster_state:ok cluster_slots_assigned:16384 cluster_slots_ok:16384 cluster_slots_pfail:0 cluster_slots_fail:0 cluster_known_nodes:5 cluster_size:5 cluster_current_epoch:4 cluster_my_epoch:2 cluster_stats_messages_ping_sent:12 cluster_stats_messages_pong_sent:14 cluster_stats_messages_sent:26 cluster_stats_messages_ping_received:10 cluster_stats_messages_pong_received:12 cluster_stats_messages_meet_received:4 cluster_stats_messages_received:26 Check the list of the nodes those are organizing the cluster. ec2-user@lightningdb:1> cli cluster nodes 559af5e90c3f2c92f19c927c29166c268d938e8f 127.0.0.1:18104 master - 0 1574127926000 4 connected 6556-9831 174e2a62722273fb83814c2f12e2769086c3d185 127.0.0.1:18101 myself,master - 0 1574127925000 3 connected 9832-13107 35ab4d3f7f487c5332d7943dbf4b20d5840053ea 127.0.0.1:18100 master - 0 1574127926000 1 connected 0-3279 f39ed05ace18e97f74c745636ea1d171ac1d456f 127.0.0.1:18103 master - 0 1574127927172 0 connected 3280-6555 9fd612b86a9ce1b647ba9170b8f4a8bfa5c875fc 127.0.0.1:18102 master - 0 1574127926171 2 connected 13108-16383 (12) Cluster tree User can check the status of master nodes and slaves and show which master and slave nodes are linked. 
ec2-user@lightningdb:9> cluster tree 127.0.0.1:18900(connected) |__ 127.0.0.1:18950(connected) 127.0.0.1:18901(connected) |__ 127.0.0.1:18951(connected) 127.0.0.1:18902(connected) |__ 127.0.0.1:18952(connected) 127.0.0.1:18903(connected) |__ 127.0.0.1:18953(connected) 127.0.0.1:18904(connected) |__ 127.0.0.1:18954(connected) 127.0.0.1:18905(connected) |__ 127.0.0.1:18955(connected) 127.0.0.1:18906(connected) |__ 127.0.0.1:18956(connected) (13) Cluster failover If a master node is killed, its slave node will automatically promote after 'cluster-node-time' 2 . User can promote the slave node immediately by using the 'cluster failover' command. Step 1) Check the status of the cluster In this case, '127.0.0.1:18902' node is killed. ec2-user@lightningdb:9> cluster tree 127.0.0.1:18900(connected) |__ 127.0.0.1:18950(connected) 127.0.0.1:18901(connected) |__ 127.0.0.1:18951(connected) 127.0.0.1:18902(disconnected) <--- Killed! |__ 127.0.0.1:18952(connected) 127.0.0.1:18903(connected) |__ 127.0.0.1:18953(connected) 127.0.0.1:18904(connected) |__ 127.0.0.1:18954(connected) 127.0.0.1:18905(connected) |__ 127.0.0.1:18955(connected) 127.0.0.1:18906(connected) |__ 127.0.0.1:18956(connected) Step 2) Do failover with 'cluster failover' command ec2-user@lightningdb:9> cluster failover failover 127.0.0.1:18952 for 127.0.0.1:18902 OK ec2-user@lightningdb:9> cluster tree 127.0.0.1:18900(connected) |__ 127.0.0.1:18950(connected) 127.0.0.1:18901(connected) |__ 127.0.0.1:18951(connected) 127.0.0.1:18902(disconnected) <--- Killed! 127.0.0.1:18903(connected) |__ 127.0.0.1:18953(connected) 127.0.0.1:18904(connected) |__ 127.0.0.1:18954(connected) 127.0.0.1:18905(connected) |__ 127.0.0.1:18955(connected) 127.0.0.1:18906(connected) |__ 127.0.0.1:18956(connected) 127.0.0.1:18952(connected) <--- Promoted to master! (14) Cluster failback With 'cluster failback' command, the killed node is restarted and added to the cluster as the slave node. ec2-user@lightningdb:9> cluster failback run 127.0.0.1:18902 ec2-user@lightningdb:9> cluster tree 127.0.0.1:18900(connected) |__ 127.0.0.1:18950(connected) 127.0.0.1:18901(connected) |__ 127.0.0.1:18951(connected) 127.0.0.1:18903(connected) |__ 127.0.0.1:18953(connected) 127.0.0.1:18904(connected) |__ 127.0.0.1:18954(connected) 127.0.0.1:18905(connected) |__ 127.0.0.1:18955(connected) 127.0.0.1:18906(connected) |__ 127.0.0.1:18956(connected) 127.0.0.1:18952(connected) <--- Promoted to master! |__ 127.0.0.1:18902(connected) <--- Failbacked. Now this node is slave! 2. Thrift Server Commands \u00b6 If you want to see the list of Thrift Server commands, use the the thriftserver command without any option. NAME ltcli thriftserver SYNOPSIS ltcli thriftserver COMMAND COMMANDS COMMAND is one of the following: beeline Connect to thriftserver command line monitor Show thriftserver log restart Thriftserver restart start Start thriftserver stop Stop thriftserver (1) Thriftserver beeline Connect to the thrift server ec2-user@lightningdb:1> thriftserver beeline Connecting... 
Connecting to jdbc:hive2://localhost:13000 19/11/19 04:45:18 INFO jdbc.Utils: Supplied authorities: localhost:13000 19/11/19 04:45:18 INFO jdbc.Utils: Resolved authority: localhost:13000 19/11/19 04:45:18 INFO jdbc.HiveConnection: Will try to open client transport with JDBC Uri: jdbc:hive2://localhost:13000 Connected to: Spark SQL (version 2.3.1) Driver: Hive JDBC (version 1.2.1.spark2) Transaction isolation: TRANSACTION_REPEATABLE_READ Beeline version 1.2.1.spark2 by Apache Hive 0: jdbc:hive2://localhost:13000> show tables; +-----------+------------+--------------+--+ | database | tableName | isTemporary | +-----------+------------+--------------+--+ +-----------+------------+--------------+--+ No rows selected (0.55 seconds) Default value of db url to connect is jdbc:hive2://$HIVE_HOST:$HIVE_PORT You can modify $HIVE_HOST and $HIVE_PORT by the command conf thriftserver (2) Thriftserver monitor You can view the logs of the thrift server in real-time. ec2-user@lightningdb:1> thriftserver monitor Press Ctrl-C for exit. 19/11/19 04:43:33 INFO storage.BlockManagerMasterEndpoint: Registering block manager ip-172-31-39-147.ap-northeast-2.compute.internal:35909 with 912.3 MB RAM, BlockManagerId(4, ip-172-31-39-147.ap-northeast-2.compute.internal, 35909, None) 19/11/19 04:43:33 INFO cluster.YarnSchedulerBackend$YarnDriverEndpoint: Registered executor NettyRpcEndpointRef(spark-client://Executor) (172.31.39.147:53604) with ID 5 19/11/19 04:43:33 INFO storage.BlockManagerMasterEndpoint: Registering block manager ... (3) Thriftserver restart Restart the thrift server. ec2-user@lightningdb:1> thriftserver restart no org.apache.spark.sql.hive.thriftserver.HiveThriftServer2 to stop starting org.apache.spark.sql.hive.thriftserver.HiveThriftServer2, logging to /opt/spark/logs/spark-ec2-user-org.apache.spark.sql.hive.thriftserver.HiveThriftServer2-1-ip-172-31-39-147.ap-northeast-2.compute.internal.out (4) Start thriftserver Run the thrift server. ec2-user@lightningdb:1> thriftserver start starting org.apache.spark.sql.hive.thriftserver.HiveThriftServer2, logging to /opt/spark/logs/spark-ec2-user-org.apache.spark.sql.hive.thriftserver.HiveThriftServer2-1-ip-172-31-39-147.ap-northeast-2.compute.internal.out You can view the logs through the command monitor . (5) Stop thriftserver Shut down the thrift server. 
ec2-user@lightningdb:1> thriftserver stop stopping org.apache.spark.sql.hive.thriftserver.HiveThriftServer2 (6) Conf thriftserver ec2-user@lightningdb:1> conf thriftserver #!/bin/bash ############################################################################### # Common variables SPARK_CONF=${SPARK_CONF:-$SPARK_HOME/conf} SPARK_BIN=${SPARK_BIN:-$SPARK_HOME/bin} SPARK_SBIN=${SPARK_SBIN:-$SPARK_HOME/sbin} SPARK_LOG=${SPARK_LOG:-$SPARK_HOME/logs} SPARK_METRICS=${SPARK_CONF}/metrics.properties SPARK_UI_PORT=${SPARK_UI_PORT:-14050} EXECUTERS=12 EXECUTER_CORES=32 HIVE_METASTORE_URL='' HIVE_HOST=${HIVE_HOST:-localhost} HIVE_PORT=${HIVE_PORT:-13000} COMMON_CLASSPATH=$(find $SR2_LIB -name 'tsr2*' -o -name 'spark-r2*' -o -name '*jedis*' -o -name 'commons*' -o -name 'jdeferred*' \\ -o -name 'geospark*' -o -name 'gt-*' | tr '\\n' ':') ############################################################################### # Driver DRIVER_MEMORY=6g DRIVER_CLASSPATH=$COMMON_CLASSPATH ############################################################################### # Execute EXECUTOR_MEMORY=2g EXECUTOR_CLASSPATH=$COMMON_CLASSPATH ############################################################################### # Thrift Server logs EVENT_LOG_ENABLED=false EVENT_LOG_DIR=/nvdrive0/thriftserver-event-logs EVENT_LOG_ROLLING_DIR=/nvdrive0/thriftserver-event-logs-rolling EVENT_LOG_SAVE_MIN=60 EXTRACTED_EVENT_LOG_SAVE_DAY=5 SPARK_LOG_SAVE_MIN=2000 ############## If user types 'cfc 1', ${SR2_HOME} will be '~/tsr2/cluster_1/tsr2-assembly-1.0.0-SNAPSHOT'. \u21a9 'cluster-node-time' can be set with using 'config set' command. Its default time is 1200,000 msec. \u21a9","title":"Command line interface"},{"location":"command-line-interface/#1-cluster-commands","text":"If you want to see the list of cluster commands, use the cluster command without any option. ec2-user@lightningdb:1> cluster NAME ltcli cluster - This is cluster command SYNOPSIS ltcli cluster COMMAND DESCRIPTION This is cluster command COMMANDS COMMAND is one of the following: add_slave Add slaves to cluster additionally clean Clean cluster configure create Create cluster ls Check cluster list rebalance Rebalance restart Restart redist cluster rowcount Query and show cluster row count start Start cluster stop Stop cluster use Change selected cluster (1) Cluster configure redis-{port}.conf is generated with using redis-{master/slave}.conf.template and redis.properties files. > cluster configure (2) Cluster start Backup logs of the previous master/slave nodes All log files of previous master/slave nodes in ${SR2_HOME}/logs/redis/ 1 will be moved to ${SR2_HOME}/logs/redis/backup/ . Generate directories to save data Save aof and rdb files of redis-server and RocksDB files in ${SR2_REDIS_DATA} Start redis-server process Start master and slave redis-server with ${SR2_HOME}/conf/redis/redis-{port}.conf file Log files will be saved in ${SR2_HOME}/logs/redis/ ec2-user@lightningdb:1> cluster start Check status of hosts... OK Check cluster exist... - 127.0.0.1 OK Backup redis master log in each MASTER hosts... - 127.0.0.1 Generate redis configuration files for master hosts sync conf +-----------+--------+ | HOST | STATUS | +-----------+--------+ | 127.0.0.1 | OK | +-----------+--------+ Starting master nodes : 127.0.0.1 : 18100|18101|18102|18103|18104 ... Wait until all redis process up... cur: 5 / total: 5 Complete all redis process up Errors ErrorCode 11 Redis-server(master) process with the same port is already running. 
To resolve this error, use cluster stop or kill {pid of the process} . $ cluster start ... ... [ErrorCode 11] Fail to start... Must be checked running MASTER redis processes! We estimate that redis process is . ErrorCode 12 Redis-server(slave) process with the same port is already running. To resolve this error, use cluster stop or kill {pid of the process} . $ cluster start ... [ErrorCode 12] Fail to start... Must be checked running SLAVE redis processes! We estimate that redis process is . Conf file not exist Conf file is not found. To resove this error, use cluster configure and then cluster start . $ cluster start ... FileNotExistError: ${SR2_HOME}/conf/redis/redis-{port}.conf Max try error \u200b For detail information, please check the log files. $ cluster start ... ClusterRedisError: Fail to start redis: max try exceed Recommendation Command: 'monitor' (3) Cluster create After checking the information of the cluster, create a cluster of LightningDB. Case 1) When redis-server processes are running, create a cluster only. ec2-user@lightningdb:1>cluster create Check status of hosts... OK >>> Creating cluster +-----------+-------+--------+ | HOST | PORT | TYPE | +-----------+-------+--------+ | 127.0.0.1 | 18100 | MASTER | | 127.0.0.1 | 18101 | MASTER | | 127.0.0.1 | 18102 | MASTER | | 127.0.0.1 | 18103 | MASTER | | 127.0.0.1 | 18104 | MASTER | +-----------+-------+--------+ replicas: 0 Do you want to proceed with the create according to the above information? (y/n) y Cluster meet... - 127.0.0.1:18100 - 127.0.0.1:18103 - 127.0.0.1:18104 - 127.0.0.1:18101 - 127.0.0.1:18102 Adding slots... - 127.0.0.1:18100, 3280 - 127.0.0.1:18103, 3276 - 127.0.0.1:18104, 3276 - 127.0.0.1:18101, 3276 - 127.0.0.1:18102, 3276 Check cluster state and asign slot... Ok create cluster complete. Case 2) When redis-server processes are not running, create a cluster after launching redis-server processes with cluster start command. ec2-user@lightningdb:4>cluster create Check status of hosts... OK Backup redis master log in each MASTER hosts... - 127.0.0.1 create redis data directory in each MASTER hosts - 127.0.0.1 sync conf +-----------+--------+ | HOST | STATUS | +-----------+--------+ | 127.0.0.1 | OK | +-----------+--------+ OK Starting master nodes : 127.0.0.1 : 18100|18101|18102|18103|18104 ... Wait until all redis process up... cur: 5 / total: 5 Complete all redis process up >>> Creating cluster +-----------+-------+--------+ | HOST | PORT | TYPE | +-----------+-------+--------+ | 127.0.0.1 | 18100 | MASTER | | 127.0.0.1 | 18101 | MASTER | | 127.0.0.1 | 18102 | MASTER | | 127.0.0.1 | 18103 | MASTER | | 127.0.0.1 | 18104 | MASTER | +-----------+-------+--------+ replicas: 0 Do you want to proceed with the create according to the above information? (y/n) y Cluster meet... - 127.0.0.1:18103 - 127.0.0.1:18104 - 127.0.0.1:18101 - 127.0.0.1:18102 - 127.0.0.1:18100 Adding slots... - 127.0.0.1:18103, 3280 - 127.0.0.1:18104, 3276 - 127.0.0.1:18101, 3276 - 127.0.0.1:18102, 3276 - 127.0.0.1:18100, 3276 Check cluster state and asign slot... Ok create cluster complete. Errors When redis servers are not running, this error(Errno 111) will occur. To solve this error, use cluster start command previously. ec2-user@lightningdb:1>cluster create Check status of hosts... 
OK >>> Creating cluster +-----------+-------+--------+ | HOST | PORT | TYPE | +-----------+-------+--------+ | 127.0.0.1 | 18100 | MASTER | | 127.0.0.1 | 18101 | MASTER | | 127.0.0.1 | 18102 | MASTER | | 127.0.0.1 | 18103 | MASTER | | 127.0.0.1 | 18104 | MASTER | +-----------+-------+--------+ replicas: 0 Do you want to proceed with the create according to the above information? (y/n) y 127.0.0.1:18100 - [Errno 111] Connection refused (4) Cluster stop \u200bGracefully kill all redis-servers(master/slave) with SIGINT \u200b\u200b ec2-user@lightningdb:1> cluster stop Check status of hosts... OK Stopping master cluster of redis... cur: 5 / total: 5 cur: 0 / total: 5 Complete all redis process down Options Force to kill all redis-servers(master/slave) with SIGKILL --force (5) Cluster clean Remove conf files for redis-server and all data(aof, rdb, RocksDB) of LightningDB ec2-user@lightningdb:1> cluster clean Removing redis generated master configuration files - 127.0.0.1 Removing flash db directory, appendonly and dump.rdb files in master - 127.0.0.1 Removing master node configuration - 127.0.0.1 (6) Cluster restart\u200b Process cluster stop and then cluster start .\u200b\u200b Options Force to kill all redis-servers(master/slave) with SIGKILL and then start again. --force-stop Remove all data(aof, rdb, RocksDB, conf files) before starting again. --reset Process cluster create . This command should be called with --reset . --cluster (7) Cluster ls List the deployed clusters. ec2-user@lightningdb:2> cluster ls [1, 2] (8) Cluster use Change the cluster to use LTCLI. Use cluster use or c commands. ec2-user@lightningdb:2> cluster use 1 Cluster '1' selected. ec2-user@lightningdb:1> c 2 Cluster '2' selected. (9) Cluster add_slave Warning Before using the add-slave command, ingestion to master nodes should be stopped. After replication and sync between master and slave are completed, ingestion will be available again. You can add a slave to a cluster that is configured only with the master without redundancy. Create cluster only with masters Procedure for configuring the test environment. If cluster with the only masters already exists, go to the add slave info . Proceed with the deploy. Enter 0 in replicas as shown below when deploy. ec2-user@lightningdb:2> deploy 3 Select installer [ INSTALLER LIST ] (1) lightningdb.dev.master.5a6a38.bin Please enter the number, file path or url of the installer you want to use. you can also add file in list by copy to '$FBPATH/releases/' https://flashbase.s3.ap-northeast-2.amazonaws.com/lightningdb.release.master.5a6a38.bin Downloading lightningdb.release.master.5a6a38.bin [==================================================] 100% OK, lightningdb.release.master.5a6a38.bin Please type host list separated by comma(,) [127.0.0.1] OK, ['127.0.0.1'] How many masters would you like to create on each host? [5] OK, 5 Please type ports separate with comma(,) and use hyphen(-) for range. [18300-18304] OK, ['18300-18304'] How many replicas would you like to create on each master? [0] OK, 0 How many ssd would you like to use? 
[3] OK, 3 Type prefix of db path [~/sata_ssd/ssd_] OK, ~/sata_ssd/ssd_ +--------------+---------------------------------+ | NAME | VALUE | +--------------+---------------------------------+ | installer | lightningdb.dev.master.5a6a38.bin | | hosts | 127.0.0.1 | | master ports | 18300-18304 | | ssd count | 3 | | db path | ~/sata_ssd/ssd_ | +--------------+---------------------------------+ Do you want to proceed with the deploy accroding to the above information? (y/n) y Check status of hosts... +-----------+--------+ | HOST | STATUS | +-----------+--------+ | 127.0.0.1 | OK | +-----------+--------+ OK Checking for cluster exist... +-----------+--------+ | HOST | STATUS | +-----------+--------+ | 127.0.0.1 | CLEAN | +-----------+--------+ OK Transfer installer and execute... - 127.0.0.1 Sync conf... Complete to deploy cluster 3. Cluster '3' selected. When the deploy is complete, start and create the cluster. ec2-user@lightningdb:3> cluster start Check status of hosts... OK Check cluster exist... - 127.0.0.1 OK Backup redis master log in each MASTER hosts... - 127.0.0.1 create redis data directory in each MASTER hosts - 127.0.0.1 sync conf +-----------+--------+ | HOST | STATUS | +-----------+--------+ | 127.0.0.1 | OK | +-----------+--------+ OK Starting master nodes : 127.0.0.1 : 18300|18301|18302|18303|18304 ... Wait until all redis process up... cur: 5 / total: 5 Complete all redis process up ec2-user@lightningdb:3> cluster create Check status of hosts... OK >>> Creating cluster +-----------+-------+--------+ | HOST | PORT | TYPE | +-----------+-------+--------+ | 127.0.0.1 | 18300 | MASTER | | 127.0.0.1 | 18301 | MASTER | | 127.0.0.1 | 18302 | MASTER | | 127.0.0.1 | 18303 | MASTER | | 127.0.0.1 | 18304 | MASTER | +-----------+-------+--------+ replicas: 0 Do you want to proceed with the create according to the above information? (y/n) y Cluster meet... - 127.0.0.1:18300 - 127.0.0.1:18303 - 127.0.0.1:18304 - 127.0.0.1:18301 - 127.0.0.1:18302 Adding slots... - 127.0.0.1:18300, 3280 - 127.0.0.1:18303, 3276 - 127.0.0.1:18304, 3276 - 127.0.0.1:18301, 3276 - 127.0.0.1:18302, 3276 Check cluster state and asign slot... Ok create cluster complete. ec2-user@lightningdb:3> Add slave info Open the conf file. ec2-user@lightningdb:3> conf cluster You can modify redis.properties by entering the command as shown above. #!/bin/bash ## Master hosts and ports export SR2_REDIS_MASTER_HOSTS=( \"127.0.0.1\" ) export SR2_REDIS_MASTER_PORTS=( $(seq 18300 18304) ) ## Slave hosts and ports (optional) [[export]] SR2_REDIS_SLAVE_HOSTS=( \"127.0.0.1\" ) [[export]] SR2_REDIS_SLAVE_PORTS=( $(seq 18600 18609) ) ## only single data directory in redis db and flash db ## Must exist below variables; 'SR2_REDIS_DATA', 'SR2_REDIS_DB_PATH' and 'SR2_FLASH_DB_PATH' [[export]] SR2_REDIS_DATA=\"/nvdrive0/nvkvs/redis\" [[export]] SR2_REDIS_DB_PATH=\"/nvdrive0/nvkvs/redis\" [[export]] SR2_FLASH_DB_PATH=\"/nvdrive0/nvkvs/flash\" ## multiple data directory in redis db and flash db export SSD_COUNT=3 [[export]] HDD_COUNT=3 export SR2_REDIS_DATA=\"~/sata_ssd/ssd_\" export SR2_REDIS_DB_PATH=\"~/sata_ssd/ssd_\" export SR2_FLASH_DB_PATH=\"~/sata_ssd/ssd_\" ####################################################### # Example : only SSD data directory [[export]] SSD_COUNT=3 [[export]] SR2_REDIS_DATA=\"/ssd_\" [[export]] SR2_REDIS_DB_PATH=\"/ssd_\" [[export]] SR2_FLASH_DB_PATH=\"/ssd_\" ####################################################### Modify SR2_REDIS_SLAVE_HOSTS and SR2_REDIS_SLAVE_PORTS as shown below. 
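Before wiring in the slave ports, it can be worth confirming that nothing is already listening on them. A small sketch using ss; the port range matches the slave ports chosen in this example (18350-18354):

```bash
# Check that the planned slave ports are free on this host.
# 'ss' comes with iproute2; 'netstat -tln' can be used instead on older systems.
for port in $(seq 18350 18354); do
  if ss -tln | grep -q ":${port} "; then
    echo "port ${port} is already in use"
  else
    echo "port ${port} is free"
  fi
done
```

With the ports confirmed free, the modified redis.properties should look like this: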
#!/bin/bash ## Master hosts and ports export SR2_REDIS_MASTER_HOSTS=( \"127.0.0.1\" ) export SR2_REDIS_MASTER_PORTS=( $(seq 18300 18304) ) ## Slave hosts and ports (optional) export SR2_REDIS_SLAVE_HOSTS=( \"127.0.0.1\" ) export SR2_REDIS_SLAVE_PORTS=( $(seq 18350 18354) ) ## only single data directory in redis db and flash db ## Must exist below variables; 'SR2_REDIS_DATA', 'SR2_REDIS_DB_PATH' and 'SR2_FLASH_DB_PATH' [[export]] SR2_REDIS_DATA=\"/nvdrive0/nvkvs/redis\" [[export]] SR2_REDIS_DB_PATH=\"/nvdrive0/nvkvs/redis\" [[export]] SR2_FLASH_DB_PATH=\"/nvdrive0/nvkvs/flash\" ## multiple data directory in redis db and flash db export SSD_COUNT=3 [[export]] HDD_COUNT=3 export SR2_REDIS_DATA=\"~/sata_ssd/ssd_\" export SR2_REDIS_DB_PATH=\"~/sata_ssd/ssd_\" export SR2_FLASH_DB_PATH=\"~/sata_ssd/ssd_\" ####################################################### # Example : only SSD data directory [[export]] SSD_COUNT=3 [[export]] SR2_REDIS_DATA=\"/ssd_\" [[export]] SR2_REDIS_DB_PATH=\"/ssd_\" [[export]] SR2_FLASH_DB_PATH=\"/ssd_\" ####################################################### Save the modification and exit. ec2-user@lightningdb:3> conf cluster Check status of hosts... OK sync conf OK Complete edit Execute cluster add-slave command ec2-user@lightningdb:3> cluster add-slave Check status of hosts... OK Check cluster exist... - 127.0.0.1 OK clean redis conf, node conf, db data of master clean redis conf, node conf, db data of slave - 127.0.0.1 Backup redis slave log in each SLAVE hosts... - 127.0.0.1 create redis data directory in each SLAVE hosts - 127.0.0.1 sync conf OK Starting slave nodes : 127.0.0.1 : 18350|18351|18352|18353|18354 ... Wait until all redis process up... cur: 10 / total: 10 Complete all redis process up replicate [M] 127.0.0.1 18300 - [S] 127.0.0.1 18350 replicate [M] 127.0.0.1 18301 - [S] 127.0.0.1 18351 replicate [M] 127.0.0.1 18302 - [S] 127.0.0.1 18352 replicate [M] 127.0.0.1 18303 - [S] 127.0.0.1 18353 replicate [M] 127.0.0.1 18304 - [S] 127.0.0.1 18354 5 / 5 meet complete. Check configuration information ec2-user@lightningdb:3> cli cluster nodes 0549ec03031213f95121ceff6c9c13800aef848c 127.0.0.1:18303 master - 0 1574132251126 3 connected 3280-6555 1b09519d37ebb1c09095158b4f1c9f318ddfc747 127.0.0.1:18352 slave a6a8013cf0032f0f36baec3162122b3d993dd2c8 0 1574132251025 6 connected c7dc4815e24054104dff61cac6b13256a84ac4ae 127.0.0.1:18353 slave 0549ec03031213f95121ceff6c9c13800aef848c 0 1574132251126 3 connected 0ab96cb79165ddca7d7134f80aea844bd49ae2e1 127.0.0.1:18351 slave 7e97f8a8799e1e28feee630b47319e6f5e1cfaa7 0 1574132250724 4 connected 7e97f8a8799e1e28feee630b47319e6f5e1cfaa7 127.0.0.1:18301 master - 0 1574132250524 4 connected 9832-13107 e67005a46984445e559a1408dd0a4b24a8c92259 127.0.0.1:18304 master - 0 1574132251126 5 connected 6556-9831 a6a8013cf0032f0f36baec3162122b3d993dd2c8 127.0.0.1:18302 master - 0 1574132251126 2 connected 13108-16383 492cdf4b1dedab5fb94e7129da2a0e05f6c46c4f 127.0.0.1:18350 slave 83b7ef98b80a05a4ee795ae6b399c8cde54ad04e 0 1574132251126 6 connected f9f7fcee9009f25618e63d2771ee2529f814c131 127.0.0.1:18354 slave e67005a46984445e559a1408dd0a4b24a8c92259 0 1574132250724 5 connected 83b7ef98b80a05a4ee795ae6b399c8cde54ad04e 127.0.0.1:18300 myself,master - 0 1574132250000 1 connected 0-3279 (10) Cluster rowcount Check the count of records that are stored in the cluster. ec2-user@lightningdb:1> cluster rowcount 0 (11) Check the status of cluster With the following commands, you can check the status of the cluster. 
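If you want the same kind of check outside of LTCLI, for example from a cron job, plain redis-cli can be pointed at each node directly. A minimal sketch, assuming the master ports used by cluster 1 in the earlier examples (18100-18104):

```bash
# Ping every master redis process on this host with redis-cli.
for port in $(seq 18100 18104); do
  if redis-cli -h 127.0.0.1 -p "$port" ping > /dev/null 2>&1; then
    echo "127.0.0.1:${port} alive"
  else
    echo "127.0.0.1:${port} FAIL"
  fi
done
```

The LTCLI checks and their output are shown below.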
Send PING ec2-user@lightningdb:1> cli ping --all alive redis 10/10 If a node does not reply, the fail node will be displayed like below. +-------+-----------------+--------+ | TYPE | ADDR | RESULT | +-------+-----------------+--------+ | Slave | 127.0.0.1:18352 | FAIL | +-------+-----------------+--------+ alive redis 9/10 Check the status of the cluster ec2-user@lightningdb:1> cli cluster info cluster_state:ok cluster_slots_assigned:16384 cluster_slots_ok:16384 cluster_slots_pfail:0 cluster_slots_fail:0 cluster_known_nodes:5 cluster_size:5 cluster_current_epoch:4 cluster_my_epoch:2 cluster_stats_messages_ping_sent:12 cluster_stats_messages_pong_sent:14 cluster_stats_messages_sent:26 cluster_stats_messages_ping_received:10 cluster_stats_messages_pong_received:12 cluster_stats_messages_meet_received:4 cluster_stats_messages_received:26 Check the list of the nodes those are organizing the cluster. ec2-user@lightningdb:1> cli cluster nodes 559af5e90c3f2c92f19c927c29166c268d938e8f 127.0.0.1:18104 master - 0 1574127926000 4 connected 6556-9831 174e2a62722273fb83814c2f12e2769086c3d185 127.0.0.1:18101 myself,master - 0 1574127925000 3 connected 9832-13107 35ab4d3f7f487c5332d7943dbf4b20d5840053ea 127.0.0.1:18100 master - 0 1574127926000 1 connected 0-3279 f39ed05ace18e97f74c745636ea1d171ac1d456f 127.0.0.1:18103 master - 0 1574127927172 0 connected 3280-6555 9fd612b86a9ce1b647ba9170b8f4a8bfa5c875fc 127.0.0.1:18102 master - 0 1574127926171 2 connected 13108-16383 (12) Cluster tree User can check the status of master nodes and slaves and show which master and slave nodes are linked. ec2-user@lightningdb:9> cluster tree 127.0.0.1:18900(connected) |__ 127.0.0.1:18950(connected) 127.0.0.1:18901(connected) |__ 127.0.0.1:18951(connected) 127.0.0.1:18902(connected) |__ 127.0.0.1:18952(connected) 127.0.0.1:18903(connected) |__ 127.0.0.1:18953(connected) 127.0.0.1:18904(connected) |__ 127.0.0.1:18954(connected) 127.0.0.1:18905(connected) |__ 127.0.0.1:18955(connected) 127.0.0.1:18906(connected) |__ 127.0.0.1:18956(connected) (13) Cluster failover If a master node is killed, its slave node will automatically promote after 'cluster-node-time' 2 . User can promote the slave node immediately by using the 'cluster failover' command. Step 1) Check the status of the cluster In this case, '127.0.0.1:18902' node is killed. ec2-user@lightningdb:9> cluster tree 127.0.0.1:18900(connected) |__ 127.0.0.1:18950(connected) 127.0.0.1:18901(connected) |__ 127.0.0.1:18951(connected) 127.0.0.1:18902(disconnected) <--- Killed! |__ 127.0.0.1:18952(connected) 127.0.0.1:18903(connected) |__ 127.0.0.1:18953(connected) 127.0.0.1:18904(connected) |__ 127.0.0.1:18954(connected) 127.0.0.1:18905(connected) |__ 127.0.0.1:18955(connected) 127.0.0.1:18906(connected) |__ 127.0.0.1:18956(connected) Step 2) Do failover with 'cluster failover' command ec2-user@lightningdb:9> cluster failover failover 127.0.0.1:18952 for 127.0.0.1:18902 OK ec2-user@lightningdb:9> cluster tree 127.0.0.1:18900(connected) |__ 127.0.0.1:18950(connected) 127.0.0.1:18901(connected) |__ 127.0.0.1:18951(connected) 127.0.0.1:18902(disconnected) <--- Killed! 127.0.0.1:18903(connected) |__ 127.0.0.1:18953(connected) 127.0.0.1:18904(connected) |__ 127.0.0.1:18954(connected) 127.0.0.1:18905(connected) |__ 127.0.0.1:18955(connected) 127.0.0.1:18906(connected) |__ 127.0.0.1:18956(connected) 127.0.0.1:18952(connected) <--- Promoted to master! (14) Cluster failback With 'cluster failback' command, the killed node is restarted and added to the cluster as the slave node. 
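Once the node has been failed back, its replication state can be double-checked directly with redis-cli. A sketch using the addresses from this example; after failback, 127.0.0.1:18902 should report role:slave and point at the promoted master 127.0.0.1:18952:

```bash
# Show the role and master link of the recovered node.
redis-cli -h 127.0.0.1 -p 18902 info replication | grep -E 'role|master_host|master_port|master_link_status'
```

The failback command and the resulting topology look like this: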
ec2-user@lightningdb:9> cluster failback run 127.0.0.1:18902 ec2-user@lightningdb:9> cluster tree 127.0.0.1:18900(connected) |__ 127.0.0.1:18950(connected) 127.0.0.1:18901(connected) |__ 127.0.0.1:18951(connected) 127.0.0.1:18903(connected) |__ 127.0.0.1:18953(connected) 127.0.0.1:18904(connected) |__ 127.0.0.1:18954(connected) 127.0.0.1:18905(connected) |__ 127.0.0.1:18955(connected) 127.0.0.1:18906(connected) |__ 127.0.0.1:18956(connected) 127.0.0.1:18952(connected) <--- Promoted to master! |__ 127.0.0.1:18902(connected) <--- Failbacked. Now this node is slave!","title":"1. Cluster Commands"},{"location":"command-line-interface/#2-thrift-server-commands","text":"If you want to see the list of Thrift Server commands, use the the thriftserver command without any option. NAME ltcli thriftserver SYNOPSIS ltcli thriftserver COMMAND COMMANDS COMMAND is one of the following: beeline Connect to thriftserver command line monitor Show thriftserver log restart Thriftserver restart start Start thriftserver stop Stop thriftserver (1) Thriftserver beeline Connect to the thrift server ec2-user@lightningdb:1> thriftserver beeline Connecting... Connecting to jdbc:hive2://localhost:13000 19/11/19 04:45:18 INFO jdbc.Utils: Supplied authorities: localhost:13000 19/11/19 04:45:18 INFO jdbc.Utils: Resolved authority: localhost:13000 19/11/19 04:45:18 INFO jdbc.HiveConnection: Will try to open client transport with JDBC Uri: jdbc:hive2://localhost:13000 Connected to: Spark SQL (version 2.3.1) Driver: Hive JDBC (version 1.2.1.spark2) Transaction isolation: TRANSACTION_REPEATABLE_READ Beeline version 1.2.1.spark2 by Apache Hive 0: jdbc:hive2://localhost:13000> show tables; +-----------+------------+--------------+--+ | database | tableName | isTemporary | +-----------+------------+--------------+--+ +-----------+------------+--------------+--+ No rows selected (0.55 seconds) Default value of db url to connect is jdbc:hive2://$HIVE_HOST:$HIVE_PORT You can modify $HIVE_HOST and $HIVE_PORT by the command conf thriftserver (2) Thriftserver monitor You can view the logs of the thrift server in real-time. ec2-user@lightningdb:1> thriftserver monitor Press Ctrl-C for exit. 19/11/19 04:43:33 INFO storage.BlockManagerMasterEndpoint: Registering block manager ip-172-31-39-147.ap-northeast-2.compute.internal:35909 with 912.3 MB RAM, BlockManagerId(4, ip-172-31-39-147.ap-northeast-2.compute.internal, 35909, None) 19/11/19 04:43:33 INFO cluster.YarnSchedulerBackend$YarnDriverEndpoint: Registered executor NettyRpcEndpointRef(spark-client://Executor) (172.31.39.147:53604) with ID 5 19/11/19 04:43:33 INFO storage.BlockManagerMasterEndpoint: Registering block manager ... (3) Thriftserver restart Restart the thrift server. ec2-user@lightningdb:1> thriftserver restart no org.apache.spark.sql.hive.thriftserver.HiveThriftServer2 to stop starting org.apache.spark.sql.hive.thriftserver.HiveThriftServer2, logging to /opt/spark/logs/spark-ec2-user-org.apache.spark.sql.hive.thriftserver.HiveThriftServer2-1-ip-172-31-39-147.ap-northeast-2.compute.internal.out (4) Start thriftserver Run the thrift server. ec2-user@lightningdb:1> thriftserver start starting org.apache.spark.sql.hive.thriftserver.HiveThriftServer2, logging to /opt/spark/logs/spark-ec2-user-org.apache.spark.sql.hive.thriftserver.HiveThriftServer2-1-ip-172-31-39-147.ap-northeast-2.compute.internal.out You can view the logs through the command monitor . (5) Stop thriftserver Shut down the thrift server. 
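To confirm the server has really shut down, you can also check that the JDBC port is released. A quick sketch using the default port from this document (13000):

```bash
# If nothing is printed before the message, no process is listening on the thrift server port.
ss -tln | grep ':13000 ' || echo "thrift server port 13000 is closed"
```

The stop command prints: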
ec2-user@lightningdb:1> thriftserver stop stopping org.apache.spark.sql.hive.thriftserver.HiveThriftServer2 (6) Conf thriftserver ec2-user@lightningdb:1> conf thriftserver #!/bin/bash ############################################################################### # Common variables SPARK_CONF=${SPARK_CONF:-$SPARK_HOME/conf} SPARK_BIN=${SPARK_BIN:-$SPARK_HOME/bin} SPARK_SBIN=${SPARK_SBIN:-$SPARK_HOME/sbin} SPARK_LOG=${SPARK_LOG:-$SPARK_HOME/logs} SPARK_METRICS=${SPARK_CONF}/metrics.properties SPARK_UI_PORT=${SPARK_UI_PORT:-14050} EXECUTERS=12 EXECUTER_CORES=32 HIVE_METASTORE_URL='' HIVE_HOST=${HIVE_HOST:-localhost} HIVE_PORT=${HIVE_PORT:-13000} COMMON_CLASSPATH=$(find $SR2_LIB -name 'tsr2*' -o -name 'spark-r2*' -o -name '*jedis*' -o -name 'commons*' -o -name 'jdeferred*' \\ -o -name 'geospark*' -o -name 'gt-*' | tr '\\n' ':') ############################################################################### # Driver DRIVER_MEMORY=6g DRIVER_CLASSPATH=$COMMON_CLASSPATH ############################################################################### # Execute EXECUTOR_MEMORY=2g EXECUTOR_CLASSPATH=$COMMON_CLASSPATH ############################################################################### # Thrift Server logs EVENT_LOG_ENABLED=false EVENT_LOG_DIR=/nvdrive0/thriftserver-event-logs EVENT_LOG_ROLLING_DIR=/nvdrive0/thriftserver-event-logs-rolling EVENT_LOG_SAVE_MIN=60 EXTRACTED_EVENT_LOG_SAVE_DAY=5 SPARK_LOG_SAVE_MIN=2000 ############## If the user types 'cfc 1', ${SR2_HOME} will be '~/tsr2/cluster_1/tsr2-assembly-1.0.0-SNAPSHOT'. \u21a9 'cluster-node-time' can be set using the 'config set' command. Its default value is 1,200,000 msec. \u21a9","title":"2. Thrift Server Commands"},{"location":"data-ingestion-and-querying/","text":"1. Create a table \u00b6 You can create tables in the metastore using standard DDL. CREATE TABLE `pcell` ( `event_time` STRING, `m_10_under` DOUBLE, `m_10_19` DOUBLE, `m_20_29` DOUBLE, `m_30_39` DOUBLE, `m_40_49` DOUBLE, `m_50_59` DOUBLE, `m_60_over` DOUBLE, `longitude` DOUBLE, `lattitude` DOUBLE, `geohash` STRING) USING r2 OPTIONS ( `table` '100', `host` 'localhost', `port` '18100', `partitions` 'event_time geohash', `mode` 'nvkvs', `at_least_one_partition_enabled` 'no', `rowstore` 'true' ) There are various options used to describe storage properties. table : Positive integer. The identifier of the table. Redis identifies a table with this value. host/port : The host/port of a representative Redis node. Using this host and port, Spark builds a Redis cluster client that retrieves and inserts data to the Redis cluster. partitions : The partition columns. The partition column values are used to distribute data across the Redis cluster. That is, the partition column values are concatenated with a colon(:) and used as the Redis KEY, which is the criterion for distributing data. For more information, you can refer to the Keys distribution model page in Redis. Tip Choosing a proper partition column is crucial for performance because it determines how data is sharded across the Redis nodes. It is important to distribute KEYs evenly over the 16384 slots of Redis and to map at least 200 rows to each KEY. mode : Use 'nvkvs' for this field. at_least_one_partition_enabled : yes or no. If yes, queries without a partition filter are not permitted. rowstore : true or false. If true, all columns are merged and stored in RocksDB as a single column, which improves ingestion performance.
However, query performance can drop because of the overhead of parsing columns in the Redis layer when retrieving data from RocksDB. Tip The metastore of LightningDB only contains the metadata/schema of tables. The actual data are stored in LightningDB, which consists of Redis & RocksDB (abbreviation: r2), and the table information is stored in the metastore. 2. Data Ingestion \u00b6 (1) Insert data with DataFrameWriter You can use DataFrameWriter to write data into LightningDB. Currently, LightningDB only supports \" Append mode \". // Create source DataFrame. val df = spark.sqlContext.read.format(\"csv\") .option(\"header\", \"false\") .option(\"inferSchema\", \"true\") .load(\"/nvme/data_01/csv/\") // \"pcell\" is the name of a table that has R2 options. df.write.insertInto(\"pcell\") (2) Insert data with INSERT INTO SELECT query -- pcell : table with R2 option -- csv_table : table with csv option -- udf : UDF can be used to transform original data. INSERT INTO pcell SELECT *, udf(event_time) FROM csv_table 3. Querying \u00b6 You can query data with Spark SQL interfaces such as DataFrames and the Spark Thrift Server. Please refer to the Spark SQL guide page.","title":"Data Ingestion and querying"},{"location":"data-ingestion-and-querying/#1-create-a-table","text":"You can create tables in the metastore using standard DDL. CREATE TABLE `pcell` ( `event_time` STRING, `m_10_under` DOUBLE, `m_10_19` DOUBLE, `m_20_29` DOUBLE, `m_30_39` DOUBLE, `m_40_49` DOUBLE, `m_50_59` DOUBLE, `m_60_over` DOUBLE, `longitude` DOUBLE, `lattitude` DOUBLE, `geohash` STRING) USING r2 OPTIONS ( `table` '100', `host` 'localhost', `port` '18100', `partitions` 'event_time geohash', `mode` 'nvkvs', `at_least_one_partition_enabled` 'no', `rowstore` 'true' ) There are various options used to describe storage properties. table : Positive integer. The identifier of the table. Redis identifies a table with this value. host/port : The host/port of a representative Redis node. Using this host and port, Spark builds a Redis cluster client that retrieves and inserts data to the Redis cluster. partitions : The partition columns. The partition column values are used to distribute data across the Redis cluster. That is, the partition column values are concatenated with a colon(:) and used as the Redis KEY, which is the criterion for distributing data. For more information, you can refer to the Keys distribution model page in Redis. Tip Choosing a proper partition column is crucial for performance because it determines how data is sharded across the Redis nodes. It is important to distribute KEYs evenly over the 16384 slots of Redis and to map at least 200 rows to each KEY. mode : Use 'nvkvs' for this field. at_least_one_partition_enabled : yes or no. If yes, queries without a partition filter are not permitted. rowstore : true or false. If true, all columns are merged and stored in RocksDB as a single column, which improves ingestion performance. However, query performance can drop because of the overhead of parsing columns in the Redis layer when retrieving data from RocksDB. Tip The metastore of LightningDB only contains the metadata/schema of tables. The actual data are stored in LightningDB, which consists of Redis & RocksDB (abbreviation: r2), and the table information is stored in the metastore.","title":"1. Create a table"},{"location":"data-ingestion-and-querying/#2-data-ingestion","text":"(1) Insert data with DataFrameWriter You can use DataFrameWriter to write data into LightningDB. Currently, LightningDB only supports \" Append mode \".
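The Scala snippet below assumes a Spark session that already has the LightningDB (r2) data source on its classpath. One way to start such a session is sketched here; the jar path is a placeholder rather than the actual artifact name, so substitute the spark-r2 jar shipped with your installation:

```bash
# Launch a Spark shell with the LightningDB connector available.
$SPARK_HOME/bin/spark-shell \
  --jars /path/to/spark-r2-assembly.jar \
  --driver-memory 6g
```

With such a session running, the append-mode write looks like this: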
// Create source DataFrame. val df = spark.sqlContext.read.format(\"csv\") .option(\"header\", \"false\") .option(\"inferSchema\", \"true\") .load(\"/nvme/data_01/csv/\") // \"pcell\" is a name of table which has R2 options. df.write.insertInto(\"pcell\") (2) Insert data with INSERT INTO SELECT query -- pcell : table with R2 option -- csv_table : table with csv option -- udf : UDF can be used to transform original data. INSERT INTO pcell SELECT *, udf(event_time) FROM csv_table","title":"2. Data Ingestion"},{"location":"data-ingestion-and-querying/#3-querying","text":"You can query data with SparkSQL interfaces such as DataFrames and Spark ThriftServer. Please refer to Spark SQL guide page .","title":"3. Querying"},{"location":"deploy-lightningdb-on-k8s/","text":"Deploy LightningDB and API Server \u00b6 1. Kubernetes manifest github \u00b6 $ git clone https://github.com/mnms/metavision2_k8s_manifests 2. Install LightningDB v1 \u00b6 Install ltdb-operator $ cd ltdb-operator $ kubectl create -f ltdb-operator-controller-manager.yaml deploy LightningDB with CRD $ cd ltdb $ kubectl create -f ltdb.yaml -n {namespace} \ucc38\uc870 size / storageClass / maxMemory \ub4f1 \ud1b5\ud574 \ud074\ub7ec\uc2a4\ud130 \uc124\uc815 \uc870\uc815 AOF/RDB \ub294 \ub514\ud3f4\ud2b8 \uc635\uc158 on fs \ub0b4 redis/rocksdb mount \uc704\uce58 /tmp-redis_rocksdb_integration_0: redis's aof/rdb, conf /tmp-redis_rocksdb_integration_1: rocksdb's sst/wal rdb \uba85\uc2dc\uc801 \uba85\ub839: bash flashbase cli-all bgsave on-premise \uacbd\uc6b0, \uc544\ub798 URL \ucc98\ub7fc system \ud29c\ub2dd\uc774 \ub4e4\uc5b4\uac10. k8s \uc6b4\uc601 \uc0c1\ud669\uc5d0\uc11c\ub3c4 \ud29c\ub2dd \uc5ec\ubd80 \ud655\uc778 \ud544\uc694 https://docs.lightningdb.io/get-started-with-scratch/ \uc0ad\uc81c STS \ub294 PVC \uac00 \uc790\ub3d9\uc73c\ub85c \uc0ad\uc81c\ub418\uc9c0 \uc54a\uc73c\ubbc0\ub85c \uc644\uc804\ud788 \uc0ad\uc81c\ud558\ub824\uba74 \ud574\ub2f9 PVC \uc218\ub3d9 \uc0ad\uc81c \ud544\uc694 $ kubectl delete -f ltdb.yaml or $ kubectl delete ltdb ltdb -n metavision $ for i in {0..39}; do kubectl delete pvc \"ltdb-data-logging-ltdb-$i\" -n metavision; done $ for i in {0..39}; do kubectl delete pvc \"ltdb-data-ltdb-$i\" -n metavision; done 3. Install LightningDB v2 / Thunderquery \u00b6 $ cd ltdbv2 $ kubectl create -f ltdbv2-all-in-one.yaml $ kubectl -n metavision exec -it ltdbv2-0 -- redis-cli --cluster-yes --cluster create `kubectl -n metavision get po -o wide -l app=ltdbv2 | grep ltdbv2 | awk '{print $6\":6379\"}' | tr '\\n' ' '` \ucc38\uc870 Operator \uc5c6\uc774 \uc218\ub3d9 \uc124\uce58 namespace \uac00 metavision \uc73c\ub85c \uba85\uc2dc\uc801\uc73c\ub85c \ub418\uc5b4 \uc788\uc74c. 
namespace \ub97c \ubc14\uafb8\uace0 \uc2f6\uc73c\uba74 \ud574\ub2f9 \ubd80\ubd84 \uc218\uc815 \ucd5c\uc2e0 \ubc84\uc804\uc740 ann \uc744 \uc0ac\uc6a9\ud55c\ub2e4 \ud558\ub354\ub77c\ub3c4 maxmemory-policy \ub97c noeviction \uc73c\ub85c \ubc14\uafc0 \ud544\uc694 \uc5c6\uc774 eviction rule \uc815\uc0c1 \uc791\ub3d9\ud558\uba74\uc11c \uc0ac\uc6a9\ud558\uba74 \ub428 AOF/RDB \ub294 \ub514\ud3f4\ud2b8 \uc635\uc158 on fs \ub0b4 redis/rocksdb mount \uc704\uce58 /tmp/redis: redis's aof/rdb, conf, rocksdb's sst/wal rdb \uba85\uc2dc\uc801 \uba85\ub839: flashbase cli-all bgrewriteaof \uc0ad\uc81c STS \ub294 PVC \uac00 \uc790\ub3d9\uc73c\ub85c \uc0ad\uc81c\ub418\uc9c0 \uc54a\uc73c\ubbc0\ub85c \uc644\uc804\ud788 \uc0ad\uc81c\ud558\ub824\uba74 \ud574\ub2f9 PVC \uc218\ub3d9 \uc0ad\uc81c \ud544\uc694 $ kubectl delete -f ltdbv2-all-in-one.yaml $ for i in {0..99}; do kubectl delete pvc \"ltdbv2-pvc-ltdbv2-$i\" -n metavision; done 4. Install ltdb-http v1 \u00b6 $ cd ltdb-http $ ls -alh total 32 drwxr-xr-x 6 1111462 1437349805 192B 8 31 17:53 . drwxr-xr-x 11 1111462 1437349805 352B 8 31 17:54 .. -rw-r--r-- 1 1111462 1437349805 1.3K 8 31 17:53 ltdb-http-configmap.yaml -rw-r--r-- 1 1111462 1437349805 1.5K 8 31 17:53 ltdb-http.yaml -rw-r--r-- 1 1111462 1437349805 259B 8 31 17:53 pvc.yaml -rw-r--r-- 1 1111462 1437349805 342B 8 31 17:53 spark-rbac.yaml ltdb-http.yaml\ub9cc \uac00\uc7a5 \ub098\uc911\uc5d0 apply kubectl -n metavision apply -f ltdb-http-configmap.yaml kubectl -n metavision apply -f spark-rbac.yaml kubectl -n metavision apply -f pvc.yaml kubectl -n metavision apply -f ltdb-http.yaml // \uac00\uc7a5 \ub098\uc911\uc5d0... 5. Install ltdb-http v2 \u00b6 \ucc38\uc870: https://www.notion.so/ltdb/LTDB-HTTP-V2-0-K8S-b47ad5741e9a43668c7bee4d40e1616e?pvs=4 \uc544\uc774\uc2a4\ubc84\uadf8 \uc0ac\uc6a9 \uc548\ud560 \uc2dc, ltdb-postgresql.yaml \uc81c\uc678 \uac00\ub2a5 namespace \uac00 metavision \uc73c\ub85c \uba85\uc2dc\uc801\uc73c\ub85c \ub418\uc5b4 \uc788\uc74c. namespace \ub97c \ubc14\uafb8\uace0 \uc2f6\uc73c\uba74 \ud574\ub2f9 \ubd80\ubd84 \uc218\uc815 s3 \uae30\ub2a5\uc744 \uc0ac\uc6a9\ud558\uace0 \uc2f6\uc73c\uba74, app/s3-secret.yaml \uc124\uce58 \ud544\uc694 (\ubd84\ub2f9 9\uce35 TB\uc5d0\ub294 \uc774\ubbf8 \uc124\uce58 \ub428) s3 region \uc740 \uae30\ubcf8\uac12\uc73c\ub85c ap-northeast-2 \uc124\uc815 \ub428 $ cd ltdbv2-http $ kubectl create -f ltdb-http-configmap.yaml $ kubectl create -f ltdb-http.yaml $ kubectl create -f ltdbv2-http-vs.yaml \uc0ad\uc81c $ kubectl delete -f ltdbv2-http-vs.yaml $ kubectl delete -f ltdb-http.yaml $ kubectl delete -f ltdb-http-configmap.yaml 6. Install ltdb-http v2 CXL-CMS \u00b6 $ cd hynix $ kubectl create -f ltdbv2.yaml $ kubectl -n hynix exec -it ltdbv2-0 -- redis-cli --cluster-yes --cluster create `kubectl -n hynix get po -o wide -l app=ltdbv2 | grep ltdbv2 | awk '{print $6\":6379\"}' | tr '\\n' ' '` $ kubectl create -f thunderquery.yaml $ kubectl create -f ltdbv2-http.yaml $ kubectl create -f istio-ingress.yaml \ucc38\uc870 cxl-cms \uc5d0\uc11c \ucd94\uac00 \ub41c config \uac12\uc740 \uc544\ub798 \uac19\uc73c\uba70, cxl-cms dev \uc6a9 CSI \ub4dc\ub77c\uc774\ubc84\uac00 \uc5c6\uae30 \ub54c\ubb38\uc5d0 STS \uc5d0\uc11c \uc218\ub3d9\uc73c\ub85c pod \uac1c\uc218 \ubc0f Node Affinity \uc124\uc815 \ud558\uba74\uc11c \ud14c\uc2a4\ud2b8 \ud574\uc57c \ud568 dax-device-name /dev/xxx, cms-device-name /dev/yyy \ud615\ud0dc\ub85c \uc7a1\uc544\uc9d0 $ vi ltdbv2.yaml ... 
cms-enabled no dax-device-name no cms-device-name no \uc0ad\uc81c STS \ub294 PVC \uac00 \uc790\ub3d9\uc73c\ub85c \uc0ad\uc81c\ub418\uc9c0 \uc54a\uc73c\ubbc0\ub85c \uc644\uc804\ud788 \uc0ad\uc81c\ud558\ub824\uba74 \ud574\ub2f9 PVC \uc218\ub3d9 \uc0ad\uc81c \ud544\uc694 $ cd hynix $ kubectl delete -f ltdbv2-http.yaml $ kubectl delete -f thunderquery.yaml $ kubectl delete -f ltdbv2.yaml for i in {0..9}; do kubectl delete pvc \"ltdbv2-pvc-ltdbv2-$i\" -n hynix; done $ kubectl delete -f istio-ingress.yaml","title":"Deploy LightningDB"},{"location":"deploy-lightningdb-on-k8s/#deploy-lightningdb-and-api-server","text":"","title":"Deploy LightningDB and API Server"},{"location":"deploy-lightningdb-on-k8s/#1-kubernetes-manifest-github","text":"$ git clone https://github.com/mnms/metavision2_k8s_manifests","title":"1. Kubernetes manifest github"},{"location":"deploy-lightningdb-on-k8s/#2-install-lightningdb-v1","text":"Install ltdb-operator $ cd ltdb-operator $ kubectl create -f ltdb-operator-controller-manager.yaml deploy LightningDB with CRD $ cd ltdb $ kubectl create -f ltdb.yaml -n {namespace} \ucc38\uc870 size / storageClass / maxMemory \ub4f1 \ud1b5\ud574 \ud074\ub7ec\uc2a4\ud130 \uc124\uc815 \uc870\uc815 AOF/RDB \ub294 \ub514\ud3f4\ud2b8 \uc635\uc158 on fs \ub0b4 redis/rocksdb mount \uc704\uce58 /tmp-redis_rocksdb_integration_0: redis's aof/rdb, conf /tmp-redis_rocksdb_integration_1: rocksdb's sst/wal rdb \uba85\uc2dc\uc801 \uba85\ub839: bash flashbase cli-all bgsave on-premise \uacbd\uc6b0, \uc544\ub798 URL \ucc98\ub7fc system \ud29c\ub2dd\uc774 \ub4e4\uc5b4\uac10. k8s \uc6b4\uc601 \uc0c1\ud669\uc5d0\uc11c\ub3c4 \ud29c\ub2dd \uc5ec\ubd80 \ud655\uc778 \ud544\uc694 https://docs.lightningdb.io/get-started-with-scratch/ \uc0ad\uc81c STS \ub294 PVC \uac00 \uc790\ub3d9\uc73c\ub85c \uc0ad\uc81c\ub418\uc9c0 \uc54a\uc73c\ubbc0\ub85c \uc644\uc804\ud788 \uc0ad\uc81c\ud558\ub824\uba74 \ud574\ub2f9 PVC \uc218\ub3d9 \uc0ad\uc81c \ud544\uc694 $ kubectl delete -f ltdb.yaml or $ kubectl delete ltdb ltdb -n metavision $ for i in {0..39}; do kubectl delete pvc \"ltdb-data-logging-ltdb-$i\" -n metavision; done $ for i in {0..39}; do kubectl delete pvc \"ltdb-data-ltdb-$i\" -n metavision; done","title":"2. Install LightningDB v1"},{"location":"deploy-lightningdb-on-k8s/#3-install-lightningdb-v2-thunderquery","text":"$ cd ltdbv2 $ kubectl create -f ltdbv2-all-in-one.yaml $ kubectl -n metavision exec -it ltdbv2-0 -- redis-cli --cluster-yes --cluster create `kubectl -n metavision get po -o wide -l app=ltdbv2 | grep ltdbv2 | awk '{print $6\":6379\"}' | tr '\\n' ' '` \ucc38\uc870 Operator \uc5c6\uc774 \uc218\ub3d9 \uc124\uce58 namespace \uac00 metavision \uc73c\ub85c \uba85\uc2dc\uc801\uc73c\ub85c \ub418\uc5b4 \uc788\uc74c. 
namespace \ub97c \ubc14\uafb8\uace0 \uc2f6\uc73c\uba74 \ud574\ub2f9 \ubd80\ubd84 \uc218\uc815 \ucd5c\uc2e0 \ubc84\uc804\uc740 ann \uc744 \uc0ac\uc6a9\ud55c\ub2e4 \ud558\ub354\ub77c\ub3c4 maxmemory-policy \ub97c noeviction \uc73c\ub85c \ubc14\uafc0 \ud544\uc694 \uc5c6\uc774 eviction rule \uc815\uc0c1 \uc791\ub3d9\ud558\uba74\uc11c \uc0ac\uc6a9\ud558\uba74 \ub428 AOF/RDB \ub294 \ub514\ud3f4\ud2b8 \uc635\uc158 on fs \ub0b4 redis/rocksdb mount \uc704\uce58 /tmp/redis: redis's aof/rdb, conf, rocksdb's sst/wal rdb \uba85\uc2dc\uc801 \uba85\ub839: flashbase cli-all bgrewriteaof \uc0ad\uc81c STS \ub294 PVC \uac00 \uc790\ub3d9\uc73c\ub85c \uc0ad\uc81c\ub418\uc9c0 \uc54a\uc73c\ubbc0\ub85c \uc644\uc804\ud788 \uc0ad\uc81c\ud558\ub824\uba74 \ud574\ub2f9 PVC \uc218\ub3d9 \uc0ad\uc81c \ud544\uc694 $ kubectl delete -f ltdbv2-all-in-one.yaml $ for i in {0..99}; do kubectl delete pvc \"ltdbv2-pvc-ltdbv2-$i\" -n metavision; done","title":"3. Install LightningDB v2 / Thunderquery"},{"location":"deploy-lightningdb-on-k8s/#4-install-ltdb-http-v1","text":"$ cd ltdb-http $ ls -alh total 32 drwxr-xr-x 6 1111462 1437349805 192B 8 31 17:53 . drwxr-xr-x 11 1111462 1437349805 352B 8 31 17:54 .. -rw-r--r-- 1 1111462 1437349805 1.3K 8 31 17:53 ltdb-http-configmap.yaml -rw-r--r-- 1 1111462 1437349805 1.5K 8 31 17:53 ltdb-http.yaml -rw-r--r-- 1 1111462 1437349805 259B 8 31 17:53 pvc.yaml -rw-r--r-- 1 1111462 1437349805 342B 8 31 17:53 spark-rbac.yaml ltdb-http.yaml\ub9cc \uac00\uc7a5 \ub098\uc911\uc5d0 apply kubectl -n metavision apply -f ltdb-http-configmap.yaml kubectl -n metavision apply -f spark-rbac.yaml kubectl -n metavision apply -f pvc.yaml kubectl -n metavision apply -f ltdb-http.yaml // \uac00\uc7a5 \ub098\uc911\uc5d0...","title":"4. Install ltdb-http v1"},{"location":"deploy-lightningdb-on-k8s/#5-install-ltdb-http-v2","text":"\ucc38\uc870: https://www.notion.so/ltdb/LTDB-HTTP-V2-0-K8S-b47ad5741e9a43668c7bee4d40e1616e?pvs=4 \uc544\uc774\uc2a4\ubc84\uadf8 \uc0ac\uc6a9 \uc548\ud560 \uc2dc, ltdb-postgresql.yaml \uc81c\uc678 \uac00\ub2a5 namespace \uac00 metavision \uc73c\ub85c \uba85\uc2dc\uc801\uc73c\ub85c \ub418\uc5b4 \uc788\uc74c. namespace \ub97c \ubc14\uafb8\uace0 \uc2f6\uc73c\uba74 \ud574\ub2f9 \ubd80\ubd84 \uc218\uc815 s3 \uae30\ub2a5\uc744 \uc0ac\uc6a9\ud558\uace0 \uc2f6\uc73c\uba74, app/s3-secret.yaml \uc124\uce58 \ud544\uc694 (\ubd84\ub2f9 9\uce35 TB\uc5d0\ub294 \uc774\ubbf8 \uc124\uce58 \ub428) s3 region \uc740 \uae30\ubcf8\uac12\uc73c\ub85c ap-northeast-2 \uc124\uc815 \ub428 $ cd ltdbv2-http $ kubectl create -f ltdb-http-configmap.yaml $ kubectl create -f ltdb-http.yaml $ kubectl create -f ltdbv2-http-vs.yaml \uc0ad\uc81c $ kubectl delete -f ltdbv2-http-vs.yaml $ kubectl delete -f ltdb-http.yaml $ kubectl delete -f ltdb-http-configmap.yaml","title":"5. 
Install ltdb-http v2"},{"location":"deploy-lightningdb-on-k8s/#6-install-ltdb-http-v2-cxl-cms","text":"$ cd hynix $ kubectl create -f ltdbv2.yaml $ kubectl -n hynix exec -it ltdbv2-0 -- redis-cli --cluster-yes --cluster create `kubectl -n hynix get po -o wide -l app=ltdbv2 | grep ltdbv2 | awk '{print $6\":6379\"}' | tr '\\n' ' '` $ kubectl create -f thunderquery.yaml $ kubectl create -f ltdbv2-http.yaml $ kubectl create -f istio-ingress.yaml \ucc38\uc870 cxl-cms \uc5d0\uc11c \ucd94\uac00 \ub41c config \uac12\uc740 \uc544\ub798 \uac19\uc73c\uba70, cxl-cms dev \uc6a9 CSI \ub4dc\ub77c\uc774\ubc84\uac00 \uc5c6\uae30 \ub54c\ubb38\uc5d0 STS \uc5d0\uc11c \uc218\ub3d9\uc73c\ub85c pod \uac1c\uc218 \ubc0f Node Affinity \uc124\uc815 \ud558\uba74\uc11c \ud14c\uc2a4\ud2b8 \ud574\uc57c \ud568 dax-device-name /dev/xxx, cms-device-name /dev/yyy \ud615\ud0dc\ub85c \uc7a1\uc544\uc9d0 $ vi ltdbv2.yaml ... cms-enabled no dax-device-name no cms-device-name no \uc0ad\uc81c STS \ub294 PVC \uac00 \uc790\ub3d9\uc73c\ub85c \uc0ad\uc81c\ub418\uc9c0 \uc54a\uc73c\ubbc0\ub85c \uc644\uc804\ud788 \uc0ad\uc81c\ud558\ub824\uba74 \ud574\ub2f9 PVC \uc218\ub3d9 \uc0ad\uc81c \ud544\uc694 $ cd hynix $ kubectl delete -f ltdbv2-http.yaml $ kubectl delete -f thunderquery.yaml $ kubectl delete -f ltdbv2.yaml for i in {0..9}; do kubectl delete pvc \"ltdbv2-pvc-ltdbv2-$i\" -n hynix; done $ kubectl delete -f istio-ingress.yaml","title":"6. Install ltdb-http v2 CXL-CMS"},{"location":"get-started-with-scratch/","text":"Note This page guides how to start LightningDB on CentOS manually. In case of using AWS EC2 Instance , please use Installation 1. Optimizing System Parameters \u00b6 (1) Edit /etc/sysctl.conf like following ... vm.swappiness = 0 vm.overcommit_memory = 1 vm.overcommit_ratio = 50 fs.file-max = 6815744 net.ipv4.ip_local_port_range = 32768 65535 net.core.rmem_default = 262144 net.core.wmem_default = 262144 net.core.rmem_max = 16777216 net.core.wmem_max = 16777216 net.ipv4.tcp_max_syn_backlog = 4096 net.core.somaxconn = 65535 ... Tip In case of application in runtime, use sudo sysctl -p (2) Edit /etc/security/limits.conf ... * soft core -1 * soft nofile 262144 * hard nofile 262144 * soft nproc 131072 * hard nproc 131072 [account name] * soft nofile 262144 [account name] * hard nofile 262144 [account name] * soft nproc 131072 [account name] * hard nproc 131072 ... Tip In case of application in runtime, use ulimit -n 65535, ulimit -u 131072 (3) Edit /etc/fstab Remove SWAP Partition (Comment out SWAP partition with using # and reboot) ... [[/dev/mapper/centos-swap]] swap swap defaults 0 0 ... Tip In case of application in runtime, use swapoff -a (4) /etc/init.d/disable-transparent-hugepages root@fbg01 ~] cat /etc/init.d/disable-transparent-hugepages #!/bin/bash ### BEGIN INIT INFO # Provides: disable-transparent-hugepages # Required-Start: $local_fs # Required-Stop: # X-Start-Before: mongod mongodb-mms-automation-agent # Default-Start: 2 3 4 5 # Default-Stop: 0 1 6 # Short-Description: Disable Linux transparent huge pages # Description: Disable Linux transparent huge pages, to improve # database performance. 
### END INIT INFO case $1 in start) if [ -d /sys/kernel/mm/transparent_hugepage ]; then thp_path=/sys/kernel/mm/transparent_hugepage elif [ -d /sys/kernel/mm/redhat_transparent_hugepage ]; then thp_path=/sys/kernel/mm/redhat_transparent_hugepage else return 0 fi echo 'never' > ${thp_path}/enabled echo 'never' > ${thp_path}/defrag re='^[0-1]+$' if [[ $(cat ${thp_path}/khugepaged/defrag) =~ $re ]] then # RHEL 7 echo 0 > ${thp_path}/khugepaged/defrag else # RHEL 6 echo 'no' > ${thp_path}/khugepaged/defrag fi unset re unset thp_path ;; esac [root@fbg01 ~] [root@fbg01 ~] [root@fbg01 ~] chmod 755 /etc/init.d/disable-transparent-hugepages [root@fbg01 ~] chkconfig --add disable-transparent-hugepages 2. Setup Prerequisites \u00b6 - bash, unzip, ssh - JDK 1.8 or higher - gcc 4.8.5 or higher - glibc 2.17 or higher - epel-release sudo yum install epel-release - boost, boost-thread, boost-devel sudo yum install boost boost-thread boost-devel - Exchange SSH Key For all servers that LightningDB will be deployed, SSH key should be exchanged. ssh-keygen -t rsa chmod 0600 ~/.ssh/authorized_keys cat .ssh/id_rsa.pub | ssh {server name} \"cat >> .ssh/authorized_keys\" - Intel MKL library (1) Intel MKL 2019 library install Go to the website: https://software.intel.com/en-us/mkl/choose-download/macos Register and login Select product named \"Intel * Math Kernel Library for Linux\" or \"Intel * Math Kernel Library for Mac\" from the select box \"Choose Product to Download\" Choose a Version \"2019 Update 2\" and download Unzip the file and execute the install.sh file with root account or (sudo command) sudo ./install.sh Choose custom install and configure the install directory /opt/intel (with sudo, /opt/intel is the default installation path, just confirm it) matthew@fbg05 /opt/intel $ pwd /opt/intel matthew@fbg05 /opt/intel $ ls -alh \ud569\uacc4 0 drwxr-xr-x 10 root root 307 3\uc6d4 22 01:34 . drwxr-xr-x. 5 root root 83 3\uc6d4 22 01:34 .. drwxr-xr-x 6 root root 72 3\uc6d4 22 01:35 .pset drwxr-xr-x 2 root root 53 3\uc6d4 22 01:34 bin lrwxrwxrwx 1 root root 28 3\uc6d4 22 01:34 compilers_and_libraries -> compilers_and_libraries_2019 drwxr-xr-x 3 root root 19 3\uc6d4 22 01:34 compilers_and_libraries_2019 drwxr-xr-x 4 root root 36 1\uc6d4 24 23:04 compilers_and_libraries_2019.2.187 drwxr-xr-x 6 root root 63 1\uc6d4 24 22:50 conda_channel drwxr-xr-x 4 root root 26 1\uc6d4 24 23:01 documentation_2019 lrwxrwxrwx 1 root root 33 3\uc6d4 22 01:34 lib -> compilers_and_libraries/linux/lib lrwxrwxrwx 1 root root 33 3\uc6d4 22 01:34 mkl -> compilers_and_libraries/linux/mkl lrwxrwxrwx 1 root root 29 3\uc6d4 22 01:34 parallel_studio_xe_2019 -> parallel_studio_xe_2019.2.057 drwxr-xr-x 5 root root 216 3\uc6d4 22 01:34 parallel_studio_xe_2019.2.057 drwxr-xr-x 3 root root 16 3\uc6d4 22 01:34 samples_2019 lrwxrwxrwx 1 root root 33 3\uc6d4 22 01:34 tbb -> compilers_and_libraries/linux/tbb (2) Intel MKL 2019 library environment settings Append the following statement into ~/.bashrc # INTEL MKL enviroment variables for ($MKLROOT, can be checked with the value export | grep MKL) source /opt/intel/mkl/bin/mklvars.sh intel64 - Apache Hadoop 2.6.0 (or higher) - Apache Spark 2.3 on Hadoop 2.6 - ntp For clock synchronization between servers over packet-switched, variable-latency data networks. - Settings for core dump(Optional) (1) INSTALLING ABRT AND STARTING ITS SERVICES (2) Set core dump file size ulimit -c unlimited (3) Change the path of core dump files echo /tmp/core.%p > /proc/sys/kernel/core_pattern 3. 
Session configuration files \u00b6 '~/.bashrc' Add followings # .bashrc if [ -f /etc/bashrc ]; then . /etc/bashrc fi # User specific environment and startup programs PATH=$PATH:$HOME/.local/bin:$HOME/bin HADOOP_HOME=/home/nvkvs/hadoop HADOOP_CONF_DIR=$HADOOP_HOME/etc/hadoop YARN_CONF_DIR=$HADOOP_HOME/etc/hadoop SPARK_HOME=/home/nvkvs/spark PATH=$PATH:$HADOOP_HOME/bin:$HADOOP_HOME/sbin:$SPARK_HOME/bin:$SPARK_HOME/sbin:$HOME/sbin export PATH SPARK_HOME HADOOP_HOME HADOOP_CONF_DIR YARN_CONF_DIR alias cfc='source ~/.use_cluster' 4. Install and Start LightningDB \u00b6 With LTCLI provided by LightningDB, users can deploy and use LightningDB. Install LTCLI with the following command. $ pip install ltcli --upgrade --user After installation is completed, start LTCLI with Commands","title":"Manual Installation"},{"location":"get-started-with-scratch/#1-optimizing-system-parameters","text":"(1) Edit /etc/sysctl.conf like following ... vm.swappiness = 0 vm.overcommit_memory = 1 vm.overcommit_ratio = 50 fs.file-max = 6815744 net.ipv4.ip_local_port_range = 32768 65535 net.core.rmem_default = 262144 net.core.wmem_default = 262144 net.core.rmem_max = 16777216 net.core.wmem_max = 16777216 net.ipv4.tcp_max_syn_backlog = 4096 net.core.somaxconn = 65535 ... Tip In case of application in runtime, use sudo sysctl -p (2) Edit /etc/security/limits.conf ... * soft core -1 * soft nofile 262144 * hard nofile 262144 * soft nproc 131072 * hard nproc 131072 [account name] * soft nofile 262144 [account name] * hard nofile 262144 [account name] * soft nproc 131072 [account name] * hard nproc 131072 ... Tip In case of application in runtime, use ulimit -n 65535, ulimit -u 131072 (3) Edit /etc/fstab Remove SWAP Partition (Comment out SWAP partition with using # and reboot) ... [[/dev/mapper/centos-swap]] swap swap defaults 0 0 ... Tip In case of application in runtime, use swapoff -a (4) /etc/init.d/disable-transparent-hugepages root@fbg01 ~] cat /etc/init.d/disable-transparent-hugepages #!/bin/bash ### BEGIN INIT INFO # Provides: disable-transparent-hugepages # Required-Start: $local_fs # Required-Stop: # X-Start-Before: mongod mongodb-mms-automation-agent # Default-Start: 2 3 4 5 # Default-Stop: 0 1 6 # Short-Description: Disable Linux transparent huge pages # Description: Disable Linux transparent huge pages, to improve # database performance. ### END INIT INFO case $1 in start) if [ -d /sys/kernel/mm/transparent_hugepage ]; then thp_path=/sys/kernel/mm/transparent_hugepage elif [ -d /sys/kernel/mm/redhat_transparent_hugepage ]; then thp_path=/sys/kernel/mm/redhat_transparent_hugepage else return 0 fi echo 'never' > ${thp_path}/enabled echo 'never' > ${thp_path}/defrag re='^[0-1]+$' if [[ $(cat ${thp_path}/khugepaged/defrag) =~ $re ]] then # RHEL 7 echo 0 > ${thp_path}/khugepaged/defrag else # RHEL 6 echo 'no' > ${thp_path}/khugepaged/defrag fi unset re unset thp_path ;; esac [root@fbg01 ~] [root@fbg01 ~] [root@fbg01 ~] chmod 755 /etc/init.d/disable-transparent-hugepages [root@fbg01 ~] chkconfig --add disable-transparent-hugepages","title":"1. Optimizing System Parameters"},{"location":"get-started-with-scratch/#2-setup-prerequisites","text":"- bash, unzip, ssh - JDK 1.8 or higher - gcc 4.8.5 or higher - glibc 2.17 or higher - epel-release sudo yum install epel-release - boost, boost-thread, boost-devel sudo yum install boost boost-thread boost-devel - Exchange SSH Key For all servers that LightningDB will be deployed, SSH key should be exchanged. 
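When several servers are involved, the key exchange can be scripted. A sketch using ssh-copy-id; the host names are placeholders for your own servers:

```bash
# Generate a key once, then push it to every host that will run LightningDB.
[ -f ~/.ssh/id_rsa ] || ssh-keygen -t rsa -N "" -f ~/.ssh/id_rsa
for host in server1 server2 server3; do
  ssh-copy-id "$host"
done
```

The equivalent manual steps for a single host are: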
ssh-keygen -t rsa chmod 0600 ~/.ssh/authorized_keys cat .ssh/id_rsa.pub | ssh {server name} \"cat >> .ssh/authorized_keys\" - Intel MKL library (1) Intel MKL 2019 library install Go to the website: https://software.intel.com/en-us/mkl/choose-download/macos Register and login Select product named \"Intel * Math Kernel Library for Linux\" or \"Intel * Math Kernel Library for Mac\" from the select box \"Choose Product to Download\" Choose a Version \"2019 Update 2\" and download Unzip the file and execute the install.sh file with root account or (sudo command) sudo ./install.sh Choose custom install and configure the install directory /opt/intel (with sudo, /opt/intel is the default installation path, just confirm it) matthew@fbg05 /opt/intel $ pwd /opt/intel matthew@fbg05 /opt/intel $ ls -alh \ud569\uacc4 0 drwxr-xr-x 10 root root 307 3\uc6d4 22 01:34 . drwxr-xr-x. 5 root root 83 3\uc6d4 22 01:34 .. drwxr-xr-x 6 root root 72 3\uc6d4 22 01:35 .pset drwxr-xr-x 2 root root 53 3\uc6d4 22 01:34 bin lrwxrwxrwx 1 root root 28 3\uc6d4 22 01:34 compilers_and_libraries -> compilers_and_libraries_2019 drwxr-xr-x 3 root root 19 3\uc6d4 22 01:34 compilers_and_libraries_2019 drwxr-xr-x 4 root root 36 1\uc6d4 24 23:04 compilers_and_libraries_2019.2.187 drwxr-xr-x 6 root root 63 1\uc6d4 24 22:50 conda_channel drwxr-xr-x 4 root root 26 1\uc6d4 24 23:01 documentation_2019 lrwxrwxrwx 1 root root 33 3\uc6d4 22 01:34 lib -> compilers_and_libraries/linux/lib lrwxrwxrwx 1 root root 33 3\uc6d4 22 01:34 mkl -> compilers_and_libraries/linux/mkl lrwxrwxrwx 1 root root 29 3\uc6d4 22 01:34 parallel_studio_xe_2019 -> parallel_studio_xe_2019.2.057 drwxr-xr-x 5 root root 216 3\uc6d4 22 01:34 parallel_studio_xe_2019.2.057 drwxr-xr-x 3 root root 16 3\uc6d4 22 01:34 samples_2019 lrwxrwxrwx 1 root root 33 3\uc6d4 22 01:34 tbb -> compilers_and_libraries/linux/tbb (2) Intel MKL 2019 library environment settings Append the following statement into ~/.bashrc # INTEL MKL enviroment variables for ($MKLROOT, can be checked with the value export | grep MKL) source /opt/intel/mkl/bin/mklvars.sh intel64 - Apache Hadoop 2.6.0 (or higher) - Apache Spark 2.3 on Hadoop 2.6 - ntp For clock synchronization between servers over packet-switched, variable-latency data networks. - Settings for core dump(Optional) (1) INSTALLING ABRT AND STARTING ITS SERVICES (2) Set core dump file size ulimit -c unlimited (3) Change the path of core dump files echo /tmp/core.%p > /proc/sys/kernel/core_pattern","title":"2. Setup Prerequisites"},{"location":"get-started-with-scratch/#3-session-configuration-files","text":"'~/.bashrc' Add followings # .bashrc if [ -f /etc/bashrc ]; then . /etc/bashrc fi # User specific environment and startup programs PATH=$PATH:$HOME/.local/bin:$HOME/bin HADOOP_HOME=/home/nvkvs/hadoop HADOOP_CONF_DIR=$HADOOP_HOME/etc/hadoop YARN_CONF_DIR=$HADOOP_HOME/etc/hadoop SPARK_HOME=/home/nvkvs/spark PATH=$PATH:$HADOOP_HOME/bin:$HADOOP_HOME/sbin:$SPARK_HOME/bin:$SPARK_HOME/sbin:$HOME/sbin export PATH SPARK_HOME HADOOP_HOME HADOOP_CONF_DIR YARN_CONF_DIR alias cfc='source ~/.use_cluster'","title":"3. Session configuration files"},{"location":"get-started-with-scratch/#4-install-and-start-lightningdb","text":"With LTCLI provided by LightningDB, users can deploy and use LightningDB. Install LTCLI with the following command. $ pip install ltcli --upgrade --user After installation is completed, start LTCLI with Commands","title":"4. 
Install and Start LightningDB"},{"location":"how-to-scaleout/","text":"Note This document guides how to use 'flashbase' script for scale-out. If you use LTCLI, you can operate Lightning DB more easily and powerfully. Therefore, if possible, we recommend LTCLI rather than 'flashbase' script. 1. Check the distribution of slots \u00b6 You can use 'redis-trib.rb check {master's IP}:{master's Port} | grep slots | grep master' command to check slots assigned to each master. Any master can be used for '{master's IP}:{master's Port}'. $ redis-trib.rb check 192.168.111.201:18800 | grep slots | grep master slots:0-818 (819 slots) master slots:3277-4095 (819 slots) master slots:5734-6553 (820 slots) master slots:7373-8191 (819 slots) master slots:13926-14745 (820 slots) master slots:4096-4914 (819 slots) master slots:8192-9010 (819 slots) master slots:2458-3276 (819 slots) master slots:9011-9829 (819 slots) master slots:10650-11468 (819 slots) master slots:11469-12287 (819 slots) master slots:1638-2457 (820 slots) master slots:12288-13106 (819 slots) master slots:15565-16383 (819 slots) master slots:9830-10649 (820 slots) master slots:819-1637 (819 slots) master slots:6554-7372 (819 slots) master slots:4915-5733 (819 slots) master slots:13107-13925 (819 slots) master slots:14746-15564 (819 slots) master 2. Check the distribution of redis-servers \u00b6 $ flashbase check-distribution check distribution of masters/slaves... SERVER NAME | M | S -------------------------------- 192.168.111.201 | 10 | 10 192.168.111.202 | 10 | 10 -------------------------------- Total nodes | 20 | 20 3. Scale out \u00b6 Open 'redis.properties' with 'flashbase edit' command. $ flashbase edit Add a new node(\"192.168.111.203\"). As-is #!/bin/bash ## Master hosts and ports export SR2_REDIS_MASTER_HOSTS=( \"192.168.111.201\" \"192.168.111.202\" ) export SR2_REDIS_MASTER_PORTS=( $(seq 18800 18809) ) ## Slave hosts and ports (optional) export SR2_REDIS_SLAVE_HOSTS=( \"192.168.111.201\" \"192.168.111.202\" ) export SR2_REDIS_SLAVE_PORTS=( $(seq 18850 18859) ) To-be #!/bin/bash ## Master hosts and ports export SR2_REDIS_MASTER_HOSTS=( \"192.168.111.201\" \"192.168.111.202\" \"192.168.111.203\" ) export SR2_REDIS_MASTER_PORTS=( $(seq 18800 18809) ) ## Slave hosts and ports (optional) export SR2_REDIS_SLAVE_HOSTS=( \"192.168.111.201\" \"192.168.111.202\" \"192.168.111.203\" ) export SR2_REDIS_SLAVE_PORTS=( $(seq 18850 18859) ) Scale out the cluster with a 'flashbase scale-out {new node's IP}' command. If you add more than one node, you can use like 'flashbase scale-out 192.168.111.203 192.168.111.204 192.168.111.205'. $ flashbase scale-out 192.168.111.203 4. 
Check the new distribution of slots \u00b6 $ redis-trib.rb check 192.168.111.201:18800 | grep master | grep slot slots:273-818 (546 slots) master slots:11742-12287 (546 slots) master slots:0-272,10650-10921,14198-14199 (547 slots) master slots:10922,11469-11741,14746-15018 (547 slots) master slots:6827-7372 (546 slots) master slots:1912-2457 (546 slots) master slots:6008-6553 (546 slots) master slots:7646-8191 (546 slots) master slots:1911,5734-6007,13926-14197 (547 slots) master slots:5188-5733 (546 slots) master slots:13380-13925 (546 slots) master slots:1092-1637 (546 slots) master slots:1638-1910,9830-10103 (547 slots) master slots:3550-4095 (546 slots) master slots:7373-7645,8192-8464 (546 slots) master slots:14200-14745 (546 slots) master slots:2458-2730,4096-4368 (546 slots) master slots:4369-4914 (546 slots) master slots:9284-9829 (546 slots) master slots:12561-13106 (546 slots) master slots:6554-6826,15565-15837 (546 slots) master slots:9011-9283,12288-12560 (546 slots) master slots:4915-5187,13107-13379 (546 slots) master slots:15019-15564 (546 slots) master slots:10923-11468 (546 slots) master slots:819-1091,3277-3549 (546 slots) master slots:8465-9010 (546 slots) master slots:2731-3276 (546 slots) master slots:15838-16383 (546 slots) master slots:10104-10649 (546 slots) master 5. Check the new distribution of redis-servers \u00b6 $ fb check-distribution check distribution of masters/slaves... SERVER NAME | M | S -------------------------------- 192.168.111.201 | 10 | 10 192.168.111.202 | 10 | 10 192.168.111.203 | 10 | 10 -------------------------------- Total nodes | 30 | 30","title":"Scaleout with LightningDB"},{"location":"how-to-scaleout/#1-check-the-distribution-of-slots","text":"You can use 'redis-trib.rb check {master's IP}:{master's Port} | grep slots | grep master' command to check slots assigned to each master. Any master can be used for '{master's IP}:{master's Port}'. $ redis-trib.rb check 192.168.111.201:18800 | grep slots | grep master slots:0-818 (819 slots) master slots:3277-4095 (819 slots) master slots:5734-6553 (820 slots) master slots:7373-8191 (819 slots) master slots:13926-14745 (820 slots) master slots:4096-4914 (819 slots) master slots:8192-9010 (819 slots) master slots:2458-3276 (819 slots) master slots:9011-9829 (819 slots) master slots:10650-11468 (819 slots) master slots:11469-12287 (819 slots) master slots:1638-2457 (820 slots) master slots:12288-13106 (819 slots) master slots:15565-16383 (819 slots) master slots:9830-10649 (820 slots) master slots:819-1637 (819 slots) master slots:6554-7372 (819 slots) master slots:4915-5733 (819 slots) master slots:13107-13925 (819 slots) master slots:14746-15564 (819 slots) master","title":"1. Check the distribution of slots"},{"location":"how-to-scaleout/#2-check-the-distribution-of-redis-servers","text":"$ flashbase check-distribution check distribution of masters/slaves... SERVER NAME | M | S -------------------------------- 192.168.111.201 | 10 | 10 192.168.111.202 | 10 | 10 -------------------------------- Total nodes | 20 | 20","title":"2. Check the distribution of redis-servers"},{"location":"how-to-scaleout/#3-scale-out","text":"Open 'redis.properties' with 'flashbase edit' command. $ flashbase edit Add a new node(\"192.168.111.203\"). 
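If you want to double-check the edit before scaling out, the host and port arrays can be printed straight from the properties file. This is a minimal sketch; it assumes redis.properties sits under the active cluster's conf directory ($SR2_CONF, as shown in the deploy output), which may differ in your environment. The As-is/To-be listing that follows shows the intended change.

```bash
# Print the master/slave host and port arrays from the active cluster's redis.properties.
# $SR2_CONF is assumed to point at the cluster's conf directory.
grep -E 'SR2_REDIS_(MASTER|SLAVE)_(HOSTS|PORTS)' "$SR2_CONF/redis.properties"
```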
As-is #!/bin/bash ## Master hosts and ports export SR2_REDIS_MASTER_HOSTS=( \"192.168.111.201\" \"192.168.111.202\" ) export SR2_REDIS_MASTER_PORTS=( $(seq 18800 18809) ) ## Slave hosts and ports (optional) export SR2_REDIS_SLAVE_HOSTS=( \"192.168.111.201\" \"192.168.111.202\" ) export SR2_REDIS_SLAVE_PORTS=( $(seq 18850 18859) ) To-be #!/bin/bash ## Master hosts and ports export SR2_REDIS_MASTER_HOSTS=( \"192.168.111.201\" \"192.168.111.202\" \"192.168.111.203\" ) export SR2_REDIS_MASTER_PORTS=( $(seq 18800 18809) ) ## Slave hosts and ports (optional) export SR2_REDIS_SLAVE_HOSTS=( \"192.168.111.201\" \"192.168.111.202\" \"192.168.111.203\" ) export SR2_REDIS_SLAVE_PORTS=( $(seq 18850 18859) ) Scale out the cluster with a 'flashbase scale-out {new node's IP}' command. If you add more than one node, you can use like 'flashbase scale-out 192.168.111.203 192.168.111.204 192.168.111.205'. $ flashbase scale-out 192.168.111.203","title":"3. Scale out"},{"location":"how-to-scaleout/#4-check-the-new-distribution-of-slots","text":"$ redis-trib.rb check 192.168.111.201:18800 | grep master | grep slot slots:273-818 (546 slots) master slots:11742-12287 (546 slots) master slots:0-272,10650-10921,14198-14199 (547 slots) master slots:10922,11469-11741,14746-15018 (547 slots) master slots:6827-7372 (546 slots) master slots:1912-2457 (546 slots) master slots:6008-6553 (546 slots) master slots:7646-8191 (546 slots) master slots:1911,5734-6007,13926-14197 (547 slots) master slots:5188-5733 (546 slots) master slots:13380-13925 (546 slots) master slots:1092-1637 (546 slots) master slots:1638-1910,9830-10103 (547 slots) master slots:3550-4095 (546 slots) master slots:7373-7645,8192-8464 (546 slots) master slots:14200-14745 (546 slots) master slots:2458-2730,4096-4368 (546 slots) master slots:4369-4914 (546 slots) master slots:9284-9829 (546 slots) master slots:12561-13106 (546 slots) master slots:6554-6826,15565-15837 (546 slots) master slots:9011-9283,12288-12560 (546 slots) master slots:4915-5187,13107-13379 (546 slots) master slots:15019-15564 (546 slots) master slots:10923-11468 (546 slots) master slots:819-1091,3277-3549 (546 slots) master slots:8465-9010 (546 slots) master slots:2731-3276 (546 slots) master slots:15838-16383 (546 slots) master slots:10104-10649 (546 slots) master","title":"4. Check the new distribution of slots"},{"location":"how-to-scaleout/#5-check-the-new-distribution-of-redis-servers","text":"$ fb check-distribution check distribution of masters/slaves... SERVER NAME | M | S -------------------------------- 192.168.111.201 | 10 | 10 192.168.111.202 | 10 | 10 192.168.111.203 | 10 | 10 -------------------------------- Total nodes | 30 | 30","title":"5. Check the new distribution of redis-servers"},{"location":"how-to-use-flashbase/","text":"Note This document guides how to use 'flashbase' script for installation and operation. If you use LTCLI, you can deploy and operate Lightning DB more easily and powerfully. Therefore, if possible, we recommend LTCLI rather than 'flashbase' script. 1. Deploy \u00b6 You can download the recommended version of Lightning DB in Release Notes Deploy the Lightning DB binary with using deploy-flashbase.sh . Type ./deploy-flashbase.sh {binary path} {cluster list} to deploy. 
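Because the deploy script copies the installer to each node, it is worth confirming passwordless SSH (set up in the prerequisites) before running it. The snippet below is only a sketch with an example host list; adjust it to the nodes you actually deploy to. A full deploy run follows.

```bash
# Pre-check: verify passwordless SSH to every node that will receive the binary.
# The host list here is an example and should match your cluster nodes.
HOSTS=(192.168.111.201 192.168.111.202)
for h in "${HOSTS[@]}"; do
  ssh -o BatchMode=yes "$h" hostname || echo "no passwordless SSH to $h"
done
```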
> ./deploy-flashbase.sh ./lightningdb.release.release.flashbase_v1.2.3.95bfc6.bin 1 2 // deploy cluster 1 and cluster 2 with lightningdb.release.release.flashbase_v1.2.3.95bfc6.bin DATEMIN: 20200811113038 INSTALLER PATH: ./lightningdb.release.release.flashbase_v1.2.3.95bfc6.bin INSTALLER NAME: lightningdb.release.release.flashbase_v1.2.3.95bfc6.bin ====================================================== DEPLOY CLUSTER 1 CLUSTER_DIR: /Users/myaccount/tsr2/cluster_1 SR2_HOME: /Users/myaccount/tsr2/cluster_1/tsr2-assembly-1.0.0-SNAPSHOT SR2_CONF: /Users/myaccount/tsr2/cluster_1/tsr2-assembly-1.0.0-SNAPSHOT/conf BACKUP_DIR: /Users/myaccount/tsr2/cluster_1_bak_20200811113038 CONF_BACKUP_DIR: /Users/myaccount/tsr2/cluster_1_conf_bak_20200811113038 ====================================================== backup... DEPLOY NODE localhost lightningdb.release.release.flashbase_v1.2.3.95bfc6.bin 100% 126MB 256.8MB/s 00:00 \\e[01;32mInstalling tsr2 as full...\\e[00m Skip to create \\e[01:31m/Users/myaccount/tsr2/cluster_1\\e[00m \\e[01;32mUnarchieving to /Users/myaccount/tsr2/cluster_1...\\e[00m \\e[01;32mMaking required directories...\\e[00m \\e[01;32mProcessing a native library linkage...\\e[00m \\e[01;31mNo ldconfig in $PATH. Fix the problem and try again\\e[00m building file list ... done logback-kaetlyn.xml.template logback.xml redis-master.conf.template redis-slave.conf.template redis.conf.sample redis.properties sentinel.conf.template thriftserver.properties tsr2-kaetlyn.properties redis/ redis/redis-18500.conf redis/redis-18501.conf redis/redis-18502.conf redis/redis-18503.conf redis/redis-18504.conf redis/redis-18505.conf redis/redis-18506.conf redis/redis-18507.conf redis/redis-18508.conf redis/redis-18509.conf redis/redis-18600.conf redis/redis-18601.conf redis/redis-18602.conf redis/redis-18603.conf redis/redis-18604.conf redis/redis-18605.conf redis/redis-18606.conf redis/redis-18607.conf redis/redis-18608.conf redis/redis-18609.conf sample-configure/ sample-configure/etc/ sample-configure/etc/sysctl.conf.sample sample-configure/etc/profile.d/ sample-configure/etc/profile.d/jdk.sh.sample sample-configure/hadoop/ sample-configure/hadoop/core-site.xml.sample sample-configure/hadoop/hdfs-site.xml.sample sample-configure/hadoop/mapred-site.xml.sample sample-configure/hadoop/slaves.sample sample-configure/hadoop/yarn-site.xml.sample sample-configure/spark/ sample-configure/spark/log4j.properties.sample sample-configure/spark/metrics.properties.sample sample-configure/spark/scheduler-site.xml.sample sample-configure/spark/spark-defaults.conf.sample sent 995838 bytes received 2532 bytes 1996740.00 bytes/sec total size is 1161578 speedup is 1.16 ====================================================== DEPLOY CLUSTER 2 CLUSTER_DIR: /Users/myaccount/tsr2/cluster_2 SR2_HOME: /Users/myaccount/tsr2/cluster_2/tsr2-assembly-1.0.0-SNAPSHOT SR2_CONF: /Users/myaccount/tsr2/cluster_2/tsr2-assembly-1.0.0-SNAPSHOT/conf BACKUP_DIR: /Users/myaccount/tsr2/cluster_2_bak_20200811113038 CONF_BACKUP_DIR: /Users/myaccount/tsr2/cluster_2_conf_bak_20200811113038 ====================================================== backup... 
DEPLOY NODE localhost lightningdb.release.release.flashbase_v1.2.3.95bfc6.bin 100% 126MB 232.7MB/s 00:00 \\e[01;32mInstalling tsr2 as full...\\e[00m Skip to create \\e[01:31m/Users/myaccount/tsr2/cluster_2\\e[00m \\e[01;32mUnarchieving to /Users/myaccount/tsr2/cluster_2...\\e[00m \\e[01;32mMaking required directories...\\e[00m \\e[01;32mProcessing a native library linkage...\\e[00m \\e[01;31mNo ldconfig in $PATH. Fix the problem and try again\\e[00m building file list ... done logback-kaetlyn.xml.template logback.xml redis-master.conf.template redis-slave.conf.template redis.conf.sample redis.properties sentinel.conf.template thriftserver.properties tsr2-kaetlyn.properties redis/ redis/redis-18200.conf redis/redis-18201.conf redis/redis-18202.conf redis/redis-18203.conf redis/redis-18204.conf redis/redis-18205.conf redis/redis-18206.conf redis/redis-18207.conf redis/redis-18208.conf redis/redis-18209.conf redis/redis-18250.conf redis/redis-18251.conf redis/redis-18252.conf redis/redis-18253.conf redis/redis-18254.conf redis/redis-18255.conf redis/redis-18256.conf redis/redis-18257.conf redis/redis-18258.conf redis/redis-18259.conf sample-configure/ sample-configure/etc/ sample-configure/etc/sysctl.conf.sample sample-configure/etc/profile.d/ sample-configure/etc/profile.d/jdk.sh.sample sample-configure/hadoop/ sample-configure/hadoop/core-site.xml.sample sample-configure/hadoop/hdfs-site.xml.sample sample-configure/hadoop/mapred-site.xml.sample sample-configure/hadoop/slaves.sample sample-configure/hadoop/yarn-site.xml.sample sample-configure/spark/ sample-configure/spark/log4j.properties.sample sample-configure/spark/metrics.properties.sample sample-configure/spark/scheduler-site.xml.sample sample-configure/spark/spark-defaults.conf.sample sent 992400 bytes received 2532 bytes 663288.00 bytes/sec total size is 1165442 speedup is 1.17 2. Create and start a cluster \u00b6 If you've deployed Lightning DB successfully, you can create and start the clusters. Choose the cluster to use To choose the cluster, .use_cluster is used. source ~/.use_cluster.sh 1 // 'source ~/.use_cluster.sh {cluster number} If you add alias in .bashrc.sh like below, you can change the cluster easily. alias cfc=\"source ~/.use_cluster\" and type cfc {cluster number} to use the specified cluster. cfc 1 Configure the cluster for initializing Open and modify redis.properties file of the cluster by typing 'flashbase edit'. #!/bin/bash ## Master hosts and ports export SR2_REDIS_MASTER_HOSTS=( \"127.0.0.1\" ) // need to configure export SR2_REDIS_MASTER_PORTS=( $(seq 18100 18109) ) // need to configure ## Slave hosts and ports (optional) export SR2_REDIS_SLAVE_HOSTS=( \"127.0.0.1\" ) // need to configure in case of replication export SR2_REDIS_SLAVE_PORTS=( $(seq 18150 18159) ) // need to configure in case of replication ## only single data directory in redis db and flash db ## Must exist below variables; 'SR2_REDIS_DATA', 'SR2_REDIS_DB_PATH' and 'SR2_FLASH_DB_PATH' [[export]] SR2_REDIS_DATA=\"/nvdrive0/nvkvs/redis\" [[export]] SR2_REDIS_DB_PATH=\"/nvdrive0/nvkvs/redis\" [[export]] SR2_FLASH_DB_PATH=\"/nvdrive0/nvkvs/flash\" ## multiple data directory in redis db and flash db export SSD_COUNT=3 // need to configure [[export]] HDD_COUNT=3 export SR2_REDIS_DATA=\"/sata_ssd/ssd_\" // need to configure. With this settings, '/sata_ssd/ssd_01', '/sata_ssd/ssd_02' and '/sata_ssd/ssd_03' are used. 
export SR2_REDIS_DB_PATH=\"/sata_ssd/ssd_\" // need to configure export SR2_FLASH_DB_PATH=\"/sata_ssd/ssd_\" // need to configure ####################################################### # Example : only SSD data directory [[export]] SSD_COUNT=3 [[export]] SR2_REDIS_DATA=\"/ssd_\" [[export]] SR2_REDIS_DB_PATH=\"/ssd_\" [[export]] SR2_FLASH_DB_PATH=\"/ssd_\" ####################################################### Create the cluster Type flashbase restart --reset --cluster --yes . > flashbase restart --reset --cluster --yes \\e[01;32mStopping master cluster of redis...\\e[00m \\e[01;33m - Stopping 127.0.0.1[*]...\\e[00m \\e[01;32mStopping slave cluster of redis...\\e[00m \\e[01;33m - Stopping 127.0.0.1[*]...\\e[00m \\e[01;32mRemoving master node configuration in \\e[00m \\e[01;32m - 127.0.0.1\\e[00m \\e[01;32mRemoving slave node configuration in \\e[00m \\e[01;32m - 127.0.0.1\\e[00m \\e[01;32mRemoving redis generated MASTER configuration files...\\e[00m \\e[01;32m - 127.0.0.1 \\e[00m \\e[01;32mRemoving redis generated SLAVE configuration files...\\e[00m \\e[01;32m - 127.0.0.1 \\e[00m \\e[01;33m Redis flashdb path is \"/sata_ssd/ssd_#{SSD_NUMBER}/nvkvs/myaccount/db/db-#{PORT}-#{DB_NUMBER}\".\\e[00m \\e[01;33mRedis dump.rdb path is \"/sata_ssd/ssd_#{SSD_NUMBER}/nvkvs/myaccount/dump/dump-#{PORT}.*\".\\e[00m \\e[01;33mRedis aof path is \"/sata_ssd/ssd_#{SSD_NUMBER}/nvkvs/myaccount/appendonly-#{PORT}.aof\". \\e[00m \\e[01;32mRemoving flash db directory, appendonly and dump.rdb files in MASTER NODE;\\e[00m \\e[01;32m - 127.0.0.1 \\e[00m \\e[01;32mRemoving flash db directory, appendonly and dump.rdb files in SLAVE NODE;\\e[00m \\e[01;32m - 127.0.0.1 \\e[00m \\e[01;32mGenerate redis configuration files for MASTER hosts\\e[00m \\e[01;32mGenerate redis configuration files for SLAVE hosts\\e[00m \\e[01;32m- Master nodes\\e[00m \\e[01;32m -- Copying to 127.0.0.1...\\e[00m \\e[01;32m- Slave nodes\\e[00m \\e[01;32m -- Copying to 127.0.0.1...\\e[00m \\e[01;32mSuccess to configure redis.\\e[00m netstat: t4: unknown or uninstrumented protocol netstat: t4: unknown or uninstrumented protocol \\e[01;32mBackup redis master log in each MASTER hosts... \\e[00m \\e[01;33m - 127.0.0.1\\e[00m \\e[01;33m - 127.0.0.1\\e[00m \\e[01;32mStarting master nodes : 127.0.0.1 : \\e[00m\\e[01;32m[18100, 18101, 18102, 18103, 18104, 18105, 18106, 18107, 18108, 18109]...\\e[00m \\e[01;32mStarting slave nodes : 127.0.0.1 : \\e[00m\\e[01;32m[18150, 18151, 18152, 18153, 18154, 18155, 18156, 18157, 18158, 18159]...\\e[00m total_master_num: 10 total_slave_num: 10 num_replica: 1 >>> Creating cluster >>> Performing hash slots allocation on 20 nodes... 
Using 10 masters: 127.0.0.1:18100 127.0.0.1:18101 127.0.0.1:18102 127.0.0.1:18103 127.0.0.1:18104 127.0.0.1:18105 127.0.0.1:18106 127.0.0.1:18107 127.0.0.1:18108 127.0.0.1:18109 Adding replica 127.0.0.1:18150 to 127.0.0.1:18100 Adding replica 127.0.0.1:18151 to 127.0.0.1:18101 Adding replica 127.0.0.1:18152 to 127.0.0.1:18102 Adding replica 127.0.0.1:18153 to 127.0.0.1:18103 Adding replica 127.0.0.1:18154 to 127.0.0.1:18104 Adding replica 127.0.0.1:18155 to 127.0.0.1:18105 Adding replica 127.0.0.1:18156 to 127.0.0.1:18106 Adding replica 127.0.0.1:18157 to 127.0.0.1:18107 Adding replica 127.0.0.1:18158 to 127.0.0.1:18108 Adding replica 127.0.0.1:18159 to 127.0.0.1:18109 M: 7e72dff98fdda09cf97e02420727fd8b6564b6ae 127.0.0.1:18100 slots:0-1637 (1638 slots) master M: c3b5e673033758d77680e4534855686649fe5daa 127.0.0.1:18101 slots:1638-3276 (1639 slots) master M: ba39bada8a2e393f76d265ea02d3e078c9406a93 127.0.0.1:18102 slots:3277-4914 (1638 slots) master M: 16da3917eff32cde8942660324c7374117902b01 127.0.0.1:18103 slots:4915-6553 (1639 slots) master M: 5ed447baf1f1c6c454459c24809ffc197809cb6b 127.0.0.1:18104 slots:6554-8191 (1638 slots) master M: d4cdcfdfdfb966a74a1bafce8969f956b5312094 127.0.0.1:18105 slots:8192-9829 (1638 slots) master M: 6f89f0b44f0a515865173984b95fc3f6fe4e7d72 127.0.0.1:18106 slots:9830-11468 (1639 slots) master M: d531628bf7b2afdc095e445d21dedc2549cc4590 127.0.0.1:18107 slots:11469-13106 (1638 slots) master M: ae71f4430fba6a019e4111c3d26e27e225764200 127.0.0.1:18108 slots:13107-14745 (1639 slots) master M: b3734a60336856f8c4ef08efe763ae3ac32bb94a 127.0.0.1:18109 slots:14746-16383 (1638 slots) master S: 128a527bba2823e547e8138a77aebcfec7e55342 127.0.0.1:18150 replicates 7e72dff98fdda09cf97e02420727fd8b6564b6ae S: ab72ae8dafc8a3f3229157cf5965bbfa1db6c726 127.0.0.1:18151 replicates c3b5e673033758d77680e4534855686649fe5daa S: f6670f4b8570758d509b5a0341a5151abea599ea 127.0.0.1:18152 replicates ba39bada8a2e393f76d265ea02d3e078c9406a93 S: f004736cb50724f089289af34bd8da2e98b07a0b 127.0.0.1:18153 replicates 16da3917eff32cde8942660324c7374117902b01 S: 8d0061ff0bc8fcc0e8a9fa5db8d6ab0b7b7ba9d0 127.0.0.1:18154 replicates 5ed447baf1f1c6c454459c24809ffc197809cb6b S: 208496ceb24eba1e26611071e185007b1ad552c5 127.0.0.1:18155 replicates d4cdcfdfdfb966a74a1bafce8969f956b5312094 S: 3d3af1bf3dec40fe0d5dbe1314638733dadb686e 127.0.0.1:18156 replicates 6f89f0b44f0a515865173984b95fc3f6fe4e7d72 S: bbcba7c269fb8162e0f7ef5807e079ba06fc032b 127.0.0.1:18157 replicates d531628bf7b2afdc095e445d21dedc2549cc4590 S: 6b3a7f40f36cbe7aaad8ffffa58aefbf591d4967 127.0.0.1:18158 replicates ae71f4430fba6a019e4111c3d26e27e225764200 S: 11f3c47b736e37b274bbdef95a580a0c89bc9d9b 127.0.0.1:18159 replicates b3734a60336856f8c4ef08efe763ae3ac32bb94a Can I set the above configuration? (type 'yes' to accept): >>> Nodes configuration updated >>> Assign a different config epoch to each node >>> Sending CLUSTER MEET messages to join the cluster Waiting for the cluster to join.................................................................................. 
>>> Performing Cluster Check (using node 127.0.0.1:18100) M: 7e72dff98fdda09cf97e02420727fd8b6564b6ae 127.0.0.1:18100 slots:0-1637 (1638 slots) master M: c3b5e673033758d77680e4534855686649fe5daa 127.0.0.1:18101 slots:1638-3276 (1639 slots) master M: ba39bada8a2e393f76d265ea02d3e078c9406a93 127.0.0.1:18102 slots:3277-4914 (1638 slots) master M: 16da3917eff32cde8942660324c7374117902b01 127.0.0.1:18103 slots:4915-6553 (1639 slots) master M: 5ed447baf1f1c6c454459c24809ffc197809cb6b 127.0.0.1:18104 slots:6554-8191 (1638 slots) master M: d4cdcfdfdfb966a74a1bafce8969f956b5312094 127.0.0.1:18105 slots:8192-9829 (1638 slots) master M: 6f89f0b44f0a515865173984b95fc3f6fe4e7d72 127.0.0.1:18106 slots:9830-11468 (1639 slots) master M: d531628bf7b2afdc095e445d21dedc2549cc4590 127.0.0.1:18107 slots:11469-13106 (1638 slots) master M: ae71f4430fba6a019e4111c3d26e27e225764200 127.0.0.1:18108 slots:13107-14745 (1639 slots) master M: b3734a60336856f8c4ef08efe763ae3ac32bb94a 127.0.0.1:18109 slots:14746-16383 (1638 slots) master M: 128a527bba2823e547e8138a77aebcfec7e55342 127.0.0.1:18150 slots: (0 slots) master replicates 7e72dff98fdda09cf97e02420727fd8b6564b6ae M: ab72ae8dafc8a3f3229157cf5965bbfa1db6c726 127.0.0.1:18151 slots: (0 slots) master replicates c3b5e673033758d77680e4534855686649fe5daa M: f6670f4b8570758d509b5a0341a5151abea599ea 127.0.0.1:18152 slots: (0 slots) master replicates ba39bada8a2e393f76d265ea02d3e078c9406a93 M: f004736cb50724f089289af34bd8da2e98b07a0b 127.0.0.1:18153 slots: (0 slots) master replicates 16da3917eff32cde8942660324c7374117902b01 M: 8d0061ff0bc8fcc0e8a9fa5db8d6ab0b7b7ba9d0 127.0.0.1:18154 slots: (0 slots) master replicates 5ed447baf1f1c6c454459c24809ffc197809cb6b M: 208496ceb24eba1e26611071e185007b1ad552c5 127.0.0.1:18155 slots: (0 slots) master replicates d4cdcfdfdfb966a74a1bafce8969f956b5312094 M: 3d3af1bf3dec40fe0d5dbe1314638733dadb686e 127.0.0.1:18156 slots: (0 slots) master replicates 6f89f0b44f0a515865173984b95fc3f6fe4e7d72 M: bbcba7c269fb8162e0f7ef5807e079ba06fc032b 127.0.0.1:18157 slots: (0 slots) master replicates d531628bf7b2afdc095e445d21dedc2549cc4590 M: 6b3a7f40f36cbe7aaad8ffffa58aefbf591d4967 127.0.0.1:18158 slots: (0 slots) master replicates ae71f4430fba6a019e4111c3d26e27e225764200 M: 11f3c47b736e37b274bbdef95a580a0c89bc9d9b 127.0.0.1:18159 slots: (0 slots) master replicates b3734a60336856f8c4ef08efe763ae3ac32bb94a [OK] All nodes agree about slots configuration. >>> Check for open slots... >>> Check slots coverage... [OK] All 16384 slots covered. 3. Operations \u00b6 PING You can simply check the status of the node with PING command. > flashbase cli -h localhost -p 18101 localhost:18101> ping PONG localhost:18101> With using flashbase cli-all , you can check the status of all nodes. 
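The same sweep can also be scripted against the nodes directly with redis-cli. This is just a sketch: it assumes the example ports used above (18100-18109 for masters, 18150-18159 for slaves) and that redis-cli is available on the PATH. The flashbase cli-all output below is the equivalent one-command check.

```bash
# Ping every master and slave port on this host and print the reply.
# Ports follow the example cluster above; adjust them to your redis.properties.
for port in $(seq 18100 18109) $(seq 18150 18159); do
  printf '%s -> %s\n' "$port" "$(redis-cli -h 127.0.0.1 -p "$port" ping)"
done
```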
> flashbase cli-all ping redis client for 127.0.0.1:18100 PONG redis client for 127.0.0.1:18101 PONG redis client for 127.0.0.1:18102 PONG redis client for 127.0.0.1:18103 PONG redis client for 127.0.0.1:18104 PONG redis client for 127.0.0.1:18105 PONG redis client for 127.0.0.1:18106 PONG redis client for 127.0.0.1:18107 PONG redis client for 127.0.0.1:18108 PONG redis client for 127.0.0.1:18109 PONG redis client for 127.0.0.1:18150 PONG redis client for 127.0.0.1:18151 PONG redis client for 127.0.0.1:18152 PONG redis client for 127.0.0.1:18153 PONG redis client for 127.0.0.1:18154 PONG redis client for 127.0.0.1:18155 PONG redis client for 127.0.0.1:18156 PONG redis client for 127.0.0.1:18157 PONG redis client for 127.0.0.1:18158 PONG redis client for 127.0.0.1:18159 PONG INFO With INFO command, you can get all information of each node. > flashbase cli -h localhost -p 18101 localhost:18101> info all # Server redis_version:3.0.7 redis_git_sha1:29d44e4d redis_git_dirty:0 redis_build_id:e5a4dd48086abff2 redis_mode:cluster os:Darwin 18.7.0 x86_64 arch_bits:64 multiplexing_api:kqueue gcc_version:4.2.1 process_id:42593 run_id:ea34cce757c61d65e344b6c1094b940c3ab46110 tcp_port:18101 uptime_in_seconds:516 uptime_in_days:0 hz:10 lru_clock:3282808 config_file:/Users/myaccount/tsr2/cluster_1/tsr2-assembly-1.0.0-SNAPSHOT/conf/redis/redis-18101.conf # Clients connected_clients:1 client_longest_output_list:0 client_biggest_input_buf:0 blocked_clients:0 # Memory isOOM:false used_memory:20752816 used_memory_human:19.79M used_memory_rss:23941120 used_memory_peak:20752816 used_memory_peak_human:19.79M used_memory_lua:36864 used_memory_rocksdb_total:100663872 used_memory_rocksdb_block_cache:100663296 used_memory_rocksdb_mem_table:576 used_memory_rocksdb_table_readers:0 used_memory_rocksdb_pinned_block:0 meta_data_memory:64 percent_of_meta_data_memory:0 used_memory_client_buffer_peak:0 mem_fragmentation_ratio:1.15 mem_allocator:libc # Persistence loading:0 rdb_changes_since_last_save:0 rdb_bgsave_in_progress:0 rdb_last_save_time:1597117812 rdb_last_bgsave_status:ok rdb_last_bgsave_time_sec:-1 rdb_current_bgsave_time_sec:-1 aof_enabled:1 aof_rewrite_in_progress:0 aof_rewrite_scheduled:0 aof_last_rewrite_time_sec:-1 aof_current_rewrite_time_sec:-1 aof_last_bgrewrite_status:ok aof_last_write_status:ok aof_current_size:0 aof_base_size:0 aof_pending_rewrite:0 aof_buffer_length:0 aof_rewrite_buffer_length:0 aof_pending_bio_fsync:0 aof_delayed_fsync:0 # Stats total_connections_received:5 total_commands_processed:513 instantaneous_ops_per_sec:0 total_net_input_bytes:33954 total_net_output_bytes:173640 instantaneous_input_kbps:0.02 instantaneous_output_kbps:0.00 rejected_connections:0 sync_full:1 sync_partial_ok:0 sync_partial_err:0 expired_keys:0 evicted_keys:0 keyspace_hits:0 keyspace_misses:0 pubsub_channels:0 pubsub_patterns:0 latest_fork_usec:1159 migrate_cached_sockets:0 # Replication role:master connected_slaves:1 slave0:ip=127.0.0.1,port=18151,state=online,offset=589,lag=1 master_repl_offset:589 repl_backlog_active:1 repl_backlog_size:1048576 repl_backlog_first_byte_offset:2 repl_backlog_histlen:588 # CPU used_cpu_sys:0.42 used_cpu_user:0.56 used_cpu_sys_children:0.00 used_cpu_user_children:0.00 # Commandstats cmdstat_ping:calls=4,usec=19,usec_per_call=4.75,usec_std=1.00,usec_max=10 cmdstat_psync:calls=1,usec=17,usec_per_call=17.00,usec_std=0.00,usec_max=17 cmdstat_replconf:calls=416,usec=644,usec_per_call=1.55,usec_std=1.00,usec_max=11 
cmdstat_info:calls=2,usec=312,usec_per_call=156.00,usec_std=5.00,usec_max=183 cmdstat_cluster:calls=90,usec=122372,usec_per_call=1359.69,usec_std=19.00,usec_max=1802 # Cluster cluster_enabled:1 # Keyspace # Tablespace # Eviction evictStat:sleeps=0,fullRowgroup=0,80Rowgroup=0,60Rowgroup=0,40Rowgroup=0,20Rowgroup=0,00Rowgroup=0 recentEvictStat:recent 200 rowgroups' avg full percent:0 # Storage(Disk Usage) DB0_TTL(sec):2592000 DB0_size(KByte):200 DB0_numFiles:0 # CompressionRatios CVA_compress_algorithm:zstd CVA_comp_avg_ratio cannot be calculated because of not enough # of samples localhost:18101> You can also check the specified information of each node. localhost:18101> info memory # Memory isOOM:false used_memory:20751904 used_memory_human:19.79M used_memory_rss:23949312 used_memory_peak:20752816 used_memory_peak_human:19.79M used_memory_lua:36864 used_memory_rocksdb_total:100663872 used_memory_rocksdb_block_cache:100663296 used_memory_rocksdb_mem_table:576 used_memory_rocksdb_table_readers:0 used_memory_rocksdb_pinned_block:0 meta_data_memory:64 percent_of_meta_data_memory:0 used_memory_client_buffer_peak:0 mem_fragmentation_ratio:1.15 mem_allocator:libc localhost:18101> localhost:18101> info storage # Storage(Disk Usage) DB0_TTL(sec):2592000 DB0_size(KByte):200 DB0_numFiles:0 localhost:18101> CLUSTER You can check the status of the cluster with CLUSTER command. localhost:18101> cluster info cluster_state:ok cluster_slots_assigned:16384 cluster_slots_ok:16384 cluster_slots_pfail:0 cluster_slots_fail:0 cluster_known_nodes:20 cluster_size:10 cluster_current_epoch:20 cluster_my_epoch:2 cluster_stats_messages_ping_sent:665 cluster_stats_messages_pong_sent:679 cluster_stats_messages_meet_sent:15 cluster_stats_messages_sent:1359 cluster_stats_messages_ping_received:675 cluster_stats_messages_pong_received:680 cluster_stats_messages_meet_received:4 cluster_stats_messages_received:1359 localhost:18101> localhost:18101> cluster nodes d531628bf7b2afdc095e445d21dedc2549cc4590 127.0.0.1:18107 master - 0 1597118527011 8 connected 11469-13106 16da3917eff32cde8942660324c7374117902b01 127.0.0.1:18103 master - 0 1597118524000 4 connected 4915-6553 7e72dff98fdda09cf97e02420727fd8b6564b6ae 127.0.0.1:18100 master - 0 1597118521882 1 connected 0-1637 6b3a7f40f36cbe7aaad8ffffa58aefbf591d4967 127.0.0.1:18158 slave ae71f4430fba6a019e4111c3d26e27e225764200 0 1597118520862 19 connected d4cdcfdfdfb966a74a1bafce8969f956b5312094 127.0.0.1:18105 master - 0 1597118526000 6 connected 8192-9829 11f3c47b736e37b274bbdef95a580a0c89bc9d9b 127.0.0.1:18159 slave b3734a60336856f8c4ef08efe763ae3ac32bb94a 0 1597118520000 20 connected 5ed447baf1f1c6c454459c24809ffc197809cb6b 127.0.0.1:18104 master - 0 1597118523932 5 connected 6554-8191 8d0061ff0bc8fcc0e8a9fa5db8d6ab0b7b7ba9d0 127.0.0.1:18154 slave 5ed447baf1f1c6c454459c24809ffc197809cb6b 0 1597118521000 15 connected b3734a60336856f8c4ef08efe763ae3ac32bb94a 127.0.0.1:18109 master - 0 1597118528026 10 connected 14746-16383 f6670f4b8570758d509b5a0341a5151abea599ea 127.0.0.1:18152 slave ba39bada8a2e393f76d265ea02d3e078c9406a93 0 1597118524959 13 connected 128a527bba2823e547e8138a77aebcfec7e55342 127.0.0.1:18150 slave 7e72dff98fdda09cf97e02420727fd8b6564b6ae 0 1597118524000 11 connected c3b5e673033758d77680e4534855686649fe5daa 127.0.0.1:18101 myself,master - 0 1597118523000 2 connected 1638-3276 6f89f0b44f0a515865173984b95fc3f6fe4e7d72 127.0.0.1:18106 master - 0 1597118522000 7 connected 9830-11468 ba39bada8a2e393f76d265ea02d3e078c9406a93 127.0.0.1:18102 master - 0 1597118520000 3 
connected 3277-4914 f004736cb50724f089289af34bd8da2e98b07a0b 127.0.0.1:18153 slave 16da3917eff32cde8942660324c7374117902b01 0 1597118524000 14 connected ae71f4430fba6a019e4111c3d26e27e225764200 127.0.0.1:18108 master - 0 1597118525985 9 connected 13107-14745 ab72ae8dafc8a3f3229157cf5965bbfa1db6c726 127.0.0.1:18151 slave c3b5e673033758d77680e4534855686649fe5daa 0 1597118523000 12 connected 208496ceb24eba1e26611071e185007b1ad552c5 127.0.0.1:18155 slave d4cdcfdfdfb966a74a1bafce8969f956b5312094 0 1597118520000 16 connected bbcba7c269fb8162e0f7ef5807e079ba06fc032b 127.0.0.1:18157 slave d531628bf7b2afdc095e445d21dedc2549cc4590 0 1597118513713 18 connected 3d3af1bf3dec40fe0d5dbe1314638733dadb686e 127.0.0.1:18156 slave 6f89f0b44f0a515865173984b95fc3f6fe4e7d72 0 1597118523000 17 connected localhost:18101> localhost:18101> cluster slots 1) 1) (integer) 11469 2) (integer) 13106 3) 1) \"127.0.0.1\" 2) (integer) 18107 4) 1) \"127.0.0.1\" 2) (integer) 18157 2) 1) (integer) 4915 2) (integer) 6553 3) 1) \"127.0.0.1\" 2) (integer) 18103 4) 1) \"127.0.0.1\" 2) (integer) 18153 3) 1) (integer) 0 2) (integer) 1637 3) 1) \"127.0.0.1\" 2) (integer) 18100 4) 1) \"127.0.0.1\" 2) (integer) 18150 4) 1) (integer) 8192 2) (integer) 9829 3) 1) \"127.0.0.1\" 2) (integer) 18105 4) 1) \"127.0.0.1\" 2) (integer) 18155 5) 1) (integer) 6554 2) (integer) 8191 3) 1) \"127.0.0.1\" 2) (integer) 18104 4) 1) \"127.0.0.1\" 2) (integer) 18154 6) 1) (integer) 14746 2) (integer) 16383 3) 1) \"127.0.0.1\" 2) (integer) 18109 4) 1) \"127.0.0.1\" 2) (integer) 18159 7) 1) (integer) 1638 2) (integer) 3276 3) 1) \"127.0.0.1\" 2) (integer) 18101 4) 1) \"127.0.0.1\" 2) (integer) 18151 8) 1) (integer) 9830 2) (integer) 11468 3) 1) \"127.0.0.1\" 2) (integer) 18106 4) 1) \"127.0.0.1\" 2) (integer) 18156 9) 1) (integer) 3277 2) (integer) 4914 3) 1) \"127.0.0.1\" 2) (integer) 18102 4) 1) \"127.0.0.1\" 2) (integer) 18152 10) 1) (integer) 13107 2) (integer) 14745 3) 1) \"127.0.0.1\" 2) (integer) 18108 4) 1) \"127.0.0.1\" 2) (integer) 18158 localhost:18101> CONFIG With CONFIG command, you can set or get the configuration of each feature. 1) Get localhost:18101> config get maxmemory 1) \"maxmemory\" 2) \"300mb\" localhost:18101> config set maxmemory 310mb OK 2) Set localhost:18101> config set maxmemory 310mb OK localhost:18101> config get maxmemory 1) \"maxmemory\" 2) \"310mb\" 3) Rewrite With config set command, you can change the configuration only in memory. To save the modification on disk, use config rewrite after setting. localhost:18101> config rewrite OK localhost:18101> 4) DIR With DIR command, you can check the path of directory that each node uses to save .rdb, .aof, db and *.conf files. localhost:18101> config get dir 1) \"dir\" 2) \"/sata_ssd/ssd_03/nvkvs/myaccount\"","title":"Install with LightningDB"},{"location":"how-to-use-flashbase/#1-deploy","text":"You can download the recommended version of Lightning DB in Release Notes Deploy the Lightning DB binary with using deploy-flashbase.sh . Type ./deploy-flashbase.sh {binary path} {cluster list} to deploy. 
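Note that the script backs up the previous installation and its conf directory before overwriting them (see BACKUP_DIR and CONF_BACKUP_DIR in the output below). A small sketch for listing those backups afterwards, assuming the home-relative layout used in the example:

```bash
# List the backups deploy-flashbase.sh created for cluster 1.
# The path pattern follows BACKUP_DIR/CONF_BACKUP_DIR in the example output
# and is an assumption about your directory layout.
ls -d ~/tsr2/cluster_1_bak_* ~/tsr2/cluster_1_conf_bak_* 2>/dev/null
```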
> ./deploy-flashbase.sh ./lightningdb.release.release.flashbase_v1.2.3.95bfc6.bin 1 2 // deploy cluster 1 and cluster 2 with lightningdb.release.release.flashbase_v1.2.3.95bfc6.bin DATEMIN: 20200811113038 INSTALLER PATH: ./lightningdb.release.release.flashbase_v1.2.3.95bfc6.bin INSTALLER NAME: lightningdb.release.release.flashbase_v1.2.3.95bfc6.bin ====================================================== DEPLOY CLUSTER 1 CLUSTER_DIR: /Users/myaccount/tsr2/cluster_1 SR2_HOME: /Users/myaccount/tsr2/cluster_1/tsr2-assembly-1.0.0-SNAPSHOT SR2_CONF: /Users/myaccount/tsr2/cluster_1/tsr2-assembly-1.0.0-SNAPSHOT/conf BACKUP_DIR: /Users/myaccount/tsr2/cluster_1_bak_20200811113038 CONF_BACKUP_DIR: /Users/myaccount/tsr2/cluster_1_conf_bak_20200811113038 ====================================================== backup... DEPLOY NODE localhost lightningdb.release.release.flashbase_v1.2.3.95bfc6.bin 100% 126MB 256.8MB/s 00:00 \\e[01;32mInstalling tsr2 as full...\\e[00m Skip to create \\e[01:31m/Users/myaccount/tsr2/cluster_1\\e[00m \\e[01;32mUnarchieving to /Users/myaccount/tsr2/cluster_1...\\e[00m \\e[01;32mMaking required directories...\\e[00m \\e[01;32mProcessing a native library linkage...\\e[00m \\e[01;31mNo ldconfig in $PATH. Fix the problem and try again\\e[00m building file list ... done logback-kaetlyn.xml.template logback.xml redis-master.conf.template redis-slave.conf.template redis.conf.sample redis.properties sentinel.conf.template thriftserver.properties tsr2-kaetlyn.properties redis/ redis/redis-18500.conf redis/redis-18501.conf redis/redis-18502.conf redis/redis-18503.conf redis/redis-18504.conf redis/redis-18505.conf redis/redis-18506.conf redis/redis-18507.conf redis/redis-18508.conf redis/redis-18509.conf redis/redis-18600.conf redis/redis-18601.conf redis/redis-18602.conf redis/redis-18603.conf redis/redis-18604.conf redis/redis-18605.conf redis/redis-18606.conf redis/redis-18607.conf redis/redis-18608.conf redis/redis-18609.conf sample-configure/ sample-configure/etc/ sample-configure/etc/sysctl.conf.sample sample-configure/etc/profile.d/ sample-configure/etc/profile.d/jdk.sh.sample sample-configure/hadoop/ sample-configure/hadoop/core-site.xml.sample sample-configure/hadoop/hdfs-site.xml.sample sample-configure/hadoop/mapred-site.xml.sample sample-configure/hadoop/slaves.sample sample-configure/hadoop/yarn-site.xml.sample sample-configure/spark/ sample-configure/spark/log4j.properties.sample sample-configure/spark/metrics.properties.sample sample-configure/spark/scheduler-site.xml.sample sample-configure/spark/spark-defaults.conf.sample sent 995838 bytes received 2532 bytes 1996740.00 bytes/sec total size is 1161578 speedup is 1.16 ====================================================== DEPLOY CLUSTER 2 CLUSTER_DIR: /Users/myaccount/tsr2/cluster_2 SR2_HOME: /Users/myaccount/tsr2/cluster_2/tsr2-assembly-1.0.0-SNAPSHOT SR2_CONF: /Users/myaccount/tsr2/cluster_2/tsr2-assembly-1.0.0-SNAPSHOT/conf BACKUP_DIR: /Users/myaccount/tsr2/cluster_2_bak_20200811113038 CONF_BACKUP_DIR: /Users/myaccount/tsr2/cluster_2_conf_bak_20200811113038 ====================================================== backup... 
DEPLOY NODE localhost lightningdb.release.release.flashbase_v1.2.3.95bfc6.bin 100% 126MB 232.7MB/s 00:00 \\e[01;32mInstalling tsr2 as full...\\e[00m Skip to create \\e[01:31m/Users/myaccount/tsr2/cluster_2\\e[00m \\e[01;32mUnarchieving to /Users/myaccount/tsr2/cluster_2...\\e[00m \\e[01;32mMaking required directories...\\e[00m \\e[01;32mProcessing a native library linkage...\\e[00m \\e[01;31mNo ldconfig in $PATH. Fix the problem and try again\\e[00m building file list ... done logback-kaetlyn.xml.template logback.xml redis-master.conf.template redis-slave.conf.template redis.conf.sample redis.properties sentinel.conf.template thriftserver.properties tsr2-kaetlyn.properties redis/ redis/redis-18200.conf redis/redis-18201.conf redis/redis-18202.conf redis/redis-18203.conf redis/redis-18204.conf redis/redis-18205.conf redis/redis-18206.conf redis/redis-18207.conf redis/redis-18208.conf redis/redis-18209.conf redis/redis-18250.conf redis/redis-18251.conf redis/redis-18252.conf redis/redis-18253.conf redis/redis-18254.conf redis/redis-18255.conf redis/redis-18256.conf redis/redis-18257.conf redis/redis-18258.conf redis/redis-18259.conf sample-configure/ sample-configure/etc/ sample-configure/etc/sysctl.conf.sample sample-configure/etc/profile.d/ sample-configure/etc/profile.d/jdk.sh.sample sample-configure/hadoop/ sample-configure/hadoop/core-site.xml.sample sample-configure/hadoop/hdfs-site.xml.sample sample-configure/hadoop/mapred-site.xml.sample sample-configure/hadoop/slaves.sample sample-configure/hadoop/yarn-site.xml.sample sample-configure/spark/ sample-configure/spark/log4j.properties.sample sample-configure/spark/metrics.properties.sample sample-configure/spark/scheduler-site.xml.sample sample-configure/spark/spark-defaults.conf.sample sent 992400 bytes received 2532 bytes 663288.00 bytes/sec total size is 1165442 speedup is 1.17","title":"1. Deploy"},{"location":"how-to-use-flashbase/#2-create-and-start-a-cluster","text":"If you've deployed Lightning DB successfully, you can create and start the clusters. Choose the cluster to use To choose the cluster, .use_cluster is used. source ~/.use_cluster.sh 1 // 'source ~/.use_cluster.sh {cluster number} If you add alias in .bashrc.sh like below, you can change the cluster easily. alias cfc=\"source ~/.use_cluster\" and type cfc {cluster number} to use the specified cluster. cfc 1 Configure the cluster for initializing Open and modify redis.properties file of the cluster by typing 'flashbase edit'. #!/bin/bash ## Master hosts and ports export SR2_REDIS_MASTER_HOSTS=( \"127.0.0.1\" ) // need to configure export SR2_REDIS_MASTER_PORTS=( $(seq 18100 18109) ) // need to configure ## Slave hosts and ports (optional) export SR2_REDIS_SLAVE_HOSTS=( \"127.0.0.1\" ) // need to configure in case of replication export SR2_REDIS_SLAVE_PORTS=( $(seq 18150 18159) ) // need to configure in case of replication ## only single data directory in redis db and flash db ## Must exist below variables; 'SR2_REDIS_DATA', 'SR2_REDIS_DB_PATH' and 'SR2_FLASH_DB_PATH' [[export]] SR2_REDIS_DATA=\"/nvdrive0/nvkvs/redis\" [[export]] SR2_REDIS_DB_PATH=\"/nvdrive0/nvkvs/redis\" [[export]] SR2_FLASH_DB_PATH=\"/nvdrive0/nvkvs/flash\" ## multiple data directory in redis db and flash db export SSD_COUNT=3 // need to configure [[export]] HDD_COUNT=3 export SR2_REDIS_DATA=\"/sata_ssd/ssd_\" // need to configure. With this settings, '/sata_ssd/ssd_01', '/sata_ssd/ssd_02' and '/sata_ssd/ssd_03' are used. 
export SR2_REDIS_DB_PATH=\"/sata_ssd/ssd_\" // need to configure export SR2_FLASH_DB_PATH=\"/sata_ssd/ssd_\" // need to configure ####################################################### # Example : only SSD data directory [[export]] SSD_COUNT=3 [[export]] SR2_REDIS_DATA=\"/ssd_\" [[export]] SR2_REDIS_DB_PATH=\"/ssd_\" [[export]] SR2_FLASH_DB_PATH=\"/ssd_\" ####################################################### Create the cluster Type flashbase restart --reset --cluster --yes . > flashbase restart --reset --cluster --yes \\e[01;32mStopping master cluster of redis...\\e[00m \\e[01;33m - Stopping 127.0.0.1[*]...\\e[00m \\e[01;32mStopping slave cluster of redis...\\e[00m \\e[01;33m - Stopping 127.0.0.1[*]...\\e[00m \\e[01;32mRemoving master node configuration in \\e[00m \\e[01;32m - 127.0.0.1\\e[00m \\e[01;32mRemoving slave node configuration in \\e[00m \\e[01;32m - 127.0.0.1\\e[00m \\e[01;32mRemoving redis generated MASTER configuration files...\\e[00m \\e[01;32m - 127.0.0.1 \\e[00m \\e[01;32mRemoving redis generated SLAVE configuration files...\\e[00m \\e[01;32m - 127.0.0.1 \\e[00m \\e[01;33m Redis flashdb path is \"/sata_ssd/ssd_#{SSD_NUMBER}/nvkvs/myaccount/db/db-#{PORT}-#{DB_NUMBER}\".\\e[00m \\e[01;33mRedis dump.rdb path is \"/sata_ssd/ssd_#{SSD_NUMBER}/nvkvs/myaccount/dump/dump-#{PORT}.*\".\\e[00m \\e[01;33mRedis aof path is \"/sata_ssd/ssd_#{SSD_NUMBER}/nvkvs/myaccount/appendonly-#{PORT}.aof\". \\e[00m \\e[01;32mRemoving flash db directory, appendonly and dump.rdb files in MASTER NODE;\\e[00m \\e[01;32m - 127.0.0.1 \\e[00m \\e[01;32mRemoving flash db directory, appendonly and dump.rdb files in SLAVE NODE;\\e[00m \\e[01;32m - 127.0.0.1 \\e[00m \\e[01;32mGenerate redis configuration files for MASTER hosts\\e[00m \\e[01;32mGenerate redis configuration files for SLAVE hosts\\e[00m \\e[01;32m- Master nodes\\e[00m \\e[01;32m -- Copying to 127.0.0.1...\\e[00m \\e[01;32m- Slave nodes\\e[00m \\e[01;32m -- Copying to 127.0.0.1...\\e[00m \\e[01;32mSuccess to configure redis.\\e[00m netstat: t4: unknown or uninstrumented protocol netstat: t4: unknown or uninstrumented protocol \\e[01;32mBackup redis master log in each MASTER hosts... \\e[00m \\e[01;33m - 127.0.0.1\\e[00m \\e[01;33m - 127.0.0.1\\e[00m \\e[01;32mStarting master nodes : 127.0.0.1 : \\e[00m\\e[01;32m[18100, 18101, 18102, 18103, 18104, 18105, 18106, 18107, 18108, 18109]...\\e[00m \\e[01;32mStarting slave nodes : 127.0.0.1 : \\e[00m\\e[01;32m[18150, 18151, 18152, 18153, 18154, 18155, 18156, 18157, 18158, 18159]...\\e[00m total_master_num: 10 total_slave_num: 10 num_replica: 1 >>> Creating cluster >>> Performing hash slots allocation on 20 nodes... 
Using 10 masters: 127.0.0.1:18100 127.0.0.1:18101 127.0.0.1:18102 127.0.0.1:18103 127.0.0.1:18104 127.0.0.1:18105 127.0.0.1:18106 127.0.0.1:18107 127.0.0.1:18108 127.0.0.1:18109 Adding replica 127.0.0.1:18150 to 127.0.0.1:18100 Adding replica 127.0.0.1:18151 to 127.0.0.1:18101 Adding replica 127.0.0.1:18152 to 127.0.0.1:18102 Adding replica 127.0.0.1:18153 to 127.0.0.1:18103 Adding replica 127.0.0.1:18154 to 127.0.0.1:18104 Adding replica 127.0.0.1:18155 to 127.0.0.1:18105 Adding replica 127.0.0.1:18156 to 127.0.0.1:18106 Adding replica 127.0.0.1:18157 to 127.0.0.1:18107 Adding replica 127.0.0.1:18158 to 127.0.0.1:18108 Adding replica 127.0.0.1:18159 to 127.0.0.1:18109 M: 7e72dff98fdda09cf97e02420727fd8b6564b6ae 127.0.0.1:18100 slots:0-1637 (1638 slots) master M: c3b5e673033758d77680e4534855686649fe5daa 127.0.0.1:18101 slots:1638-3276 (1639 slots) master M: ba39bada8a2e393f76d265ea02d3e078c9406a93 127.0.0.1:18102 slots:3277-4914 (1638 slots) master M: 16da3917eff32cde8942660324c7374117902b01 127.0.0.1:18103 slots:4915-6553 (1639 slots) master M: 5ed447baf1f1c6c454459c24809ffc197809cb6b 127.0.0.1:18104 slots:6554-8191 (1638 slots) master M: d4cdcfdfdfb966a74a1bafce8969f956b5312094 127.0.0.1:18105 slots:8192-9829 (1638 slots) master M: 6f89f0b44f0a515865173984b95fc3f6fe4e7d72 127.0.0.1:18106 slots:9830-11468 (1639 slots) master M: d531628bf7b2afdc095e445d21dedc2549cc4590 127.0.0.1:18107 slots:11469-13106 (1638 slots) master M: ae71f4430fba6a019e4111c3d26e27e225764200 127.0.0.1:18108 slots:13107-14745 (1639 slots) master M: b3734a60336856f8c4ef08efe763ae3ac32bb94a 127.0.0.1:18109 slots:14746-16383 (1638 slots) master S: 128a527bba2823e547e8138a77aebcfec7e55342 127.0.0.1:18150 replicates 7e72dff98fdda09cf97e02420727fd8b6564b6ae S: ab72ae8dafc8a3f3229157cf5965bbfa1db6c726 127.0.0.1:18151 replicates c3b5e673033758d77680e4534855686649fe5daa S: f6670f4b8570758d509b5a0341a5151abea599ea 127.0.0.1:18152 replicates ba39bada8a2e393f76d265ea02d3e078c9406a93 S: f004736cb50724f089289af34bd8da2e98b07a0b 127.0.0.1:18153 replicates 16da3917eff32cde8942660324c7374117902b01 S: 8d0061ff0bc8fcc0e8a9fa5db8d6ab0b7b7ba9d0 127.0.0.1:18154 replicates 5ed447baf1f1c6c454459c24809ffc197809cb6b S: 208496ceb24eba1e26611071e185007b1ad552c5 127.0.0.1:18155 replicates d4cdcfdfdfb966a74a1bafce8969f956b5312094 S: 3d3af1bf3dec40fe0d5dbe1314638733dadb686e 127.0.0.1:18156 replicates 6f89f0b44f0a515865173984b95fc3f6fe4e7d72 S: bbcba7c269fb8162e0f7ef5807e079ba06fc032b 127.0.0.1:18157 replicates d531628bf7b2afdc095e445d21dedc2549cc4590 S: 6b3a7f40f36cbe7aaad8ffffa58aefbf591d4967 127.0.0.1:18158 replicates ae71f4430fba6a019e4111c3d26e27e225764200 S: 11f3c47b736e37b274bbdef95a580a0c89bc9d9b 127.0.0.1:18159 replicates b3734a60336856f8c4ef08efe763ae3ac32bb94a Can I set the above configuration? (type 'yes' to accept): >>> Nodes configuration updated >>> Assign a different config epoch to each node >>> Sending CLUSTER MEET messages to join the cluster Waiting for the cluster to join.................................................................................. 
>>> Performing Cluster Check (using node 127.0.0.1:18100) M: 7e72dff98fdda09cf97e02420727fd8b6564b6ae 127.0.0.1:18100 slots:0-1637 (1638 slots) master M: c3b5e673033758d77680e4534855686649fe5daa 127.0.0.1:18101 slots:1638-3276 (1639 slots) master M: ba39bada8a2e393f76d265ea02d3e078c9406a93 127.0.0.1:18102 slots:3277-4914 (1638 slots) master M: 16da3917eff32cde8942660324c7374117902b01 127.0.0.1:18103 slots:4915-6553 (1639 slots) master M: 5ed447baf1f1c6c454459c24809ffc197809cb6b 127.0.0.1:18104 slots:6554-8191 (1638 slots) master M: d4cdcfdfdfb966a74a1bafce8969f956b5312094 127.0.0.1:18105 slots:8192-9829 (1638 slots) master M: 6f89f0b44f0a515865173984b95fc3f6fe4e7d72 127.0.0.1:18106 slots:9830-11468 (1639 slots) master M: d531628bf7b2afdc095e445d21dedc2549cc4590 127.0.0.1:18107 slots:11469-13106 (1638 slots) master M: ae71f4430fba6a019e4111c3d26e27e225764200 127.0.0.1:18108 slots:13107-14745 (1639 slots) master M: b3734a60336856f8c4ef08efe763ae3ac32bb94a 127.0.0.1:18109 slots:14746-16383 (1638 slots) master M: 128a527bba2823e547e8138a77aebcfec7e55342 127.0.0.1:18150 slots: (0 slots) master replicates 7e72dff98fdda09cf97e02420727fd8b6564b6ae M: ab72ae8dafc8a3f3229157cf5965bbfa1db6c726 127.0.0.1:18151 slots: (0 slots) master replicates c3b5e673033758d77680e4534855686649fe5daa M: f6670f4b8570758d509b5a0341a5151abea599ea 127.0.0.1:18152 slots: (0 slots) master replicates ba39bada8a2e393f76d265ea02d3e078c9406a93 M: f004736cb50724f089289af34bd8da2e98b07a0b 127.0.0.1:18153 slots: (0 slots) master replicates 16da3917eff32cde8942660324c7374117902b01 M: 8d0061ff0bc8fcc0e8a9fa5db8d6ab0b7b7ba9d0 127.0.0.1:18154 slots: (0 slots) master replicates 5ed447baf1f1c6c454459c24809ffc197809cb6b M: 208496ceb24eba1e26611071e185007b1ad552c5 127.0.0.1:18155 slots: (0 slots) master replicates d4cdcfdfdfb966a74a1bafce8969f956b5312094 M: 3d3af1bf3dec40fe0d5dbe1314638733dadb686e 127.0.0.1:18156 slots: (0 slots) master replicates 6f89f0b44f0a515865173984b95fc3f6fe4e7d72 M: bbcba7c269fb8162e0f7ef5807e079ba06fc032b 127.0.0.1:18157 slots: (0 slots) master replicates d531628bf7b2afdc095e445d21dedc2549cc4590 M: 6b3a7f40f36cbe7aaad8ffffa58aefbf591d4967 127.0.0.1:18158 slots: (0 slots) master replicates ae71f4430fba6a019e4111c3d26e27e225764200 M: 11f3c47b736e37b274bbdef95a580a0c89bc9d9b 127.0.0.1:18159 slots: (0 slots) master replicates b3734a60336856f8c4ef08efe763ae3ac32bb94a [OK] All nodes agree about slots configuration. >>> Check for open slots... >>> Check slots coverage... [OK] All 16384 slots covered.","title":"2. Create and start a cluster"},{"location":"how-to-use-flashbase/#3-operations","text":"PING You can simply check the status of the node with PING command. > flashbase cli -h localhost -p 18101 localhost:18101> ping PONG localhost:18101> With using flashbase cli-all , you can check the status of all nodes. 
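Beyond PING, a quick way to confirm that the cluster as a whole is healthy is to look at cluster_state in the CLUSTER INFO output described later in this section. A one-line sketch using redis-cli directly (the port is the example one used above):

```bash
# Expect "cluster_state:ok" when all 16384 slots are served.
redis-cli -h 127.0.0.1 -p 18101 cluster info | grep cluster_state
```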
> flashbase cli-all ping redis client for 127.0.0.1:18100 PONG redis client for 127.0.0.1:18101 PONG redis client for 127.0.0.1:18102 PONG redis client for 127.0.0.1:18103 PONG redis client for 127.0.0.1:18104 PONG redis client for 127.0.0.1:18105 PONG redis client for 127.0.0.1:18106 PONG redis client for 127.0.0.1:18107 PONG redis client for 127.0.0.1:18108 PONG redis client for 127.0.0.1:18109 PONG redis client for 127.0.0.1:18150 PONG redis client for 127.0.0.1:18151 PONG redis client for 127.0.0.1:18152 PONG redis client for 127.0.0.1:18153 PONG redis client for 127.0.0.1:18154 PONG redis client for 127.0.0.1:18155 PONG redis client for 127.0.0.1:18156 PONG redis client for 127.0.0.1:18157 PONG redis client for 127.0.0.1:18158 PONG redis client for 127.0.0.1:18159 PONG INFO With INFO command, you can get all information of each node. > flashbase cli -h localhost -p 18101 localhost:18101> info all # Server redis_version:3.0.7 redis_git_sha1:29d44e4d redis_git_dirty:0 redis_build_id:e5a4dd48086abff2 redis_mode:cluster os:Darwin 18.7.0 x86_64 arch_bits:64 multiplexing_api:kqueue gcc_version:4.2.1 process_id:42593 run_id:ea34cce757c61d65e344b6c1094b940c3ab46110 tcp_port:18101 uptime_in_seconds:516 uptime_in_days:0 hz:10 lru_clock:3282808 config_file:/Users/myaccount/tsr2/cluster_1/tsr2-assembly-1.0.0-SNAPSHOT/conf/redis/redis-18101.conf # Clients connected_clients:1 client_longest_output_list:0 client_biggest_input_buf:0 blocked_clients:0 # Memory isOOM:false used_memory:20752816 used_memory_human:19.79M used_memory_rss:23941120 used_memory_peak:20752816 used_memory_peak_human:19.79M used_memory_lua:36864 used_memory_rocksdb_total:100663872 used_memory_rocksdb_block_cache:100663296 used_memory_rocksdb_mem_table:576 used_memory_rocksdb_table_readers:0 used_memory_rocksdb_pinned_block:0 meta_data_memory:64 percent_of_meta_data_memory:0 used_memory_client_buffer_peak:0 mem_fragmentation_ratio:1.15 mem_allocator:libc # Persistence loading:0 rdb_changes_since_last_save:0 rdb_bgsave_in_progress:0 rdb_last_save_time:1597117812 rdb_last_bgsave_status:ok rdb_last_bgsave_time_sec:-1 rdb_current_bgsave_time_sec:-1 aof_enabled:1 aof_rewrite_in_progress:0 aof_rewrite_scheduled:0 aof_last_rewrite_time_sec:-1 aof_current_rewrite_time_sec:-1 aof_last_bgrewrite_status:ok aof_last_write_status:ok aof_current_size:0 aof_base_size:0 aof_pending_rewrite:0 aof_buffer_length:0 aof_rewrite_buffer_length:0 aof_pending_bio_fsync:0 aof_delayed_fsync:0 # Stats total_connections_received:5 total_commands_processed:513 instantaneous_ops_per_sec:0 total_net_input_bytes:33954 total_net_output_bytes:173640 instantaneous_input_kbps:0.02 instantaneous_output_kbps:0.00 rejected_connections:0 sync_full:1 sync_partial_ok:0 sync_partial_err:0 expired_keys:0 evicted_keys:0 keyspace_hits:0 keyspace_misses:0 pubsub_channels:0 pubsub_patterns:0 latest_fork_usec:1159 migrate_cached_sockets:0 # Replication role:master connected_slaves:1 slave0:ip=127.0.0.1,port=18151,state=online,offset=589,lag=1 master_repl_offset:589 repl_backlog_active:1 repl_backlog_size:1048576 repl_backlog_first_byte_offset:2 repl_backlog_histlen:588 # CPU used_cpu_sys:0.42 used_cpu_user:0.56 used_cpu_sys_children:0.00 used_cpu_user_children:0.00 # Commandstats cmdstat_ping:calls=4,usec=19,usec_per_call=4.75,usec_std=1.00,usec_max=10 cmdstat_psync:calls=1,usec=17,usec_per_call=17.00,usec_std=0.00,usec_max=17 cmdstat_replconf:calls=416,usec=644,usec_per_call=1.55,usec_std=1.00,usec_max=11 
cmdstat_info:calls=2,usec=312,usec_per_call=156.00,usec_std=5.00,usec_max=183 cmdstat_cluster:calls=90,usec=122372,usec_per_call=1359.69,usec_std=19.00,usec_max=1802 # Cluster cluster_enabled:1 # Keyspace # Tablespace # Eviction evictStat:sleeps=0,fullRowgroup=0,80Rowgroup=0,60Rowgroup=0,40Rowgroup=0,20Rowgroup=0,00Rowgroup=0 recentEvictStat:recent 200 rowgroups' avg full percent:0 # Storage(Disk Usage) DB0_TTL(sec):2592000 DB0_size(KByte):200 DB0_numFiles:0 # CompressionRatios CVA_compress_algorithm:zstd CVA_comp_avg_ratio cannot be calculated because of not enough # of samples localhost:18101> You can also check the specified information of each node. localhost:18101> info memory # Memory isOOM:false used_memory:20751904 used_memory_human:19.79M used_memory_rss:23949312 used_memory_peak:20752816 used_memory_peak_human:19.79M used_memory_lua:36864 used_memory_rocksdb_total:100663872 used_memory_rocksdb_block_cache:100663296 used_memory_rocksdb_mem_table:576 used_memory_rocksdb_table_readers:0 used_memory_rocksdb_pinned_block:0 meta_data_memory:64 percent_of_meta_data_memory:0 used_memory_client_buffer_peak:0 mem_fragmentation_ratio:1.15 mem_allocator:libc localhost:18101> localhost:18101> info storage # Storage(Disk Usage) DB0_TTL(sec):2592000 DB0_size(KByte):200 DB0_numFiles:0 localhost:18101> CLUSTER You can check the status of the cluster with CLUSTER command. localhost:18101> cluster info cluster_state:ok cluster_slots_assigned:16384 cluster_slots_ok:16384 cluster_slots_pfail:0 cluster_slots_fail:0 cluster_known_nodes:20 cluster_size:10 cluster_current_epoch:20 cluster_my_epoch:2 cluster_stats_messages_ping_sent:665 cluster_stats_messages_pong_sent:679 cluster_stats_messages_meet_sent:15 cluster_stats_messages_sent:1359 cluster_stats_messages_ping_received:675 cluster_stats_messages_pong_received:680 cluster_stats_messages_meet_received:4 cluster_stats_messages_received:1359 localhost:18101> localhost:18101> cluster nodes d531628bf7b2afdc095e445d21dedc2549cc4590 127.0.0.1:18107 master - 0 1597118527011 8 connected 11469-13106 16da3917eff32cde8942660324c7374117902b01 127.0.0.1:18103 master - 0 1597118524000 4 connected 4915-6553 7e72dff98fdda09cf97e02420727fd8b6564b6ae 127.0.0.1:18100 master - 0 1597118521882 1 connected 0-1637 6b3a7f40f36cbe7aaad8ffffa58aefbf591d4967 127.0.0.1:18158 slave ae71f4430fba6a019e4111c3d26e27e225764200 0 1597118520862 19 connected d4cdcfdfdfb966a74a1bafce8969f956b5312094 127.0.0.1:18105 master - 0 1597118526000 6 connected 8192-9829 11f3c47b736e37b274bbdef95a580a0c89bc9d9b 127.0.0.1:18159 slave b3734a60336856f8c4ef08efe763ae3ac32bb94a 0 1597118520000 20 connected 5ed447baf1f1c6c454459c24809ffc197809cb6b 127.0.0.1:18104 master - 0 1597118523932 5 connected 6554-8191 8d0061ff0bc8fcc0e8a9fa5db8d6ab0b7b7ba9d0 127.0.0.1:18154 slave 5ed447baf1f1c6c454459c24809ffc197809cb6b 0 1597118521000 15 connected b3734a60336856f8c4ef08efe763ae3ac32bb94a 127.0.0.1:18109 master - 0 1597118528026 10 connected 14746-16383 f6670f4b8570758d509b5a0341a5151abea599ea 127.0.0.1:18152 slave ba39bada8a2e393f76d265ea02d3e078c9406a93 0 1597118524959 13 connected 128a527bba2823e547e8138a77aebcfec7e55342 127.0.0.1:18150 slave 7e72dff98fdda09cf97e02420727fd8b6564b6ae 0 1597118524000 11 connected c3b5e673033758d77680e4534855686649fe5daa 127.0.0.1:18101 myself,master - 0 1597118523000 2 connected 1638-3276 6f89f0b44f0a515865173984b95fc3f6fe4e7d72 127.0.0.1:18106 master - 0 1597118522000 7 connected 9830-11468 ba39bada8a2e393f76d265ea02d3e078c9406a93 127.0.0.1:18102 master - 0 1597118520000 3 
connected 3277-4914 f004736cb50724f089289af34bd8da2e98b07a0b 127.0.0.1:18153 slave 16da3917eff32cde8942660324c7374117902b01 0 1597118524000 14 connected ae71f4430fba6a019e4111c3d26e27e225764200 127.0.0.1:18108 master - 0 1597118525985 9 connected 13107-14745 ab72ae8dafc8a3f3229157cf5965bbfa1db6c726 127.0.0.1:18151 slave c3b5e673033758d77680e4534855686649fe5daa 0 1597118523000 12 connected 208496ceb24eba1e26611071e185007b1ad552c5 127.0.0.1:18155 slave d4cdcfdfdfb966a74a1bafce8969f956b5312094 0 1597118520000 16 connected bbcba7c269fb8162e0f7ef5807e079ba06fc032b 127.0.0.1:18157 slave d531628bf7b2afdc095e445d21dedc2549cc4590 0 1597118513713 18 connected 3d3af1bf3dec40fe0d5dbe1314638733dadb686e 127.0.0.1:18156 slave 6f89f0b44f0a515865173984b95fc3f6fe4e7d72 0 1597118523000 17 connected localhost:18101> localhost:18101> cluster slots 1) 1) (integer) 11469 2) (integer) 13106 3) 1) \"127.0.0.1\" 2) (integer) 18107 4) 1) \"127.0.0.1\" 2) (integer) 18157 2) 1) (integer) 4915 2) (integer) 6553 3) 1) \"127.0.0.1\" 2) (integer) 18103 4) 1) \"127.0.0.1\" 2) (integer) 18153 3) 1) (integer) 0 2) (integer) 1637 3) 1) \"127.0.0.1\" 2) (integer) 18100 4) 1) \"127.0.0.1\" 2) (integer) 18150 4) 1) (integer) 8192 2) (integer) 9829 3) 1) \"127.0.0.1\" 2) (integer) 18105 4) 1) \"127.0.0.1\" 2) (integer) 18155 5) 1) (integer) 6554 2) (integer) 8191 3) 1) \"127.0.0.1\" 2) (integer) 18104 4) 1) \"127.0.0.1\" 2) (integer) 18154 6) 1) (integer) 14746 2) (integer) 16383 3) 1) \"127.0.0.1\" 2) (integer) 18109 4) 1) \"127.0.0.1\" 2) (integer) 18159 7) 1) (integer) 1638 2) (integer) 3276 3) 1) \"127.0.0.1\" 2) (integer) 18101 4) 1) \"127.0.0.1\" 2) (integer) 18151 8) 1) (integer) 9830 2) (integer) 11468 3) 1) \"127.0.0.1\" 2) (integer) 18106 4) 1) \"127.0.0.1\" 2) (integer) 18156 9) 1) (integer) 3277 2) (integer) 4914 3) 1) \"127.0.0.1\" 2) (integer) 18102 4) 1) \"127.0.0.1\" 2) (integer) 18152 10) 1) (integer) 13107 2) (integer) 14745 3) 1) \"127.0.0.1\" 2) (integer) 18108 4) 1) \"127.0.0.1\" 2) (integer) 18158 localhost:18101> CONFIG With CONFIG command, you can set or get the configuration of each feature. 1) Get localhost:18101> config get maxmemory 1) \"maxmemory\" 2) \"300mb\" localhost:18101> config set maxmemory 310mb OK 2) Set localhost:18101> config set maxmemory 310mb OK localhost:18101> config get maxmemory 1) \"maxmemory\" 2) \"310mb\" 3) Rewrite With config set command, you can change the configuration only in memory. To save the modification on disk, use config rewrite after setting. localhost:18101> config rewrite OK localhost:18101> 4) DIR With DIR command, you can check the path of directory that each node uses to save .rdb, .aof, db and *.conf files. localhost:18101> config get dir 1) \"dir\" 2) \"/sata_ssd/ssd_03/nvkvs/myaccount\"","title":"3. Operations"},{"location":"how-to-use-lightningdb-on-k8s/","text":"How to use LightningDB on Kubernetes \u00b6 1. 
ltdb-http v2 - thrift beeline \u00b6 kubectl -n metavision exec -it pod/ltdbv2-beeline-857f578cd9-d7kc4 -- beeline.sh 0: jdbc:hive2://ltdbv2-http-svc:13000> select * from files limit 3; Create a table (without ANN) CREATE TABLE IF NOT EXISTS ltdb.metavision.img_feats_noann( id BIGINT, is_s3 BOOLEAN, bucket STRING, obj_key STRING, features ARRAY, meta STRING ) USING lightning LOCATION '127.0.0.1:18500' TBLPROPERTIES ('partition.size'=2, 'partition.0'='bucket','partition.1'='id'); Create a table for ANN CREATE TABLE IF NOT EXISTS ltdb.metavision.img_feats( id BIGINT, is_s3 BOOLEAN, bucket STRING, obj_key STRING, features ARRAY, meta STRING ) USING lightning LOCATION 'ltdbv2:6379' TBLPROPERTIES ('partition.size'=2, 'partition.0'='bucket','partition.1'='id', 'feature_idx'='4', 'ann_type'='1', 'feature_dim'='1024', 'ef_construction'='500', 'ann_max_elem'='10000', 'ann_m'='20'); Ingest ANN data (from parquet) Create a temporary table to load the parquet file CREATE TABLE IF NOT EXISTS ltdb.parquet.temptable( id BIGINT, is_s3 BOOLEAN, bucket STRING, obj_key STRING, features ARRAY, meta STRING ) USING parquet LOCATION 's3a://upload-data/real/vision-ai-private-data_6.csv.ViT-H-14.laion2b_s32b_b79k.975.parquet'; Insert data INSERT INTO ltdb.metavision.img_feats SELECT (CAST(RANDOM() * 1000000 AS INTEGER) % 400) AS id, is_s3, CONCAT('metavision-', bucket) AS bucket, obj_key, features, meta FROM ltdb.parquet.temptable LIMIT 100; Query data SELECT * FROM ltdb.metavision.img_feats; SELECT count(obj_key) FROM ltdb.metavision.img_feats; Describe table DESCRIBE formatted ltdb.metavision.img_feats; Drop table DROP TABLE IF EXISTS ltdb.parquet.temptable; DROP TABLE IF EXISTS ltdb.metavision.img_feats; 2. Thunderquery CLI tool \u00b6 kubectl -n metavision exec -it thunderquery-68544ff5f7-9shjv -- thunderquery-cli ltdbv2-0.ltdbv2 ANN command select bucket, obj_key, ann(features, [-0.009953999, -0.0006904541, -0.006250763, -0.009839512, 0.012631393, 0.024262842, -0.029540457, -0.01707404, 0.0061618676, 0.029112583, ... , -0.011023628]) as ann_result from ltdb.metavision.img_feats limit 2; KNN command select bucket, obj_key, euclideandistance(features, [-0.009953999, -0.0006904541, -0.006250763, -0.009839512, 0.012631393, 0.024262842, -0.029540457, -0.01707404, 0.0061618676, 0.029112583, ... , -0.011023628]) as knn_result from ltdb.metavision.img_feats limit 2; 3.
REST API \u00b6 Create table $ curl --location --request POST http://metavision.k8s.lightningdb/ltdbv2-http/ingest/table \\ --header \"Content-Type: text/plain\" \\ --data \"{ 'table': 'ltdb.metavision.img_feats', 'schema': [{'name': 'id', 'typ': 'BIGINT'}, {'name': 'is_s3', 'typ': 'BOOLEAN'}, {'name': 'bucket', 'typ': 'STRING'}, {'name': 'obj_key', 'typ': 'STRING'}, {'name': 'features', 'typ': 'ARRAY'}, {'name': 'meta', 'typ': 'STRING'}], 'loc': 'ltdbv2:6379', 'props': [{'key': 'partition.size', 'val': '2'}, {'key': 'partition.0', 'val': 'bucket'}, {'key': 'partition.1', 'val': 'id'}, {'key': 'feature_idx', 'val': '4'}, {'key': 'ann_type', 'val': '1'}, {'key': 'feature_dim', 'val': '1024'}, {'key': 'ef_construction', 'val': '500'}, {'key': 'ann_max_elem', 'val': '10000'}, {'key': 'ann_m', 'val': '20'}] }\" Ingest ANN data( from parquet) $ curl --location --request POST http://metavision.k8s.lightningdb/ltdbv2-http/ingest/data \\ --header \"Content-Type: text/plain\" \\ --data \"{ 'src_format': 'parquet', 'src_loc': 's3a://upload-data/real/vision-ai-private-data_6.csv.ViT-H-14.laion2b_s32b_b79k.975.parquet', 'dest_table': 'ltdb.metavision.img_feats', 'limit': 100, 'src_cols_with_random': [{'name': 'id', 'range': 400}], 'src_cols_to_modify': [{'name': 'bucket', 'prefix': 'metavision-'}] }\" Query data $ curl --location --request POST http://metavision.k8s.lightningdb/ltdbv2-http/query \\ --header \"Content-Type: text/plain\" \\ --data \"SELECT count(obj_key) FROM ltdb.metavision.img_feats\" Describe table $ curl --location --request GET http://metavision.k8s.lightningdb/ltdbv2-http/ingest/table/ltdb.metavision.img_feats Drop table $ curl --location --request DELETE http://metavision.k8s.lightningdb/ltdbv2-http/ingest/table/ltdb.metavision.img_feats ANN command $ curl -d 'select bucket, obj_key, ann(features, [-0.009953999, -0.0006904541, -0.006250763, -0.009839512, 0.012631393, 0.024262842, -0.029540457, -0.01707404, 0.0061618676, 0.029112583, ... , -0.011023628]) as ann_result from ltdb.metavision.img_feats limit 2;' http://metavision.k8s.lightningdb/thunderquery/sql KNN command $ curl -d 'select bucket, obj_key, euclideandistance(features, [-0.009953999, -0.0006904541, -0.006250763, -0.009839512, 0.012631393, 0.024262842, -0.029540457, -0.01707404, 0.0061618676, ... 
, -0.011023628]) as ann_result from ltdb.metavision.img_feats limit 2;' http://metavision.k8s.lightningdb/thunderquery/sql","title":"How to use LightningDB"},{"location":"how-to-use-lightningdb-on-k8s/#how-to-use-lightningdb-on-kubernetes","text":"","title":"How to use LightningDB on Kubernetes"},{"location":"how-to-use-lightningdb-on-k8s/#1-ltdb-http-v2-thrift-beeline","text":"kubectl -n metavision exec -it pod/ltdbv2-beeline-857f578cd9-d7kc4 -- beeline.sh 0: jdbc:hive2://ltdbv2-http-svc:13000> select * from files limit 3; Create table(Do not use ANN) CREATE TABLE IF NOT EXISTS ltdb.metavision.img_feats_noann( id BIGINT, is_s3 BOOLEAN, bucket STRING, obj_key STRING, features ARRAY, meta STRING ) USING lightning LOCATION '127.0.0.1:18500' TBLPROPERTIES ('partition.size'=2, 'partition.0'='bucket','partition.1'='id'); Create table for ANN CREATE TABLE IF NOT EXISTS ltdb.metavision.img_feats( id BIGINT, is_s3 BOOLEAN, bucket STRING, obj_key STRING, features ARRAY, meta STRING ) USING lightning LOCATION 'ltdbv2:6379' TBLPROPERTIES ('partition.size'=2, 'partition.0'='bucket','partition.1'='id', 'feature_idx'='4', 'ann_type'='1', 'feature_dim'='1024', 'ef_construction'='500', 'ann_max_elem'='10000', 'ann_m'='20'); Ingest ANN data (from parquet) parquet \uc744 \ubc1b\uc544\uc904 \uc784\uc2dc \ud14c\uc774\ube14 \uc0dd\uc131 CREATE TABLE IF NOT EXISTS ltdb.parquet.temptable( id BIGINT, is_s3 BOOLEAN, bucket STRING, obj_key STRING, features ARRAY, meta STRING ) USING parquet LOCATION 's3a://upload-data/real/vision-ai-private-data_6.csv.ViT-H-14.laion2b_s32b_b79k.975.parquet'; Insert data INSERT INTO ltdb.metavision.img_feats SELECT (CAST(RANDOM() * 1000000 AS INTEGER) % 400) AS id, is_s3, CONCAT('metavision-', bucket) AS bucket, obj_key, features, meta FROM ltdb.parquet.temptable LIMIT 100; Query data SELECT * FROM ltdb.metavision.img_feats; SELECT count(obj_key) FROM ltdb.metavision.img_feats; Describe table DESCRIBE formatted ltdb.metavision.img_feats; Drop table DROP TABLE IF EXISTS ltdb.parquet.temptable; DROP TABLE IF EXISTS ltdb.metavision.img_feats;","title":"1. ltdb-http v2 - thrift beeline"},{"location":"how-to-use-lightningdb-on-k8s/#2-thunderquery-cli-tool","text":"kubectl -n metavision exec -it thunderquery-68544ff5f7-9shjv -- thunderquery-cli ltdbv2-0.ltdbv2 ANN command select bucket, obj_key, ann(features, [-0.009953999, -0.0006904541, -0.006250763, -0.009839512, 0.012631393, 0.024262842, -0.029540457, -0.01707404, 0.0061618676, 0.029112583, ... , -0.011023628]) as ann_result from ltdb.metavision.img_feats limit 2; KNN command select bucket, obj_key, euclideandistance(features, [-0.009953999, -0.0006904541, -0.006250763, -0.009839512, 0.012631393, 0.024262842, -0.029540457, -0.01707404, 0.0061618676, 0.029112583, ... , -0.011023628]) as knn_result from ltdb.metavision.img_feats limit 2;","title":"2. 
Thunderquery CLI tool"},{"location":"how-to-use-lightningdb-on-k8s/#3-rest-api","text":"Create table $ curl --location --request POST http://metavision.k8s.lightningdb/ltdbv2-http/ingest/table \\ --header \"Content-Type: text/plain\" \\ --data \"{ 'table': 'ltdb.metavision.img_feats', 'schema': [{'name': 'id', 'typ': 'BIGINT'}, {'name': 'is_s3', 'typ': 'BOOLEAN'}, {'name': 'bucket', 'typ': 'STRING'}, {'name': 'obj_key', 'typ': 'STRING'}, {'name': 'features', 'typ': 'ARRAY'}, {'name': 'meta', 'typ': 'STRING'}], 'loc': 'ltdbv2:6379', 'props': [{'key': 'partition.size', 'val': '2'}, {'key': 'partition.0', 'val': 'bucket'}, {'key': 'partition.1', 'val': 'id'}, {'key': 'feature_idx', 'val': '4'}, {'key': 'ann_type', 'val': '1'}, {'key': 'feature_dim', 'val': '1024'}, {'key': 'ef_construction', 'val': '500'}, {'key': 'ann_max_elem', 'val': '10000'}, {'key': 'ann_m', 'val': '20'}] }\" Ingest ANN data( from parquet) $ curl --location --request POST http://metavision.k8s.lightningdb/ltdbv2-http/ingest/data \\ --header \"Content-Type: text/plain\" \\ --data \"{ 'src_format': 'parquet', 'src_loc': 's3a://upload-data/real/vision-ai-private-data_6.csv.ViT-H-14.laion2b_s32b_b79k.975.parquet', 'dest_table': 'ltdb.metavision.img_feats', 'limit': 100, 'src_cols_with_random': [{'name': 'id', 'range': 400}], 'src_cols_to_modify': [{'name': 'bucket', 'prefix': 'metavision-'}] }\" Query data $ curl --location --request POST http://metavision.k8s.lightningdb/ltdbv2-http/query \\ --header \"Content-Type: text/plain\" \\ --data \"SELECT count(obj_key) FROM ltdb.metavision.img_feats\" Describe table $ curl --location --request GET http://metavision.k8s.lightningdb/ltdbv2-http/ingest/table/ltdb.metavision.img_feats Drop table $ curl --location --request DELETE http://metavision.k8s.lightningdb/ltdbv2-http/ingest/table/ltdb.metavision.img_feats ANN command $ curl -d 'select bucket, obj_key, ann(features, [-0.009953999, -0.0006904541, -0.006250763, -0.009839512, 0.012631393, 0.024262842, -0.029540457, -0.01707404, 0.0061618676, 0.029112583, ... , -0.011023628]) as ann_result from ltdb.metavision.img_feats limit 2;' http://metavision.k8s.lightningdb/thunderquery/sql KNN command $ curl -d 'select bucket, obj_key, euclideandistance(features, [-0.009953999, -0.0006904541, -0.006250763, -0.009839512, 0.012631393, 0.024262842, -0.029540457, -0.01707404, 0.0061618676, ... , -0.011023628]) as ann_result from ltdb.metavision.img_feats limit 2;' http://metavision.k8s.lightningdb/thunderquery/sql","title":"3. REST API"},{"location":"install-ltcli/","text":"1. How to run LTCLI \u00b6 If you try to use LTCLI for the first time after the EC2 instance was created, please update LTCLI like below. pip install ltcli --upgrade --user (1) Run To run LTCLI, ${FBPATH} should be set. If not, the following error messages will be shown. To start using LTCLI, you should set env FBPATH ex) export FBPATH=$HOME/.flashbase Tip In the case of EC2 Instance, this path is set automatically. Run LTCLI by typing 'ltcli' $ ltcli When LTCLI starts for the first time, you need to confirm 'base_directory'. [~/tsr2] 1 is default value. Type base directory of LightningDB [~/tsr2] ~/tsr2 OK, ~/tsr2 In '${FBPATH}/.flashbase/config', you can modify 'base_directory'. If you logs in LTCLI normally, LTCLI starts on the last visited cluster. In the case of the first login, '-' is shown instead of cluster number. root@lightningdb:-> ... ... root@lightningdb:1> Tip In this page, '$' means that you are in Centos and '>' means that you are in LTCLI. 
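The first-run steps above can be collected into a short shell script. This is only a sketch that assumes the defaults shown in this guide (FBPATH under $HOME/.flashbase and ~/tsr2 as the base directory); adjust the paths to your environment.

```bash
# Install or upgrade LTCLI for the current user.
pip install ltcli --upgrade --user

# LTCLI refuses to start without FBPATH, so set it and persist it for new shells.
export FBPATH=$HOME/.flashbase
grep -q 'export FBPATH=' ~/.bash_profile || echo 'export FBPATH=$HOME/.flashbase' >> ~/.bash_profile

# Start LTCLI. On the first run it asks for 'base_directory' ([~/tsr2] is the default),
# which can be changed later in the LTCLI config file mentioned above.
ltcli
```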
(2) Log messages Log messages of LTCLI will be saved in '$FBPATH/logs/fb-roate.log'. Its max-file-size is 1GiB and rolling update will be done in case of exceeding of size limit. 2. Deploy LightningDB \u00b6 Deploy is the procedure that LightningDB is installed with the specified cluster number. You could make LightningDB cluster with the following command. > deploy 1 After deploy command, you should type the following information that provides its last used value. Installer Host Number of masters Replicas Number of SSD(disk) The prefix of DB path (used for 'redis data', 'redis DB path' and 'flash DB path') Use the below option not to save the last used value. > deploy --history-save=False (1) Select installer Select installer [ INSTALLER LIST ] (1) [DOWNLOAD] lightningdb.release.master.5a6a38.bin (2) [LOCAL] lightningdb.release.master.dbcb9e.bin (3) [LOCAL] lightningdb.trial.master.dbcb9e-dirty.bin (4) [LOCAL] lightningdb.trial.master.dbcb9e.bin Please enter the number, file path or url of the installer you want to use. you can also add file in list by copy to '$FBPATH/releases/' 1 OK, lightningdb.release.master.5a6a38.bin Tip LOCAL means installer file under path '$FBPATH/releases/' on your local. DOWNLOAD refers to a file that can be downloaded and up to 5 files are displayed in the latest order. To confirm the recommended Lightning DB version, use Release Notes Select a number to use that file. Type DOWNLOAD will be used after downloading. The downloaded file is saved in path '$FBPATH/releases'. Select installer [ INSTALLER LIST ] (empty) Please enter file path or url of the installer you want to use you can also add file in list by copy to '$FBPATH/releases/' https://flashbase.s3.ap-northeast-2.amazonaws.com/latest/lightningdb.release.master.5a6a38.bin Downloading lightningdb.release.master.5a6a38.bin [==================================================] 100% OK, lightningdb.release.master.5a6a38.bin If the installer list is empty like above, you can also use file path or URL. If you enter URL, download the file and use it. The downloaded file is saved in path '$FBPATH/releases'. (2) Type Hosts IP address or hostname can be used. In the case of several hosts, the list can be separated by comma(','). Please type host list separated by comma(,) [127.0.0.1] OK, ['127.0.0.1'] (3) Type Masters How many masters would you like to create on each host? [10] OK, 10 Please type ports separate with a comma(,) and use a hyphen(-) for range. [18100-18109] OK, ['18100-18109'] Define how many master processes will be created in the cluster per server. Tip To create a cluster, 3 master processes should be included at least. (4) Type information of slave How many replicas would you like to create on each master? [0] OK, 0 Define how many slave processes will be created for a master process. (5) Type the count of SSD(disk) and the path of DB files How many ssd would you like to use? [4] OK, 4 Type prefix of db path [/nvme/data_] OK, /nvme/data_ With this setting, LightningDB will use 4 disk paths('/nvme/data_01', '/nvme/data_02', '/nvme/data_03', '/nvme/data_04'). Tip In order to use this setting, the 'nvkvs' directories must be generated under all disk path and the permission setting(chmod/chown) for the directory must be configured as follows. $ pwd /nvme/data_01 $ mkdir nvkvs $chown ltdb nvkvs // The current user is 'ltdb' $chmod 755 nvkvs $ ls -alh drwxr-xr-x 10 ltdb ltdb 4.0K 4\uc6d4 27 14:34 . drwxr-xr-x 33 ltdb ltdb 4.0K 2\uc6d4 4 10:19 .. 
drwxrwxr-x 3 ltdb ltdb 4.0K 6\uc6d4 5 18:36 nvkvs // The current user is 'ltdb' ... (6) Check all settings finally Finally, all settings will be shown and confirmation will be requested like below. +--------------+---------------------------------------+ | NAME | VALUE | +--------------+---------------------------------------+ | installer | lightningdb.release.master.5a6a38.bin | | hosts | 127.0.0.1 | | master ports | 18100-18109 | | ssd count | 4 | | db path | /nvme/data_ | +--------------+---------------------------------------+ Do you want to proceed with the deploy accroding to the above information? (y/n) y (7) Deploy cluster After deploying is completed, the following messages are shown and LTCLI of the cluster is activated. Check status of hosts... +-----------+--------+ | HOST | STATUS | +-----------+--------+ | 127.0.0.1 | OK | +-----------+--------+ OK Checking for cluster exist... +-----------+--------+ | HOST | STATUS | +-----------+--------+ | 127.0.0.1 | CLEAN | +-----------+--------+ OK Transfer installer and execute... - 127.0.0.1 Sync conf... Complete to deploy cluster 1. Cluster '1' selected. When an error occurs during deploying, error messages will be shown like below. (8) Errors Host connection error Check status of hosts... +-------+------------------+ | HOST | STATUS | +-------+------------------+ | nodeA | OK | | nodeB | SSH ERROR | | nodeC | UNKNOWN HOST | | nodeD | CONNECTION ERROR | +-------+------------------+ There are unavailable host. SSH ERROR SSH access error. Please check SSH KEY exchange or the status of SSH client/server. UNKNOWN HOST Can not get IP address with the hostname. Please check if the hostname is right. CONNECTION ERROR Please check the status of the host(server) or outbound/inbound of the server. Cluster already exist Checking for cluster exist... +-------+---------------+ | HOST | STATUS | +-------+---------------+ | nodeA | CLEAN | | nodeB | CLEAN | | nodeC | CLUSTER EXIST | | nodeD | CLUSTER EXIST | +-------+---------------+ Cluster information exist on some hosts. CLUSTER EXIST LightningDB is already deployed in the cluster of the host. Not include localhost Check status of hosts... +-------+------------------+ | HOST | STATUS | +-------+------------------+ | nodeB | OK | | nodeC | OK | | nodeD | OK | +-------+------------------+ Must include localhost. If the localhost(127.0.0.1) is not included in host information, this error occurs. Please add the localhost in the host list in this case. 3. Start LightningDB \u00b6 Create a cluster of LightningDB using 'cluster create' command. ec2-user@lightningdb:1> cluster create Check status of hosts... OK Backup redis master log in each MASTER hosts... - 127.0.0.1 create redis data directory in each MASTER hosts - 127.0.0.1 sync conf +-----------+--------+ | HOST | STATUS | +-----------+--------+ | 127.0.0.1 | OK | +-----------+--------+ OK Starting master nodes : 127.0.0.1 : 18100|18101|18102|18103|18104|18105|18106|18107|18108|18109 ... Wait until all redis process up... 
cur: 10 / total: 10 Complete all redis process up >>> Creating cluster +-----------+-------+--------+ | HOST | PORT | TYPE | +-----------+-------+--------+ | 127.0.0.1 | 18100 | MASTER | | 127.0.0.1 | 18101 | MASTER | | 127.0.0.1 | 18102 | MASTER | | 127.0.0.1 | 18103 | MASTER | | 127.0.0.1 | 18104 | MASTER | | 127.0.0.1 | 18105 | MASTER | | 127.0.0.1 | 18106 | MASTER | | 127.0.0.1 | 18107 | MASTER | | 127.0.0.1 | 18108 | MASTER | | 127.0.0.1 | 18109 | MASTER | +-----------+-------+--------+ replicas: 0 Do you want to proceed with the create according to the above information? (y/n) y Cluster meet... - 127.0.0.1:18107 - 127.0.0.1:18106 - 127.0.0.1:18101 - 127.0.0.1:18100 - 127.0.0.1:18103 - 127.0.0.1:18109 - 127.0.0.1:18102 - 127.0.0.1:18108 - 127.0.0.1:18105 - 127.0.0.1:18104 Adding slots... - 127.0.0.1:18107, 1642 - 127.0.0.1:18106, 1638 - 127.0.0.1:18101, 1638 - 127.0.0.1:18100, 1638 - 127.0.0.1:18103, 1638 - 127.0.0.1:18109, 1638 - 127.0.0.1:18102, 1638 - 127.0.0.1:18108, 1638 - 127.0.0.1:18105, 1638 - 127.0.0.1:18104, 1638 Check cluster state and asign slot... Ok create cluster complete. ec2-user@lightningdb:1> cli ping --all alive redis 10/10 ec2-user@lightningdb:1> From now, you can try ingestion and query in LightningDB with Zeppelin . And for further information about commands of LTCLI, please use Command Line . If you type 'enter' without any text, the default value is applied. In some cases, the default value will not be provided. \u21a9","title":"Installation"},{"location":"install-ltcli/#1-how-to-run-ltcli","text":"If you try to use LTCLI for the first time after the EC2 instance was created, please update LTCLI like below. pip install ltcli --upgrade --user (1) Run To run LTCLI, ${FBPATH} should be set. If not, the following error messages will be shown. To start using LTCLI, you should set env FBPATH ex) export FBPATH=$HOME/.flashbase Tip In the case of EC2 Instance, this path is set automatically. Run LTCLI by typing 'ltcli' $ ltcli When LTCLI starts for the first time, you need to confirm 'base_directory'. [~/tsr2] 1 is default value. Type base directory of LightningDB [~/tsr2] ~/tsr2 OK, ~/tsr2 In '${FBPATH}/.flashbase/config', you can modify 'base_directory'. If you logs in LTCLI normally, LTCLI starts on the last visited cluster. In the case of the first login, '-' is shown instead of cluster number. root@lightningdb:-> ... ... root@lightningdb:1> Tip In this page, '$' means that you are in Centos and '>' means that you are in LTCLI. (2) Log messages Log messages of LTCLI will be saved in '$FBPATH/logs/fb-roate.log'. Its max-file-size is 1GiB and rolling update will be done in case of exceeding of size limit.","title":"1. How to run LTCLI"},{"location":"install-ltcli/#2-deploy-lightningdb","text":"Deploy is the procedure that LightningDB is installed with the specified cluster number. You could make LightningDB cluster with the following command. > deploy 1 After deploy command, you should type the following information that provides its last used value. Installer Host Number of masters Replicas Number of SSD(disk) The prefix of DB path (used for 'redis data', 'redis DB path' and 'flash DB path') Use the below option not to save the last used value. 
> deploy --history-save=False (1) Select installer Select installer [ INSTALLER LIST ] (1) [DOWNLOAD] lightningdb.release.master.5a6a38.bin (2) [LOCAL] lightningdb.release.master.dbcb9e.bin (3) [LOCAL] lightningdb.trial.master.dbcb9e-dirty.bin (4) [LOCAL] lightningdb.trial.master.dbcb9e.bin Please enter the number, file path or url of the installer you want to use. you can also add file in list by copy to '$FBPATH/releases/' 1 OK, lightningdb.release.master.5a6a38.bin Tip LOCAL means installer file under path '$FBPATH/releases/' on your local. DOWNLOAD refers to a file that can be downloaded and up to 5 files are displayed in the latest order. To confirm the recommended Lightning DB version, use Release Notes Select a number to use that file. Type DOWNLOAD will be used after downloading. The downloaded file is saved in path '$FBPATH/releases'. Select installer [ INSTALLER LIST ] (empty) Please enter file path or url of the installer you want to use you can also add file in list by copy to '$FBPATH/releases/' https://flashbase.s3.ap-northeast-2.amazonaws.com/latest/lightningdb.release.master.5a6a38.bin Downloading lightningdb.release.master.5a6a38.bin [==================================================] 100% OK, lightningdb.release.master.5a6a38.bin If the installer list is empty like above, you can also use file path or URL. If you enter URL, download the file and use it. The downloaded file is saved in path '$FBPATH/releases'. (2) Type Hosts IP address or hostname can be used. In the case of several hosts, the list can be separated by comma(','). Please type host list separated by comma(,) [127.0.0.1] OK, ['127.0.0.1'] (3) Type Masters How many masters would you like to create on each host? [10] OK, 10 Please type ports separate with a comma(,) and use a hyphen(-) for range. [18100-18109] OK, ['18100-18109'] Define how many master processes will be created in the cluster per server. Tip To create a cluster, 3 master processes should be included at least. (4) Type information of slave How many replicas would you like to create on each master? [0] OK, 0 Define how many slave processes will be created for a master process. (5) Type the count of SSD(disk) and the path of DB files How many ssd would you like to use? [4] OK, 4 Type prefix of db path [/nvme/data_] OK, /nvme/data_ With this setting, LightningDB will use 4 disk paths('/nvme/data_01', '/nvme/data_02', '/nvme/data_03', '/nvme/data_04'). Tip In order to use this setting, the 'nvkvs' directories must be generated under all disk path and the permission setting(chmod/chown) for the directory must be configured as follows. $ pwd /nvme/data_01 $ mkdir nvkvs $chown ltdb nvkvs // The current user is 'ltdb' $chmod 755 nvkvs $ ls -alh drwxr-xr-x 10 ltdb ltdb 4.0K 4\uc6d4 27 14:34 . drwxr-xr-x 33 ltdb ltdb 4.0K 2\uc6d4 4 10:19 .. drwxrwxr-x 3 ltdb ltdb 4.0K 6\uc6d4 5 18:36 nvkvs // The current user is 'ltdb' ... (6) Check all settings finally Finally, all settings will be shown and confirmation will be requested like below. +--------------+---------------------------------------+ | NAME | VALUE | +--------------+---------------------------------------+ | installer | lightningdb.release.master.5a6a38.bin | | hosts | 127.0.0.1 | | master ports | 18100-18109 | | ssd count | 4 | | db path | /nvme/data_ | +--------------+---------------------------------------+ Do you want to proceed with the deploy accroding to the above information? 
(y/n) y (7) Deploy cluster After deploying is completed, the following messages are shown and LTCLI of the cluster is activated. Check status of hosts... +-----------+--------+ | HOST | STATUS | +-----------+--------+ | 127.0.0.1 | OK | +-----------+--------+ OK Checking for cluster exist... +-----------+--------+ | HOST | STATUS | +-----------+--------+ | 127.0.0.1 | CLEAN | +-----------+--------+ OK Transfer installer and execute... - 127.0.0.1 Sync conf... Complete to deploy cluster 1. Cluster '1' selected. When an error occurs during deploying, error messages will be shown like below. (8) Errors Host connection error Check status of hosts... +-------+------------------+ | HOST | STATUS | +-------+------------------+ | nodeA | OK | | nodeB | SSH ERROR | | nodeC | UNKNOWN HOST | | nodeD | CONNECTION ERROR | +-------+------------------+ There are unavailable host. SSH ERROR SSH access error. Please check SSH KEY exchange or the status of SSH client/server. UNKNOWN HOST Can not get IP address with the hostname. Please check if the hostname is right. CONNECTION ERROR Please check the status of the host(server) or outbound/inbound of the server. Cluster already exist Checking for cluster exist... +-------+---------------+ | HOST | STATUS | +-------+---------------+ | nodeA | CLEAN | | nodeB | CLEAN | | nodeC | CLUSTER EXIST | | nodeD | CLUSTER EXIST | +-------+---------------+ Cluster information exist on some hosts. CLUSTER EXIST LightningDB is already deployed in the cluster of the host. Not include localhost Check status of hosts... +-------+------------------+ | HOST | STATUS | +-------+------------------+ | nodeB | OK | | nodeC | OK | | nodeD | OK | +-------+------------------+ Must include localhost. If the localhost(127.0.0.1) is not included in host information, this error occurs. Please add the localhost in the host list in this case.","title":"2. Deploy LightningDB"},{"location":"install-ltcli/#3-start-lightningdb","text":"Create a cluster of LightningDB using 'cluster create' command. ec2-user@lightningdb:1> cluster create Check status of hosts... OK Backup redis master log in each MASTER hosts... - 127.0.0.1 create redis data directory in each MASTER hosts - 127.0.0.1 sync conf +-----------+--------+ | HOST | STATUS | +-----------+--------+ | 127.0.0.1 | OK | +-----------+--------+ OK Starting master nodes : 127.0.0.1 : 18100|18101|18102|18103|18104|18105|18106|18107|18108|18109 ... Wait until all redis process up... cur: 10 / total: 10 Complete all redis process up >>> Creating cluster +-----------+-------+--------+ | HOST | PORT | TYPE | +-----------+-------+--------+ | 127.0.0.1 | 18100 | MASTER | | 127.0.0.1 | 18101 | MASTER | | 127.0.0.1 | 18102 | MASTER | | 127.0.0.1 | 18103 | MASTER | | 127.0.0.1 | 18104 | MASTER | | 127.0.0.1 | 18105 | MASTER | | 127.0.0.1 | 18106 | MASTER | | 127.0.0.1 | 18107 | MASTER | | 127.0.0.1 | 18108 | MASTER | | 127.0.0.1 | 18109 | MASTER | +-----------+-------+--------+ replicas: 0 Do you want to proceed with the create according to the above information? (y/n) y Cluster meet... - 127.0.0.1:18107 - 127.0.0.1:18106 - 127.0.0.1:18101 - 127.0.0.1:18100 - 127.0.0.1:18103 - 127.0.0.1:18109 - 127.0.0.1:18102 - 127.0.0.1:18108 - 127.0.0.1:18105 - 127.0.0.1:18104 Adding slots... - 127.0.0.1:18107, 1642 - 127.0.0.1:18106, 1638 - 127.0.0.1:18101, 1638 - 127.0.0.1:18100, 1638 - 127.0.0.1:18103, 1638 - 127.0.0.1:18109, 1638 - 127.0.0.1:18102, 1638 - 127.0.0.1:18108, 1638 - 127.0.0.1:18105, 1638 - 127.0.0.1:18104, 1638 Check cluster state and asign slot... 
Ok create cluster complete. ec2-user@lightningdb:1> cli ping --all alive redis 10/10 ec2-user@lightningdb:1> From now, you can try ingestion and query in LightningDB with Zeppelin . And for further information about commands of LTCLI, please use Command Line . If you type 'enter' without any text, the default value is applied. In some cases, the default value will not be provided. \u21a9","title":"3. Start LightningDB"},{"location":"kaetlyn/","text":"1. Kafka broker \u00b6 1. Kafka Cluster (1) Install kafka Install kafka in each server in which kafka cluster is utilized. Add $KAFKA_HOME path into ~/.bash_profile . (2) Install zookeeper In each server of kafka cluster, set $dataDir , $server.1 ~ $server.n properties in $KAFKA_HOME/config/zookeeper.properties . For example, if you try to configure kafka cluster with my-server1 , my-server2 , set server.1 , server.2 fields. dataDir=/hdd_01/zookeeper # the port at which the clients will connect clientPort=2181 # disable the per-ip limit on the number of connections since this is a non-production config maxClientCnxns=0 initLimit=5 syncLimit=2 # Zookeeper will use these ports (2891, etc.) to connect the individual follower nodes to the leader nodes. # The other ports (3881, etc.) are used for leader election in the ensemble. server.1=my-server1:2888:3888 server.2=my-server2:2888:3888 In each server, set ${dataDir}/myid with its own id. For example, use echo \"1\" > ${dataDir}/myid in my-server1 and echo \"2\" > ${dataDir}/myid in my-server2. start zookeeper in each server. > $KAFKA_HOME/bin/zookeeper-server-start.sh config/zookeeper.properties & (3) Start kafka broker Edit $KAFKA_HOME/conf/server.properties in each server, Set Broker ID in my-server1 . broker.id=1 // '2' in case of my-server2 Configure zookeeper IP and PORT : Add , as seperator. zookeeper.connect=my-server1:2181,my-server2:2181 Configure a path for Kafka data: Add a directory in each disk for load balancing. log.dirs=/hdd_01/kafka,/hdd_02/kafka,/hdd_03/kafka,/hdd_04/kafka Configure a retention time for keeping record and a retention size limit for each partition. # default value: 168 log.retention.hours=168 # '-1' means 'unlimited'. log.retention.bytes=-1 Configure a max size of a message. # If a size of a produced message exceed this limit, the exception is thrown. # If you want to create a message with many rows, increase this value and restart broker. # default value: 1000012 byte message.max.bytes=1000012 Start kafka server in each server. > $KAFKA_HOME/bin/kafka-server-start.sh config/server.properties & Create topic. # --zookeeper localhost:2181 : Need zookeeper host & clientPort, because topics and partition information are stored in zookeeper. # --topic nvkvs : For example, set 'nvkvs' as topic name. # --partitions 16 : For example, set 2 partitions in each disk and use 16 partitions((# of cluster nodes) X (# of disks in each node) X 2 = 2 X 4 X 2 = 16). # --replication-factor 2 : Create 1 follower for each partition. > $KAFKA_HOME/bin/kafka-topics.sh --create --zookeeper localhost:2181 --replication-factor 2 --partitions 16 --topic nvkvs # Check a generated topic: A broker.id of Replicas is different with a broker.id of Leader. 
> $KAFKA_HOME/bin/kafka-topics.sh --zookeeper localhost:2181 --describe --topic nvkvs Topic:nvkvs PartitionCount:16 ReplicationFactor:2 Configs: Topic: nvkvs Partition: 0 Leader: 0 Replicas: 0,1 Isr: 1,0 Topic: nvkvs Partition: 1 Leader: 1 Replicas: 1,0 Isr: 1,0 Topic: nvkvs Partition: 2 Leader: 0 Replicas: 0,1 Isr: 1,0 Topic: nvkvs Partition: 3 Leader: 1 Replicas: 1,0 Isr: 1,0 Topic: nvkvs Partition: 4 Leader: 0 Replicas: 0,1 Isr: 1,0 Topic: nvkvs Partition: 5 Leader: 1 Replicas: 1,0 Isr: 1,0 Topic: nvkvs Partition: 6 Leader: 0 Replicas: 0,1 Isr: 1,0 Topic: nvkvs Partition: 7 Leader: 1 Replicas: 1,0 Isr: 1,0 Topic: nvkvs Partition: 8 Leader: 0 Replicas: 0,1 Isr: 1,0 Topic: nvkvs Partition: 9 Leader: 1 Replicas: 1,0 Isr: 1,0 Topic: nvkvs Partition: 10 Leader: 0 Replicas: 0,1 Isr: 1,0 Topic: nvkvs Partition: 11 Leader: 1 Replicas: 1,0 Isr: 1,0 Topic: nvkvs Partition: 12 Leader: 0 Replicas: 0,1 Isr: 1,0 Topic: nvkvs Partition: 13 Leader: 1 Replicas: 1,0 Isr: 1,0 Topic: nvkvs Partition: 14 Leader: 0 Replicas: 0,1 Isr: 1,0 Topic: nvkvs Partition: 15 Leader: 1 Replicas: 1,0 Isr: 1,0 Delete topic / Modify the number of partitions. # Topic delete Command > $KAFKA_HOME/bin/kafka-topics.sh --zookeeper localhost:2181 --delete --topic nvkvs # Topic partition modification > $KAFKA_HOME/bin/kafka-topics.sh --zookeeper localhost:2181/chroot --alter --topic nvkvs --partitions 6 2. Kafka Topic Information Consumer list > $KAFKA_HOME/bin/kafka-consumer-groups.sh --list --bootstrap-server localhost:9092 Console consumer start > $KAFKA_HOME/bin/kafka-console-consumer.sh --bootstrap-server localhost:9092 --topic nvkvs --from-beginning Consumer offset check # Add '--group {consumer group name}' > $KAFKA_HOME/bin/kafka-consumer-groups.sh --bootstrap-server localhost:9092 --describe --group (Consumer group name) TOPIC PARTITION CURRENT-OFFSET LOG-END-OFFSET LAG CONSUMER-ID HOST CLIENT-ID nvkvs 4 272904 272904 0 - - - nvkvs 12 272904 272904 0 - - - nvkvs 15 273113 273113 0 - - - nvkvs 6 272906 272906 0 - - - nvkvs 0 272907 272907 0 - - - nvkvs 8 272905 272905 0 - - - nvkvs 3 273111 273111 0 - - - nvkvs 9 273111 273111 0 - - - nvkvs 13 273111 273111 0 - - - nvkvs 10 272912 272912 0 - - - nvkvs 1 273111 273111 0 - - - nvkvs 11 273112 273112 0 - - - nvkvs 14 272904 272904 0 - - - nvkvs 7 273110 273110 0 - - - nvkvs 5 273111 273111 0 - - - nvkvs 2 272905 272905 0 - - - Consumer offset modification # --shift-by # --group < name of group to shift> > $KAFKA_HOME/bin/kafka-consumer-groups.sh --bootstrap-server localhost:9092 --reset-offsets --shift-by -10000 --execute --group (Consumer \uadf8\ub8f9\uba85) --topic nvkvs 2. Kafka consumer \u00b6 1. Kaetlyn Consumer tsr2-kaetlyn edit KAFKA_SERVER : Kafka Broker\uc758 host:port DRIVER_MEMORY, EXECUTOR_MEMORY : A memory of Spark Driver/Excutor\uc758 Memory in Yarn. After start, check 'FGC' count with using 'jstat -gc' and optimize these values. EXECUTERS, EXECUTER_CORES : Basically consumers as many as the number of kafka partitions are generated. With this rule, need to optimize the number of EXECUTERS, EXECUTER_CORES. JSON_PATH : The path of TABLE json. Do not support hdfs path. This is relative path from tsr2-kaetlyn. KAFKA_CONSUMER_GROUP_ID : consumer group id KAFKA_CONSUMING_TOPIC_LIST : Topic list with seperator ','. JOB_GENERATION_PERIOD : With this period, check latest-offset and execute consuming job. MAX_RATE_PER_PARTITION : the maximum offset that a consumer executes within a job period. 
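MAX_RATE_PER_PARTITION and JOB_GENERATION_PERIOD are easier to tune while watching consumer lag. The sketch below simply loops over the 'Consumer offset check' command from the broker section above and sums the LAG column; the group name and broker address are the example values used on this page, so replace them with your own KAFKA_CONSUMER_GROUP_ID and KAFKA_SERVER.

```bash
#!/bin/bash
# Rough lag watcher for tuning MAX_RATE_PER_PARTITION / JOB_GENERATION_PERIOD.
# Assumes the example values from this page; adjust GROUP and BROKER as needed.
GROUP=nvkvs_redis_connector
BROKER=localhost:9092

while true; do
  # Sum the LAG column (5th field) of the kafka-consumer-groups.sh describe output;
  # the numeric check skips the header and any blank lines.
  total=$("$KAFKA_HOME"/bin/kafka-consumer-groups.sh --bootstrap-server "$BROKER" \
            --describe --group "$GROUP" 2>/dev/null \
          | awk '$5 ~ /^[0-9]+$/ { sum += $5 } END { print sum + 0 }')
  echo "$(date '+%H:%M:%S') total lag: $total"
  sleep 10
done
```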
> cfc 1 (or c01) > tsr2-kaetlyn edit #!/bin/bash ############################################################################### # Common variables SPARK_CONF=${SPARK_CONF:-$SPARK_HOME/conf} SPARK_BIN=${SPARK_BIN:-$SPARK_HOME/bin} SPARK_SBIN=${SPARK_SBIN:-$SPARK_HOME/sbin} SPARK_LOG=${SPARK_LOG:-$SPARK_HOME/logs} SPARK_METRICS=${SPARK_CONF}/metrics.properties SPARK_UI_PORT=${SPARK_UI_PORT:-14040} KAFKA_SERVER=my-server1:9092 ############################################################################### # Properties for Consumer DRIVER_MEMORY=2g EXECUTOR_MEMORY=2g EXECUTERS=16 EXECUTER_CORES=4 JSON_PATH=~/Flashbase/flashbase-benchmark/json/load_no_skew KAFKA_CONSUMER_GROUP_ID=nvkvs_redis_connector KAFKA_CONSUMING_TOPIC_LIST=nvkvs JOB_GENERATION_PERIOD=1 MAX_RATE_PER_PARTITION=100 ... 2. Kaetlyn Consumer start/stop Because a kaetlyn consumer is a spark application in yarn cluster, Hadoop/Yarn and spark should already be installed. Start and monitor Driver Log > tsr2-kaetlyn consumer start > tsr2-kaetlyn consumer monitor If a consumer is started successfully, a state of application in yarn is set as RUNNING. > yarn application -list Stop : By SIGTERM, stop a current job and update kafka offset. > tsr2-kaetlyn consumer stop 3. Kaetlyn Log level modification Kaetlyn use a logback as a logger. After kaetlyn consumer start, '$SPARK_HOME/conf/logback-kaetlyn.xml' file is generated. to modify driver log level, edit this file. > vi $SPARK_HOME/conf/logback-kaetlyn.xml 3. Kafka producer \u00b6 Start kafka producer. kafka-console-producer.sh --broker-list localhost:9092 --topic {topic name} < {filename to ingest} To produce for a kaetlyn consumer, 2 header fields should be included. TABLE_ID SEPARATOR If you use 'kafkacat', you can produce with the additional header fields.(https://docs.confluent.io/3.3.0/app-development/kafkacat-usage.html# ) 1. How to install kafkacat c++ compiler $yum install gcc-c++ Download source codes $ git clone https://github.com/edenhill/librdkafka Make and Installation $ cd librdkafka $ ./configure $ make $ sudo make install Move to '/usr/local/lib' and execute below commands. $ git clone https://github.com/edenhill/kafkacat $ cd kafkacat $ ./configure $ make $ sudo make install How to find Lib path $ ldd kafkacat Create and edit /etc/ld.so.conf.d/usrlocal.conf Contents: /usr/local/lib Save and execution $ ldconfig -v If 'kafkacat' is shown, kafkacat is installed successfully. $kafkacat 2. Producing with kafkacat 1) Produce a single file kafkacat -b localhost:9092 -t {topic name} -T -P -H TABLE_ID='{table id}' -H SEPARATOR='|' -l {filename} 2) Produce all files in a directory After moving to the directory path, ls | xargs -n 1 kafkacat -q -b localhost:9092 -t {topic name} -P -H TABLE_ID='{table id}' -H SEPARATOR='|' -l 3. kafka-utils.sh With kafka-utils.sh, check the status of kafka broker. Because 'kafka-utils.sh' exists under sbin path of each cluster, you can use this with 'cfc {cluster number}'. [C:6][ltdb@d205 ~]$ which kafka-utils.sh ~/tsr2/cluster_6/tsr2-assembly-1.0.0-SNAPSHOT/sbin/kafka-utils.sh After 'CONSUMER_GROUP_ID' is set, kafka-utils.sh is enabled. [C:6][ltdb@d205 ~]$ kafka-utils.sh help Please, set $CONSUMER_GROUP_ID first. Need to set'kafka-utils.sh'. #!/bin/bash CONSUMER_GROUP_ID='nvkvs_redis_connector' // Need to modify KAFKA_SERVER=localhost:9092 ZOOKEEPER_SERVER=localhost:2181... 
[C:6][ltdb@d205 ~/kafka/config]$ kafka-utils.sh help kafka-utils.sh offset-check kafka-utils.sh offset-monitor kafka-utils.sh offset-earliest topic_name kafka-utils.sh offset-latest topic_name kafka-utils.sh offset-move topic_name 10000 kafka-utils.sh error-monitor error_topic_name kafka-utils.sh consumer-list kafka-utils.sh topic-check topic_name kafka-utils.sh topic-create topic_name 10 kafka-utils.sh topic-delete topic_name kafka-utils.sh topic-config-check topic_name kafka-utils.sh topic-config-set topic_name config_name config_value kafka-utils.sh topic-config-remove topic_name config_name kafka-utils.sh topic-list kafka-utils.sh message-earliest topic_name kafka-utils.sh message-latest topic_name If a command needs args, the error messages like below is shown. [C:6][ltdb@d205 ~/kafka/config]$ kafka-utils.sh offset-move Please, specify topic name & the size of moving offset (ex) kafka-utils.sh offset-move my-topic 100 [C:6][ltdb@d205 ~/kafka/config]$ kafka-utils.sh topic-create Please, specify topic name and its partition count. (ex) kafka-utils.sh topic-create topic-new 10 [C:6][ltdb@d205 ~/kafka/config]$ For example, [C:6][ltdb@d205 ~]$ kafka-utils.sh message-earliest nvkvs3 20160711055950|ELG|2635055200|34317|5|6091|1|25|0|11|0|100.0|0.0|0|2846|3|33|0|5|0|-1000|0.0|0.0|94932|1027|0|176|35.2|40|0|7818000000|109816071|10|0|6000000.0|164843|2.75|0|2592|6000000|0.04|1288488|1303|1338|0|530|1|88.33|0|721|67948|428|0|1|108|108.0|108|0|0.0|0|0|0|-1000|1|1|100.0|62|39.0|62.9|23.0|37.1|0|0|0|0|29|10|-7022851.0|59998.0|-117.05|-6865443.5|59998.0|-114.43|4|198060.0|59998.0|22.5|3.3|0|1|5.82|3|1.94||0|0|0|0|0|0|0|0|4|0|0|0|15|14|231|140|0|0|0|0|0|0|0|0|4|0|0|0|15|13|174|110|1|0|0|0|0|0|0|0|0|0|0|0|0|0|1|0|0|0|0|0|0|0|1|0|0|0|0|0|0|0|0|0|0.0|0.0|0.0|0.0|0.0|0.0|570.0|0.0|3.0|0.0|0.0|0.0|0.0|2.0|3.0|3.0|0.0|15.73|0.0|0.0|0.0|0.0|0.0|12.0|22.0|68.0|83.0|339.0|205.0|144.0|54.0|38.0|12.0|0.0|0.0|0.0|0.0|0.0|0.0|100.0|50.55|1:22,2:7|1.0|||||1:1,17:1,23:1|13.67|0|0|0.0|0.0|-1000||-1000||-1000|11|2|05 Processed a total of 1 messages [C:6][ltdb@d205 ~]$ kafka-utils.sh topic-list __consumer_offsets nvkvs3 topic-error topic_name [C:6][ltdb@d205 ~]$ kafka-utils.sh topic-create ksh 18 Created topic ksh. [C:6][ltdb@d205 ~]$ kafka-utils.sh topic-check ksh Topic:ksh PartitionCount:18 ReplicationFactor:2 Configs: Topic: ksh Partition: 0 Leader: 1 Replicas: 1,3 Isr: 1,3 Topic: ksh Partition: 1 Leader: 2 Replicas: 2,1 Isr: 2,1 Topic: ksh Partition: 2 Leader: 3 Replicas: 3,2 Isr: 3,2 Topic: ksh Partition: 3 Leader: 1 Replicas: 1,2 Isr: 1,2 Topic: ksh Partition: 4 Leader: 2 Replicas: 2,3 Isr: 2,3 Topic: ksh Partition: 5 Leader: 3 Replicas: 3,1 Isr: 3,1 Topic: ksh Partition: 6 Leader: 1 Replicas: 1,3 Isr: 1,3 Topic: ksh Partition: 7 Leader: 2 Replicas: 2,1 Isr: 2,1 Topic: ksh Partition: 8 Leader: 3 Replicas: 3,2 Isr: 3,2 Topic: ksh Partition: 9 Leader: 1 Replicas: 1,2 Isr: 1,2 Topic: ksh Partition: 10 Leader: 2 Replicas: 2,3 Isr: 2,3 Topic: ksh Partition: 11 Leader: 3 Replicas: 3,1 Isr: 3,1 Topic: ksh Partition: 12 Leader: 1 Replicas: 1,3 Isr: 1,3 Topic: ksh Partition: 13 Leader: 2 Replicas: 2,1 Isr: 2,1 Topic: ksh Partition: 14 Leader: 3 Replicas: 3,2 Isr: 3,2 Topic: ksh Partition: 15 Leader: 1 Replicas: 1,2 Isr: 1,2 Topic: ksh Partition: 16 Leader: 2 Replicas: 2,3 Isr: 2,3 Topic: ksh Partition: 17 Leader: 3 Replicas: 3,1 Isr: 3,1","title":"Data ingestion with KAFKA"},{"location":"kaetlyn/#1-kafka-broker","text":"1. Kafka Cluster (1) Install kafka Install kafka in each server in which kafka cluster is utilized. 
Add $KAFKA_HOME path into ~/.bash_profile . (2) Install zookeeper In each server of kafka cluster, set $dataDir , $server.1 ~ $server.n properties in $KAFKA_HOME/config/zookeeper.properties . For example, if you try to configure kafka cluster with my-server1 , my-server2 , set server.1 , server.2 fields. dataDir=/hdd_01/zookeeper # the port at which the clients will connect clientPort=2181 # disable the per-ip limit on the number of connections since this is a non-production config maxClientCnxns=0 initLimit=5 syncLimit=2 # Zookeeper will use these ports (2891, etc.) to connect the individual follower nodes to the leader nodes. # The other ports (3881, etc.) are used for leader election in the ensemble. server.1=my-server1:2888:3888 server.2=my-server2:2888:3888 In each server, set ${dataDir}/myid with its own id. For example, use echo \"1\" > ${dataDir}/myid in my-server1 and echo \"2\" > ${dataDir}/myid in my-server2. start zookeeper in each server. > $KAFKA_HOME/bin/zookeeper-server-start.sh config/zookeeper.properties & (3) Start kafka broker Edit $KAFKA_HOME/conf/server.properties in each server, Set Broker ID in my-server1 . broker.id=1 // '2' in case of my-server2 Configure zookeeper IP and PORT : Add , as seperator. zookeeper.connect=my-server1:2181,my-server2:2181 Configure a path for Kafka data: Add a directory in each disk for load balancing. log.dirs=/hdd_01/kafka,/hdd_02/kafka,/hdd_03/kafka,/hdd_04/kafka Configure a retention time for keeping record and a retention size limit for each partition. # default value: 168 log.retention.hours=168 # '-1' means 'unlimited'. log.retention.bytes=-1 Configure a max size of a message. # If a size of a produced message exceed this limit, the exception is thrown. # If you want to create a message with many rows, increase this value and restart broker. # default value: 1000012 byte message.max.bytes=1000012 Start kafka server in each server. > $KAFKA_HOME/bin/kafka-server-start.sh config/server.properties & Create topic. # --zookeeper localhost:2181 : Need zookeeper host & clientPort, because topics and partition information are stored in zookeeper. # --topic nvkvs : For example, set 'nvkvs' as topic name. # --partitions 16 : For example, set 2 partitions in each disk and use 16 partitions((# of cluster nodes) X (# of disks in each node) X 2 = 2 X 4 X 2 = 16). # --replication-factor 2 : Create 1 follower for each partition. > $KAFKA_HOME/bin/kafka-topics.sh --create --zookeeper localhost:2181 --replication-factor 2 --partitions 16 --topic nvkvs # Check a generated topic: A broker.id of Replicas is different with a broker.id of Leader. 
> $KAFKA_HOME/bin/kafka-topics.sh --zookeeper localhost:2181 --describe --topic nvkvs Topic:nvkvs PartitionCount:16 ReplicationFactor:2 Configs: Topic: nvkvs Partition: 0 Leader: 0 Replicas: 0,1 Isr: 1,0 Topic: nvkvs Partition: 1 Leader: 1 Replicas: 1,0 Isr: 1,0 Topic: nvkvs Partition: 2 Leader: 0 Replicas: 0,1 Isr: 1,0 Topic: nvkvs Partition: 3 Leader: 1 Replicas: 1,0 Isr: 1,0 Topic: nvkvs Partition: 4 Leader: 0 Replicas: 0,1 Isr: 1,0 Topic: nvkvs Partition: 5 Leader: 1 Replicas: 1,0 Isr: 1,0 Topic: nvkvs Partition: 6 Leader: 0 Replicas: 0,1 Isr: 1,0 Topic: nvkvs Partition: 7 Leader: 1 Replicas: 1,0 Isr: 1,0 Topic: nvkvs Partition: 8 Leader: 0 Replicas: 0,1 Isr: 1,0 Topic: nvkvs Partition: 9 Leader: 1 Replicas: 1,0 Isr: 1,0 Topic: nvkvs Partition: 10 Leader: 0 Replicas: 0,1 Isr: 1,0 Topic: nvkvs Partition: 11 Leader: 1 Replicas: 1,0 Isr: 1,0 Topic: nvkvs Partition: 12 Leader: 0 Replicas: 0,1 Isr: 1,0 Topic: nvkvs Partition: 13 Leader: 1 Replicas: 1,0 Isr: 1,0 Topic: nvkvs Partition: 14 Leader: 0 Replicas: 0,1 Isr: 1,0 Topic: nvkvs Partition: 15 Leader: 1 Replicas: 1,0 Isr: 1,0 Delete topic / Modify the number of partitions. # Topic delete Command > $KAFKA_HOME/bin/kafka-topics.sh --zookeeper localhost:2181 --delete --topic nvkvs # Topic partition modification > $KAFKA_HOME/bin/kafka-topics.sh --zookeeper localhost:2181/chroot --alter --topic nvkvs --partitions 6 2. Kafka Topic Information Consumer list > $KAFKA_HOME/bin/kafka-consumer-groups.sh --list --bootstrap-server localhost:9092 Console consumer start > $KAFKA_HOME/bin/kafka-console-consumer.sh --bootstrap-server localhost:9092 --topic nvkvs --from-beginning Consumer offset check # Add '--group {consumer group name}' > $KAFKA_HOME/bin/kafka-consumer-groups.sh --bootstrap-server localhost:9092 --describe --group (Consumer group name) TOPIC PARTITION CURRENT-OFFSET LOG-END-OFFSET LAG CONSUMER-ID HOST CLIENT-ID nvkvs 4 272904 272904 0 - - - nvkvs 12 272904 272904 0 - - - nvkvs 15 273113 273113 0 - - - nvkvs 6 272906 272906 0 - - - nvkvs 0 272907 272907 0 - - - nvkvs 8 272905 272905 0 - - - nvkvs 3 273111 273111 0 - - - nvkvs 9 273111 273111 0 - - - nvkvs 13 273111 273111 0 - - - nvkvs 10 272912 272912 0 - - - nvkvs 1 273111 273111 0 - - - nvkvs 11 273112 273112 0 - - - nvkvs 14 272904 272904 0 - - - nvkvs 7 273110 273110 0 - - - nvkvs 5 273111 273111 0 - - - nvkvs 2 272905 272905 0 - - - Consumer offset modification # --shift-by # --group < name of group to shift> > $KAFKA_HOME/bin/kafka-consumer-groups.sh --bootstrap-server localhost:9092 --reset-offsets --shift-by -10000 --execute --group (Consumer \uadf8\ub8f9\uba85) --topic nvkvs","title":"1. Kafka broker"},{"location":"kaetlyn/#2-kafka-consumer","text":"1. Kaetlyn Consumer tsr2-kaetlyn edit KAFKA_SERVER : Kafka Broker\uc758 host:port DRIVER_MEMORY, EXECUTOR_MEMORY : A memory of Spark Driver/Excutor\uc758 Memory in Yarn. After start, check 'FGC' count with using 'jstat -gc' and optimize these values. EXECUTERS, EXECUTER_CORES : Basically consumers as many as the number of kafka partitions are generated. With this rule, need to optimize the number of EXECUTERS, EXECUTER_CORES. JSON_PATH : The path of TABLE json. Do not support hdfs path. This is relative path from tsr2-kaetlyn. KAFKA_CONSUMER_GROUP_ID : consumer group id KAFKA_CONSUMING_TOPIC_LIST : Topic list with seperator ','. JOB_GENERATION_PERIOD : With this period, check latest-offset and execute consuming job. MAX_RATE_PER_PARTITION : the maximum offset that a consumer executes within a job period. 
> cfc 1 (or c01) > tsr2-kaetlyn edit #!/bin/bash ############################################################################### # Common variables SPARK_CONF=${SPARK_CONF:-$SPARK_HOME/conf} SPARK_BIN=${SPARK_BIN:-$SPARK_HOME/bin} SPARK_SBIN=${SPARK_SBIN:-$SPARK_HOME/sbin} SPARK_LOG=${SPARK_LOG:-$SPARK_HOME/logs} SPARK_METRICS=${SPARK_CONF}/metrics.properties SPARK_UI_PORT=${SPARK_UI_PORT:-14040} KAFKA_SERVER=my-server1:9092 ############################################################################### # Properties for Consumer DRIVER_MEMORY=2g EXECUTOR_MEMORY=2g EXECUTERS=16 EXECUTER_CORES=4 JSON_PATH=~/Flashbase/flashbase-benchmark/json/load_no_skew KAFKA_CONSUMER_GROUP_ID=nvkvs_redis_connector KAFKA_CONSUMING_TOPIC_LIST=nvkvs JOB_GENERATION_PERIOD=1 MAX_RATE_PER_PARTITION=100 ... 2. Kaetlyn Consumer start/stop Because a kaetlyn consumer is a spark application in yarn cluster, Hadoop/Yarn and spark should already be installed. Start and monitor Driver Log > tsr2-kaetlyn consumer start > tsr2-kaetlyn consumer monitor If a consumer is started successfully, a state of application in yarn is set as RUNNING. > yarn application -list Stop : By SIGTERM, stop a current job and update kafka offset. > tsr2-kaetlyn consumer stop 3. Kaetlyn Log level modification Kaetlyn use a logback as a logger. After kaetlyn consumer start, '$SPARK_HOME/conf/logback-kaetlyn.xml' file is generated. to modify driver log level, edit this file. > vi $SPARK_HOME/conf/logback-kaetlyn.xml","title":"2. Kafka consumer"},{"location":"kaetlyn/#3-kafka-producer","text":"Start kafka producer. kafka-console-producer.sh --broker-list localhost:9092 --topic {topic name} < {filename to ingest} To produce for a kaetlyn consumer, 2 header fields should be included. TABLE_ID SEPARATOR If you use 'kafkacat', you can produce with the additional header fields.(https://docs.confluent.io/3.3.0/app-development/kafkacat-usage.html# ) 1. How to install kafkacat c++ compiler $yum install gcc-c++ Download source codes $ git clone https://github.com/edenhill/librdkafka Make and Installation $ cd librdkafka $ ./configure $ make $ sudo make install Move to '/usr/local/lib' and execute below commands. $ git clone https://github.com/edenhill/kafkacat $ cd kafkacat $ ./configure $ make $ sudo make install How to find Lib path $ ldd kafkacat Create and edit /etc/ld.so.conf.d/usrlocal.conf Contents: /usr/local/lib Save and execution $ ldconfig -v If 'kafkacat' is shown, kafkacat is installed successfully. $kafkacat 2. Producing with kafkacat 1) Produce a single file kafkacat -b localhost:9092 -t {topic name} -T -P -H TABLE_ID='{table id}' -H SEPARATOR='|' -l {filename} 2) Produce all files in a directory After moving to the directory path, ls | xargs -n 1 kafkacat -q -b localhost:9092 -t {topic name} -P -H TABLE_ID='{table id}' -H SEPARATOR='|' -l 3. kafka-utils.sh With kafka-utils.sh, check the status of kafka broker. Because 'kafka-utils.sh' exists under sbin path of each cluster, you can use this with 'cfc {cluster number}'. [C:6][ltdb@d205 ~]$ which kafka-utils.sh ~/tsr2/cluster_6/tsr2-assembly-1.0.0-SNAPSHOT/sbin/kafka-utils.sh After 'CONSUMER_GROUP_ID' is set, kafka-utils.sh is enabled. [C:6][ltdb@d205 ~]$ kafka-utils.sh help Please, set $CONSUMER_GROUP_ID first. Need to set'kafka-utils.sh'. #!/bin/bash CONSUMER_GROUP_ID='nvkvs_redis_connector' // Need to modify KAFKA_SERVER=localhost:9092 ZOOKEEPER_SERVER=localhost:2181... 
[C:6][ltdb@d205 ~/kafka/config]$ kafka-utils.sh help kafka-utils.sh offset-check kafka-utils.sh offset-monitor kafka-utils.sh offset-earliest topic_name kafka-utils.sh offset-latest topic_name kafka-utils.sh offset-move topic_name 10000 kafka-utils.sh error-monitor error_topic_name kafka-utils.sh consumer-list kafka-utils.sh topic-check topic_name kafka-utils.sh topic-create topic_name 10 kafka-utils.sh topic-delete topic_name kafka-utils.sh topic-config-check topic_name kafka-utils.sh topic-config-set topic_name config_name config_value kafka-utils.sh topic-config-remove topic_name config_name kafka-utils.sh topic-list kafka-utils.sh message-earliest topic_name kafka-utils.sh message-latest topic_name If a command needs args, the error messages like below is shown. [C:6][ltdb@d205 ~/kafka/config]$ kafka-utils.sh offset-move Please, specify topic name & the size of moving offset (ex) kafka-utils.sh offset-move my-topic 100 [C:6][ltdb@d205 ~/kafka/config]$ kafka-utils.sh topic-create Please, specify topic name and its partition count. (ex) kafka-utils.sh topic-create topic-new 10 [C:6][ltdb@d205 ~/kafka/config]$ For example, [C:6][ltdb@d205 ~]$ kafka-utils.sh message-earliest nvkvs3 20160711055950|ELG|2635055200|34317|5|6091|1|25|0|11|0|100.0|0.0|0|2846|3|33|0|5|0|-1000|0.0|0.0|94932|1027|0|176|35.2|40|0|7818000000|109816071|10|0|6000000.0|164843|2.75|0|2592|6000000|0.04|1288488|1303|1338|0|530|1|88.33|0|721|67948|428|0|1|108|108.0|108|0|0.0|0|0|0|-1000|1|1|100.0|62|39.0|62.9|23.0|37.1|0|0|0|0|29|10|-7022851.0|59998.0|-117.05|-6865443.5|59998.0|-114.43|4|198060.0|59998.0|22.5|3.3|0|1|5.82|3|1.94||0|0|0|0|0|0|0|0|4|0|0|0|15|14|231|140|0|0|0|0|0|0|0|0|4|0|0|0|15|13|174|110|1|0|0|0|0|0|0|0|0|0|0|0|0|0|1|0|0|0|0|0|0|0|1|0|0|0|0|0|0|0|0|0|0.0|0.0|0.0|0.0|0.0|0.0|570.0|0.0|3.0|0.0|0.0|0.0|0.0|2.0|3.0|3.0|0.0|15.73|0.0|0.0|0.0|0.0|0.0|12.0|22.0|68.0|83.0|339.0|205.0|144.0|54.0|38.0|12.0|0.0|0.0|0.0|0.0|0.0|0.0|100.0|50.55|1:22,2:7|1.0|||||1:1,17:1,23:1|13.67|0|0|0.0|0.0|-1000||-1000||-1000|11|2|05 Processed a total of 1 messages [C:6][ltdb@d205 ~]$ kafka-utils.sh topic-list __consumer_offsets nvkvs3 topic-error topic_name [C:6][ltdb@d205 ~]$ kafka-utils.sh topic-create ksh 18 Created topic ksh. [C:6][ltdb@d205 ~]$ kafka-utils.sh topic-check ksh Topic:ksh PartitionCount:18 ReplicationFactor:2 Configs: Topic: ksh Partition: 0 Leader: 1 Replicas: 1,3 Isr: 1,3 Topic: ksh Partition: 1 Leader: 2 Replicas: 2,1 Isr: 2,1 Topic: ksh Partition: 2 Leader: 3 Replicas: 3,2 Isr: 3,2 Topic: ksh Partition: 3 Leader: 1 Replicas: 1,2 Isr: 1,2 Topic: ksh Partition: 4 Leader: 2 Replicas: 2,3 Isr: 2,3 Topic: ksh Partition: 5 Leader: 3 Replicas: 3,1 Isr: 3,1 Topic: ksh Partition: 6 Leader: 1 Replicas: 1,3 Isr: 1,3 Topic: ksh Partition: 7 Leader: 2 Replicas: 2,1 Isr: 2,1 Topic: ksh Partition: 8 Leader: 3 Replicas: 3,2 Isr: 3,2 Topic: ksh Partition: 9 Leader: 1 Replicas: 1,2 Isr: 1,2 Topic: ksh Partition: 10 Leader: 2 Replicas: 2,3 Isr: 2,3 Topic: ksh Partition: 11 Leader: 3 Replicas: 3,1 Isr: 3,1 Topic: ksh Partition: 12 Leader: 1 Replicas: 1,3 Isr: 1,3 Topic: ksh Partition: 13 Leader: 2 Replicas: 2,1 Isr: 2,1 Topic: ksh Partition: 14 Leader: 3 Replicas: 3,2 Isr: 3,2 Topic: ksh Partition: 15 Leader: 1 Replicas: 1,2 Isr: 1,2 Topic: ksh Partition: 16 Leader: 2 Replicas: 2,3 Isr: 2,3 Topic: ksh Partition: 17 Leader: 3 Replicas: 3,1 Isr: 3,1","title":"3. Kafka producer"},{"location":"manage-failover/","text":"Note This document guides how to use 'flashbase' script for failover. 
If you use LTCLI, you can check the failure status and operate LightningDB more easily and with more functionality. Therefore, if possible, we recommend LTCLI rather than the 'flashbase' script. 1. Prerequisite \u00b6 1) Redis Check 'flashbase cluster-rowcount' Check 'flashbase cli-all config get flash-db-ttl' Check 'flashbase cli-all info keyspace' // 'memKeys' (the number of in-memory data keys) Check 'flashbase cli-all info tablespace' // 'totalRowgroups', 'totalRows' Check 'flashbase cli-all info eviction' // 'avg full percent' 2) Thriftserver Check cron jobs with 'crontab -e'. Check table schema and query. select * from {table name} where ... limit 1; 3) System resources Check available memory (nmon, 'free -h') Check the status of disks (nmon, 'df -h') 2. Check the status and failover \u00b6 1) Background If a redis-server is killed, the status of the node is changed to 'disconnected'. 543f81b6c5d6e29b9871ddbbd07a4524508d27e5 127.0.0.1:18202 master - 1585787616744 1585787612000 0 disconnected After a single node has detected that a redis-server is disconnected, the status of the redis-server is changed to pFail . After all nodes in the cluster have confirmed that the node is disconnected, the status of the redis-server is changed to Fail . If the node is replicated, its slave is failed over. With 'cluster-failover.sh', you can trigger a failover regardless of the status (pFail/Fail). 543f81b6c5d6e29b9871ddbbd07a4524508d27e5 127.0.0.1:18202 master,fail - 1585787616744 1585787612000 0 disconnected If the node-{port}.conf file is lost due to disk failure, the redis-server using that conf file creates a new uuid. Because the previous uuid in the cluster is lost, the uuid is changed to noaddr . This noaddr uuid should be removed with the cluster forget command. // previous uuid of 18202 543f81b6c5d6e29b9871ddbbd07a4524508d27e5 :0 master,fail,noaddr - 1585787799235 1585787799235 0 disconnected // new uuid of 18202 001ce4a87de2f2fc62ff44e2b5387a3f0bb9837c 127.0.0.1:18202 master - 0 1585787800000 0 connected 2) Check the status of the cluster 1) check-distribution Show the distribution of masters/slaves on each server. > flashbase check-distribution check distribution of masters/slaves... SERVER NAME | M | S -------------------------------- 127.0.0.1 | 5 | 3 -------------------------------- Total nodes | 5 | 3 2) find-masters options > flashbase find-masters Use options(no-slave|no-slot|failovered) no-slave (masters without slaves. The failbacked slaves need to be added to these nodes) > flashbase find-masters no-slave 127.0.0.1:18203 127.0.0.1:18252 no-slot (masters not yet added to the cluster or masters without slots) > flashbase find-masters no-slot 127.0.0.1:18202 127.0.0.1:18253 failovered (when the cluster was initialized, this node was a slave, but it is now a master because of a failover) > flashbase find-masters failovered 127.0.0.1:18250 127.0.0.1:18252 127.0.0.1:18253 3) find-slaves options flashbase find-slaves Use options(failbacked) failbacked (when the cluster was initialized, this node was a master, but it is now a slave because of a failback) > flashbase find-slaves failbacked 127.0.0.1:18200 4) find-masters-with-dir List up the redis-servers that use the disk with the HW fault. After a HW fault, some of these nodes are already killed and the others will be killed within a few minutes. > flashbase find-masters-with-dir Error) Invalid arguments. ex.
'flashbase find-masters-with-dir 127.0.0.1 /DATA01/nvkvs/nvkvs' > flashbase find-masters-with-dir 127.0.0.1 /nvdrive0/ssd_01/nvkvs/nvkvs 18200 18204 3) How to handle HW fault(in case of replication) 1) cluster-failover.sh If some redis-servers are disconnected(killed/paused), you can do failover immediately and make the status of the cluster 'ok'. 2) find-nodes-with-dir / find-masters-with-dir / failover-with-dir / kill-with-dir List up all nodes or master those are using the disk with HW fault. > flashbase find-masters-with-dir Error) Invalid arguments. ex. 'flashbase find-masters-with-dir 127.0.0.1 /DATA01/nvkvs/nvkvs' > flashbase find-masters-with-dir 127.0.0.1 /nvdrive0/ssd_02/nvkvs/nvkvs 18200 18204 Do failover and change the master using the error disk to the slave > failover-with-dir 127.0.0.1 /nvdrive0/ssd_02/nvkvs/nvkvs 127.0.0.1:18250 will be master 127.0.0.1:18254 will be master OK with kill-with-dir , kill all nodes that use the error disk. > flashbase kill-with-dir 127.0.0.1 /nvdrive0/ssd_02/nvkvs/nvkvs flashbase kill 18200 flashbase kill 18204 flashbase kill 18253 > flashbase cli-all ping redis client for 127.0.0.1:18200 Could not connect to Redis at 127.0.0.1:18200: Connection refused redis client for 127.0.0.1:18201 PONG redis client for 127.0.0.1:18202 PONG redis client for 127.0.0.1:18203 PONG redis client for 127.0.0.1:18204 Could not connect to Redis at 127.0.0.1:18204: Connection refused redis client for 127.0.0.1:18250 PONG redis client for 127.0.0.1:18251 PONG redis client for 127.0.0.1:18252 PONG redis client for 127.0.0.1:18253 Could not connect to Redis at 127.0.0.1:18253: Connection refused redis client for 127.0.0.1:18254 PONG 3) find-noaddr / forget-noaddr Remove 'noaddr' node > flashbase find-noaddr // The prev uuid. Now not used anymore. 1b5d70b57079a4549a1d2e8d0ac2bd7c50986372 :0 master,fail,noaddr - 1589853266724 1589853265000 1 disconnected > flashbase forget-noaddr // Remove the 'noaddr' uuid. (error) ERR Unknown node 1b5d70b57079a4549a1d2e8d0ac2bd7c50986372 // Because newly added node does not know the previous uuid. OK OK OK OK > flashbase find-noaddr // Check that the noaddr uuid is removed 4) do-replicate First of all, make the master/slave pair. If there are many nodes to replicate, pairing.py is helpful. > flashbase find-noslot > slaves > flashbase find-noslave > masters > python pairing.py slaves masters flashbase do-replicate 192.168.0.2:19003 192.168.0.4:19053 flashbase do-replicate 192.168.0.2:19004 192.168.0.4:19054 flashbase do-replicate 192.168.0.2:19005 192.168.0.4:19055 ... 
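If pairing.py is not available, the same pairing step can be sketched with standard shell tools: join the two lists produced above line by line and print the matching do-replicate commands. This assumes both files contain the same number of nodes; review the printed commands before running them.

```bash
# Pair each no-slot node (future slave) with a no-slave master, as above.
flashbase find-noslot  > slaves
flashbase find-noslave > masters

# Print one do-replicate command per pair instead of running it directly.
paste -d ' ' slaves masters | while read -r slave master; do
  echo "flashbase do-replicate $slave $master"
done
```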
Add a no-slot master as the slave of a no-slave master(replicate). > flashbase do-replicate 127.0.0.1:18202 127.0.0.1:18252 Add 127.0.0.1:18202 as slave of master(127.0.0.1:18252) OK > flashbase cli -p 18202 info replication # Replication role:slave master_host:127.0.0.1 master_port:18252 master_link_status:down master_last_io_seconds_ago:-1 master_sync_in_progress:0 slave_repl_offset:1 master_link_down_since_seconds:1585912329 slave_priority:100 slave_read_only:1 connected_slaves:0 master_repl_offset:0 repl_backlog_active:0 repl_backlog_size:1048576 repl_backlog_first_byte_offset:0 repl_backlog_histlen:0 > flashbase do-replicate 127.0.0.1:18253 127.0.0.1:18203 Add 127.0.0.1:18253 as slave of master(127.0.0.1:18203) OK > flashbase cli -p 18253 info replication # Replication role:slave master_host:127.0.0.1 master_port:18203 master_link_status:up master_last_io_seconds_ago:5 master_sync_in_progress:0 slave_repl_offset:29 slave_priority:100 slave_read_only:1 connected_slaves:0 master_repl_offset:0 repl_backlog_active:0 repl_backlog_size:1048576 repl_backlog_first_byte_offset:0 repl_backlog_histlen:0 If the slave candidate is not yet included in the cluster, 'do-replicate' is done after 'cluster meet'. > flashbase do-replicate 127.0.0.1:18252 127.0.0.1:18202 Add 127.0.0.1:18252 as slave of master(127.0.0.1:18202) Fail to get masters uuid 'cluster meet' is done OK // 'cluster meet' is done successfully OK // 'cluster replicate' is done successfully 5) reset-distribution To initialize the node distribution, use 'reset-distribution'. // Check the distribution of cluster nodes. > flashbase check-distribution check distribution of masters/slaves... SERVER NAME | M | S -------------------------------- 192.168.111.35 | 4 | 4 192.168.111.38 | 0 | 8 192.168.111.41 | 8 | 0 -------------------------------- Total nodes | 12 | 12 ... > flashbase reset-distribution 192.168.111.38:20600 OK 192.168.111.38:20601 OK 192.168.111.38:20602 OK 192.168.111.38:20603 OK ... // Check the distribution of cluster nodes again. > flashbase check-distribution check distribution of masters/slaves... SERVER NAME | M | S -------------------------------- 192.168.111.35 | 4 | 4 192.168.111.38 | 4 | 4 192.168.111.41 | 4 | 4 -------------------------------- Total nodes | 12 | 12 6) force-failover When a server needs to be shut down for a HW fault or maintenance, change all masters in the server to slaves by failing over to their slaves. > flashbase check-distribution check distribution of masters/slaves... SERVER NAME | M | S -------------------------------- 192.168.111.35 | 4 | 4 192.168.111.38 | 4 | 4 192.168.111.41 | 4 | 4 -------------------------------- Total nodes | 12 | 12 > flashbase force-failover 192.168.111.41 all masters in 192.168.111.41 will be slaves and their slaves will promote to masters 192.168.111.35:20651 node will be master! OK 192.168.111.38:20651 node will be master! OK 192.168.111.35:20653 node will be master! OK 192.168.111.38:20653 node will be master! OK > flashbase check-distribution check distribution of masters/slaves... SERVER NAME | M | S -------------------------------- 192.168.111.35 | 6 | 2 192.168.111.38 | 6 | 2 192.168.111.41 | 0 | 5 -------------------------------- Total nodes | 12 | 9 4) How to handle HW fault(in case of no replication) After disk replacement, nodes-{port number}.conf is lost. Therefore, a new uuid is generated after restart. Because the previous uuid in the cluster is lost, the uuid is changed to noaddr . This noaddr uuid should be removed using the cluster forget command. 
Because the restarted node with the new uuid has no slot information, a slot range should be assigned by using 'addslots'. 1) Find noaddr node and check its slot range. > flashbase find-noaddr 7c84d9bb36ae3fa4caaf75318b59d3d2f6c7e9d8 :0 master,fail,noaddr - 1596769266377 1596769157081 77 disconnected 13261-13311 // '13261-13311' is the lost slot range. 2) Add the slot range to the restarted node. > flashbase cli -h 192.168.111.35 -p 18317 cluster addslots {13261..13311} 3) Increase the epoch of the node and update the cluster information. > flashbase cli -h 192.168.111.35 -p 18317 cluster bumpepoch BUMPED 321 4) Remove the noaddr node. > flashbase forget-noaddr 3. Check the status \u00b6 1) Redis Compare 'flashbase cluster-rowcount' with the previous result. Compare 'flashbase cli-all config get flash-db-ttl' with the previous result. flashbase cli-all cluster info | grep state:ok | wc -l flashbase cli -h {ip} -p {port} cluster nodes flashbase cli-all info memory | grep isOOM:true 2) yarn & spark Check web ui or 'yarn application -list'. In case of spark, Remove the disk with HW fault in spark.local.dir of spark-default.conf and restart thriftserver. 3) Thriftserver Check cron jobs with 'crontab -e'. Check table schema and query. select * from {table name} where ... limit 1; 4) kafka & kaetlyn kafka-utils.sh help // list up options kafka-utils.sh topic-check {topic name} // Check the distribution of Leaders kafka-utils.sh offset-check // Consumer LAG of each partition 5) System resources Check available memory Check the status of disks","title":"Failover with LightningDB"},{"location":"manage-failover/#1-prerequisite","text":"1) Redis Check 'flashbase cluster-rowcount' Check 'flashbase cli-all config get flash-db-ttl' Check 'flashbase cli-all info keyspace' // 'memKeys'(the number of in-memory data keys) Check 'flashbase cli-all info tablespace' // 'totalRowgroups', 'totalRows' Check 'flashbase cli-all info eviction' // 'avg full percent' 2) Thriftserver Check cron jobs with 'crontab -e'. Check table schema and query. select * from {table name} where ... limit 1; 3) System resources Check available memory(nmon, 'free -h') check the status of disks(nmon, 'df -h')","title":"1. Prerequisite"},{"location":"manage-failover/#2-check-the-status-and-failover","text":"1) Background If a redis-server is killed, a status of the node is changed to 'disconnected'. 543f81b6c5d6e29b9871ddbbd07a4524508d27e5 127.0.0.1:18202 master - 1585787616744 1585787612000 0 disconnected After a single node checked that a redis-server is disconnected, the status of the redis-server is changed to pFail . After all nodes inf the cluster checked that the node is disconnected, the status of the redis-server is changed to Fail . If the node is replicated, the slave of the node is failovered. With 'cluster-failover.sh', you can do failover regardless of the status(pFail/Fail). 543f81b6c5d6e29b9871ddbbd07a4524508d27e5 127.0.0.1:18202 master,fail - 1585787616744 1585787612000 0 disconnected If node-{port}.conf file is lost by disk failure, the redis-server using the conf file creates new uuid. Because the previous uuid in the cluster is lost, the uuid is changed to noaddr . This noaddr uuid should be removed with using cluster forget command. 
// previous uuid of 18202 543f81b6c5d6e29b9871ddbbd07a4524508d27e5 :0 master,fail,noaddr - 1585787799235 1585787799235 0 disconnected // new uuid of 18202 001ce4a87de2f2fc62ff44e2b5387a3f0bb9837c 127.0.0.1:18202 master - 0 1585787800000 0 connected 2) Check the status of the cluster 1) check-distribution Show the distribution of master/slave in each server. > flashbase check-distribution check distribution of masters/slaves... SERVER NAME | M | S -------------------------------- 127.0.0.1 | 5 | 3 -------------------------------- Total nodes | 5 | 3 2) find-masters options > flashbase find-masters Use options(no-slave|no-slot|failovered) no-slave (masters without slaves. Need to add the failbacked slaves to this node) > flashbase find-masters no-slave 127.0.0.1:18203 127.0.0.1:18252 no-slot (Not yet added into the cluster or masters without slot) > flashbase find-masters no-slot 127.0.0.1:18202 127.0.0.1:18253 failovered (When the cluster is initialized, this node was a slave. But now, the nodes is a master by failover) > flashbase find-masters failovered 127.0.0.1:18250 127.0.0.1:18252 127.0.0.1:18253 3) find-slaves options flashbase find-slaves Use options(failbacked) failbacked (When the cluster is initialized, this node was a master. But now, the nodes is a slave by failback) > flashbase find-slaves failbacked 127.0.0.1:18200 4) find-masters-with-dir List up the redis-servers with using the disk with HW fault. After HW fault, some of these nodes are already killed and the others will be killed in a few minutes. > flashbase find-masters-with-dir Error) Invalid arguments. ex. 'flashbase find-masters-with-dir 127.0.0.1 /DATA01/nvkvs/nvkvs' > flashbase find-masters-with-dir 127.0.0.1 /nvdrive0/ssd_01/nvkvs/nvkvs 18200 18204 3) How to handle HW fault(in case of replication) 1) cluster-failover.sh If some redis-servers are disconnected(killed/paused), you can do failover immediately and make the status of the cluster 'ok'. 2) find-nodes-with-dir / find-masters-with-dir / failover-with-dir / kill-with-dir List up all nodes or master those are using the disk with HW fault. > flashbase find-masters-with-dir Error) Invalid arguments. ex. 'flashbase find-masters-with-dir 127.0.0.1 /DATA01/nvkvs/nvkvs' > flashbase find-masters-with-dir 127.0.0.1 /nvdrive0/ssd_02/nvkvs/nvkvs 18200 18204 Do failover and change the master using the error disk to the slave > failover-with-dir 127.0.0.1 /nvdrive0/ssd_02/nvkvs/nvkvs 127.0.0.1:18250 will be master 127.0.0.1:18254 will be master OK with kill-with-dir , kill all nodes that use the error disk. > flashbase kill-with-dir 127.0.0.1 /nvdrive0/ssd_02/nvkvs/nvkvs flashbase kill 18200 flashbase kill 18204 flashbase kill 18253 > flashbase cli-all ping redis client for 127.0.0.1:18200 Could not connect to Redis at 127.0.0.1:18200: Connection refused redis client for 127.0.0.1:18201 PONG redis client for 127.0.0.1:18202 PONG redis client for 127.0.0.1:18203 PONG redis client for 127.0.0.1:18204 Could not connect to Redis at 127.0.0.1:18204: Connection refused redis client for 127.0.0.1:18250 PONG redis client for 127.0.0.1:18251 PONG redis client for 127.0.0.1:18252 PONG redis client for 127.0.0.1:18253 Could not connect to Redis at 127.0.0.1:18253: Connection refused redis client for 127.0.0.1:18254 PONG 3) find-noaddr / forget-noaddr Remove 'noaddr' node > flashbase find-noaddr // The prev uuid. Now not used anymore. 
1b5d70b57079a4549a1d2e8d0ac2bd7c50986372 :0 master,fail,noaddr - 1589853266724 1589853265000 1 disconnected > flashbase forget-noaddr // Remove the 'noaddr' uuid. (error) ERR Unknown node 1b5d70b57079a4549a1d2e8d0ac2bd7c50986372 // Because newly added node does not know the previous uuid. OK OK OK OK > flashbase find-noaddr // Check that the noaddr uuid is removed 4) do-replicate First of all, make the master/slave pair. If there are many nodes to replicate, pairing.py is helpful. > flashbase find-noslot > slaves > flashbase find-noslave > masters > python pairing.py slaves masters flashbase do-replicate 192.168.0.2:19003 192.168.0.4:19053 flashbase do-replicate 192.168.0.2:19004 192.168.0.4:19054 flashbase do-replicate 192.168.0.2:19005 192.168.0.4:19055 ... Add no-slot master as the slave to no-slave master(replicate) > flashbase do-replicate 127.0.0.1:18202 127.0.0.1:18252 Add 127.0.0.1:18202 as slave of master(127.0.0.1:18252) OK > flashbase cli -p 18202 info replication # Replication role:slave master_host:127.0.0.1 master_port:18252 master_link_status:down master_last_io_seconds_ago:-1 master_sync_in_progress:0 slave_repl_offset:1 master_link_down_since_seconds:1585912329 slave_priority:100 slave_read_only:1 connected_slaves:0 master_repl_offset:0 repl_backlog_active:0 repl_backlog_size:1048576 repl_backlog_first_byte_offset:0 repl_backlog_histlen:0 > flashbase do-replicate 127.0.0.1:18253 127.0.0.1:18203 Add 127.0.0.1:18253 as slave of master(127.0.0.1:18203) OK > flashbase cli -p 18253 info replication # Replication role:slave master_host:127.0.0.1 master_port:18203 master_link_status:up master_last_io_seconds_ago:5 master_sync_in_progress:0 slave_repl_offset:29 slave_priority:100 slave_read_only:1 connected_slaves:0 master_repl_offset:0 repl_backlog_active:0 repl_backlog_size:1048576 repl_backlog_first_byte_offset:0 repl_backlog_histlen:0 If the slave candidate is not included in the cluster, 'do-replicate' is done after 'cluster meet'. > flashbase do-replicate 127.0.0.1:18252 127.0.0.1:18202 Add 127.0.0.1:18252 as slave of master(127.0.0.1:18202) Fail to get masters uuid 'cluster meet' is done OK // 'cluster meet' is done successfully OK // 'cluster replicate' is done successfully 5) reset-distribution To initialize the node distribution, use 'reset-distribution'. // Check the distribution of cluster nodes. > flashbase check-distribution check distribution of masters/slaves... SERVER NAME | M | S -------------------------------- 192.168.111.35 | 4 | 4 192.168.111.38 | 0 | 8 192.168.111.41 | 8 | 0 -------------------------------- Total nodes | 12 | 12 ... > flashbase reset-distribution 192.168.111.38:20600 OK 192.168.111.38:20601 OK 192.168.111.38:20602 OK 192.168.111.38:20603 OK ... // Check the distribution of cluster nodes again. > flashbase check-distribution check distribution of masters/slaves... SERVER NAME | M | S -------------------------------- 192.168.111.35 | 4 | 4 192.168.111.38 | 4 | 4 192.168.111.41 | 4 | 4 -------------------------------- Total nodes | 12 | 12 6) force-failover When a server need to be shutdown by HW fault or checking, change all masters in the server to slaves by failover of those slaves. > flashbase check-distribution check distribution of masters/slaves... 
SERVER NAME | M | S -------------------------------- 192.168.111.35 | 4 | 4 192.168.111.38 | 4 | 4 192.168.111.41 | 4 | 4 -------------------------------- Total nodes | 12 | 12 > flashbase force-failover 192.168.111.41 all masters in 192.168.111.41 will be slaves and their slaves will promote to masters 192.168.111.35:20651 node will be master! OK 192.168.111.38:20651 node will be master! OK 192.168.111.35:20653 node will be master! OK 192.168.111.38:20653 node will be master! OK > flashbase check-distribution check distribution of masters/slaves... SERVER NAME | M | S -------------------------------- 192.168.111.35 | 6 | 2 192.168.111.38 | 6 | 2 192.168.111.41 | 0 | 5 -------------------------------- Total nodes | 12 | 9 4) How to handle HW fault(in case of no replication) After disk replacement, nodes-{port number}.conf is lost. Therefore a new uuid is generated after restart. Because the previous uuid in the cluster is lost, the uuid is changed to noaddr . This noaddr uuid should be removed with using cluster forget command. Because the restarted node with the new uuid has no slot information, a slot range should be assigned by using 'addslots'. 1) Find noaddr node and check its slot range. > flashbase find-noaddr 7c84d9bb36ae3fa4caaf75318b59d3d2f6c7e9d8 :0 master,fail,noaddr - 1596769266377 1596769157081 77 disconnected 13261-13311 // '13261-13311' is the lost slot range. 2) Add the slot range to the restarted node. > flashbase cli -h 192.168.111.35 -p 18317 cluster addslots {13261..13311} 3) Increase the epoch of the node and update the cluster information. > flashbase cli -h 192.168.111.35 -p 18317 cluster bumpepoch BUMPED 321 4) Remove the noaddr node. > flashbase forget-noaddr","title":"2. Check the status and failover"},{"location":"manage-failover/#3-check-the-status","text":"1) Redis Compare 'flashbase cluster-rowcount' with the previous result. Compare 'flashbase cli-all config get flash-db-ttl' with the previous result. flashbase cli-all cluster info | grep state:ok | wc -l flashbase cli -h {ip} -p {port} cluster nodes flashbase cli-all info memory | grep isOOM:true 2) yarn & spark Check web ui or 'yarn application -list'. In case of spark, Remove the disk with HW fault in spark.local.dir of spark-default.conf and restart thriftserver. 3) Thriftserver Check cron jobs with 'crontab -e'. Check table schema and query. select * from {table name} where ... limit 1; 4) kafka & kaetlyn kafka-utils.sh help // list up options kafka-utils.sh topic-check {topic name} // Check the distribution of Leaders kafka-utils.sh offset-check // Consumer LAG of each partition 5) System resources Check available memory Check the status of disks","title":"3. Check the status"},{"location":"prerequisite/","text":"Note This page guides how to start LightningDB automatically only for the case of AWS EC2 Instance . 1. Create EC2 Instance \u00b6 Amazon Machine Image(AMI) for LightningDB can be found in 'AWS Marketplace' and user can create EC2 Instance with the AMI. To use LightningDB in a new Instance, the size of the root volume should be 15GiB at least. To use Web UI of HDFS, YARN, Spark and Zeppelin, you should add the following ports to 'Edit inbound rules' of 'Security groups' in EC2 Instance. Service Port HDFS 50070 YARN 8088 Spark 4040 Zeppelin 8080 2. Access EC2 Instance \u00b6 Create a EC2 Instance for LightningDB and access with 'Public IP' or 'Public DNS'. '*.pem' file is also required to access EC2 Instance. $ ssh -i /path/to/.pem ec2-user@${IP_ADDRESS} 3. 
Setup environment \u00b6 When you access the EC2 Instance, the following jobs are already done. Create and exchange SSH KEY for user authentication Mount disks Warning Before starting LightningDB, please check that the disk mount is completed using 'lsblk' like below. [ec2-user@ip-172-31-34-115 ~]$ lsblk NAME MAJ:MIN RM SIZE RO TYPE MOUNTPOINT xvda 202:0 0 10G 0 disk \u2514\u2500xvda1 202:1 0 10G 0 part / nvme0n1 259:0 0 1.7T 0 disk /nvme/data_01 nvme1n1 259:1 0 1.7T 0 disk /nvme/data_02 nvme3n1 259:2 0 1.7T 0 disk /nvme/data_03 nvme2n1 259:3 0 1.7T 0 disk /nvme/data_04 Set Hadoop configurations(core-site.xml, hdfs-site.xml, yarn-site.xml). These settings are the default values for getting started with Hadoop. To optimize resources or performance, the user needs to modify some features with Hadoop Get Started Set Spark configuration(spark-default.conf.template) To optimize resources and performance, the user also needs to modify some features with Spark Configuration Tip To launch a Spark application on YARN, start YARN by running 'start-dfs.sh' and 'start-yarn.sh' in order. 4. Start LightningDB \u00b6 LightningDB provides LTCLI that is introduced in Installation . With LTCLI, you can deploy and use LightningDB. LightningDB supports Zeppelin for convenient ingestion and querying of LightningDB data. About Zeppelin, the Try out with Zeppelin page provides some guides.","title":"Prerequisite"},{"location":"prerequisite/#1-create-ec2-instance","text":"The Amazon Machine Image(AMI) for LightningDB can be found in 'AWS Marketplace' and the user can create an EC2 Instance with the AMI. To use LightningDB in a new Instance, the size of the root volume should be at least 15GiB. To use the Web UI of HDFS, YARN, Spark and Zeppelin, you should add the following ports to 'Edit inbound rules' of 'Security groups' in the EC2 Instance. Service Port HDFS 50070 YARN 8088 Spark 4040 Zeppelin 8080","title":"1. Create EC2 Instance"},{"location":"prerequisite/#2-access-ec2-instance","text":"Create an EC2 Instance for LightningDB and access it with its 'Public IP' or 'Public DNS'. A '*.pem' file is also required to access the EC2 Instance. $ ssh -i /path/to/.pem ec2-user@${IP_ADDRESS}","title":"2. Access EC2 Instance"},{"location":"prerequisite/#3-setup-environment","text":"When you access the EC2 Instance, the following jobs are already done. Create and exchange SSH KEY for user authentication Mount disks Warning Before starting LightningDB, please check that the disk mount is completed using 'lsblk' like below. [ec2-user@ip-172-31-34-115 ~]$ lsblk NAME MAJ:MIN RM SIZE RO TYPE MOUNTPOINT xvda 202:0 0 10G 0 disk \u2514\u2500xvda1 202:1 0 10G 0 part / nvme0n1 259:0 0 1.7T 0 disk /nvme/data_01 nvme1n1 259:1 0 1.7T 0 disk /nvme/data_02 nvme3n1 259:2 0 1.7T 0 disk /nvme/data_03 nvme2n1 259:3 0 1.7T 0 disk /nvme/data_04 Set Hadoop configurations(core-site.xml, hdfs-site.xml, yarn-site.xml). These settings are the default values for getting started with Hadoop. To optimize resources or performance, the user needs to modify some features with Hadoop Get Started Set Spark configuration(spark-default.conf.template) To optimize resources and performance, the user also needs to modify some features with Spark Configuration Tip To launch a Spark application on YARN, start YARN by running 'start-dfs.sh' and 'start-yarn.sh' in order.","title":"3. Setup environment"},{"location":"prerequisite/#4-start-lightningdb","text":"LightningDB provides LTCLI that is introduced in Installation . With LTCLI, you can deploy and use LightningDB. 
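Putting the setup notes above in order, a minimal pre-flight sketch for a fresh instance could look like the following; it assumes the NVMe mount points shown in the 'lsblk' example above and that Hadoop's sbin scripts are on the PATH, so treat it as a checklist rather than a fixed procedure.

# Confirm the data disks are mounted before starting anything
lsblk | grep /nvme/data_

# Bring up HDFS first, then YARN, so Spark applications (thriftserver, Zeppelin jobs) can run on YARN
start-dfs.sh
start-yarn.sh
yarn application -list   # should reach the ResourceManager without errors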
LightningDB supports Zeppelin to provide the convenience of ingestion and querying data of LightningDB. About Zeppelin, Try out with Zeppelin page provides some guides.","title":"4. Start LightningDB"},{"location":"release-note/","text":"1. Recommended Version \u00b6 LightningDB ver 1.4.2 1 2. Release Notes \u00b6 Ver 2.0.0 CXL-CMS Date: 2024.03.21 Download: LightningDB ver 2.0.0 CXL-CMS License: free Description Support Interfaces for CXL memory and CMS of SK Hynix Ver 2.0.0 Date: 2024.01.11 Download: LightningDB ver 2.0.0 License: free Description Support ANN(Aproximate Nearest Neighbor) Search Support Thunderquery CLI without Spark Use Apache Spark 3.x and CatalogPlugin Use Apache Arrow Ver 1.4.3 Date: 2024.03.21 Download: LightningDB ver 1.4.3 License: free Description Support Kubernetes Ver 1.3.1 Date: 2021.06.03 Download: LightningDB ver 1.3.1 License: free Description Support Exact KNN Search Recommended Spark version: 2.3.4 Support the permanent storage Ver 1.2.3 Date: 2020.07.21 Download: LightningDB ver 1.2.3 License: free Description Nondisruptive scale-out(Adding new nodes) Aggregation Pushdown Optimize memory usage about clustering Adaptive RDD partitioning scheme by filters('node base' or 'redis key base') Support geoSpatial queries based on OGC standards Update OSGeo repository address Ver 1.0 Date: 2019.11.20 Download: LightningDB ver 1.0 License: free Description Initial version Support LTCLI Support geoSpatial functions Copy link address with Right-Clicking and paste when you try to deploy LightningDB in LTCLI. \u21a9","title":"Release Notes"},{"location":"release-note/#1-recommended-version","text":"LightningDB ver 1.4.2 1","title":"1. Recommended Version"},{"location":"release-note/#2-release-notes","text":"Ver 2.0.0 CXL-CMS Date: 2024.03.21 Download: LightningDB ver 2.0.0 CXL-CMS License: free Description Support Interfaces for CXL memory and CMS of SK Hynix Ver 2.0.0 Date: 2024.01.11 Download: LightningDB ver 2.0.0 License: free Description Support ANN(Aproximate Nearest Neighbor) Search Support Thunderquery CLI without Spark Use Apache Spark 3.x and CatalogPlugin Use Apache Arrow Ver 1.4.3 Date: 2024.03.21 Download: LightningDB ver 1.4.3 License: free Description Support Kubernetes Ver 1.3.1 Date: 2021.06.03 Download: LightningDB ver 1.3.1 License: free Description Support Exact KNN Search Recommended Spark version: 2.3.4 Support the permanent storage Ver 1.2.3 Date: 2020.07.21 Download: LightningDB ver 1.2.3 License: free Description Nondisruptive scale-out(Adding new nodes) Aggregation Pushdown Optimize memory usage about clustering Adaptive RDD partitioning scheme by filters('node base' or 'redis key base') Support geoSpatial queries based on OGC standards Update OSGeo repository address Ver 1.0 Date: 2019.11.20 Download: LightningDB ver 1.0 License: free Description Initial version Support LTCLI Support geoSpatial functions Copy link address with Right-Clicking and paste when you try to deploy LightningDB in LTCLI. \u21a9","title":"2. Release Notes"},{"location":"try-with-zeppelin/","text":"1. Setting for Zeppelin \u00b6 You can try LightningDB in the Zeppelin notebook. Firstly, deploy and start the cluster of LightningDB using Installation before launching the Zeppelin daemon. Secondly, to run LightningDB on the Spark, the jars in the LightningDB should be passed to the Spark. When EC2 Instance is initialized, the environment variable ( $SPARK_SUBMIT_OPTIONS ) is configured for this reason. Thus just need to check the setting in zeppelin-env.sh . 
$ vim $ZEPPELIN_HOME/conf/zeppelin-env.sh ... LIGHTNINGDB_LIB_PATH=$(eval echo $(cat $FBPATH/config | head -n 1 | awk {'print $2'}))/cluster_$(cat $FBPATH/HEAD)/tsr2-assembly-1.0.0-SNAPSHOT/lib/ if [[ -e $LIGHTNINGDB_LIB_PATH ]]; then export SPARK_SUBMIT_OPTIONS=\"--jars $(find $LIGHTNINGDB_LIB_PATH -name 'tsr2*' -o -name 'spark-r2*' -o -name '*jedis*' -o -name 'commons*' -o -name 'jdeferred*' -o -name 'geospark*' -o -name 'gt-*' | tr '\\n' ',')\" fi ... Finally, start Zeppelin daemon. $ cd $ZEPPELIN_HOME/bin $ ./zeppelin-daemon.sh start 2. Tutorial with Zeppelin \u00b6 After starting zeppelin daemon, you can access zeppelin UI using a browser. The URL is https://your-server-ip:8080 . Tip We recommend that you proceed with the tutorial at the Chrome browser. There is a github page for tutorial . The repository includes a tool for generating sample csv data and a notebook for the tutorial. You can import the tutorial notebook with its URL. https://raw.githubusercontent.com/mnms/tutorials/master/zeppelin-notebook/note.json The tutorial runs on the spark interpreter of Zeppelin. Please make sure that the memory of the Spark driver is at least 10GB in the Spark interpreter setting. Also, make sure that the timeout of a shell command is at least 120000 ms.","title":"Try out with Zeppelin"},{"location":"try-with-zeppelin/#1-setting-for-zeppelin","text":"You can try LightningDB in the Zeppelin notebook. Firstly, deploy and start the cluster of LightningDB using Installation before launching the Zeppelin daemon. Secondly, to run LightningDB on the Spark, the jars in the LightningDB should be passed to the Spark. When EC2 Instance is initialized, the environment variable ( $SPARK_SUBMIT_OPTIONS ) is configured for this reason. Thus just need to check the setting in zeppelin-env.sh . $ vim $ZEPPELIN_HOME/conf/zeppelin-env.sh ... LIGHTNINGDB_LIB_PATH=$(eval echo $(cat $FBPATH/config | head -n 1 | awk {'print $2'}))/cluster_$(cat $FBPATH/HEAD)/tsr2-assembly-1.0.0-SNAPSHOT/lib/ if [[ -e $LIGHTNINGDB_LIB_PATH ]]; then export SPARK_SUBMIT_OPTIONS=\"--jars $(find $LIGHTNINGDB_LIB_PATH -name 'tsr2*' -o -name 'spark-r2*' -o -name '*jedis*' -o -name 'commons*' -o -name 'jdeferred*' -o -name 'geospark*' -o -name 'gt-*' | tr '\\n' ',')\" fi ... Finally, start Zeppelin daemon. $ cd $ZEPPELIN_HOME/bin $ ./zeppelin-daemon.sh start","title":"1. Setting for Zeppelin"},{"location":"try-with-zeppelin/#2-tutorial-with-zeppelin","text":"After starting zeppelin daemon, you can access zeppelin UI using a browser. The URL is https://your-server-ip:8080 . Tip We recommend that you proceed with the tutorial at the Chrome browser. There is a github page for tutorial . The repository includes a tool for generating sample csv data and a notebook for the tutorial. You can import the tutorial notebook with its URL. https://raw.githubusercontent.com/mnms/tutorials/master/zeppelin-notebook/note.json The tutorial runs on the spark interpreter of Zeppelin. Please make sure that the memory of the Spark driver is at least 10GB in the Spark interpreter setting. Also, make sure that the timeout of a shell command is at least 120000 ms.","title":"2. Tutorial with Zeppelin"},{"location":"tutorial/","text":"Tutorial 1. About Lightning DB \u00b6 Architecture & key features \u00b6 Replication & failover/failback \u00b6 2. LTCLI \u00b6 Installation Command Line 3. 
Ingestion \u00b6 load.sh \u00b6 Ingestion with 'tsr2-tools'; set the delimiter with the '-s' option before use. #!/bin/bash if [ $# -ne 2 ]; then echo \"Usage: load_data.sh data-directory json-file\" echo \"(e.g.: load_data.sh ./data/split ./json/106.json)\" echo \"Warning: delimiter is '|'\" exit fi tsr2-tools insert java_options \"-Xms1g -Xmx32g\" -d $1 -s \"|\" -t $2 -p 40 -c 1 -i Usage [ltdb@d205 ~/tsr2-test]$ cat ./json/cell_nvkvs.json // Write the json file. { \"endpoint\" : \"192.168.111.205:18600\", \"id\" :\"9999\", \"columns\" : 219, \"partitions\" : [ 216, 218, 3, 4 ], \"rowStore\" : true // Check that 'row-store-enabled' has the same value('true') in the target cluster. Otherwise, set both to 'false'. } [ltdb@d205 ~/tsr2-test]$ ls -alh ./test_data_one/ // Check the data to load. Both a dir and a file path are allowed. total 8.7M drwxrwxr-x. 2 ltdb ltdb 50 2020-06-18 08:58:44 ./ drwxrwxr-x. 7 ltdb ltdb 84 2020-06-18 08:58:28 ../ -rw-rw-r--. 1 ltdb ltdb 8.7M 2020-06-18 08:58:44 ANALYSIS_CELL_ELG_20160711055950.dat [ltdb@d205 ~/tsr2-test]$ load.sh // Running 'load.sh' without arguments prints the usage info. Usage: load_data.sh data-directory json-file (e.g.: load_data.sh ./data/split ./json/106.json) Warning: delimiter is '|' [ltdb@d205 ~/tsr2-test]$ load.sh ./test_data_one/ ./json/cell_nvkvs.json /home/ltdb/tsr2-test/sbin/load.sh: line 9: tsr2-tools: command not found // The command fails because no cluster is selected, so select the cluster with 'cfc'. [ltdb@d205 ~/tsr2-test]$ cfc 6 [C:6][ltdb@d205 ~/tsr2-test]$ load.sh ./test_data_one/ ./json/cell_nvkvs.json // Loading starts. SLF4J: Class path contains multiple SLF4J bindings. SLF4J: Found binding in [jar:file:/home/ltdb/tsr2/cluster_6/tsr2-assembly-1.0.0-SNAPSHOT/lib/logback-classic-1.2.3.jar!/org/slf4j/impl/StaticLoggerBinder.class] kafka & kafkacat & tsr2-kaetlyn \u00b6 zookeeper/kafka zookeeper and kafka brokers must be installed first ==> see the Kafka&Kaetlyn installation guide The topics in use are roughly divided into the following 3 kinds: 1. topics for data loading - load per table so that there is no dependency between tables 2. error topic - set 'KAFKA_ERROR_REPORT_TOPIC_NAME=topic-error' via 'tsr2-kaetlyn edit' - when loading fails at the consuming stage, the error details are pushed to the error topic so that the loading client can check them 3. result topic(optional) - after consuming, the loading result can be delivered to the loading app according to a defined protocol kafka consumer Set up the consumer using the kaetlyn consumer described in the Kafka&Kaetlyn installation guide. 
'tsr2-kaetlyn edit'\uc744 \ud1b5\ud574 consumer \uc124\uc815\uc774 \ud544\uc694\ud568 \uae30\ubcf8\uc801\uc73c\ub85c '\uc218\uc815 \ud544\uc694'\ub85c \ucf54\uba58\ud2b8\ub41c \ubd80\ubd84\uc740 \uac80\ud1a0 \ubc0f \uc218\uc815\uc774 \ud544\uc694\ud568 #!/bin/bash ############################################################################### # Common variables SPARK_CONF=${SPARK_CONF:-$SPARK_HOME/conf} SPARK_BIN=${SPARK_BIN:-$SPARK_HOME/bin} SPARK_SBIN=${SPARK_SBIN:-$SPARK_HOME/sbin} SPARK_LOG=${SPARK_LOG:-$SPARK_HOME/logs} SPARK_METRICS=${SPARK_CONF}/metrics.properties SPARK_UI_PORT=${SPARK_UI_PORT:-14040} KAFKA_SERVER=localhost:9092 ############################################################################### # Properties for Consumer DRIVER_MEMORY=1g // \uc218\uc815 \ud544\uc694 EXECUTOR_MEMORY=1g // \uc218\uc815 \ud544\uc694 EXECUTORS=12 // \uc218\uc815 \ud544\uc694 EXECUTOR_CORES=12 // \uc218\uc815 \ud544\uc694 [[JSON_PATH]]=~/Flashbase/flashbase-benchmark/json/load_no_skew JSON_PATH=/home/ltdb/tsr2-test/json // \uc218\uc815 \ud544\uc694, json file \uc5c5\ub370\uc774\ud2b8 \uc2dc kaetlyn \uc7ac\uc2dc\uc791 \ud544\uc694! [[HIVE_METASTORE_URI]]=thrift://localhost:9083 HIVE_METASTORE_URI='' // \uc218\uc815 \ud544\uc694 KAFKA_CONSUMER_GROUP_ID=nvkvs_redis_connector // \uc218\uc815 \ud544\uc694 KAFKA_CONSUMING_TOPIC_LIST=nvkvs3 // \uc218\uc815 \ud544\uc694 JOB_GENERATION_PERIOD=1 MAX_RATE_PER_PARTITION=20000 KAFKA_ERROR_REPORT_TOPIC_NAME=topic-error TEST_MODE=false EXCUTOR_LOCALITY=false kafka producer \uae30\ubcf8\uc801\uc73c\ub85c kafka producing\uc740 \uc544\ub798\uc640 \uac19\uc740 \ubc29\ubc95\uc73c\ub85c \ud560 \uc218 \uc788\uc74c kafka-console-producer.sh --broker-list localhost:9092 --topic {topic name} < {\uc801\uc7ac\ud560 filename} \ud558\uc9c0\ub9cc, kaetlyn \uc801\uc7ac\ub97c \uc704\ud574\uc11c\ub294 \uba54\uc2dc\uc9c0\uc5d0 \uc544\ub798 \ud5e4\ub354 \uc815\ubcf4\uac00 \ud3ec\ud568\ub418\uc5b4\uc57c \ud55c\ub2e4. TABLE_ID SEPARATOR \ub530\ub77c\uc11c kafkacat\uc774\ub77c\ub294 tool\uc744 \ud1b5\ud574 \ud5e4\ub354 \uc815\ubcf4\uc640 \ud568\uaed8 producing\uc744 \ud574\uc57c \ud55c\ub2e4.(https://docs.confluent.io/3.3.0/app-development/kafkacat-usage.html# \ucc38\uace0) c++ compiler \uc124\uce58 $yum install gcc-c++ $ git clone https://github.com/edenhill/librdkafka $ cd librdkafka $ ./configure $ make $ sudo make install /usr/local/lib \ub85c \uc774\ub3d9\ud574\uc8fc\uc5b4 \ub2e4\uc74c \uba85\ub839\uc5b4 \uc2e4\ud589\ud55c\ub2e4. $ git clone https://github.com/edenhill/kafkacat $ cd kafkacat $ ./configure $ make $ sudo make install Lib \ud30c\uc77c\uc744 \ucc3e\uc744 \uc218 \uc5c6\ub2e4\uba74 $ ldd kafkacat \ub2e4\uc74c\uc758 \ud30c\uc77c\uc744 \ub9cc\ub4e4\uace0 \uc544\ub798\ub97c \ucd94\uac00 /etc/ld.so.conf.d/usrlocal.conf Contents: /usr/local/lib \uc800\uc7a5 \ud6c4 \uc544\ub798 \uba85\ub839\uc5b4 \uc2e4\ud589 $ ldconfig -v $kafkacat Kafkacat\uc5d0 \ub300\ud55c \uba85\ub839\uc5b4\uac00 \ub098\uc624\uba74 \uc131\uacf5 kafkacat\uc774 \uc815\uc0c1 \uc124\uce58\ub418\uc5c8\uc73c\uba74 \uc544\ub798\uc640 \uac19\uc774 producing\uc774 \uac00\ub2a5\ud568 file \ud558\ub098\ub9cc \uc801\uc7ac\ud560 \uacbd\uc6b0 kafkacat -b localhost:9092 -t {topic name} -T -P -H TABLE_ID='{table id}' -H SEPARATOR='|' -l {\uc801\uc7ac\ud560 filename} 2. 
dir\uc5d0 \uc788\ub294 \ubaa8\ub4e0 \ud30c\uc77c\uc744 \uc801\uc7ac\ud560 \ub54c\uc5d0\ub294 \ud574\ub2f9 dir\ub85c \uc774\ub3d9\ud55c \ud6c4\uc5d0, ls | xargs -n 1 kafkacat -q -b localhost:9092 -t {topic name} -P -H TABLE_ID='{table id}' -H SEPARATOR='|' -l ** \uae30\ubcf8\uc801\uc778 \uac00\uc774\ub4dc\ub294 Kafka&Kaetlyn \uc124\uce58 \uc5d0 \uc788\uc73c\ubbc0\ub85c \uac1c\ub150 \uc774\ud574\ub97c \uc704\ud574\uc11c\ub294 \uc774 \ud398\uc774\uc9c0\ub97c \ucc38\uace0\ud558\uba74 \ub418\uc9c0\ub9cc \uc880 \ub354 \ud3b8\ub9ac\ud558\uac8c \uc0ac\uc6a9\ud558\uae30 \uc704\ud574 kafka-utils.sh\ub97c \uc81c\uacf5\ud558\uace0 \uc788\uc5b4 \uc6b4\uc601 \uc2dc\uc5d0\ub294 kafka-utils.sh\ub97c \uc0ac\uc6a9\ud558\uba74 \ub428. 'kafka-utils.sh'\ub294 \uac01 \ud074\ub7ec\uc2a4\ud130\ubcc4 sbin\uc5d0 \uc788\uc73c\ubbc0\ub85c, 'cfc'\ub85c cluster \uc124\uc815 \ud6c4 \uc0ac\uc6a9\uc774 \uac00\ub2a5\ud568. [C:6][ltdb@d205 ~]$ which kafka-utils.sh ~/tsr2/cluster_6/tsr2-assembly-1.0.0-SNAPSHOT/sbin/kafka-utils.sh \uc544\ub798\uc640 \uac19\uc774 'CONSUMER_GROUP_ID'\uac00 \uc9c0\uc815\ub418\uc5b4 \uc788\uc9c0 \uc54a\uc73c\uba74 \uc2e4\ud589\uc774 \ub418\uc9c0 \uc54a\uc73c\ubbc0\ub85c, [C:6][ltdb@d205 ~]$ kafka-utils.sh help Please, set $CONSUMER_GROUP_ID first. \uc544\ub798\uc640 \uac19\uc774 'kafka-utils.sh'\ub97c \uc5f4\uc5b4\uc11c \uc218\uc815\uc744 \ud574\uc57c \ud568. #!/bin/bash CONSUMER_GROUP_ID='nvkvs_redis_connector' // \uc218\uc815 \ud544\uc694 KAFKA_SERVER=localhost:9092 ZOOKEEPER_SERVER=localhost:2181... 'help'\ub97c \ud1b5\ud574 \uac00\ub2a5\ud55c \ucee4\ub9e8\ub4dc\ub97c \ud655\uc778\ud560 \uc218 \uc788\uc74c. [C:6][ltdb@d205 ~/kafka/config]$ kafka-utils.sh help kafka-utils.sh offset-check kafka-utils.sh offset-monitor kafka-utils.sh offset-earliest topic_name kafka-utils.sh offset-latest topic_name kafka-utils.sh offset-move topic_name 10000 kafka-utils.sh error-monitor error_topic_name kafka-utils.sh consumer-list kafka-utils.sh topic-check topic_name kafka-utils.sh topic-create topic_name 10 kafka-utils.sh topic-delete topic_name kafka-utils.sh topic-config-check topic_name kafka-utils.sh topic-config-set topic_name config_name config_value kafka-utils.sh topic-config-remove topic_name config_name kafka-utils.sh topic-list kafka-utils.sh message-earliest topic_name kafka-utils.sh message-latest topic_name command\uc5d0 args\uac00 \ud544\uc694\ud55c \uacbd\uc6b0, args\uc5c6\uc774 \uc785\ub825\ud558\uba74 \uc544\ub798\uc640 \uac19\uc774 \uac00\uc774\ub4dc \ubb38\uad6c\uac00 \ub098\uc634. [C:6][ltdb@d205 ~/kafka/config]$ kafka-utils.sh offset-move Please, specify topic name & the size of moving offset (ex) kafka-utils.sh offset-move my-topic 100 [C:6][ltdb@d205 ~/kafka/config]$ kafka-utils.sh topic-create Please, specify topic name and its partition count. 
(ex) kafka-utils.sh topic-create topic-new 10 [C:6][ltdb@d205 ~/kafka/config]$ \uc0ac\uc6a9 \uc608, [C:6][ltdb@d205 ~]$ kafka-utils.sh message-earliest nvkvs3 20160711055950|ELG|2635055200|34317|5|6091|1|25|0|11|0|100.0|0.0|0|2846|3|33|0|5|0|-1000|0.0|0.0|94932|1027|0|176|35.2|40|0|7818000000|109816071|10|0|6000000.0|164843|2.75|0|2592|6000000|0.04|1288488|1303|1338|0|530|1|88.33|0|721|67948|428|0|1|108|108.0|108|0|0.0|0|0|0|-1000|1|1|100.0|62|39.0|62.9|23.0|37.1|0|0|0|0|29|10|-7022851.0|59998.0|-117.05|-6865443.5|59998.0|-114.43|4|198060.0|59998.0|22.5|3.3|0|1|5.82|3|1.94||0|0|0|0|0|0|0|0|4|0|0|0|15|14|231|140|0|0|0|0|0|0|0|0|4|0|0|0|15|13|174|110|1|0|0|0|0|0|0|0|0|0|0|0|0|0|1|0|0|0|0|0|0|0|1|0|0|0|0|0|0|0|0|0|0.0|0.0|0.0|0.0|0.0|0.0|570.0|0.0|3.0|0.0|0.0|0.0|0.0|2.0|3.0|3.0|0.0|15.73|0.0|0.0|0.0|0.0|0.0|12.0|22.0|68.0|83.0|339.0|205.0|144.0|54.0|38.0|12.0|0.0|0.0|0.0|0.0|0.0|0.0|100.0|50.55|1:22,2:7|1.0|||||1:1,17:1,23:1|13.67|0|0|0.0|0.0|-1000||-1000||-1000|11|2|05 Processed a total of 1 messages [C:6][ltdb@d205 ~]$ kafka-utils.sh topic-list __consumer_offsets nvkvs3 topic-error topic_name [C:6][ltdb@d205 ~]$ kafka-utils.sh topic-create ksh 18 Created topic ksh. [C:6][ltdb@d205 ~]$ kafka-utils.sh topic-check ksh Topic:ksh PartitionCount:18 ReplicationFactor:2 Configs: Topic: ksh Partition: 0 Leader: 1 Replicas: 1,3 Isr: 1,3 Topic: ksh Partition: 1 Leader: 2 Replicas: 2,1 Isr: 2,1 Topic: ksh Partition: 2 Leader: 3 Replicas: 3,2 Isr: 3,2 Topic: ksh Partition: 3 Leader: 1 Replicas: 1,2 Isr: 1,2 Topic: ksh Partition: 4 Leader: 2 Replicas: 2,3 Isr: 2,3 Topic: ksh Partition: 5 Leader: 3 Replicas: 3,1 Isr: 3,1 Topic: ksh Partition: 6 Leader: 1 Replicas: 1,3 Isr: 1,3 Topic: ksh Partition: 7 Leader: 2 Replicas: 2,1 Isr: 2,1 Topic: ksh Partition: 8 Leader: 3 Replicas: 3,2 Isr: 3,2 Topic: ksh Partition: 9 Leader: 1 Replicas: 1,2 Isr: 1,2 Topic: ksh Partition: 10 Leader: 2 Replicas: 2,3 Isr: 2,3 Topic: ksh Partition: 11 Leader: 3 Replicas: 3,1 Isr: 3,1 Topic: ksh Partition: 12 Leader: 1 Replicas: 1,3 Isr: 1,3 Topic: ksh Partition: 13 Leader: 2 Replicas: 2,1 Isr: 2,1 Topic: ksh Partition: 14 Leader: 3 Replicas: 3,2 Isr: 3,2 Topic: ksh Partition: 15 Leader: 1 Replicas: 1,2 Isr: 1,2 Topic: ksh Partition: 16 Leader: 2 Replicas: 2,3 Isr: 2,3 Topic: ksh Partition: 17 Leader: 3 Replicas: 3,1 Isr: 3,1 4. Query \u00b6 thriftserver \u00b6 thriftserver\ub97c \uc0ac\uc6a9\ud558\uc5ec \uc9c8\uc758\ub97c \uc218\ud589\ud560 \uc218 \uc788\uc73c\uba70, hive-metastore\ub97c \uc0ac\uc6a9\ud574\uc11c \uba54\ud0c0\uc815\ubcf4\ub97c \uad00\ub9ac\ud560 \uc218 \uc788\uc2b5\ub2c8\ub2e4. \uac01 cluster\uc5d0 thriftserver \uac00 \uc788\uc73c\uba70, \ud2b9\uc815 \ud074\ub7ec\uc2a4\ud130\uc5d0\uc11c \ub744\uc6b8 \uc218 \uc788\uc2b5\ub2c8\ub2e4. > cfc 6 > which thriftserver ~/tsr2/cluster_6/tsr2-assembly-1.0.0-SNAPSHOT/sbin/thriftserver \uc2e4\ud589 \uc804, 'thriftserver edit'\uc744 \ud1b5\ud574 \uc124\uc815\uac12\uc744 \ubcc0\uacbd\ud569\ub2c8\ub2e4. 
#!/bin/bash ############################################################################### # Common variables SPARK_CONF=${SPARK_CONF:-$SPARK_HOME/conf} SPARK_BIN=${SPARK_BIN:-$SPARK_HOME/bin} SPARK_SBIN=${SPARK_SBIN:-$SPARK_HOME/sbin} SPARK_LOG=${SPARK_LOG:-$SPARK_HOME/logs} SPARK_METRICS=${SPARK_CONF}/metrics.properties SPARK_UI_PORT=${SPARK_UI_PORT:-14050} EXECUTERS=12 // \uc218\uc815 \ud544\uc694 EXECUTER_CORES=32 // \uc218\uc815\ud544\uc694 HIVE_METASTORE_URL='' HIVE_HOST=${HIVE_HOST:-localhost} HIVE_PORT=${HIVE_PORT:-13000} COMMON_CLASSPATH=$(find $SR2_LIB -name 'tsr2*' -o -name 'spark-r2*' -o -name '*jedis*' -o -name 'commons*' -o -name 'jdeferred*' \\ -o -name 'geospark*' -o -name 'gt-*' | tr '\\n' ':') ############################################################################### # Driver DRIVER_MEMORY=6g // \uc218\uc815 \ud544\uc694 DRIVER_CLASSPATH=$COMMON_CLASSPATH ############################################################################### # Execute EXECUTOR_MEMORY=2g // \uc218\uc815 \ud544\uc694 EXECUTOR_CLASSPATH=$COMMON_CLASSPATH ############################################################################### # Thrift Server logs EVENT_LOG_ENABLED=false EVENT_LOG_DIR=/nvdrive0/thriftserver-event-logs EVENT_LOG_ROLLING_DIR=/nvdrive0/thriftserver-event-logs-rolling EVENT_LOG_SAVE_MIN=60 EXTRACTED_EVENT_LOG_SAVE_DAY=5 SPARK_LOG_SAVE_MIN=2000 ############## start \ubc29\ubc95 thriftserver start stop \ubc29\ubc95 thriftserver stop yarn application \ud655\uc778 > yarn application -list 20/06/25 17:04:35 INFO client.RMProxy: Connecting to ResourceManager at d205/192.168.111.205:18032 Total number of applications (application-types: [] and states: [SUBMITTED, ACCEPTED, RUNNING]):1 Application-Id Application-Name Application-Type User Queue State Final-State Progress Tracking-URL application_1592880218288_0002 ThriftServer_d205_6 SPARK ltdb default RUNNING UNDEFINED 10% http://d205:14050 table properties \ud655\uc778 0: jdbc:hive2://0.0.0.0:13000> show tables; +-----------+-----------------------------------------------+--------------+--+ | database | tableName | isTemporary | +-----------+-----------------------------------------------+--------------+--+ | default | aua_adong_cd | false | | default | aua_aom_log | false | | default | aua_cell_by10sec_sum | false | | default | aua_cell_by5min_sum | false | | default | aua_cell_cfg_inf | false | | default | aua_enb_nsn_ho_log | false | | default | aua_enb_nsn_rrc_log | false | | default | aua_enb_ss_csl_log | false | | default | aua_ra_cscore_area_5min_sum | false | | default | aua_ue_rf_sum | false | | | aua_aom_log_6fbb17bb9718a46306ec7a9766464813 | true | +-----------+-----------------------------------------------+--------------+--+ 11 rows selected (0.045 seconds) 0: jdbc:hive2://0.0.0.0:13000> show tblproperties aua_ue_rf_sum; +---------------------------------------------+-------------------------------------------------------------------------------------------------+--+ | key | value | +---------------------------------------------+-------------------------------------------------------------------------------------------------+--+ | transient_lastDdlTime | 1581872791 | | fb.load.kafka.bootstrap.servers | 90.90.200.182:9092,90.90.200.183:9092,90.90.200.184:9092,90.90.200.185:9092,90.90.200.186:9092 | | fb.transformation.column.add.IMSI_KEY | ${IMSI_NO}.length() >= 2 && ${IMSI_NO}.substring(0, 2).equals(\"T1\") ? 
\"T1\" : \"O\" | | fb.transformation.column.add.IMSI_HASH_KEY | fnvHash(${IMSI_NO}, 5) | | fb.load.kafka.producer.compression.type | zstd | | fb.transformation.column.add.EVENT_TIME | ${EVT_DTM}.length() < 12 ? \"000000000000\" : ${EVT_DTM}.substring(0, 11).concat(\"0\") | | fb.load.kafka.topic.name | topic-tango-dev | | Comment | \ub2e8\ub9d0\ubcc4 \ubd84\uc11d \uacb0\uacfc | | fb.load.kafka.producer.max.request.size | 1048576 | +---------------------------------------------+-------------------------------------------------------------------------------------------------+--+ 9 rows selected (0.1 seconds) 0: jdbc:hive2://0.0.0.0:13000> 0: jdbc:hive2://0.0.0.0:13000> show create table aua_ue_rf_sum; +-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+--+ | createtab_stmt | +-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+--+ | CREATE TABLE `aua_ue_rf_sum` (`EVT_DTM` STRING, `VEND_ID` STRING, `ADONG_CD` STRING, `ENB_ID` STRING, `CELL_ID` STRING, `ENB_UE_S1AP_ID` STRING, `MME_UE_S1AP_ID` STRING, `IMSI_NO` STRING, `EVT_ID` STRING, `CALL_RESULT_CODE` STRING, 
`CALL_RESULT_MSG` STRING, `FREQ_TYP_CD` STRING, `CQI` STRING, `TA` STRING, `RSRP` STRING, `RSRQ` STRING, `DL_PACKET_LOSS_SUCC_CNT` STRING, `DL_PACKET_LOSS_LOST_CNT` STRING, `DL_PACKET_LOSS_RATE` STRING, `SINR_PUSCH` STRING, `SINR_PUCCH` STRING, `UE_TX_POWER` STRING, `PHR` STRING, `UL_PACKET_LOSS_SUCC_CNT` STRING, `UL_PACKET_LOSS_LOST_CNT` STRING, `UL_PACKET_LOSS_RATE` STRING, `RRC_LATENCY` STRING, `HO_LATENCY` STRING, `RRE_LATENCY` STRING, `DL_NO_RTP` STRING, `UL_NO_RTP` STRING, `ERAB_LATENCY` STRING, `RRC_ERAB_LATENCY` STRING, `EVENT_TIME` STRING, `IMSI_KEY` STRING, `IMSI_HASH_KEY` STRING, `UE_ENDC_STAGE` STRING) USING r2 OPTIONS ( `query_result_partition_cnt_limit` '2000000', `query_response_timeout` '1200000', `query_result_task_row_cnt_limit` '1000000', `host` '90.90.200.187', `serialization.format` '1', `query_result_total_row_cnt_limit` '2147483647', `group_size` '10', `port` '18600', `mode` 'nvkvs', `partitions` 'EVENT_TIME ENB_ID IMSI_KEY IMSI_HASH_KEY', `second_filter_enabled` 'no', `table` '6010' ) | +-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+--+ 1 row selected (0.027 seconds) 0: jdbc:hive2://0.0.0.0:13000> select event_time, enb_id, vend_id from aua_ue_rf_sum limit 3; Error: com.skt.spark.r2.exception.UnsupportedQueryTypeException: at least one partition column should be included in where predicates; caused by 6010 table. You must check to exist partition column(s) of this table or mistype condition of partition column(s) in where clause. : EVENT_TIME,ENB_ID,IMSI_KEY,IMSI_HASH_KEY (state=,code=0) 0: jdbc:hive2://0.0.0.0:13000> select event_time, enb_id, vend_id from aua_ue_rf_sum where event_time='202006250000' limit 3; +---------------+---------------------------+----------+--+ | event_time | enb_id | vend_id | +---------------+---------------------------+----------+--+ | 202006250000 | +F/Ca+X5UZSATNF0zTR9kA== | ELG | | 202006250000 | +F/Ca+X5UZSATNF0zTR9kA== | ELG | | 202006250000 | +F/Ca+X5UZSATNF0zTR9kA== | ELG | +---------------+---------------------------+----------+--+ 3 rows selected (0.149 seconds) 0: jdbc:hive2://0.0.0.0:13000> 5. 
Zeppelin \u00b6 Zeppelin \ud29c\ud1a0\ub9ac\uc5bc\uc744 \uc9c4\ud589\ud558\uae30 \uc804, Zeppelin\uc5d0 Lightning DB \uad00\ub828 Jar \ud30c\uc77c\ub4e4\uc774 \ucd94\uac00\ud558\ub3c4\ub85d \uc124\uc815 \ud558\uc600\ub294\uc9c0 \ud655\uc778\ud574\uc8fc\uc138\uc694. \uc544\ub798\uc640 \uac19\uc774 \ud29c\ud1a0\ub9ac\uc5bc \ub178\ud2b8 \ud30c\uc77c \uc744 \ub0b4\ub824\ubc1b\uac70\ub098 \uc544\ub798 URL\uc744 \uc785\ub825\ud558\uc5ec Zeppelin\uc5d0\uc11c \ubd88\ub7ec\uc635\ub2c8\ub2e4. \ud29c\ud1a0\ub9ac\uc5bc \ub178\ud2b8 \ub9c1\ud06c: https://docs.lightningdb.io/scripts/tutorial_ko.json","title":"Tutorial"},{"location":"tutorial/#1-about-lightning-db","text":"","title":"1. About Lightning DB"},{"location":"tutorial/#architecture-key-features","text":"","title":"Architecture & key features"},{"location":"tutorial/#replication-failoverfailback","text":"","title":"Replication & failover/failback"},{"location":"tutorial/#2-ltcli","text":"Installation Command Line","title":"2. LTCLI"},{"location":"tutorial/#3-ingestion","text":"","title":"3. Ingestion"},{"location":"tutorial/#loadsh","text":"'tsr2-tools'\ub97c \uc774\uc6a9\ud55c \uc801\uc7ac\ub85c '-s' option\uc73c\ub85c delimiter \uc124\uc815 \ud6c4 \uc0ac\uc6a9 #!/bin/bash if [ $# -ne 2 ]; then echo \"Usage: load_data.sh data-directory json-file\" echo \"(e.g.: load_data.sh ./data/split ./json/106.json)\" echo \"Warning: delimiter is '|'\" exit fi tsr2-tools insert java_options \"-Xms1g -Xmx32g\" -d $1 -s \"|\" -t $2 -p 40 -c 1 -i \uc0ac\uc6a9\ubc29\ubc95 [ltdb@d205 ~/tsr2-test]$ cat ./json/cell_nvkvs.json // json file \uc791\uc131. { \"endpoint\" : \"192.168.111.205:18600\", \"id\" :\"9999\", \"columns\" : 219, \"partitions\" : [ 216, 218, 3, 4 ], \"rowStore\" : true // \uc801\uc7ac\ud558\ub294 \ud074\ub7ec\uc2a4\ud130\uc5d0\uc11c 'row-store-enabled'\uac00 \ub3d9\uc77c\ud558\uac8c 'true'\uc778\uc9c0 \ud655\uc778 \ud544\uc694. \ubc18\ub300\uc758 \uacbd\uc6b0 \ub458\ub2e4 'false'\ub85c \uc124\uc815\ud558\uba74 \ub428. } [ltdb@d205 ~/tsr2-test]$ ls -alh ./test_data_one/ // \uc801\uc7ac\ud560 \ub370\uc774\ud130 \ud655\uc778. dir \ubc0f file path \ubaa8\ub450 \uac00\ub2a5\ud568. total 8.7M drwxrwxr-x. 2 ltdb ltdb 50 2020-06-18 08:58:44 ./ drwxrwxr-x. 7 ltdb ltdb 84 2020-06-18 08:58:28 ../ -rw-rw-r--. 1 ltdb ltdb 8.7M 2020-06-18 08:58:44 ANALYSIS_CELL_ELG_20160711055950.dat [ltdb@d205 ~/tsr2-test]$ load.sh // \uadf8\ub0e5 'load.sh'\ub97c \uc785\ub825\ud558\uba74 args \uc815\ubcf4\uac00 \ud45c\uc2dc\ub428. Usage: load_data.sh data-directory json-file (e.g.: load_data.sh ./data/split ./json/106.json) Warning: delimiter is '|' [ltdb@d205 ~/tsr2-test]$ load.sh ./test_data_one/ ./json/cell_nvkvs.json /home/ltdb/tsr2-test/sbin/load.sh: line 9: tsr2-tools: command not found // cluster \uc9c0\uc815\uc774 \ub418\uc9c0 \uc54a\uc544 \uc2e4\ud589\uc774 \uc548\ub418\ub294 \uac83\uc73c\ub85c 'cfc'\ub97c \uc0ac\uc6a9\ud558\uc5ec cluster \ub97c \uc9c0\uc815\ud568 [ltdb@d205 ~/tsr2-test]$ cfc 6 [C:6][ltdb@d205 ~/tsr2-test]$ load.sh ./test_data_one/ ./json/cell_nvkvs.json // \uc801\uc7ac \uc2dc\uc791. SLF4J: Class path contains multiple SLF4J bindings. 
SLF4J: Found binding in [jar:file:/home/ltdb/tsr2/cluster_6/tsr2-assembly-1.0.0-SNAPSHOT/lib/logback-classic-1.2.3.jar!/org/slf4j/impl/StaticLoggerBinder.class]","title":"load.sh"},{"location":"tutorial/#kafka-kafkacat-tsr2-kaetlyn","text":"zookeeper/kafka zookeeper \ubc0f kafka broker \uc124\uce58\uac00 \uc120\ud589\ub418\uc5b4\uc57c \ud568 ==> Kafka&Kaetlyn \uc124\uce58 \ucc38\uace0 \uc0ac\uc6a9\ud558\ub294 topic\uc740 \ud06c\uac8c \uc544\ub798\uc640 \uac19\uc774 3\uac00\uc9c0\ub85c \ub098\ub204\uc5b4\uc9d0 1. \ub370\uc774\ud130 \uc801\uc7ac\ub97c \uc704\ud55c topic - table\uac04 dependency\uac00 \uc5c6\ub3c4\ub85d table\ubcc4\ub85c \ub098\ub220\uc11c \uc801\uc7ac \ud544\uc694 2. error topic - 'tsr2-kaetlyn edit'\uc744 \ud1b5\ud574 'KAFKA_ERROR_REPORT_TOPIC_NAME=topic-error' \ub85c \uc124\uc815 - consuming \ub2e8\uacc4\uc5d0\uc11c \uc801\uc7ac \uc2e4\ud328 \uc2dc error topic\uc73c\ub85c \uc5d0\ub7ec \ub0b4\uc6a9\uc744 \ub123\uc5b4\uc11c \uc801\uc7ac client\uac00 \ud655\uc778\ud560 \uc218 \uc788\ub3c4\ub85d \ud568 3. result topic(optional) - consuming \ud6c4 \uc801\uc7ac \uacb0\uacfc\ub97c \uc815\uc758\ub41c protocol\uc5d0 \ub9de\uac8c \uc801\uc7ac app\uc5d0 \uc804\ub2ec\ud560 \uc218 \uc788\uc74c kafka consumer Kafka&Kaetlyn \uc124\uce58 \uc5d0\uc11c \uac00\uc774\ub4dc\ud558\uace0 \uc788\ub294 kaetlyn consumer\ub97c \uc0ac\uc6a9\ud558\uc5ec consumer\ub97c \uc124\uc815. 'tsr2-kaetlyn edit'\uc744 \ud1b5\ud574 consumer \uc124\uc815\uc774 \ud544\uc694\ud568 \uae30\ubcf8\uc801\uc73c\ub85c '\uc218\uc815 \ud544\uc694'\ub85c \ucf54\uba58\ud2b8\ub41c \ubd80\ubd84\uc740 \uac80\ud1a0 \ubc0f \uc218\uc815\uc774 \ud544\uc694\ud568 #!/bin/bash ############################################################################### # Common variables SPARK_CONF=${SPARK_CONF:-$SPARK_HOME/conf} SPARK_BIN=${SPARK_BIN:-$SPARK_HOME/bin} SPARK_SBIN=${SPARK_SBIN:-$SPARK_HOME/sbin} SPARK_LOG=${SPARK_LOG:-$SPARK_HOME/logs} SPARK_METRICS=${SPARK_CONF}/metrics.properties SPARK_UI_PORT=${SPARK_UI_PORT:-14040} KAFKA_SERVER=localhost:9092 ############################################################################### # Properties for Consumer DRIVER_MEMORY=1g // \uc218\uc815 \ud544\uc694 EXECUTOR_MEMORY=1g // \uc218\uc815 \ud544\uc694 EXECUTORS=12 // \uc218\uc815 \ud544\uc694 EXECUTOR_CORES=12 // \uc218\uc815 \ud544\uc694 [[JSON_PATH]]=~/Flashbase/flashbase-benchmark/json/load_no_skew JSON_PATH=/home/ltdb/tsr2-test/json // \uc218\uc815 \ud544\uc694, json file \uc5c5\ub370\uc774\ud2b8 \uc2dc kaetlyn \uc7ac\uc2dc\uc791 \ud544\uc694! [[HIVE_METASTORE_URI]]=thrift://localhost:9083 HIVE_METASTORE_URI='' // \uc218\uc815 \ud544\uc694 KAFKA_CONSUMER_GROUP_ID=nvkvs_redis_connector // \uc218\uc815 \ud544\uc694 KAFKA_CONSUMING_TOPIC_LIST=nvkvs3 // \uc218\uc815 \ud544\uc694 JOB_GENERATION_PERIOD=1 MAX_RATE_PER_PARTITION=20000 KAFKA_ERROR_REPORT_TOPIC_NAME=topic-error TEST_MODE=false EXCUTOR_LOCALITY=false kafka producer \uae30\ubcf8\uc801\uc73c\ub85c kafka producing\uc740 \uc544\ub798\uc640 \uac19\uc740 \ubc29\ubc95\uc73c\ub85c \ud560 \uc218 \uc788\uc74c kafka-console-producer.sh --broker-list localhost:9092 --topic {topic name} < {\uc801\uc7ac\ud560 filename} \ud558\uc9c0\ub9cc, kaetlyn \uc801\uc7ac\ub97c \uc704\ud574\uc11c\ub294 \uba54\uc2dc\uc9c0\uc5d0 \uc544\ub798 \ud5e4\ub354 \uc815\ubcf4\uac00 \ud3ec\ud568\ub418\uc5b4\uc57c \ud55c\ub2e4. 
TABLE_ID SEPARATOR \ub530\ub77c\uc11c kafkacat\uc774\ub77c\ub294 tool\uc744 \ud1b5\ud574 \ud5e4\ub354 \uc815\ubcf4\uc640 \ud568\uaed8 producing\uc744 \ud574\uc57c \ud55c\ub2e4.(https://docs.confluent.io/3.3.0/app-development/kafkacat-usage.html# \ucc38\uace0) c++ compiler \uc124\uce58 $yum install gcc-c++ $ git clone https://github.com/edenhill/librdkafka $ cd librdkafka $ ./configure $ make $ sudo make install /usr/local/lib \ub85c \uc774\ub3d9\ud574\uc8fc\uc5b4 \ub2e4\uc74c \uba85\ub839\uc5b4 \uc2e4\ud589\ud55c\ub2e4. $ git clone https://github.com/edenhill/kafkacat $ cd kafkacat $ ./configure $ make $ sudo make install Lib \ud30c\uc77c\uc744 \ucc3e\uc744 \uc218 \uc5c6\ub2e4\uba74 $ ldd kafkacat \ub2e4\uc74c\uc758 \ud30c\uc77c\uc744 \ub9cc\ub4e4\uace0 \uc544\ub798\ub97c \ucd94\uac00 /etc/ld.so.conf.d/usrlocal.conf Contents: /usr/local/lib \uc800\uc7a5 \ud6c4 \uc544\ub798 \uba85\ub839\uc5b4 \uc2e4\ud589 $ ldconfig -v $kafkacat Kafkacat\uc5d0 \ub300\ud55c \uba85\ub839\uc5b4\uac00 \ub098\uc624\uba74 \uc131\uacf5 kafkacat\uc774 \uc815\uc0c1 \uc124\uce58\ub418\uc5c8\uc73c\uba74 \uc544\ub798\uc640 \uac19\uc774 producing\uc774 \uac00\ub2a5\ud568 file \ud558\ub098\ub9cc \uc801\uc7ac\ud560 \uacbd\uc6b0 kafkacat -b localhost:9092 -t {topic name} -T -P -H TABLE_ID='{table id}' -H SEPARATOR='|' -l {\uc801\uc7ac\ud560 filename} 2. dir\uc5d0 \uc788\ub294 \ubaa8\ub4e0 \ud30c\uc77c\uc744 \uc801\uc7ac\ud560 \ub54c\uc5d0\ub294 \ud574\ub2f9 dir\ub85c \uc774\ub3d9\ud55c \ud6c4\uc5d0, ls | xargs -n 1 kafkacat -q -b localhost:9092 -t {topic name} -P -H TABLE_ID='{table id}' -H SEPARATOR='|' -l ** \uae30\ubcf8\uc801\uc778 \uac00\uc774\ub4dc\ub294 Kafka&Kaetlyn \uc124\uce58 \uc5d0 \uc788\uc73c\ubbc0\ub85c \uac1c\ub150 \uc774\ud574\ub97c \uc704\ud574\uc11c\ub294 \uc774 \ud398\uc774\uc9c0\ub97c \ucc38\uace0\ud558\uba74 \ub418\uc9c0\ub9cc \uc880 \ub354 \ud3b8\ub9ac\ud558\uac8c \uc0ac\uc6a9\ud558\uae30 \uc704\ud574 kafka-utils.sh\ub97c \uc81c\uacf5\ud558\uace0 \uc788\uc5b4 \uc6b4\uc601 \uc2dc\uc5d0\ub294 kafka-utils.sh\ub97c \uc0ac\uc6a9\ud558\uba74 \ub428. 'kafka-utils.sh'\ub294 \uac01 \ud074\ub7ec\uc2a4\ud130\ubcc4 sbin\uc5d0 \uc788\uc73c\ubbc0\ub85c, 'cfc'\ub85c cluster \uc124\uc815 \ud6c4 \uc0ac\uc6a9\uc774 \uac00\ub2a5\ud568. [C:6][ltdb@d205 ~]$ which kafka-utils.sh ~/tsr2/cluster_6/tsr2-assembly-1.0.0-SNAPSHOT/sbin/kafka-utils.sh \uc544\ub798\uc640 \uac19\uc774 'CONSUMER_GROUP_ID'\uac00 \uc9c0\uc815\ub418\uc5b4 \uc788\uc9c0 \uc54a\uc73c\uba74 \uc2e4\ud589\uc774 \ub418\uc9c0 \uc54a\uc73c\ubbc0\ub85c, [C:6][ltdb@d205 ~]$ kafka-utils.sh help Please, set $CONSUMER_GROUP_ID first. \uc544\ub798\uc640 \uac19\uc774 'kafka-utils.sh'\ub97c \uc5f4\uc5b4\uc11c \uc218\uc815\uc744 \ud574\uc57c \ud568. #!/bin/bash CONSUMER_GROUP_ID='nvkvs_redis_connector' // \uc218\uc815 \ud544\uc694 KAFKA_SERVER=localhost:9092 ZOOKEEPER_SERVER=localhost:2181... 'help'\ub97c \ud1b5\ud574 \uac00\ub2a5\ud55c \ucee4\ub9e8\ub4dc\ub97c \ud655\uc778\ud560 \uc218 \uc788\uc74c. 
[C:6][ltdb@d205 ~/kafka/config]$ kafka-utils.sh help kafka-utils.sh offset-check kafka-utils.sh offset-monitor kafka-utils.sh offset-earliest topic_name kafka-utils.sh offset-latest topic_name kafka-utils.sh offset-move topic_name 10000 kafka-utils.sh error-monitor error_topic_name kafka-utils.sh consumer-list kafka-utils.sh topic-check topic_name kafka-utils.sh topic-create topic_name 10 kafka-utils.sh topic-delete topic_name kafka-utils.sh topic-config-check topic_name kafka-utils.sh topic-config-set topic_name config_name config_value kafka-utils.sh topic-config-remove topic_name config_name kafka-utils.sh topic-list kafka-utils.sh message-earliest topic_name kafka-utils.sh message-latest topic_name command\uc5d0 args\uac00 \ud544\uc694\ud55c \uacbd\uc6b0, args\uc5c6\uc774 \uc785\ub825\ud558\uba74 \uc544\ub798\uc640 \uac19\uc774 \uac00\uc774\ub4dc \ubb38\uad6c\uac00 \ub098\uc634. [C:6][ltdb@d205 ~/kafka/config]$ kafka-utils.sh offset-move Please, specify topic name & the size of moving offset (ex) kafka-utils.sh offset-move my-topic 100 [C:6][ltdb@d205 ~/kafka/config]$ kafka-utils.sh topic-create Please, specify topic name and its partition count. (ex) kafka-utils.sh topic-create topic-new 10 [C:6][ltdb@d205 ~/kafka/config]$ \uc0ac\uc6a9 \uc608, [C:6][ltdb@d205 ~]$ kafka-utils.sh message-earliest nvkvs3 20160711055950|ELG|2635055200|34317|5|6091|1|25|0|11|0|100.0|0.0|0|2846|3|33|0|5|0|-1000|0.0|0.0|94932|1027|0|176|35.2|40|0|7818000000|109816071|10|0|6000000.0|164843|2.75|0|2592|6000000|0.04|1288488|1303|1338|0|530|1|88.33|0|721|67948|428|0|1|108|108.0|108|0|0.0|0|0|0|-1000|1|1|100.0|62|39.0|62.9|23.0|37.1|0|0|0|0|29|10|-7022851.0|59998.0|-117.05|-6865443.5|59998.0|-114.43|4|198060.0|59998.0|22.5|3.3|0|1|5.82|3|1.94||0|0|0|0|0|0|0|0|4|0|0|0|15|14|231|140|0|0|0|0|0|0|0|0|4|0|0|0|15|13|174|110|1|0|0|0|0|0|0|0|0|0|0|0|0|0|1|0|0|0|0|0|0|0|1|0|0|0|0|0|0|0|0|0|0.0|0.0|0.0|0.0|0.0|0.0|570.0|0.0|3.0|0.0|0.0|0.0|0.0|2.0|3.0|3.0|0.0|15.73|0.0|0.0|0.0|0.0|0.0|12.0|22.0|68.0|83.0|339.0|205.0|144.0|54.0|38.0|12.0|0.0|0.0|0.0|0.0|0.0|0.0|100.0|50.55|1:22,2:7|1.0|||||1:1,17:1,23:1|13.67|0|0|0.0|0.0|-1000||-1000||-1000|11|2|05 Processed a total of 1 messages [C:6][ltdb@d205 ~]$ kafka-utils.sh topic-list __consumer_offsets nvkvs3 topic-error topic_name [C:6][ltdb@d205 ~]$ kafka-utils.sh topic-create ksh 18 Created topic ksh. 
[C:6][ltdb@d205 ~]$ kafka-utils.sh topic-check ksh Topic:ksh PartitionCount:18 ReplicationFactor:2 Configs: Topic: ksh Partition: 0 Leader: 1 Replicas: 1,3 Isr: 1,3 Topic: ksh Partition: 1 Leader: 2 Replicas: 2,1 Isr: 2,1 Topic: ksh Partition: 2 Leader: 3 Replicas: 3,2 Isr: 3,2 Topic: ksh Partition: 3 Leader: 1 Replicas: 1,2 Isr: 1,2 Topic: ksh Partition: 4 Leader: 2 Replicas: 2,3 Isr: 2,3 Topic: ksh Partition: 5 Leader: 3 Replicas: 3,1 Isr: 3,1 Topic: ksh Partition: 6 Leader: 1 Replicas: 1,3 Isr: 1,3 Topic: ksh Partition: 7 Leader: 2 Replicas: 2,1 Isr: 2,1 Topic: ksh Partition: 8 Leader: 3 Replicas: 3,2 Isr: 3,2 Topic: ksh Partition: 9 Leader: 1 Replicas: 1,2 Isr: 1,2 Topic: ksh Partition: 10 Leader: 2 Replicas: 2,3 Isr: 2,3 Topic: ksh Partition: 11 Leader: 3 Replicas: 3,1 Isr: 3,1 Topic: ksh Partition: 12 Leader: 1 Replicas: 1,3 Isr: 1,3 Topic: ksh Partition: 13 Leader: 2 Replicas: 2,1 Isr: 2,1 Topic: ksh Partition: 14 Leader: 3 Replicas: 3,2 Isr: 3,2 Topic: ksh Partition: 15 Leader: 1 Replicas: 1,2 Isr: 1,2 Topic: ksh Partition: 16 Leader: 2 Replicas: 2,3 Isr: 2,3 Topic: ksh Partition: 17 Leader: 3 Replicas: 3,1 Isr: 3,1","title":"kafka & kafkacat & tsr2-kaetlyn"},{"location":"tutorial/#4-query","text":"","title":"4. Query"},{"location":"tutorial/#thriftserver","text":"thriftserver\ub97c \uc0ac\uc6a9\ud558\uc5ec \uc9c8\uc758\ub97c \uc218\ud589\ud560 \uc218 \uc788\uc73c\uba70, hive-metastore\ub97c \uc0ac\uc6a9\ud574\uc11c \uba54\ud0c0\uc815\ubcf4\ub97c \uad00\ub9ac\ud560 \uc218 \uc788\uc2b5\ub2c8\ub2e4. \uac01 cluster\uc5d0 thriftserver \uac00 \uc788\uc73c\uba70, \ud2b9\uc815 \ud074\ub7ec\uc2a4\ud130\uc5d0\uc11c \ub744\uc6b8 \uc218 \uc788\uc2b5\ub2c8\ub2e4. > cfc 6 > which thriftserver ~/tsr2/cluster_6/tsr2-assembly-1.0.0-SNAPSHOT/sbin/thriftserver \uc2e4\ud589 \uc804, 'thriftserver edit'\uc744 \ud1b5\ud574 \uc124\uc815\uac12\uc744 \ubcc0\uacbd\ud569\ub2c8\ub2e4. 
#!/bin/bash ############################################################################### # Common variables SPARK_CONF=${SPARK_CONF:-$SPARK_HOME/conf} SPARK_BIN=${SPARK_BIN:-$SPARK_HOME/bin} SPARK_SBIN=${SPARK_SBIN:-$SPARK_HOME/sbin} SPARK_LOG=${SPARK_LOG:-$SPARK_HOME/logs} SPARK_METRICS=${SPARK_CONF}/metrics.properties SPARK_UI_PORT=${SPARK_UI_PORT:-14050} EXECUTERS=12 // \uc218\uc815 \ud544\uc694 EXECUTER_CORES=32 // \uc218\uc815\ud544\uc694 HIVE_METASTORE_URL='' HIVE_HOST=${HIVE_HOST:-localhost} HIVE_PORT=${HIVE_PORT:-13000} COMMON_CLASSPATH=$(find $SR2_LIB -name 'tsr2*' -o -name 'spark-r2*' -o -name '*jedis*' -o -name 'commons*' -o -name 'jdeferred*' \\ -o -name 'geospark*' -o -name 'gt-*' | tr '\\n' ':') ############################################################################### # Driver DRIVER_MEMORY=6g // \uc218\uc815 \ud544\uc694 DRIVER_CLASSPATH=$COMMON_CLASSPATH ############################################################################### # Execute EXECUTOR_MEMORY=2g // \uc218\uc815 \ud544\uc694 EXECUTOR_CLASSPATH=$COMMON_CLASSPATH ############################################################################### # Thrift Server logs EVENT_LOG_ENABLED=false EVENT_LOG_DIR=/nvdrive0/thriftserver-event-logs EVENT_LOG_ROLLING_DIR=/nvdrive0/thriftserver-event-logs-rolling EVENT_LOG_SAVE_MIN=60 EXTRACTED_EVENT_LOG_SAVE_DAY=5 SPARK_LOG_SAVE_MIN=2000 ############## start \ubc29\ubc95 thriftserver start stop \ubc29\ubc95 thriftserver stop yarn application \ud655\uc778 > yarn application -list 20/06/25 17:04:35 INFO client.RMProxy: Connecting to ResourceManager at d205/192.168.111.205:18032 Total number of applications (application-types: [] and states: [SUBMITTED, ACCEPTED, RUNNING]):1 Application-Id Application-Name Application-Type User Queue State Final-State Progress Tracking-URL application_1592880218288_0002 ThriftServer_d205_6 SPARK ltdb default RUNNING UNDEFINED 10% http://d205:14050 table properties \ud655\uc778 0: jdbc:hive2://0.0.0.0:13000> show tables; +-----------+-----------------------------------------------+--------------+--+ | database | tableName | isTemporary | +-----------+-----------------------------------------------+--------------+--+ | default | aua_adong_cd | false | | default | aua_aom_log | false | | default | aua_cell_by10sec_sum | false | | default | aua_cell_by5min_sum | false | | default | aua_cell_cfg_inf | false | | default | aua_enb_nsn_ho_log | false | | default | aua_enb_nsn_rrc_log | false | | default | aua_enb_ss_csl_log | false | | default | aua_ra_cscore_area_5min_sum | false | | default | aua_ue_rf_sum | false | | | aua_aom_log_6fbb17bb9718a46306ec7a9766464813 | true | +-----------+-----------------------------------------------+--------------+--+ 11 rows selected (0.045 seconds) 0: jdbc:hive2://0.0.0.0:13000> show tblproperties aua_ue_rf_sum; +---------------------------------------------+-------------------------------------------------------------------------------------------------+--+ | key | value | +---------------------------------------------+-------------------------------------------------------------------------------------------------+--+ | transient_lastDdlTime | 1581872791 | | fb.load.kafka.bootstrap.servers | 90.90.200.182:9092,90.90.200.183:9092,90.90.200.184:9092,90.90.200.185:9092,90.90.200.186:9092 | | fb.transformation.column.add.IMSI_KEY | ${IMSI_NO}.length() >= 2 && ${IMSI_NO}.substring(0, 2).equals(\"T1\") ? 
\"T1\" : \"O\" | | fb.transformation.column.add.IMSI_HASH_KEY | fnvHash(${IMSI_NO}, 5) | | fb.load.kafka.producer.compression.type | zstd | | fb.transformation.column.add.EVENT_TIME | ${EVT_DTM}.length() < 12 ? \"000000000000\" : ${EVT_DTM}.substring(0, 11).concat(\"0\") | | fb.load.kafka.topic.name | topic-tango-dev | | Comment | \ub2e8\ub9d0\ubcc4 \ubd84\uc11d \uacb0\uacfc | | fb.load.kafka.producer.max.request.size | 1048576 | +---------------------------------------------+-------------------------------------------------------------------------------------------------+--+ 9 rows selected (0.1 seconds) 0: jdbc:hive2://0.0.0.0:13000> 0: jdbc:hive2://0.0.0.0:13000> show create table aua_ue_rf_sum; +-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+--+ | createtab_stmt | +-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+--+ | CREATE TABLE `aua_ue_rf_sum` (`EVT_DTM` STRING, `VEND_ID` STRING, `ADONG_CD` STRING, `ENB_ID` STRING, `CELL_ID` STRING, `ENB_UE_S1AP_ID` STRING, `MME_UE_S1AP_ID` STRING, `IMSI_NO` STRING, `EVT_ID` STRING, `CALL_RESULT_CODE` STRING, 
`CALL_RESULT_MSG` STRING, `FREQ_TYP_CD` STRING, `CQI` STRING, `TA` STRING, `RSRP` STRING, `RSRQ` STRING, `DL_PACKET_LOSS_SUCC_CNT` STRING, `DL_PACKET_LOSS_LOST_CNT` STRING, `DL_PACKET_LOSS_RATE` STRING, `SINR_PUSCH` STRING, `SINR_PUCCH` STRING, `UE_TX_POWER` STRING, `PHR` STRING, `UL_PACKET_LOSS_SUCC_CNT` STRING, `UL_PACKET_LOSS_LOST_CNT` STRING, `UL_PACKET_LOSS_RATE` STRING, `RRC_LATENCY` STRING, `HO_LATENCY` STRING, `RRE_LATENCY` STRING, `DL_NO_RTP` STRING, `UL_NO_RTP` STRING, `ERAB_LATENCY` STRING, `RRC_ERAB_LATENCY` STRING, `EVENT_TIME` STRING, `IMSI_KEY` STRING, `IMSI_HASH_KEY` STRING, `UE_ENDC_STAGE` STRING) USING r2 OPTIONS ( `query_result_partition_cnt_limit` '2000000', `query_response_timeout` '1200000', `query_result_task_row_cnt_limit` '1000000', `host` '90.90.200.187', `serialization.format` '1', `query_result_total_row_cnt_limit` '2147483647', `group_size` '10', `port` '18600', `mode` 'nvkvs', `partitions` 'EVENT_TIME ENB_ID IMSI_KEY IMSI_HASH_KEY', `second_filter_enabled` 'no', `table` '6010' ) | +-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+--+ 1 row selected (0.027 seconds) 0: jdbc:hive2://0.0.0.0:13000> select event_time, enb_id, vend_id from aua_ue_rf_sum limit 3; Error: com.skt.spark.r2.exception.UnsupportedQueryTypeException: at least one partition column should be included in where predicates; caused by 6010 table. You must check to exist partition column(s) of this table or mistype condition of partition column(s) in where clause. 
: EVENT_TIME,ENB_ID,IMSI_KEY,IMSI_HASH_KEY (state=,code=0) 0: jdbc:hive2://0.0.0.0:13000> select event_time, enb_id, vend_id from aua_ue_rf_sum where event_time='202006250000' limit 3; +---------------+---------------------------+----------+--+ | event_time | enb_id | vend_id | +---------------+---------------------------+----------+--+ | 202006250000 | +F/Ca+X5UZSATNF0zTR9kA== | ELG | | 202006250000 | +F/Ca+X5UZSATNF0zTR9kA== | ELG | | 202006250000 | +F/Ca+X5UZSATNF0zTR9kA== | ELG | +---------------+---------------------------+----------+--+ 3 rows selected (0.149 seconds) 0: jdbc:hive2://0.0.0.0:13000>","title":"thriftserver"},{"location":"tutorial/#5-zeppelin","text":"Zeppelin \ud29c\ud1a0\ub9ac\uc5bc\uc744 \uc9c4\ud589\ud558\uae30 \uc804, Zeppelin\uc5d0 Lightning DB \uad00\ub828 Jar \ud30c\uc77c\ub4e4\uc774 \ucd94\uac00\ud558\ub3c4\ub85d \uc124\uc815 \ud558\uc600\ub294\uc9c0 \ud655\uc778\ud574\uc8fc\uc138\uc694. \uc544\ub798\uc640 \uac19\uc774 \ud29c\ud1a0\ub9ac\uc5bc \ub178\ud2b8 \ud30c\uc77c \uc744 \ub0b4\ub824\ubc1b\uac70\ub098 \uc544\ub798 URL\uc744 \uc785\ub825\ud558\uc5ec Zeppelin\uc5d0\uc11c \ubd88\ub7ec\uc635\ub2c8\ub2e4. \ud29c\ud1a0\ub9ac\uc5bc \ub178\ud2b8 \ub9c1\ud06c: https://docs.lightningdb.io/scripts/tutorial_ko.json","title":"5. Zeppelin"},{"location":"version-update/","text":"You can update LightningDB by using the 'deploy' command. > c 1 // alias of 'cluster use 1' > deploy (Watch out) Cluster 1 is already deployed. Do you want to deploy again? (y/n) [n] y (1) Select installer Select installer [ INSTALLER LIST ] (1) lightningdb.release.master.5a6a38.bin (2) lightningdb.trial.master.dbcb9e-dirty.bin (3) lightningdb.trial.master.dbcb9e.bin Please enter the number, file path or URL of the installer you want to use. you can also add a file in list by copy to '$FBPATH/releases/' 1 OK, lightningdb.release.master.5a6a38.bin (2) Restore Do you want to restore conf? (y/n) y If the current settings will be reused, type 'y'. (3) Check all settings finally +-----------------+---------------------------------------------------+ | NAME | VALUE | +-----------------+---------------------------------------------------+ | installer | lightningdb.release.master.5a6a38.bin | | nodes | nodeA | | | nodeB | | | nodeC | | | nodeD | | master ports | 18100 | | slave ports | 18150-18151 | | ssd count | 3 | | redis data path | ~/sata_ssd/ssd_ | | redis db path | ~/sata_ssd/ssd_ | | flash db path | ~/sata_ssd/ssd_ | +-----------------+---------------------------------------------------+ Do you want to proceed with the deploy accroding to the above information? (y/n) y Check status of hosts... +-----------+--------+ | HOST | STATUS | +-----------+--------+ | nodeA | OK | | nodeB | OK | | nodeC | OK | | nodeD | OK | +-----------+--------+ Checking for cluster exist... +------+--------+ | HOST | STATUS | +------+--------+ Backup conf of cluster 1... OK, cluster_1_conf_bak_ Backup info of cluster 1 at nodeA... OK, cluster_1_bak_ Backup info of cluster 1 at nodeB... OK, cluster_1_bak_ Backup info of cluster 1 at nodeC... OK, cluster_1_bak_ Backup info of cluster 1 at nodeD... OK, cluster_1_bak_ Transfer installer and execute... - nodeA - nodeB - nodeC - nodeD Sync conf... Complete to deploy cluster 1. Cluster 1 selected. 
Backup path of cluster: ${base-directory}/backup/cluster_${cluster-id}_bak_${time-stamp} Backup path of conf files: $FBAPTH/conf_backup/cluster_${cluster-id}_conf_bak_${time-stamp} (4) Restart > cluster restart After the restart, the new version will be applied.","title":"Version update"},{"location":"why-k8s/","text":"Why Kubernetes? \u00b6 Legacy system \u00b6 As we work with multiple companies/organizations, deployment environments have diversified to cloud, Kubernetes, on-premise, etc. and OS requirements have also diversified, requiring us to be agile and responsive. On Kubernetes \u00b6 We built these environments to help you create versions in a consistently optimized environment across different environments.","title":"Why Kubernetes?"},{"location":"why-k8s/#why-kubernetes","text":"","title":"Why Kubernetes?"},{"location":"why-k8s/#legacy-system","text":"As we work with multiple companies/organizations, deployment environments have diversified to cloud, Kubernetes, on-premise, etc. and OS requirements have also diversified, requiring us to be agile and responsive.","title":"Legacy system"},{"location":"why-k8s/#on-kubernetes","text":"We built these environments to help you create versions in a consistently optimized environment across different environments.","title":"On Kubernetes"},{"location":"zeppelin-example-face-data/","text":"In Zeppelin, you can import the Face data file with below link. KNN_SEARCH_TEST.json 1. Put csv files into HDFS hdfs dfs -mkdir /face_data hdfs dfs -put /face_data/csv/* /face_data 2. Load face data %spark import org.apache.spark.sql.functions._ import org.apache.spark.sql.r2.UDF.R2UDFs val toFloatArray = udf(R2UDFs.toFloatArray) val fnvHash = udf(R2UDFs.fnvHash) R2UDFs.register() val r2Option = Map(\"table\" -> \"300\", \"host\" -> \"d201\", \"port\" -> \"18400\", \"partitions\" -> \"nvhash\", \"mode\" -> \"nvkvs\", \"at_least_one_partition_enabled\" -> \"no\") spark.sqlContext.read.format(\"csv\") .option(\"sep\", \"|\") .option(\"header\", \"false\") .option(\"inferSchema\", \"true\") .load(\"/face_data/csv/*.csv\") .withColumnRenamed(\"_c0\", \"group\") .withColumnRenamed(\"_c1\", \"subgroup\") .withColumnRenamed(\"_c2\", \"subject\") .withColumnRenamed(\"_c3\", \"face\") .withColumnRenamed(\"_c4\", \"raw_feature\") .withColumn(\"feature\", toFloatArray(split(col(\"raw_feature\"), \",\"))) .select(\"group\", \"subgroup\", \"subject\", \"face\", \"feature\") .withColumn(\"nvhash\", fnvHash(col(\"face\"), functions.lit(30))) .write .format(\"r2\") .options(r2Options) .mode(\"append\") .save() 3. Create table %spark import org.apache.spark.sql.types._ import org.apache.spark.sql.r2.UDF.R2UDFs val fields = \"group_id,app_id,subject_id,face_id,feature,nvhash\".split(\",\").map( fieldName => { if (\"feature\".equals(fieldName)) { StructField(fieldName, ArrayType(FloatType)) } else { StructField(fieldName, StringType) } } ) val faceTableSchema = StructType(fields) spark.sqlContext.read.format(\"r2\") .schema(faceTableSchema) .options(r2Option) .load() .createOrReplaceTempView(\"face_1m\") 4. Enable \u2018KNN SEARCH PUSHDOWN\u2019 feature %sql SET spark.r2.knn.pushdown=true 5. 
KNN search with using the udf(cosineDistance) of Lightning DB %sql SELECT group_id, app_id, subject_id, face_id, cosineDistance(feature, toFloatArray(array(0.04074662,0.07717144,-0.01387950,0.01287790,0.04414229,0.03390900,0.03808868,0.03956917,0.00592308,-0.00935156,0.04414903,-0.01830893,-0.01918902,0.00154574,-0.02383651,-0.01054291,0.12655860,0.02075430,0.10315673,0.01371782,0.01522089,0.04304991,0.03376650,0.06703991,0.03827063,-0.00063873,0.02516229,0.07061137,0.08456459,-0.04103030,0.03004974,0.00297395,0.00295535,0.01112351,0.02805021,0.04350155,-0.00448326,0.04780317,0.10815978,-0.01784242,0.03320745,0.02912348,0.00183310,0.05318154,0.00922967,-0.04507693,0.01333585,0.00048346,-0.04612860,0.00427735,0.01232839,-0.00100568,0.03865110,0.01765136,-0.00942758,0.02383475,-0.01068696,0.08959154,0.08527020,0.03379998,-0.03852739,0.00607160,0.01309861,-0.01262910,0.00418265,0.03614477,-0.02971224,0.03703139,0.04333942,-0.03143747,0.06752674,-0.02173617,0.03583429,0.07731125,-0.02637132,-0.00395790,-0.04447101,0.03351297,0.08787052,0.00647665,0.03660145,-0.00640601,-0.01004024,0.00763107,0.04762240,-0.00068071,0.00863653,0.06126453,0.04588475,-0.03891076,0.07472295,0.02470182,0.08828569,0.01660202,0.02419317,0.09363404,0.05495216,0.01202847,0.00120040,-0.02136896,0.03100559,0.07371868,0.00986731,0.03934553,0.01289396,0.04705510,-0.02040175,0.01501585,0.00678832,0.03882410,0.02261387,0.02165552,-0.05097445,0.00240807,-0.04210005,0.00760698,-0.02904095,0.06572093,0.03549200,0.06070529,0.06948626,0.02832109,0.01669887,0.00914011,-0.00024529,-0.00124402,0.06481186,0.08246713,0.07499877,0.13112830,0.01034968,0.04224777,0.01175614,0.07395388,0.04937797,0.01561183,-0.03251049,0.05449009,0.04767901,-0.01149555,-0.02055555,-0.05990825,0.06633005,0.07592525,-0.04504610,0.03348448,0.04178635,0.01327751,0.02208084,0.08780535,-0.00799043,0.02236966,0.01560906,0.01171102,0.00814554,-0.00257578,0.08387835,-0.01018093,-0.02170087,0.03230520,0.00955880,-0.01824543,0.05438962,0.01805668,0.02112979,-0.01372666,-0.01057472,0.05453142,0.03742066,0.05534794,0.00977020,0.01991821,-0.00884413,0.09644359,0.02875310,0.10519003,0.05280351,-0.01918891,0.03197290,0.02722778,0.03450845,0.02669794,0.08618007,0.09387484,0.05103674,-0.01431658,0.00783211,-0.00434245,0.02062620,-0.00611403,0.06696083,0.01333337,-0.00156842,0.04325287,-0.05481976,0.01642864,-0.02679648,-0.00642413,0.03808333,0.06134293,0.06049823,0.03818581,0.03599750,-0.01651556,0.06601544,0.01385061,0.00730943,0.03045858,-0.00200028,0.04009718,0.04393080,-0.02568381,-0.01271287,-0.01860873,0.03669106,0.00154059,-0.04202117,0.07374570,-0.00380450,0.03164477,0.00637422,-0.02361638,0.01918917,0.01680134,0.01346881,0.02424449,-0.00504802,-0.06241146,0.08241857,0.02817723,0.02132487,0.08051144,0.06332499,0.02585857,-0.04057337,0.00279212,-0.00005161,-0.06566417,0.07860317,-0.01276221,0.06822366,-0.00191142,0.08534018,0.06014366,0.07053877,-0.01962799,0.08602677,-0.00817098,0.00302233,-0.10041475,-0.01908947,0.03235617,0.00931559,0.05451865,0.02233902,-0.01173994))) AS distance FROM face_1m ORDER BY distance DESC LIMIT 20","title":"KNN SEARCH"},{"location":"zeppelin-example-nyctaxi/","text":"In Zeppelin, you can import the NYC TAXI Benchmark file with below link. NYC_TAXI_BM_load_and_query.json 1. 
Put csv files into HDFS %sh hdfs dfs -mkdir /nyc_taxi hdfs dfs -mkdir /nyc_taxi/csv hdfs dfs -put /nvme_ssd/nyc_taxi/csv_gz/csv1/* /nyc_taxi/csv hdfs dfs -put /nvme_ssd/nyc_taxi/csv_gz/csv2/* /nyc_taxi/csv hdfs dfs -put /nvme_ssd/nyc_taxi/csv_gz/csv3/* /nyc_taxi/csv hdfs dfs -put /nvme_ssd/nyc_taxi/csv_gz/csv4/* /nyc_taxi/csv hdfs dfs -put /nvme_ssd/nyc_taxi/csv_gz/csv5/* /nyc_taxi/csv hdfs dfs -put /nvme_ssd/nyc_taxi/csv_gz/csv6/* /nyc_taxi/csv 2. Create dataframe and load data %spark import org.apache.spark.sql.types._ val taxiSchema = StructType(Array( StructField(\"trip_id\", IntegerType, true), StructField(\"vendor_id\", StringType, true), StructField(\"pickup_datetime\", TimestampType, true), StructField(\"dropoff_datetime\", TimestampType, true), StructField(\"store_and_fwd_flag\", StringType, true), StructField(\"rate_code_id\", IntegerType, true), StructField(\"pickup_longitude\", DoubleType, true), StructField(\"pickup_latitude\", DoubleType, true), StructField(\"dropoff_longitude\", DoubleType, true), StructField(\"dropoff_latitude\", DoubleType, true), StructField(\"passenger_count\", StringType, true), StructField(\"trip_distance\", DoubleType, true), StructField(\"fare_amount\", DoubleType, true), StructField(\"extra\", DoubleType, true), StructField(\"mta_tax\", DoubleType, true), StructField(\"tip_amount\", DoubleType, true), StructField(\"tolls_amount\", DoubleType, true), StructField(\"improvement_surcharge\", DoubleType, true), StructField(\"total_amount\", DoubleType, true), StructField(\"payment_type\", StringType, true), StructField(\"trip_type\", IntegerType, true), StructField(\"cab_type\", StringType, true), StructField(\"precipitation\", DoubleType, true), StructField(\"snow_depth\", DoubleType, true), StructField(\"snowfall\", DoubleType, true), StructField(\"max_temperature\", IntegerType, true), StructField(\"min_temperature\", IntegerType, true), StructField(\"average_wind_speed\", DoubleType, true), StructField(\"pickup_nyct2010_gid\", IntegerType, true), StructField(\"pickup_ctlabel\", StringType, true), StructField(\"pickup_borocode\", IntegerType, true), StructField(\"pickup_boroname\", StringType, true), StructField(\"pickup_ct2010\", StringType, true), StructField(\"pickup_boroct2010\", StringType, true), StructField(\"pickup_cdeligibil\", StringType, true), StructField(\"pickup_ntacode\", StringType, true), StructField(\"pickup_ntaname\", StringType, true), StructField(\"pickup_puma\", StringType, true), StructField(\"dropoff_nyct2010_gid\", IntegerType, true), StructField(\"dropoff_ctlabel\", StringType, true), StructField(\"dropoff_borocode\", IntegerType, true), StructField(\"dropoff_boroname\", StringType, true), StructField(\"dropoff_ct2010\", IntegerType, true), StructField(\"dropoff_boroct2010\", StringType, true), StructField(\"dropoff_cdeligibil\", StringType, true), StructField(\"dropoff_ntacode\", StringType, true), StructField(\"dropoff_ntaname\", StringType, true), StructField(\"dropoff_puma\", StringType, true) )) val taxiDF = spark.read.format(\"csv\") .option(\"header\", \"false\") .option(\"delimiter\", \",\") .option(\"mode\", \"FAILFAST\") .schema(taxiSchema) .load(\"/nyc_taxi/csv/*.csv.gz\") 4. Create temp view for the dataframe %spark taxiDF.createOrReplaceTempView(\"trips\") 5. 
Transform the dataframe for Lightning DB %spark import org.apache.spark.sql.functions._ val deltaDf = taxiDF .filter($\"pickup_datetime\".isNotNull && $\"passenger_count\".isNotNull && $\"cab_type\".isNotNull) .withColumn(\"pickup_yyyyMMddhh\", from_unixtime(unix_timestamp($\"pickup_datetime\"), \"yyyyMMddhh\")) .withColumn(\"round_trip_distance\", round($\"trip_distance\")) deltaDf.printSchema() 6. Create temp view for Lightning DB with r2 options those support Lightning DB as the data source %spark val r2Options = Map[String, String](\"table\" -> \"100\", \"host\" -> \"192.168.111.35\", \"port\" -> \"18800\", \"partitions\" -> \"pickup_yyyyMMddhh passenger_count cab_type\", \"mode\" -> \"nvkvs\", \"rowstore\" -> \"false\", \"group_size\" -> \"40\", \"at_least_one_partition_enabled\" -> \"no\") spark.sqlContext.read.format(\"r2\").schema(deltaDf.schema).options(r2Options).load().createOrReplaceTempView(\"fb_trips\") 7. Load data from the dataframe into Lightning DB %spark deltaDf.write .format(\"r2\") .insertInto(\"fb_trips\") 8. Enable \u2018aggregation pushdown\u2019 feature SET spark.r2.aggregation.pushdown=true 9. Do \u2018NYC TAXI Benchmark\u2019 Q1 %sql SELECT cab_type, count(*) FROM fb_trips GROUP BY cab_type Q2 %sql SELECT passenger_count, avg(total_amount) FROM fb_trips GROUP BY passenger_count Q3 %sql SELECT passenger_count, substring(pickup_yyyyMMddhh, 1, 4), count(*) FROM fb_trips GROUP BY passenger_count, substring(pickup_yyyyMMddhh, 1, 4) Q4 %sql SELECT passenger_count, substring(pickup_yyyyMMddhh, 1, 4), round_trip_distance, count(*) FROM fb_trips GROUP BY 1, 2, 3 ORDER BY 2, 4 desc","title":"NYC TAXI Benchmark"}]} \ No newline at end of file diff --git a/search/worker.js b/search/worker.js new file mode 100644 index 0000000..8628dbc --- /dev/null +++ b/search/worker.js @@ -0,0 +1,133 @@ +var base_path = 'function' === typeof importScripts ? '.' 
: '/search/'; +var allowSearch = false; +var index; +var documents = {}; +var lang = ['en']; +var data; + +function getScript(script, callback) { + console.log('Loading script: ' + script); + $.getScript(base_path + script).done(function () { + callback(); + }).fail(function (jqxhr, settings, exception) { + console.log('Error: ' + exception); + }); +} + +function getScriptsInOrder(scripts, callback) { + if (scripts.length === 0) { + callback(); + return; + } + getScript(scripts[0], function() { + getScriptsInOrder(scripts.slice(1), callback); + }); +} + +function loadScripts(urls, callback) { + if( 'function' === typeof importScripts ) { + importScripts.apply(null, urls); + callback(); + } else { + getScriptsInOrder(urls, callback); + } +} + +function onJSONLoaded () { + data = JSON.parse(this.responseText); + var scriptsToLoad = ['lunr.js']; + if (data.config && data.config.lang && data.config.lang.length) { + lang = data.config.lang; + } + if (lang.length > 1 || lang[0] !== "en") { + scriptsToLoad.push('lunr.stemmer.support.js'); + if (lang.length > 1) { + scriptsToLoad.push('lunr.multi.js'); + } + if (lang.includes("ja") || lang.includes("jp")) { + scriptsToLoad.push('tinyseg.js'); + } + for (var i=0; i < lang.length; i++) { + if (lang[i] != 'en') { + scriptsToLoad.push(['lunr', lang[i], 'js'].join('.')); + } + } + } + loadScripts(scriptsToLoad, onScriptsLoaded); +} + +function onScriptsLoaded () { + console.log('All search scripts loaded, building Lunr index...'); + if (data.config && data.config.separator && data.config.separator.length) { + lunr.tokenizer.separator = new RegExp(data.config.separator); + } + + if (data.index) { + index = lunr.Index.load(data.index); + data.docs.forEach(function (doc) { + documents[doc.location] = doc; + }); + console.log('Lunr pre-built index loaded, search ready'); + } else { + index = lunr(function () { + if (lang.length === 1 && lang[0] !== "en" && lunr[lang[0]]) { + this.use(lunr[lang[0]]); + } else if (lang.length > 1) { + this.use(lunr.multiLanguage.apply(null, lang)); // spread operator not supported in all browsers: https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Operators/Spread_operator#Browser_compatibility + } + this.field('title'); + this.field('text'); + this.ref('location'); + + for (var i=0; i < data.docs.length; i++) { + var doc = data.docs[i]; + this.add(doc); + documents[doc.location] = doc; + } + }); + console.log('Lunr index built, search ready'); + } + allowSearch = true; + postMessage({config: data.config}); + postMessage({allowSearch: allowSearch}); +} + +function init () { + var oReq = new XMLHttpRequest(); + oReq.addEventListener("load", onJSONLoaded); + var index_path = base_path + '/search_index.json'; + if( 'function' === typeof importScripts ){ + index_path = 'search_index.json'; + } + oReq.open("GET", index_path); + oReq.send(); +} + +function search (query) { + if (!allowSearch) { + console.error('Assets for search still loading'); + return; + } + + var resultDocuments = []; + var results = index.search(query); + for (var i=0; i < results.length; i++){ + var result = results[i]; + doc = documents[result.ref]; + doc.summary = doc.text.substring(0, 200); + resultDocuments.push(doc); + } + return resultDocuments; +} + +if( 'function' === typeof importScripts ) { + onmessage = function (e) { + if (e.data.init) { + init(); + } else if (e.data.query) { + postMessage({ results: search(e.data.query) }); + } else { + console.error("Worker - Unrecognized message: " + e); + } + }; +} diff --git a/sitemap.xml 
b/sitemap.xml new file mode 100644 index 0000000..da0f847 --- /dev/null +++ b/sitemap.xml @@ -0,0 +1,138 @@ + + + + https://mnms.github.io/docs/ + 2024-04-03 + daily + + + https://mnms.github.io/docs/awards-recognition/ + 2024-04-03 + daily + + + https://mnms.github.io/docs/build-lightningdb-on-k8s/ + 2024-04-03 + daily + + + https://mnms.github.io/docs/cli-cli/ + 2024-04-03 + daily + + + https://mnms.github.io/docs/cli-cli2/ + 2024-04-03 + daily + + + https://mnms.github.io/docs/cli-cluster/ + 2024-04-03 + daily + + + https://mnms.github.io/docs/cli-conf/ + 2024-04-03 + daily + + + https://mnms.github.io/docs/cli-thriftserver/ + 2024-04-03 + daily + + + https://mnms.github.io/docs/cli-version/ + 2024-04-03 + daily + + + https://mnms.github.io/docs/command-line-interface/ + 2024-04-03 + daily + + + https://mnms.github.io/docs/data-ingestion-and-querying/ + 2024-04-03 + daily + + + https://mnms.github.io/docs/deploy-lightningdb-on-k8s/ + 2024-04-03 + daily + + + https://mnms.github.io/docs/get-started-with-scratch/ + 2024-04-03 + daily + + + https://mnms.github.io/docs/how-to-scaleout/ + 2024-04-03 + daily + + + https://mnms.github.io/docs/how-to-use-flashbase/ + 2024-04-03 + daily + + + https://mnms.github.io/docs/how-to-use-lightningdb-on-k8s/ + 2024-04-03 + daily + + + https://mnms.github.io/docs/install-ltcli/ + 2024-04-03 + daily + + + https://mnms.github.io/docs/kaetlyn/ + 2024-04-03 + daily + + + https://mnms.github.io/docs/manage-failover/ + 2024-04-03 + daily + + + https://mnms.github.io/docs/prerequisite/ + 2024-04-03 + daily + + + https://mnms.github.io/docs/release-note/ + 2024-04-03 + daily + + + https://mnms.github.io/docs/try-with-zeppelin/ + 2024-04-03 + daily + + + https://mnms.github.io/docs/tutorial/ + 2024-04-03 + daily + + + https://mnms.github.io/docs/version-update/ + 2024-04-03 + daily + + + https://mnms.github.io/docs/why-k8s/ + 2024-04-03 + daily + + + https://mnms.github.io/docs/zeppelin-example-face-data/ + 2024-04-03 + daily + + + https://mnms.github.io/docs/zeppelin-example-nyctaxi/ + 2024-04-03 + daily + + \ No newline at end of file diff --git a/sitemap.xml.gz b/sitemap.xml.gz new file mode 100644 index 0000000..beff640 Binary files /dev/null and b/sitemap.xml.gz differ diff --git a/try-with-zeppelin/index.html b/try-with-zeppelin/index.html new file mode 100644 index 0000000..61094cf --- /dev/null +++ b/try-with-zeppelin/index.html @@ -0,0 +1,930 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + Try out with Zeppelin - Lightning DB Docs + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

1. Setting for Zeppelin

+

You can try LightningDB in the Zeppelin notebook.

+

Firstly, deploy and start the LightningDB cluster by following Installation before launching the Zeppelin daemon.

+

Secondly, to run LightningDB on Spark, the jars shipped with LightningDB must be passed to Spark. When an EC2 instance is initialized, the environment variable ($SPARK_SUBMIT_OPTIONS) is configured for this purpose, so you only need to verify the setting in zeppelin-env.sh.

+
$ vim $ZEPPELIN_HOME/conf/zeppelin-env.sh 
+...
+LIGHTNINGDB_LIB_PATH=$(eval echo $(cat $FBPATH/config | head -n 1 | awk {'print $2'}))/cluster_$(cat $FBPATH/HEAD)/tsr2-assembly-1.0.0-SNAPSHOT/lib/
+if [[ -e $LIGHTNINGDB_LIB_PATH ]]; then
+    export SPARK_SUBMIT_OPTIONS="--jars $(find $LIGHTNINGDB_LIB_PATH -name 'tsr2*' -o -name 'spark-r2*' -o -name '*jedis*' -o -name 'commons*' -o -name 'jdeferred*' -o -name 'geospark*' -o -name 'gt-*' | tr '\n' ',')"
+fi
+...
+
+

Finally, start the Zeppelin daemon.

+
$ cd $ZEPPELIN_HOME/bin
+$ ./zeppelin-daemon.sh start
+
+

2. Tutorial with Zeppelin

+

After starting the Zeppelin daemon, you can access the Zeppelin UI with a web browser. The URL is https://your-server-ip:8080.

+
+

Tip

+

We recommend that you proceed with the tutorial in the Chrome browser.

+
+

There is a GitHub page for the tutorial.

+

The repository includes a tool for generating sample csv data and a notebook for the tutorial.

+

You can import the tutorial notebook with its URL.

+

https://raw.githubusercontent.com/mnms/tutorials/master/zeppelin-notebook/note.json

+

import notebook

+

The tutorial runs on the Spark interpreter of Zeppelin. Please make sure that the memory of the Spark driver is at least 10GB in the Spark interpreter settings.

+
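For reference, this is just the standard Spark driver-memory property set in the Spark interpreter; a minimal sketch of the relevant interpreter property (the 10g value is an assumption matching the guidance above, adjust it to your workload):
spark.driver.memory   10g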

spark driver memory

+

Also, make sure that the timeout of a shell command is at least 120000 ms.

+
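If you manage this through the sh interpreter properties, it would look roughly like the following (the property name assumes the stock Zeppelin shell interpreter; treat it as an assumption for your Zeppelin version):
shell.command.timeout.millisecs   120000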

Shell timeout

+ + + + + + + + + + + \ No newline at end of file diff --git a/tutorial/index.html b/tutorial/index.html new file mode 100644 index 0000000..4c74948 --- /dev/null +++ b/tutorial/index.html @@ -0,0 +1,1328 @@ + + + + + + + + + + + + + + + + + + + + + + + + Tutorial - Lightning DB Docs + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

Tutorial

+

1. About Lightning DB

+

Architecture & key features

+


+

Replication & failover/failback

+

+

+

2. LTCLI

+

Installation

+

Command Line

+

3. Ingestion

+

load.sh

+

Loading with 'tsr2-tools': set the delimiter with the '-s' option, then run.

+
#!/bin/bash
+if [ $# -ne 2 ];
+then
+    echo "Usage: load_data.sh data-directory json-file"
+    echo "(e.g.: load_data.sh ./data/split ./json/106.json)"
+    echo "Warning: delimiter is '|'"
+    exit
+fi
+tsr2-tools insert java_options "-Xms1g -Xmx32g" -d $1 -s "|" -t $2 -p 40 -c 1 -i
+
+
    +
  • How to use
  • +
+
[ltdb@d205 ~/tsr2-test]$ cat ./json/cell_nvkvs.json    // write the json file.
+{
+"endpoint" : "192.168.111.205:18600",
+"id" :"9999",
+"columns" : 219,
+"partitions" : [
+216, 218, 3, 4
+],
+"rowStore" : true             // 적재하는 클러스터에서 'row-store-enabled'가 동일하게 'true'인지 확인 필요. 반대의 경우 둘다 'false'로 설정하면 됨.
+}
+
+[ltdb@d205 ~/tsr2-test]$ ls -alh ./test_data_one/                   // check the data to load. Both a directory and a file path are allowed.
+total 8.7M
+drwxrwxr-x. 2 ltdb ltdb 50 2020-06-18 08:58:44 ./
+drwxrwxr-x. 7 ltdb ltdb 84 2020-06-18 08:58:28 ../
+-rw-rw-r--. 1 ltdb ltdb 8.7M 2020-06-18 08:58:44 ANALYSIS_CELL_ELG_20160711055950.dat
+
+[ltdb@d205 ~/tsr2-test]$ load.sh                                        // running 'load.sh' without arguments prints the usage information.
+Usage: load_data.sh data-directory json-file
+(e.g.: load_data.sh ./data/split ./json/106.json)
+Warning: delimiter is '|'
+[ltdb@d205 ~/tsr2-test]$ load.sh ./test_data_one/ ./json/cell_nvkvs.json
+/home/ltdb/tsr2-test/sbin/load.sh: line 9: tsr2-tools: command not found             // it fails because no cluster is selected; select the cluster with 'cfc'.
+
+[ltdb@d205 ~/tsr2-test]$ cfc 6
+[C:6][ltdb@d205 ~/tsr2-test]$ load.sh ./test_data_one/ ./json/cell_nvkvs.json       // start loading.
+SLF4J: Class path contains multiple SLF4J bindings.
+SLF4J: Found binding in [jar:file:/home/ltdb/tsr2/cluster_6/tsr2-assembly-1.0.0-SNAPSHOT/lib/logback-classic-1.2.3.jar!/org/slf4j/impl/StaticLoggerBinder.class]
+
+

kafka & kafkacat & tsr2-kaetlyn

+
    +
  • zookeeper/kafka
  • +
+

ZooKeeper and Kafka brokers must be installed first ==> see Kafka&Kaetlyn installation

+

The topics used fall broadly into the following three categories; a sketch of creating them follows the list below.

+
1. Topics for loading data
+    - Load each table into its own topic so that there is no dependency between tables
+2. Error topic
+    - Set 'KAFKA_ERROR_REPORT_TOPIC_NAME=topic-error' via 'tsr2-kaetlyn edit'
+    - When loading fails at the consuming stage, the error details are written to the error topic so that the loading client can check them
+3. Result topic (optional)
+    - After consuming, the loading result can be delivered to the loading app according to a defined protocol
+
+
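As referenced above, creating these topics is plain Kafka administration; a minimal sketch with the standard Kafka CLI (the topic names, partition counts, and ZooKeeper address are illustrative assumptions):
kafka-topics.sh --create --zookeeper localhost:2181 --replication-factor 2 --partitions 10 --topic nvkvs3
+kafka-topics.sh --create --zookeeper localhost:2181 --replication-factor 2 --partitions 1 --topic topic-error
+kafka-topics.sh --create --zookeeper localhost:2181 --replication-factor 2 --partitions 1 --topic topic-result
The kafka-utils.sh helper described later on this page can also be used, e.g. 'kafka-utils.sh topic-create nvkvs3 10'.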
    +
  • kafka consumer
  • +
+

Kafka&Kaetlyn 설치에서 가이드하고 있는 kaetlyn consumer를 사용하여 consumer를 설정.

+

The consumer settings must be edited via 'tsr2-kaetlyn edit'.

+

Basically, the parts commented with 'edit required' below must be reviewed and modified.

+
#!/bin/bash
+###############################################################################
+# Common variables
+SPARK_CONF=${SPARK_CONF:-$SPARK_HOME/conf}
+SPARK_BIN=${SPARK_BIN:-$SPARK_HOME/bin}
+SPARK_SBIN=${SPARK_SBIN:-$SPARK_HOME/sbin}
+SPARK_LOG=${SPARK_LOG:-$SPARK_HOME/logs}
+
+SPARK_METRICS=${SPARK_CONF}/metrics.properties
+SPARK_UI_PORT=${SPARK_UI_PORT:-14040}
+
+KAFKA_SERVER=localhost:9092
+
+###############################################################################
+# Properties for Consumer
+DRIVER_MEMORY=1g                               // edit required
+
+EXECUTOR_MEMORY=1g                               // edit required
+EXECUTORS=12                               // edit required
+EXECUTOR_CORES=12                               // edit required
+
+[[JSON_PATH]]=~/Flashbase/flashbase-benchmark/json/load_no_skew
+JSON_PATH=/home/ltdb/tsr2-test/json                               // edit required; restart kaetlyn when the json files are updated!
+[[HIVE_METASTORE_URI]]=thrift://localhost:9083
+HIVE_METASTORE_URI=''                               // edit required
+KAFKA_CONSUMER_GROUP_ID=nvkvs_redis_connector                               // edit required
+KAFKA_CONSUMING_TOPIC_LIST=nvkvs3                               // edit required
+JOB_GENERATION_PERIOD=1
+MAX_RATE_PER_PARTITION=20000
+KAFKA_ERROR_REPORT_TOPIC_NAME=topic-error
+TEST_MODE=false
+EXCUTOR_LOCALITY=false
+
+
    +
  • kafka producer: by default, Kafka producing can be done as follows
  • +
+
kafka-console-producer.sh --broker-list localhost:9092 --topic {topic name} < {filename to load}
+
+

However, for kaetlyn loading, each message must include the following header information:

+
    +
  • TABLE_ID
  • +
  • SEPARATOR
  • +
+

Therefore, you need to produce with the header information using a tool called kafkacat (see https://docs.confluent.io/3.3.0/app-development/kafkacat-usage.html# for reference).

+
Install a C++ compiler
+
+$ yum install gcc-c++
+
+
+
+$ git clone https://github.com/edenhill/librdkafka
+
+
+
+$ cd librdkafka
+
+$ ./configure
+
+$ make
+
+$ sudo make install
+
+
+
+ Move to /usr/local/lib, then run the following commands.
+
+
+
+$ git clone https://github.com/edenhill/kafkacat
+
+
+
+$ cd kafkacat
+
+$ ./configure
+
+$ make
+
+$ sudo make install
+
+
+
+If the lib files cannot be found:
+
+$ ldd kafkacat
+
+
+
+Create the following file and add the line below: /etc/ld.so.conf.d/usrlocal.conf
+
+Contents:
+
+/usr/local/lib
+
+
+
+After saving, run the command below
+
+$ ldconfig -v
+
+
+
+$ kafkacat
+
+If the kafkacat usage message is printed, the installation succeeded
+
+

Once kafkacat is installed properly, you can produce messages as follows.

+

1. When loading a single file (see the concrete example after the command below),

+
kafkacat -b localhost:9092 -t {topic name} -T -P -H TABLE_ID='{table id}' -H  SEPARATOR='|' -l {filename to load}
+
+
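For example, with the illustrative values from the load.sh walkthrough above (table id 9999 from cell_nvkvs.json, the sample .dat file, and the nvkvs3 topic; all of these are assumptions to adapt to your environment):
kafkacat -b localhost:9092 -t nvkvs3 -T -P -H TABLE_ID='9999' -H SEPARATOR='|' -l ./test_data_one/ANALYSIS_CELL_ELG_20160711055950.dat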

2. When loading all files in a directory, move to that directory first, then run:

+
ls | xargs -n 1 kafkacat -q -b localhost:9092 -t {topic name} -P -H TABLE_ID='{table id}' -H  SEPARATOR='|' -l
+
+

** The basic guide is in Kafka&Kaetlyn installation, so refer to that page to understand the concepts. For more convenient use, however, kafka-utils.sh is provided, so use kafka-utils.sh in day-to-day operation.

+

'kafka-utils.sh' resides in each cluster's sbin directory, so it can be used after selecting the cluster with 'cfc'.

+
[C:6][ltdb@d205 ~]$ which kafka-utils.sh
+~/tsr2/cluster_6/tsr2-assembly-1.0.0-SNAPSHOT/sbin/kafka-utils.sh
+
+

As shown below, the script does not run unless 'CONSUMER_GROUP_ID' is set,

+
[C:6][ltdb@d205 ~]$ kafka-utils.sh help
+Please, set $CONSUMER_GROUP_ID first.
+
+

so you need to open 'kafka-utils.sh' and edit it as shown below.

+
#!/bin/bash
+
+CONSUMER_GROUP_ID='nvkvs_redis_connector'  // edit required
+KAFKA_SERVER=localhost:9092
+ZOOKEEPER_SERVER=localhost:2181...
+
+

You can list the available commands with 'help'.

+
[C:6][ltdb@d205 ~/kafka/config]$ kafka-utils.sh help
+kafka-utils.sh offset-check
+kafka-utils.sh offset-monitor
+kafka-utils.sh offset-earliest topic_name
+kafka-utils.sh offset-latest topic_name
+kafka-utils.sh offset-move topic_name 10000
+kafka-utils.sh error-monitor error_topic_name
+kafka-utils.sh consumer-list
+kafka-utils.sh topic-check topic_name
+kafka-utils.sh topic-create topic_name 10
+kafka-utils.sh topic-delete topic_name
+kafka-utils.sh topic-config-check topic_name
+kafka-utils.sh topic-config-set topic_name config_name config_value
+kafka-utils.sh topic-config-remove topic_name config_name
+kafka-utils.sh topic-list
+kafka-utils.sh message-earliest topic_name
+kafka-utils.sh message-latest topic_name
+
+

If a command requires arguments and you run it without them, a usage message like the following is printed.

+
[C:6][ltdb@d205 ~/kafka/config]$ kafka-utils.sh offset-move 
+Please, specify topic name & the size of moving offset (ex) kafka-utils.sh offset-move my-topic 100
+[C:6][ltdb@d205 ~/kafka/config]$ kafka-utils.sh topic-create
+Please, specify topic name and its partition count. (ex) kafka-utils.sh topic-create topic-new 10
+[C:6][ltdb@d205 ~/kafka/config]$
+
+

Usage examples:

+
[C:6][ltdb@d205 ~]$ kafka-utils.sh message-earliest nvkvs3
+20160711055950|ELG|2635055200|34317|5|6091|1|25|0|11|0|100.0|0.0|0|2846|3|33|0|5|0|-1000|0.0|0.0|94932|1027|0|176|35.2|40|0|7818000000|109816071|10|0|6000000.0|164843|2.75|0|2592|6000000|0.04|1288488|1303|1338|0|530|1|88.33|0|721|67948|428|0|1|108|108.0|108|0|0.0|0|0|0|-1000|1|1|100.0|62|39.0|62.9|23.0|37.1|0|0|0|0|29|10|-7022851.0|59998.0|-117.05|-6865443.5|59998.0|-114.43|4|198060.0|59998.0|22.5|3.3|0|1|5.82|3|1.94||0|0|0|0|0|0|0|0|4|0|0|0|15|14|231|140|0|0|0|0|0|0|0|0|4|0|0|0|15|13|174|110|1|0|0|0|0|0|0|0|0|0|0|0|0|0|1|0|0|0|0|0|0|0|1|0|0|0|0|0|0|0|0|0|0.0|0.0|0.0|0.0|0.0|0.0|570.0|0.0|3.0|0.0|0.0|0.0|0.0|2.0|3.0|3.0|0.0|15.73|0.0|0.0|0.0|0.0|0.0|12.0|22.0|68.0|83.0|339.0|205.0|144.0|54.0|38.0|12.0|0.0|0.0|0.0|0.0|0.0|0.0|100.0|50.55|1:22,2:7|1.0|||||1:1,17:1,23:1|13.67|0|0|0.0|0.0|-1000||-1000||-1000|11|2|05
+Processed a total of 1 messages
+
+
+[C:6][ltdb@d205 ~]$ kafka-utils.sh topic-list
+__consumer_offsets
+nvkvs3
+topic-error
+topic_name
+
+
+[C:6][ltdb@d205 ~]$ kafka-utils.sh topic-create ksh 18
+Created topic ksh.
+
+
+[C:6][ltdb@d205 ~]$ kafka-utils.sh topic-check  ksh
+Topic:ksh   PartitionCount:18   ReplicationFactor:2 Configs:
+    Topic: ksh  Partition: 0    Leader: 1   Replicas: 1,3   Isr: 1,3
+    Topic: ksh  Partition: 1    Leader: 2   Replicas: 2,1   Isr: 2,1
+    Topic: ksh  Partition: 2    Leader: 3   Replicas: 3,2   Isr: 3,2
+    Topic: ksh  Partition: 3    Leader: 1   Replicas: 1,2   Isr: 1,2
+    Topic: ksh  Partition: 4    Leader: 2   Replicas: 2,3   Isr: 2,3
+    Topic: ksh  Partition: 5    Leader: 3   Replicas: 3,1   Isr: 3,1
+    Topic: ksh  Partition: 6    Leader: 1   Replicas: 1,3   Isr: 1,3
+    Topic: ksh  Partition: 7    Leader: 2   Replicas: 2,1   Isr: 2,1
+    Topic: ksh  Partition: 8    Leader: 3   Replicas: 3,2   Isr: 3,2
+    Topic: ksh  Partition: 9    Leader: 1   Replicas: 1,2   Isr: 1,2
+    Topic: ksh  Partition: 10   Leader: 2   Replicas: 2,3   Isr: 2,3
+    Topic: ksh  Partition: 11   Leader: 3   Replicas: 3,1   Isr: 3,1
+    Topic: ksh  Partition: 12   Leader: 1   Replicas: 1,3   Isr: 1,3
+    Topic: ksh  Partition: 13   Leader: 2   Replicas: 2,1   Isr: 2,1
+    Topic: ksh  Partition: 14   Leader: 3   Replicas: 3,2   Isr: 3,2
+    Topic: ksh  Partition: 15   Leader: 1   Replicas: 1,2   Isr: 1,2
+    Topic: ksh  Partition: 16   Leader: 2   Replicas: 2,3   Isr: 2,3
+    Topic: ksh  Partition: 17   Leader: 3   Replicas: 3,1   Isr: 3,1
+
+

4. Query

+

thriftserver

+

You can run queries using the thriftserver and manage metadata using the hive-metastore.

+

Each cluster has its own thriftserver, and you can launch it on a specific cluster.

+
> cfc 6
+> which thriftserver
+~/tsr2/cluster_6/tsr2-assembly-1.0.0-SNAPSHOT/sbin/thriftserver
+
+

Before starting it, change the settings via 'thriftserver edit'.

+
#!/bin/bash
+###############################################################################
+# Common variables
+SPARK_CONF=${SPARK_CONF:-$SPARK_HOME/conf}
+SPARK_BIN=${SPARK_BIN:-$SPARK_HOME/bin}
+SPARK_SBIN=${SPARK_SBIN:-$SPARK_HOME/sbin}
+SPARK_LOG=${SPARK_LOG:-$SPARK_HOME/logs}
+
+SPARK_METRICS=${SPARK_CONF}/metrics.properties
+SPARK_UI_PORT=${SPARK_UI_PORT:-14050}
+EXECUTERS=12       // edit required
+EXECUTER_CORES=32     // edit required
+
+HIVE_METASTORE_URL=''
+HIVE_HOST=${HIVE_HOST:-localhost}
+HIVE_PORT=${HIVE_PORT:-13000}
+
+COMMON_CLASSPATH=$(find $SR2_LIB -name 'tsr2*' -o -name 'spark-r2*' -o -name '*jedis*' -o -name 'commons*' -o -name 'jdeferred*' \
+-o -name 'geospark*' -o -name 'gt-*' | tr '\n' ':')
+
+###############################################################################
+# Driver
+DRIVER_MEMORY=6g      // edit required
+DRIVER_CLASSPATH=$COMMON_CLASSPATH
+
+###############################################################################
+# Execute
+EXECUTOR_MEMORY=2g      // edit required
+EXECUTOR_CLASSPATH=$COMMON_CLASSPATH
+
+###############################################################################
+# Thrift Server logs
+EVENT_LOG_ENABLED=false
+EVENT_LOG_DIR=/nvdrive0/thriftserver-event-logs
+EVENT_LOG_ROLLING_DIR=/nvdrive0/thriftserver-event-logs-rolling
+EVENT_LOG_SAVE_MIN=60
+EXTRACTED_EVENT_LOG_SAVE_DAY=5
+SPARK_LOG_SAVE_MIN=2000
+##############
+
+
+
    +
  • How to start
  • +
+
thriftserver start
+
+
    +
  • How to stop
  • +
+
thriftserver stop
+
+
    +
  • Check the YARN application
  • +
+
> yarn application -list
+20/06/25 17:04:35 INFO client.RMProxy: Connecting to ResourceManager at d205/192.168.111.205:18032
+Total number of applications (application-types: [] and states: [SUBMITTED, ACCEPTED, RUNNING]):1
+                Application-Id      Application-Name        Application-Type          User       Queue               State         Final-State         Progress                        Tracking-URL
+application_1592880218288_0002   ThriftServer_d205_6                   SPARK          ltdb     default             RUNNING           UNDEFINED              10%                   http://d205:14050
+
+
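The JDBC sessions below were captured against this thriftserver; one minimal way to open such a session is the beeline client shipped with Spark/Hive (assuming the default HIVE_PORT of 13000 from the configuration above):
beeline -u jdbc:hive2://localhost:13000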
    +
  • Check the table properties
  • +
+
0: jdbc:hive2://0.0.0.0:13000> show tables;
++-----------+-----------------------------------------------+--------------+--+
+| database | tableName | isTemporary |
++-----------+-----------------------------------------------+--------------+--+
+| default | aua_adong_cd | false |
+| default | aua_aom_log | false |
+| default | aua_cell_by10sec_sum | false |
+| default | aua_cell_by5min_sum | false |
+| default | aua_cell_cfg_inf | false |
+| default | aua_enb_nsn_ho_log | false |
+| default | aua_enb_nsn_rrc_log | false |
+| default | aua_enb_ss_csl_log | false |
+| default | aua_ra_cscore_area_5min_sum | false |
+| default | aua_ue_rf_sum | false |
+| | aua_aom_log_6fbb17bb9718a46306ec7a9766464813 | true |
++-----------+-----------------------------------------------+--------------+--+
+11 rows selected (0.045 seconds)
+
+0: jdbc:hive2://0.0.0.0:13000> show tblproperties aua_ue_rf_sum;
++---------------------------------------------+-------------------------------------------------------------------------------------------------+--+
+| key | value |
++---------------------------------------------+-------------------------------------------------------------------------------------------------+--+
+| transient_lastDdlTime | 1581872791 |
+| fb.load.kafka.bootstrap.servers | 90.90.200.182:9092,90.90.200.183:9092,90.90.200.184:9092,90.90.200.185:9092,90.90.200.186:9092 |
+| fb.transformation.column.add.IMSI_KEY | ${IMSI_NO}.length() >= 2 && ${IMSI_NO}.substring(0, 2).equals("T1") ? "T1" : "O" |
+| fb.transformation.column.add.IMSI_HASH_KEY | fnvHash(${IMSI_NO}, 5) |
+| fb.load.kafka.producer.compression.type | zstd |
+| fb.transformation.column.add.EVENT_TIME | ${EVT_DTM}.length() < 12 ? "000000000000" : ${EVT_DTM}.substring(0, 11).concat("0") |
+| fb.load.kafka.topic.name | topic-tango-dev |
+| Comment | 단말별 분석 결과 |
+| fb.load.kafka.producer.max.request.size | 1048576 |
++---------------------------------------------+-------------------------------------------------------------------------------------------------+--+
+9 rows selected (0.1 seconds)
+0: jdbc:hive2://0.0.0.0:13000>
+
+0: jdbc:hive2://0.0.0.0:13000> show create table aua_ue_rf_sum;
++-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+--+
+| createtab_stmt |
++-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+--+
+| CREATE TABLE `aua_ue_rf_sum` (`EVT_DTM` STRING, `VEND_ID` STRING, `ADONG_CD` STRING, `ENB_ID` STRING, `CELL_ID` STRING, `ENB_UE_S1AP_ID` STRING, `MME_UE_S1AP_ID` STRING, `IMSI_NO` STRING, `EVT_ID` STRING, `CALL_RESULT_CODE` STRING, `CALL_RESULT_MSG` STRING, `FREQ_TYP_CD` STRING, `CQI` STRING, `TA` STRING, `RSRP` STRING, `RSRQ` STRING, `DL_PACKET_LOSS_SUCC_CNT` STRING, `DL_PACKET_LOSS_LOST_CNT` STRING, `DL_PACKET_LOSS_RATE` STRING, `SINR_PUSCH` STRING, `SINR_PUCCH` STRING, `UE_TX_POWER` STRING, `PHR` STRING, `UL_PACKET_LOSS_SUCC_CNT` STRING, `UL_PACKET_LOSS_LOST_CNT` STRING, `UL_PACKET_LOSS_RATE` STRING, `RRC_LATENCY` STRING, `HO_LATENCY` STRING, `RRE_LATENCY` STRING, `DL_NO_RTP` STRING, `UL_NO_RTP` STRING, `ERAB_LATENCY` STRING, `RRC_ERAB_LATENCY` STRING, `EVENT_TIME` STRING, `IMSI_KEY` STRING, `IMSI_HASH_KEY` STRING, `UE_ENDC_STAGE` STRING)
+USING r2
+OPTIONS (
+`query_result_partition_cnt_limit` '2000000',
+`query_response_timeout` '1200000',
+`query_result_task_row_cnt_limit` '1000000',
+`host` '90.90.200.187',
+`serialization.format` '1',
+`query_result_total_row_cnt_limit` '2147483647',
+`group_size` '10',
+`port` '18600',
+`mode` 'nvkvs',
+`partitions` 'EVENT_TIME ENB_ID IMSI_KEY IMSI_HASH_KEY',
+`second_filter_enabled` 'no',
+`table` '6010'
+)
+|
++-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+--+
+1 row selected (0.027 seconds)
+
+0: jdbc:hive2://0.0.0.0:13000> select event_time, enb_id, vend_id from aua_ue_rf_sum limit 3;
+Error: com.skt.spark.r2.exception.UnsupportedQueryTypeException: at least one partition column should be included in where predicates; caused by 6010 table. You must check to exist partition column(s) of this table or mistype condition of partition column(s) in where clause. : EVENT_TIME,ENB_ID,IMSI_KEY,IMSI_HASH_KEY (state=,code=0)
+
+0: jdbc:hive2://0.0.0.0:13000> select event_time, enb_id, vend_id from aua_ue_rf_sum where event_time='202006250000' limit 3;
++---------------+---------------------------+----------+--+
+| event_time | enb_id | vend_id |
++---------------+---------------------------+----------+--+
+| 202006250000 | +F/Ca+X5UZSATNF0zTR9kA== | ELG |
+| 202006250000 | +F/Ca+X5UZSATNF0zTR9kA== | ELG |
+| 202006250000 | +F/Ca+X5UZSATNF0zTR9kA== | ELG |
++---------------+---------------------------+----------+--+
+3 rows selected (0.149 seconds)
+0: jdbc:hive2://0.0.0.0:13000>
+
+

5. Zeppelin

+

Before starting the Zeppelin tutorial, please make sure that Zeppelin is configured to include the Lightning DB related JAR files.

+

Download the tutorial note file as shown below, or enter the URL below to import it into Zeppelin.

+
  • Tutorial note link: https://docs.lightningdb.io/scripts/tutorial_ko.json

Import Zeppelin note

\ No newline at end of file
diff --git a/version-update/index.html b/version-update/index.html
new file mode 100644
index 0000000..71bfa6e
--- /dev/null
+++ b/version-update/index.html
@@ -0,0 +1,955 @@
+Version update - Lightning DB Docs

You can update Lightning DB by using the 'deploy' command.

+
> c 1 // alias of 'cluster use 1'
+> deploy
+(Watch out) Cluster 1 is already deployed. Do you want to deploy again? (y/n) [n]
+y
+
+

(1) Select installer

+
Select installer
+
+    [ INSTALLER LIST ]
+    (1) lightningdb.release.master.5a6a38.bin
+    (2) lightningdb.trial.master.dbcb9e-dirty.bin
+    (3) lightningdb.trial.master.dbcb9e.bin
+
+Please enter the number, file path or URL of the installer you want to use.
+you can also add a file in list by copy to '$FBPATH/releases/'
+1
+OK, lightningdb.release.master.5a6a38.bin
+
+

(2) Restore

+
Do you want to restore conf? (y/n)
+y
+
+

If you want to reuse the current settings, type 'y'.

+

(3) Check all settings finally

+
+-----------------+---------------------------------------------------+
+| NAME            | VALUE                                             |
++-----------------+---------------------------------------------------+
+| installer       | lightningdb.release.master.5a6a38.bin             |
+| nodes           | nodeA                                             |
+|                 | nodeB                                             |
+|                 | nodeC                                             |
+|                 | nodeD                                             |
+| master ports    | 18100                                             |
+| slave ports     | 18150-18151                                       |
+| ssd count       | 3                                                 |
+| redis data path | ~/sata_ssd/ssd_                                   |
+| redis db path   | ~/sata_ssd/ssd_                                   |
+| flash db path   | ~/sata_ssd/ssd_                                   |
++-----------------+---------------------------------------------------+
+Do you want to proceed with the deploy accroding to the above information? (y/n)
+y
+Check status of hosts...
++-----------+--------+
+| HOST      | STATUS |
++-----------+--------+
+| nodeA     | OK     |
+| nodeB     | OK     |
+| nodeC     | OK     |
+| nodeD     | OK     |
++-----------+--------+
+Checking for cluster exist...
++------+--------+
+| HOST | STATUS |
++------+--------+
+Backup conf of cluster 1...
+OK, cluster_1_conf_bak_<time-stamp>
+Backup info of cluster 1 at nodeA...
+OK, cluster_1_bak_<time-stamp>
+Backup info of cluster 1 at nodeB...
+OK, cluster_1_bak_<time-stamp>
+Backup info of cluster 1 at nodeC...
+OK, cluster_1_bak_<time-stamp>
+Backup info of cluster 1 at nodeD...
+OK, cluster_1_bak_<time-stamp>
+Transfer installer and execute...
+ - nodeA
+ - nodeB
+ - nodeC
+ - nodeD
+Sync conf...
+Complete to deploy cluster 1.
+Cluster 1 selected.
+
+
  • Backup path of cluster: ${base-directory}/backup/cluster_${cluster-id}_bak_${time-stamp}
  • Backup path of conf files: $FBPATH/conf_backup/cluster_${cluster-id}_conf_bak_${time-stamp}
+

(4) Restart

+
> cluster restart
+
+

After the restart, the new version will be applied.

\ No newline at end of file
diff --git a/why-k8s/index.html b/why-k8s/index.html
new file mode 100644
index 0000000..aa2bc21
--- /dev/null
+++ b/why-k8s/index.html
@@ -0,0 +1,967 @@
+Why Kubernetes? - Lightning DB Docs

Why Kubernetes?

+

Legacy system

+
  • As we work with multiple companies and organizations, deployment environments have diversified (cloud, Kubernetes, on-premise, etc.) and OS requirements have diversified as well, so we need to be able to respond quickly.
+

curr-build-env

+

On Kubernetes

+
  • We built this environment on Kubernetes so that versions can be built in a consistently optimized environment, regardless of the target deployment environment.
+

deployment-env

\ No newline at end of file
diff --git a/zeppelin-example-face-data/index.html b/zeppelin-example-face-data/index.html
new file mode 100644
index 0000000..5f72f77
--- /dev/null
+++ b/zeppelin-example-face-data/index.html
@@ -0,0 +1,965 @@
+KNN SEARCH - Lightning DB Docs

In Zeppelin, you can import the face data note with the link below.

+

KNN_SEARCH_TEST.json

+

1. Put csv files into HDFS

+
hdfs dfs -mkdir /face_data
+
+hdfs dfs -put /face_data/csv/* /face_data
+
+

2. Load face data

+
%spark
+import org.apache.spark.sql.functions._
+import org.apache.spark.sql.r2.UDF.R2UDFs
+
+val toFloatArray = udf(R2UDFs.toFloatArray)
+val fnvHash = udf(R2UDFs.fnvHash)
+
+R2UDFs.register()
+val r2Option = Map("table" -> "300",
+      "host" -> "d201",
+      "port" -> "18400",
+      "partitions" -> "nvhash",
+      "mode" -> "nvkvs",
+      "at_least_one_partition_enabled" -> "no")
+
+spark.sqlContext.read.format("csv")
+  .option("sep", "|")
+  .option("header", "false")
+  .option("inferSchema", "true")
+  .load("/face_data/*.csv")  // the csv files were put into /face_data above
+  .withColumnRenamed("_c0", "group")
+  .withColumnRenamed("_c1", "subgroup")
+  .withColumnRenamed("_c2", "subject")
+  .withColumnRenamed("_c3", "face")
+  .withColumnRenamed("_c4", "raw_feature")
+  .withColumn("feature", toFloatArray(split(col("raw_feature"), ",")))
+  .select("group", "subgroup", "subject", "face", "feature")
+  .withColumn("nvhash", fnvHash(col("face"), functions.lit(30)))
+  .write
+  .format("r2")
+  .options(r2Options)
+  .mode("append")
+  .save()
+
+

3. Create table

+
%spark
+import org.apache.spark.sql.types._
+import org.apache.spark.sql.r2.UDF.R2UDFs
+
+val fields = "group_id,app_id,subject_id,face_id,feature,nvhash".split(",").map(
+  fieldName => {
+    if ("feature".equals(fieldName)) {
+      StructField(fieldName, ArrayType(FloatType))
+    } else {
+      StructField(fieldName, StringType)
+    }
+  }
+)
+val faceTableSchema = StructType(fields)
+
+
+spark.sqlContext.read.format("r2")
+  .schema(faceTableSchema)
+  .options(r2Option)
+  .load()
+  .createOrReplaceTempView("face_1m")
+
+
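To confirm the view is wired up as expected before querying it, you can print its schema. This is a small sketch, not part of the tutorial note.

+
%spark
+// Sketch: the face_1m view should expose group_id/app_id/subject_id/face_id as strings,
+// feature as array<float>, and nvhash as string.
+spark.table("face_1m").printSchema()
+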

4. Enable ‘KNN SEARCH PUSHDOWN’ feature

+
%sql
+SET spark.r2.knn.pushdown=true
+
+
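If you prefer to stay in a Scala paragraph, the same session property can presumably also be set through spark.conf (a sketch, assuming the property name used in the %sql cell above).

+
%spark
+// Sketch: equivalent of the %sql SET above for the current session.
+spark.conf.set("spark.r2.knn.pushdown", "true")
+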

5. KNN search using Lightning DB's cosineDistance UDF

+
%sql
+SELECT group_id, app_id, subject_id, face_id, cosineDistance(feature, toFloatArray(array(0.04074662,0.07717144,-0.01387950,0.01287790,0.04414229,0.03390900,0.03808868,0.03956917,0.00592308,-0.00935156,0.04414903,-0.01830893,-0.01918902,0.00154574,-0.02383651,-0.01054291,0.12655860,0.02075430,0.10315673,0.01371782,0.01522089,0.04304991,0.03376650,0.06703991,0.03827063,-0.00063873,0.02516229,0.07061137,0.08456459,-0.04103030,0.03004974,0.00297395,0.00295535,0.01112351,0.02805021,0.04350155,-0.00448326,0.04780317,0.10815978,-0.01784242,0.03320745,0.02912348,0.00183310,0.05318154,0.00922967,-0.04507693,0.01333585,0.00048346,-0.04612860,0.00427735,0.01232839,-0.00100568,0.03865110,0.01765136,-0.00942758,0.02383475,-0.01068696,0.08959154,0.08527020,0.03379998,-0.03852739,0.00607160,0.01309861,-0.01262910,0.00418265,0.03614477,-0.02971224,0.03703139,0.04333942,-0.03143747,0.06752674,-0.02173617,0.03583429,0.07731125,-0.02637132,-0.00395790,-0.04447101,0.03351297,0.08787052,0.00647665,0.03660145,-0.00640601,-0.01004024,0.00763107,0.04762240,-0.00068071,0.00863653,0.06126453,0.04588475,-0.03891076,0.07472295,0.02470182,0.08828569,0.01660202,0.02419317,0.09363404,0.05495216,0.01202847,0.00120040,-0.02136896,0.03100559,0.07371868,0.00986731,0.03934553,0.01289396,0.04705510,-0.02040175,0.01501585,0.00678832,0.03882410,0.02261387,0.02165552,-0.05097445,0.00240807,-0.04210005,0.00760698,-0.02904095,0.06572093,0.03549200,0.06070529,0.06948626,0.02832109,0.01669887,0.00914011,-0.00024529,-0.00124402,0.06481186,0.08246713,0.07499877,0.13112830,0.01034968,0.04224777,0.01175614,0.07395388,0.04937797,0.01561183,-0.03251049,0.05449009,0.04767901,-0.01149555,-0.02055555,-0.05990825,0.06633005,0.07592525,-0.04504610,0.03348448,0.04178635,0.01327751,0.02208084,0.08780535,-0.00799043,0.02236966,0.01560906,0.01171102,0.00814554,-0.00257578,0.08387835,-0.01018093,-0.02170087,0.03230520,0.00955880,-0.01824543,0.05438962,0.01805668,0.02112979,-0.01372666,-0.01057472,0.05453142,0.03742066,0.05534794,0.00977020,0.01991821,-0.00884413,0.09644359,0.02875310,0.10519003,0.05280351,-0.01918891,0.03197290,0.02722778,0.03450845,0.02669794,0.08618007,0.09387484,0.05103674,-0.01431658,0.00783211,-0.00434245,0.02062620,-0.00611403,0.06696083,0.01333337,-0.00156842,0.04325287,-0.05481976,0.01642864,-0.02679648,-0.00642413,0.03808333,0.06134293,0.06049823,0.03818581,0.03599750,-0.01651556,0.06601544,0.01385061,0.00730943,0.03045858,-0.00200028,0.04009718,0.04393080,-0.02568381,-0.01271287,-0.01860873,0.03669106,0.00154059,-0.04202117,0.07374570,-0.00380450,0.03164477,0.00637422,-0.02361638,0.01918917,0.01680134,0.01346881,0.02424449,-0.00504802,-0.06241146,0.08241857,0.02817723,0.02132487,0.08051144,0.06332499,0.02585857,-0.04057337,0.00279212,-0.00005161,-0.06566417,0.07860317,-0.01276221,0.06822366,-0.00191142,0.08534018,0.06014366,0.07053877,-0.01962799,0.08602677,-0.00817098,0.00302233,-0.10041475,-0.01908947,0.03235617,0.00931559,0.05451865,0.02233902,-0.01173994))) AS distance
+FROM face_1m
+ORDER BY distance DESC
+LIMIT 20
+
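The same query can also be expressed through the DataFrame API. The paragraph below is only a sketch under the assumption that the cosineDistance UDF registered by R2UDFs.register() is callable by that name; the query vector is truncated to a placeholder, so substitute the full feature vector before running it.

+
%spark
+// Sketch (assumption, not part of the tutorial note): DataFrame-API form of the KNN query above.
+// The query vector is truncated to its first three components as a placeholder.
+import org.apache.spark.sql.functions.{callUDF, col, typedLit}
+
+val queryVec = typedLit(Seq(0.04074662f, 0.07717144f, -0.01387950f /* ...rest of the feature vector... */))
+
+spark.table("face_1m")
+  .withColumn("distance", callUDF("cosineDistance", col("feature"), queryVec))
+  .select("group_id", "app_id", "subject_id", "face_id", "distance")
+  .orderBy(col("distance").desc)
+  .limit(20)
+  .show()
+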
\ No newline at end of file
diff --git a/zeppelin-example-nyctaxi/index.html b/zeppelin-example-nyctaxi/index.html
new file mode 100644
index 0000000..e591773
--- /dev/null
+++ b/zeppelin-example-nyctaxi/index.html
@@ -0,0 +1,1037 @@
+NYC TAXI Benchmark - Lightning DB Docs

In Zeppelin, you can import the NYC TAXI Benchmark note with the link below.

+

NYC_TAXI_BM_load_and_query.json

+

1. Put csv files into HDFS

+
%sh
+hdfs dfs -mkdir /nyc_taxi
+
+hdfs dfs -mkdir /nyc_taxi/csv
+
+hdfs dfs -put /nvme_ssd/nyc_taxi/csv_gz/csv1/* /nyc_taxi/csv
+hdfs dfs -put /nvme_ssd/nyc_taxi/csv_gz/csv2/* /nyc_taxi/csv
+hdfs dfs -put /nvme_ssd/nyc_taxi/csv_gz/csv3/* /nyc_taxi/csv
+hdfs dfs -put /nvme_ssd/nyc_taxi/csv_gz/csv4/* /nyc_taxi/csv
+hdfs dfs -put /nvme_ssd/nyc_taxi/csv_gz/csv5/* /nyc_taxi/csv
+hdfs dfs -put /nvme_ssd/nyc_taxi/csv_gz/csv6/* /nyc_taxi/csv
+
+

2. Create dataframe and load data

+
%spark
+
+import org.apache.spark.sql.types._
+
+val taxiSchema = StructType(Array(
+        StructField("trip_id", IntegerType, true),
+        StructField("vendor_id", StringType, true),
+        StructField("pickup_datetime", TimestampType, true),
+        StructField("dropoff_datetime", TimestampType, true),
+        StructField("store_and_fwd_flag", StringType, true),
+        StructField("rate_code_id", IntegerType, true),
+        StructField("pickup_longitude", DoubleType, true),
+        StructField("pickup_latitude", DoubleType, true),
+        StructField("dropoff_longitude", DoubleType, true),
+        StructField("dropoff_latitude", DoubleType, true),
+        StructField("passenger_count", StringType, true),
+        StructField("trip_distance", DoubleType, true),
+        StructField("fare_amount", DoubleType, true),
+        StructField("extra", DoubleType, true),
+        StructField("mta_tax", DoubleType, true),
+        StructField("tip_amount", DoubleType, true),
+        StructField("tolls_amount", DoubleType, true),
+        StructField("improvement_surcharge", DoubleType, true),
+        StructField("total_amount", DoubleType, true),
+        StructField("payment_type", StringType, true),
+        StructField("trip_type", IntegerType, true),
+        StructField("cab_type", StringType, true),
+        StructField("precipitation", DoubleType, true),
+        StructField("snow_depth", DoubleType, true),
+        StructField("snowfall", DoubleType, true),
+        StructField("max_temperature", IntegerType, true),
+        StructField("min_temperature", IntegerType, true),
+        StructField("average_wind_speed", DoubleType, true),
+        StructField("pickup_nyct2010_gid", IntegerType, true),
+        StructField("pickup_ctlabel", StringType, true),
+        StructField("pickup_borocode", IntegerType, true),
+        StructField("pickup_boroname", StringType, true),
+        StructField("pickup_ct2010", StringType, true),
+        StructField("pickup_boroct2010", StringType, true),
+        StructField("pickup_cdeligibil", StringType, true),
+        StructField("pickup_ntacode", StringType, true),
+        StructField("pickup_ntaname", StringType, true),
+        StructField("pickup_puma", StringType, true),
+        StructField("dropoff_nyct2010_gid", IntegerType, true),
+        StructField("dropoff_ctlabel", StringType, true),
+        StructField("dropoff_borocode", IntegerType, true),
+        StructField("dropoff_boroname", StringType, true),
+        StructField("dropoff_ct2010", IntegerType, true),
+        StructField("dropoff_boroct2010", StringType, true),
+        StructField("dropoff_cdeligibil", StringType, true),
+        StructField("dropoff_ntacode", StringType, true),
+        StructField("dropoff_ntaname", StringType, true),
+        StructField("dropoff_puma", StringType, true)
+    ))
+
+    val taxiDF = spark.read.format("csv")
+                .option("header", "false")
+                .option("delimiter", ",")
+                .option("mode", "FAILFAST")
+                .schema(taxiSchema)
+                .load("/nyc_taxi/csv/*.csv.gz")
+
+
+

4. Create temp view for the dataframe

+
%spark
+taxiDF.createOrReplaceTempView("trips")
+
+

5. Transform the dataframe for Lightning DB

+
%spark
+import org.apache.spark.sql.functions._
+val deltaDf = taxiDF
+    .filter($"pickup_datetime".isNotNull && $"passenger_count".isNotNull && $"cab_type".isNotNull)
+    .withColumn("pickup_yyyyMMddhh", from_unixtime(unix_timestamp($"pickup_datetime"),  "yyyyMMddhh"))
+    .withColumn("round_trip_distance", round($"trip_distance"))
+
+deltaDf.printSchema()
+
+

6. Create a temp view for Lightning DB with the r2 options that configure Lightning DB as the data source

+
%spark
+val r2Options = Map[String, String]("table" -> "100",
+      "host" -> "192.168.111.35",
+      "port" -> "18800",
+      "partitions" -> "pickup_yyyyMMddhh passenger_count cab_type",
+      "mode" -> "nvkvs",
+      "rowstore" -> "false",
+      "group_size" -> "40",
+      "at_least_one_partition_enabled" -> "no")
+spark.sqlContext.read.format("r2").schema(deltaDf.schema).options(r2Options).load().createOrReplaceTempView("fb_trips")
+
+

7. Load data from the dataframe into Lightning DB

+
%spark
+deltaDf.write
+    .format("r2")
+    .insertInto("fb_trips")
+
+
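As an optional sanity check (not part of the original note), you can compare the number of rows written into Lightning DB with the source dataframe.

+
%spark
+// Sketch: both counts should match if the insertInto above completed successfully.
+val sourceCount = deltaDf.count()
+val loadedCount = spark.table("fb_trips").count()
+println(s"source rows: $sourceCount, rows in fb_trips: $loadedCount")
+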

8. Enable ‘aggregation pushdown’ feature

+
%sql
+SET spark.r2.aggregation.pushdown=true
+
+
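As with the KNN example, this flag can presumably also be set from a Scala paragraph (a sketch, assuming the property name above).

+
%spark
+// Sketch: enable aggregation pushdown for the current session from Scala.
+spark.conf.set("spark.r2.aggregation.pushdown", "true")
+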

9. Run the ‘NYC TAXI Benchmark’ queries

+

Q1

+
%sql
+SELECT cab_type, count(*) FROM fb_trips GROUP BY cab_type
+
+

Q2

+
%sql
+SELECT passenger_count,
+       avg(total_amount)
+FROM fb_trips
+GROUP BY passenger_count
+
+

Q3

+
%sql
+SELECT passenger_count,
+       substring(pickup_yyyyMMddhh, 1, 4),
+       count(*)
+FROM fb_trips
+GROUP BY passenger_count, 
+         substring(pickup_yyyyMMddhh, 1, 4)
+
+

Q4

+
%sql
+SELECT passenger_count,
+       substring(pickup_yyyyMMddhh, 1, 4),
+       round_trip_distance,
+       count(*)
+FROM fb_trips
+GROUP BY 1,
+         2,
+         3
+ORDER BY 2,
+         4 desc
+