This repository has been archived by the owner on Aug 22, 2024. It is now read-only.
-
Notifications
You must be signed in to change notification settings - Fork 3
/
MudletPackageManager.xml
2707 lines (2285 loc) · 92.9 KB
/
MudletPackageManager.xml
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987
988
989
990
991
992
993
994
995
996
997
998
999
1000
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE MudletPackage>
<MudletPackage version="1.001">
<TriggerPackage />
<TimerPackage />
<AliasPackage>
<AliasGroup isActive="yes" isFolder="yes">
<name>mkpg</name>
<script></script>
<command></command>
<packageName></packageName>
<regex></regex>
<Alias isActive="yes" isFolder="no">
<name>mpkg help</name>
<script>mpkg.functions.printHelp()
</script>
<command></command>
<packageName></packageName>
<regex>^mpkg help$</regex>
</Alias>
<Alias isActive="yes" isFolder="no">
<name>mpkg repo add</name>
<script>mpkg.functions.addRepo(matches[2], matches[3], matches[4])</script>
<command></command>
<packageName></packageName>
<regex>^mpkg repo add (.+?) (.+?) (.*)$</regex>
</Alias>
<Alias isActive="yes" isFolder="no">
<name>mpkg repo list</name>
<script>mpkg.functions.listRepos()</script>
<command></command>
<packageName></packageName>
<regex>^mpkg repo list$</regex>
</Alias>
<Alias isActive="yes" isFolder="no">
<name>mpkg repo info</name>
<script>mpkg.functions.repoInfo(matches[2])</script>
<command></command>
<packageName></packageName>
<regex>^mpkg repo info (.+)$</regex>
</Alias>
<Alias isActive="yes" isFolder="no">
<name>mpkg repo remove</name>
<script>mpkg.functions.deleteRepo(matches[2])</script>
<command></command>
<packageName></packageName>
<regex>^mpkg repo remove (.+)$</regex>
</Alias>
<Alias isActive="yes" isFolder="no">
<name>mpkg update</name>
<script>mpkg.functions.update()</script>
<command></command>
<packageName></packageName>
<regex>^mpkg update$</regex>
</Alias>
<Alias isActive="yes" isFolder="no">
<name>mpkg package list</name>
<script>mpkg.functions.packageList()</script>
<command></command>
<packageName></packageName>
<regex>^mpkg package list$</regex>
</Alias>
<Alias isActive="yes" isFolder="no">
<name>mpkg package info</name>
<script>mpkg.functions.packageInfo(matches[2])</script>
<command></command>
<packageName></packageName>
<regex>^mpkg package info (.+)$</regex>
</Alias>
<Alias isActive="yes" isFolder="no">
<name>mpkg install</name>
<script>mpkg.functions.install(matches[2])</script>
<command></command>
<packageName></packageName>
<regex>^mpkg install ([^\s]+?)$</regex>
</Alias>
<Alias isActive="yes" isFolder="no">
<name>mpkg uninstall</name>
<script>mpkg.functions.uninstall(matches[2])</script>
<command></command>
<packageName></packageName>
<regex>^mpkg uninstall ([^\s]+)$</regex>
</Alias>
<Alias isActive="yes" isFolder="no">
<name>mpkg upgrade</name>
<script>mpkg.functions.upgrade()</script>
<command></command>
<packageName></packageName>
<regex>^mpkg upgrade$</regex>
</Alias>
</AliasGroup>
</AliasPackage>
<ActionPackage />
<ScriptPackage>
<ScriptGroup isActive="yes" isFolder="yes">
<name>mpkg</name>
<packageName></packageName>
<script>-------------------------------------------------
-- Put your Lua functions here. --
-- --
-- Note that you can also use external Scripts --
-------------------------------------------------
</script>
<eventHandlerList />
<Script isActive="yes" isFolder="no">
<name>patch DB</name>
<packageName></packageName>
<script>-------------------------------------------------
-- Put your Lua functions here. --
-- --
-- Note that you can also use external Scripts --
-------------------------------------------------
-- If we don't have query_by_example, we need to patch DB.lua as there were a lot of bugs.
if not db.query_by_example then
-- the timestamp is stored in UTC time, so work out the difference in seconds
-- from local to UTC time. Credit: https://github.com/stevedonovan/Penlight/blob/master/lua/pl/Date.lua#L85
--- Returns the offset, in seconds, between local time and UTC for the
--- given timestamp. DST is deliberately ignored so the difference is stable.
-- Credit: https://github.com/stevedonovan/Penlight/blob/master/lua/pl/Date.lua#L85
function datetime:calculate_UTCdiff(ts)
  local utc_fields = os.date('!*t', ts)
  local local_fields = os.date('*t', ts)
  -- Force DST off so os.time compares the two field sets consistently.
  local_fields.isdst = false
  return os.difftime(os.time(local_fields), os.time(utc_fields))
end
-- NOT LUADOC
-- The rex.match function does not return named patterns even if you use named capture
-- groups, but the r:tfind does -- but this only operates on compiled patterns. So,
-- we are caching the conversion of 'simple format' date patterns into a regex, and
-- then compiling them.
--- Converts a 'simple format' date pattern (e.g. "%Y-%m-%d") into a compiled,
--- case-insensitive regex with named capture groups, memoising the result in
--- datetime._pattern_cache so each format string is only compiled once.
function datetime:_get_pattern(format)
if not datetime._pattern_cache[format] then
-- "(%[A-Za-z])" is a rex pattern (presumably PCRE, Mudlet's rex binding --
-- TODO confirm): a literal '%' followed by one letter, i.e. one directive
-- such as %Y or %m. Each match is replaced via the _directives lookup.
local fmt = rex.gsub(format, "(%[A-Za-z])",
function(m)
-- Directives without a translation pass through unchanged.
return datetime._directives[m] or m
end
)
datetime._pattern_cache[format] = rex.new(fmt, rex.flags().CASELESS)
end
return datetime._pattern_cache[format]
end
--- Parses the specified source string, according to the format if given, to return a representation of
--- the date/time. The default format if not specified is: "^%Y-%m-%d %H:%M:%S$" <br/><br/>
---
--- If as_epoch is provided and true, the return value will be a Unix epoch -- the number
--- of seconds since 1970. This is a useful format for exchanging date/times with other systems. If as_epoch
--- is false, then a Lua time table will be returned. Details of the time tables are provided
--- in the http://www.lua.org/pil/22.1.html. <br/><br/>
---
--- Supported Format Codes
--- </pre>
--- %b Abbreviated Month Name
--- %B Full Month Name
--- %d Day of Month
--- %H Hour (24-hour format)
--- %I Hour (12-hour format, requires %p as well)
--- %p AM or PM
--- %m 2-digit month (01-12)
--- %M 2-digit minutes (00-59)
--- %S 2-digit seconds (00-59)
--- %y 2-digit year (00-99), will automatically prepend 20 so 10 becomes 2010 and not 1910.
--- %Y 4-digit year.
--- </pre>
-- Parses `source` according to `format` (default "^%Y-%m-%d %H:%M:%S$") and
-- returns either a Unix epoch (as_epoch truthy) or a Lua time table; returns
-- nil when the source does not match the format. See the LuaDoc block above
-- for the supported format codes.
function datetime:parse(source, format, as_epoch)
  if not format then
    format = "^%Y-%m-%d %H:%M:%S$"
  end
  local fmt = datetime:_get_pattern(format)
  local m = {fmt:tfind(source)}
  if m and m[3] then
    -- m[3] is the table of named captures from the compiled pattern.
    m = m[3]
    -- BUGFIX: `dt` previously leaked into the global environment; it is a
    -- local now.
    local dt = {}
    if m.year_half then
      -- 2-digit years are assumed to be 20xx.
      dt.year = tonumber("20"..m.year_half)
    elseif m.year_full then
      dt.year = tonumber(m.year_full)
    end
    if m.month then
      dt.month = tonumber(m.month)
    elseif m.month_name then
      dt.month = datetime._month_names[m.month_name:lower()]
    elseif m.abbrev_month_name then
      dt.month = datetime._abbrev_month_names[m.abbrev_month_name:lower()]
    end
    dt.day = m.day_of_month
    if m.hour_12 then
      assert(m.ampm, "You must use %p (AM|PM) with %I (12-hour time)")
      if m.ampm == "PM" then
        dt.hour = 12 + tonumber(m.hour_12)
      else
        dt.hour = tonumber(m.hour_12)
      end
    else
      dt.hour = tonumber(m.hour_24)
    end
    dt.min = tonumber(m.minute)
    dt.sec = tonumber(m.second)
    dt.isdst = false
    if as_epoch then
      return os.time(dt)
    else
      return dt
    end
  else
    return nil
  end
end
-----------------------------------------------------------------------------
-- The database wrapper library
-----------------------------------------------------------------------------
-- Only require the LuaSQL SQLite3 driver when the host has already loaded it;
-- this avoids a hard error in environments without LuaSQL.
if package.loaded["luasql.sqlite3"] then luasql = require "luasql.sqlite3" end
-- Shared state of the db wrapper (intentionally global: this patch replaces
-- Mudlet's stock `db` library).
db = {}
db.__autocommit = {} -- per-database autocommit flags, keyed by safe db name
db.__schema = {} -- per-database sheet schemas, keyed by safe db name
db.__conn = {} -- per-database LuaSQL connections, keyed by safe db name
db.debug_sql = false -- presumably makes db:echo_sql print statements -- TODO confirm
-- NOT LUADOC
-- Converts the type of a lua object to the equivalent type in SQL
-- Maps a Lua value onto the SQL storage class the db layer uses for it:
-- numbers -> REAL, nil -> NULL, db.Timestamp tables -> INTEGER,
-- everything else -> TEXT.
function db:_sql_type(value)
  local lua_type = type(value)
  if lua_type == "number" then
    return "REAL"
  end
  if lua_type == "nil" then
    return "NULL"
  end
  if lua_type == "table" and value._timestamp ~= nil then
    return "INTEGER"
  end
  return "TEXT"
end
-- NOT LUADOC
-- Converts a data value in Lua to its SQL equivalent; notably it will also escape single-quotes to
-- prevent inadvertent SQL injection.
-- called when generating the schema
-- Renders a Lua value as a SQL literal for use in generated DDL/DML.
-- NOTE: TEXT values are wrapped in double quotes while embedded single
-- quotes are doubled, mirroring the original behaviour exactly.
function db:_sql_convert(value)
  local sql_type = db:_sql_type(value)
  if value == nil then
    return "NULL"
  end
  if sql_type == "TEXT" and type(value) == "string" then
    return '"'..value:gsub("'", "''")..'"'
  end
  if sql_type == "NULL" then
    return "NULL"
  end
  if sql_type == "INTEGER" then
    -- A db.Timestamp whose _timestamp is false represents "no value".
    if value._timestamp == false then
      return "NULL"
    end
    return tostring(value._timestamp)
  end
  return tostring(value)
end
-- NOT LUADOC
-- Given a sheet name and the details of an index, this function will return a unique index name to
-- add to the database. The purpose of this is to create unique index names as indexes are tested
-- for existence on each call of db:create and not only on creation. That way new indexes can be
-- added after initial creation.
-- Builds a deterministic, unique index name for the given sheet and index
-- spec (a single column name or a table of column names), so repeated
-- db:create calls can recognise indexes that already exist.
function db:_index_name(tbl_name, params)
  if type(params) == "string" then
    return "idx_" .. tbl_name .. "_c_" .. params
  end
  assert(type(params) == "table", "Indexes must be either a string or a table.")
  local parts = { "idx", tbl_name, "c" }
  for _, column in pairs(params) do
    parts[#parts + 1] = column
  end
  return table.concat(parts, "_")
end
-- NOT LUADOC
-- This function returns true if all of the columns referenced in index_columns also exist within
-- the sheet_columns table array. The purpose of this is to raise an error if someone tries to index
-- a column which doesn't currently exist in the schema.
-- Returns true when every column named by the index spec (a string or a
-- table of strings) exists in sheet_columns; logs and returns false on the
-- first missing column.
function db:_index_valid(sheet_columns, index_columns)
  if type(index_columns) == "string" then
    return sheet_columns[index_columns] ~= nil
  end
  for _, column in ipairs(index_columns) do
    if sheet_columns[column] == nil then
      db:echo_sql("\n--> Bad index "..column)
      return false
    end
  end
  return true
end
-- NOT LUADOC
-- The column_spec is either a string or an indexed table. This function returns either "column" or
-- "column1", "column2" for use in the column specification of INSERT.
-- Renders a column spec (a string or a table array of column names, with
-- optional interleaved "asc"/"desc" modifiers) as a quoted, comma-separated
-- SQL column list, e.g. {"a","desc","b"} -> '"a" desc,"b"'.
function db:_sql_columns(value)
  local t = type(value)
  if t == "table" then
    -- BUGFIX: col_chunks previously leaked into the global environment;
    -- it is a local now.
    local col_chunks = {}
    for _, v in ipairs(value) do
      -- see https://www.sqlite.org/syntaxdiagrams.html#ordering-term
      if v:lower() == "desc" or v:lower() == "asc" then
        -- An ordering keyword attaches to the previously emitted column.
        col_chunks[#col_chunks] = col_chunks[#col_chunks] .. " " .. v
      else
        col_chunks[#col_chunks+1] = '"'..v:lower()..'"'
      end
    end
    return table.concat(col_chunks, ',')
  end
  assert(t == "string",
    "Must specify either a table array or string for index, not "..type(value))
  return '"'..value:lower()..'"'
end
-- NOT LUADOC
-- This serves as a very similar function to db:_sql_columns, quoting column names properly but for
-- uses outside of INSERTs.
-- Renders the keys of a record table as a parenthesised, double-quoted
-- column list for an INSERT, e.g. {name="x", city="y"} -> '("name","city")'.
function db:_sql_fields(values)
  local names = {}
  for field_name in pairs(values) do
    names[#names + 1] = '"' .. field_name .. '"'
  end
  return "(" .. table.concat(names, ",") .. ")"
end
-- NOT LUADOC
-- This quotes values to be passed into an INSERT or UPDATE operation in a SQL list. Meaning, it turns
-- {x="this", y="that", z=1} into ('this', 'that', 1).
-- It is intelligent with data-types; strings are automatically quoted (with internal single quotes
-- escaped), nil turned into NULL, timestamps converted to integers, and such.
-- Renders the values of a record table as a parenthesised SQL VALUES list:
-- strings are single-quoted with embedded quotes doubled, nil becomes NULL,
-- db.Timestamp tables become datetime(..., 'unixepoch'), everything else is
-- tostring()'d.
function db:_sql_values(values)
  local sql_values = {}
  for _, v in pairs(values) do
    local t = type(v)
    local s = ""
    if t == "string" then
      s = "'"..v:gsub("'", "''").."'"
    elseif t == "nil" then
      s = "NULL"
    elseif t == "table" and v._timestamp ~= nil then
      -- BUGFIX: this branch previously tested `t._timestamp` -- but `t` is
      -- the *type string*, so Timestamp values were never detected and fell
      -- through to tostring(). It also did `return "NULL"` mid-loop, which
      -- aborted the whole VALUES list instead of emitting one NULL.
      if not v._timestamp then
        s = "NULL"
      else
        s = "datetime('"..v._timestamp.."', 'unixepoch')"
      end
    else
      s = tostring(v)
    end
    sql_values[#sql_values+1] = s
  end
  return "("..table.concat(sql_values, ",")..")"
end
--- <b><u>TODO</u></b> db:safe_name(name)
-- On a filesystem level, names are restricted to being alphanumeric only. So, "my_database" becomes
-- "mydatabase", and "../../../../etc/passwd" becomes "etcpasswd". This prevents any possible
-- security issues with database names.
-- Sanitises a database name down to lowercase alphanumerics so it is safe to
-- embed in a filename: "my_database" -> "mydatabase", "../../etc/passwd" ->
-- "etcpasswd".
function db:safe_name(name)
  -- BUGFIX: the pattern was "[^%ad]" (letters plus the literal character
  -- 'd'), which stripped every digit. "[^%a%d]" keeps letters and digits,
  -- matching the documented "alphanumeric only" contract.
  name = name:gsub("[^%a%d]", "")
  name = name:lower()
  return name
end
--- Creates and/or modifies an existing database. This function is safe to define at a top-level of a Mudlet
--- script: in fact it is reccommended you run this function at a top-level without any kind of guards.
--- If the named database does not exist it will create it. If the database does exist then it will add
--- any columns or indexes which didn't exist before to that database. If the database already has all the
--- specified columns and indexes, it will do nothing. <br/><br/>
---
--- The database will be called Database_<sanitized database name>.db and will be stored in the
--- Mudlet configuration directory. <br/><br/>
---
--- Database 'tables' are called 'sheets' consistently throughout this documentation, to avoid confusion
--- with Lua tables. <br/><br/>
---
--- The schema table must be a Lua table array containing table dictionaries that define the structure and
--- layout of each sheet. <br/><br/>
---
--- For sheets with unique indexes, you may specify a _violations key to indicate how the db layer handles
--- cases where the unique index is violated. The options you may use are:
--- <pre>
--- FAIL - the default. A hard error is thrown, cancelling the script.
--- IGNORE - The command that would add a record that violates uniqueness just fails silently.
--- REPLACE - The old record which matched the unique index is dropped, and the new one is added to replace it.
--- </pre>
---
--- @usage The example below will create a database with two sheets; the first is kills and is used to track every successful kill,
--- with both where and when the kill happened. It has one index, a compound index tracking the combination of name and area.
--- The second sheet has two indexes, but one is unique: it isn't possible to add two items to the enemies sheet with the same name.
--- <pre>
--- local mydb = db:create("combat_log",
--- {
--- kills = {
--- name = "",
--- area = "",
--- killed = db:Timestamp("CURRENT_TIMESTAMP"),
--- _index = {{"name", "area"}}
--- },
--- enemies = {
--- name = "",
--- city = "",
--- reason = "",
--- enemied = db:Timestamp("CURRENT_TIMESTAMP"),
--- _index = { "city" },
--- _unique = { "name" },
--- _violations = "IGNORE"
--- }
--- }
--- )
--- </pre>
--- Note that you have to use double {{ }} if you have composite index/unique constrain.
-- Creates and/or migrates the named database from the given sheet schemas
-- (see the LuaDoc block above for the schema formats). Safe to call at
-- script top level; returns the database object from db:get_database.
function db:create(db_name, sheets)
  if not db.__env then
    db.__env = luasql.sqlite3()
  end
  db_name = db:safe_name(db_name)
  if not db.__conn[db_name] then
    -- Connections are opened lazily, once per safe name, with autocommit off
    -- so the layer controls transaction boundaries.
    db.__conn[db_name] = db.__env:connect(getMudletHomeDir() .. "/Database_" .. db_name .. ".db")
    db.__conn[db_name]:setautocommit(false)
    db.__autocommit[db_name] = true
  end
  db.__schema[db_name] = {}
  -- Separate the actual column definitions from meta-configuration: any key
  -- beginning with an underscore (e.g. _index, _unique, _violations) is
  -- reserved syntax for the db layer itself.
  for s_name, sht in pairs(sheets) do
    -- BUGFIX: `options` previously leaked into the global environment; it is
    -- a local now.
    local options = {}
    if sht[1] ~= nil then
      -- Sheet given as {"column1", "column2"}: every column is assumed to be
      -- text and defaults to "".
      local t = {}
      for _, v in pairs(sht) do
        t[v] = ""
      end
      sht = t
    else
      -- Sheet given as {column = default}: extract the "_option" keys into
      -- `options` and remove them from the column set.
      local opts = {}
      for k, v in pairs(sht) do
        if string.starts(k, "_") then
          options[k] = v
          opts[#opts + 1] = k
        end
      end
      for _, v in ipairs(opts) do
        sht[v] = nil
      end
    end
    if not options._violations then
      options._violations = "FAIL"
    end
    db.__schema[db_name][s_name] = {columns=sht, options=options}
    db:_migrate(db_name, s_name)
  end
  return db:get_database(db_name)
end
-- NOT LUADOC
-- The migrate function is meant to upgrade an existing database live, to maintain a consistent
-- and correct set of sheets and fields, along with their indexes. It should be safe to run
-- at any time, and must not cause any data loss. It simply adds to what is there: in particular
-- it is not capable of removing indexes, columns, or sheets after they have been defined.
function db:_migrate(db_name, s_name)
local conn = db.__conn[db_name]
local schema = db.__schema[db_name][s_name]
local current_columns = {}
-- The PRAGMA table_info command is a query which returns all of the columns currently
-- defined in the specified table. The purpose of this section is to see if any new columns
-- have been added.
local cur = conn:execute("PRAGMA table_info('"..s_name.."')") -- currently broken - LuaSQL bug, needs to be upgraded for new sqlite API
-- LuaSQL returns a number for non-query statements and a cursor otherwise;
-- only a cursor can be fetched from.
if type(cur) ~= "number" then
local row = cur:fetch({}, "a")
if row then
-- PRAGMA worked: record each existing column's name and declared type.
while row do
current_columns[row.name] = row.type
row = cur:fetch({}, "a")
end
else
--------------- GETS ALL COLUMNS FROM SHEET IF IT EXISTS
-- Fallback for the broken PRAGMA above: discover the columns by selecting a
-- row from the sheet itself.
db:echo_sql("SELECT * FROM "..s_name)
local get_sheet_cur = conn:execute("SELECT * FROM "..s_name) -- select the sheet
if get_sheet_cur and get_sheet_cur ~= 0 then
-- NOTE: this `row` intentionally shadows the outer one.
local row = get_sheet_cur:fetch({}, "a") -- grab the first row, if any
if not row then -- if no first row then
-- Empty sheet: temporarily insert a probe row so a fetch reveals the columns.
local tried_cols, contains, found_something, col = {}, table.contains, false
while not found_something do -- guarded by the error below from infinite looping
col = false
for k,v in pairs(schema.columns) do -- look through sheet schema to find the first column that is text
if type(k) == "number" then
if string.sub(v,1,1) ~= "_" and not contains(tried_cols, v) then col = v break end
else
if string.sub(k,1,1) ~= "_" and type(v) == "string" and not contains(tried_cols, k) then col = k break end
end
end
if not col then error("db:_migrate: cannot find a suitable column for testing a new row with.") end
-- add row with found column set as "test"
db:add({_db_name = db_name, _sht_name = s_name},{[col] = "test"})
db:echo_sql("SELECT * FROM "..s_name)
local get_row_cur = conn:execute("SELECT * FROM "..s_name) -- select the sheet
row = get_row_cur:fetch({}, "a") -- grab the newly created row
get_row_cur:close()
-- delete the newly created row. If we picked a row that doesn't exist yet and we're
-- trying to add, the delete will fail - remember this, and try another row
local worked, msg = pcall(db.delete, db, {_db_name = db_name, _sht_name = s_name},db:eq({database = db_name, sheet = s_name, name = col, type = "string"},"test"))
if not worked then
tried_cols[#tried_cols+1] = col
else
found_something = true
end
end
end
if row then -- add each column from row to current_columns table
-- Column types are unknown via this path, so record them as "".
for k,v in pairs(row) do
current_columns[k] = ""
end
end
get_sheet_cur:close()
end
end
end
-- Close the PRAGMA cursor if we actually received one.
if type(cur) == "userdata" then
cur:close()
end
-- The SQL definition of a column is:
-- "column_name" column_type NULL
-- The db module does not presently support columns that are required. Everything is optional,
-- everything may be NULL / nil.
-- If you specify a column's type, you also specify its default value.
if table.is_empty(current_columns) then
-- At this point, we know that the specified table does not exist in the database and so we
-- should create it.
-- Every sheet has an implicit _row_id column. It is not presently (and likely never will be)
-- supported to define the primary key of any sheet.
local sql = db:_build_create_table_sql(schema, s_name)
db:echo_sql(sql)
conn:execute(sql)
else
-- At this point we know that the sheet already exists, but we are concerned if the current
-- definition includes columns which may be added.
local missing = {}
for k, v in pairs(schema.columns) do
-- Here we test if a given column exists in the sheet already, and if not, we add that
-- column.
if not current_columns[k] then
missing[#missing + 1] = { name = k, default = v }
end
end
if #missing > 0 and
table.size(current_columns) + #missing == table.size(schema.columns)+1
-- We have changes and when we did those changes, we have exactly
-- the number of columns we need. The "+1" is for the _row_id
-- which is not in the schema.
then
local sql_add = 'ALTER TABLE %s ADD COLUMN "%s" %s NULL DEFAULT %s'
for _, v in ipairs(missing) do
local t = db:_sql_type(v.default)
local def = db:_sql_convert(v.default)
local sql = sql_add:format(s_name, v.name, t, def)
conn:execute(sql)
db:echo_sql(sql)
end
elseif
#missing + table.size(current_columns) > table.size(schema.columns) + 1
-- if we add all missing columns and we have more columns than we want
-- then there are currently some columns we don't want anymore.
then
-- Rebuild path: copy data into a temporary _bak table, recreate the sheet
-- from the schema, copy the wanted columns back, then drop the backup.
local get_create = "SELECT sql FROM sqlite_master " ..
"WHERE type = 'table' AND " ..
"name = '" .. s_name .."'"
local ret_str
cur, ret_str = conn:execute(get_create)
assert(cur, ret_str)
if type(cur) ~= "number" then
local row = cur:fetch({}, "a");
cur:close()
local create_tmp = row.sql:gsub(s_name, s_name .. "_bak")
local sql_chunks = {}
local fields = { "_row_id" }
local sql
create_tmp = create_tmp:gsub("TABLE", "TEMPORARY TABLE")
for k, _ in pairs(schema.columns) do
fields[#fields + 1] = string.format('"%s"', k)
end
local fields_sql = table.concat(fields, ", ")
sql_chunks[#sql_chunks + 1] = create_tmp .. ";"
sql_chunks[#sql_chunks + 1] = "INSERT INTO " .. s_name .. "_bak " ..
"SELECT * FROM " .. s_name .. ";"
sql_chunks[#sql_chunks + 1] = "DROP TABLE " .. s_name .. ";"
sql_chunks[#sql_chunks + 1] = db:_build_create_table_sql(schema,
s_name) .. ";"
sql_chunks[#sql_chunks + 1] = string.format(
"INSERT INTO %s SELECT %s FROM %s_bak;", s_name, fields_sql,
s_name)
sql_chunks[#sql_chunks + 1] = "DROP TABLE " .. s_name .. "_bak;"
for _, sql in ipairs(sql_chunks) do
db:echo_sql(sql)
local ret, str = conn:execute(sql)
assert(ret, str)
end
end
end
end
-- On every invocation of db:create we run the code that creates indexes, as that code will
-- do nothing if the specific indexes already exist. This is enforced by the db:_index_name
-- function creating a unique index.
--
-- Note that in no situation will an existing index be deleted.
-- make up current_columns, as pragma_info currently does not populate it, due to luasql bug
for key, value in pairs(schema.columns) do
current_columns[key] = db:_sql_type(value)
end
db:_migrate_indexes(conn, s_name, schema, current_columns)
db:echo_sql("COMMIT")
conn:commit()
conn:execute("VACUUM")
end
-- Assembles the CREATE TABLE statement for one sheet: an implicit _row_id
-- primary key plus one nullable column per schema entry (with its default,
-- and UNIQUE where schema.options._unique names the column).
function db:_build_create_table_sql(schema, s_name)
  local column_fmt = ', "%s" %s NULL'
  local column_default_fmt = column_fmt .. ' DEFAULT %s'
  local pieces = {"CREATE TABLE ", s_name, '("_row_id" INTEGER PRIMARY KEY AUTOINCREMENT'}
  local unique = schema.options._unique
  for key, value in pairs(schema.columns) do
    local column_sql
    -- NOTE: pairs never yields a nil value, so the first branch is
    -- effectively dead; it is kept for parity with the original.
    if value == nil then
      column_sql = column_fmt:format(key, db:_sql_type(value))
    else
      column_sql = column_default_fmt:format(key, db:_sql_type(value), db:_sql_convert(value))
    end
    local is_unique = (type(unique) == "table" and table.contains(unique, key))
      or (type(unique) == "string" and unique == key)
    if is_unique then
      column_sql = column_sql .. " UNIQUE"
    end
    pieces[#pieces + 1] = column_sql
  end
  pieces[#pieces + 1] = ")"
  return table.concat(pieces, "")
end
-- NOT LUADOC
-- Creates any indexes which do not yet exist in the given database.
-- Creates every index declared in schema.options (_index and _unique) that
-- does not exist yet; CREATE ... IF NOT EXISTS makes this idempotent.
function db:_migrate_indexes(conn, s_name, schema, current_columns)
  local create_fmt = "CREATE %s IF NOT EXISTS %s ON %s (%s);"
  local keywords = {_unique = "UNIQUE INDEX", _index = "INDEX"} -- , _check = "CHECK"}
  for option_type, specs in pairs(schema.options) do
    if option_type == "_unique" or option_type == "_index" then
      for _, spec in pairs(specs) do
        -- Indexes referencing columns absent from the schema are silently
        -- skipped rather than raising an error.
        if db:_index_valid(current_columns, spec) then
          local sql = create_fmt:format(
            keywords[option_type], db:_index_name(s_name, spec), s_name, db:_sql_columns(spec)
          )
          db:echo_sql(sql)
          conn:execute(sql)
        end
      end
    end
  end
end
--- Adds one or more new rows to the specified sheet. If any of these rows would violate a UNIQUE index,
--- a lua error will be thrown and execution will cancel. As such it is advisable that if you use a UNIQUE
--- index, you test those values before you attempt to insert a new row. <br/><br/>
---
--- Each table is a series of key-value pairs to set the values of the sheet, but if any keys do not exist
--- then they will be set to nil or the default value. As you can see, all fields are optional.
---
--- @usage Adding one record.
--- <pre>
--- db:add(mydb.enemies, {name="Bob Smith", city="San Francisco"})
--- </pre>
--- @usage Adding multiple records.
--- <pre>
--- db:add(mydb.enemies,
--- {name="John Smith", city="San Francisco"},
--- {name="Jane Smith", city="San Francisco"},
--- {name="Richard Clark"}
--- )
--- </pre>
-- Inserts one record table per vararg into the given sheet, honouring the
-- sheet's _violations policy (INSERT OR FAIL/IGNORE/REPLACE). Returns true,
-- or nil plus the driver's message on the first failed insert.
function db:add(sheet, ...)
  local db_name = sheet._db_name
  local s_name = sheet._sht_name
  assert(s_name, "First argument to db:add must be a proper Sheet object.")
  local conn = db.__conn[db_name]
  local insert_fmt = "INSERT OR %s INTO %s %s VALUES %s"
  for _, record in ipairs({...}) do
    -- Callers may never set a _row_id; strip it unconditionally.
    record._row_id = nil
    local sql = insert_fmt:format(db.__schema[db_name][s_name].options._violations, s_name, db:_sql_fields(record), db:_sql_values(record))
    db:echo_sql(sql)
    local ok, msg = conn:execute(sql)
    if not ok then
      return nil, msg
    end
  end
  if db.__autocommit[db_name] then
    conn:commit()
  end
  return true
end
--- Execute SQL select query against database. This only useful for some very specific cases. <br/>
--- Use db:fetch if possible instead - this function should not be normally used!
---
--- @release post Mudlet 1.1.1 (<b><u>TODO update before release</u></b>)
---
--- @usage Following will select all distinct area from my kills DB.
--- <pre>
--- db:fetch_sql(mydb.kills, "SELECT distinct area FROM kills")
--- </pre>
---
--- @see db:fetch
-- Executes a raw SELECT against the sheet's database and returns a table
-- array of coerced rows, or nil if the statement errored (nil cursor) or
-- was not a query (numeric result). Prefer db:fetch over calling this.
function db:fetch_sql(sheet, sql)
  local conn = db.__conn[sheet._db_name]
  db:echo_sql(sql)
  local cur = conn:execute(sql)
  -- A SQL error yields nil; a non-query statement yields a number.
  if not cur or cur == 0 then
    return nil
  end
  local results = {}
  local row = cur:fetch({}, "a")
  while row do
    results[#results+1] = db:_coerce_sheet(sheet, row)
    row = cur:fetch({}, "a")
  end
  cur:close()
  return results
end
--- Returns a table array containing a table for each matching row in the specified sheet. All arguments
--- but sheet are optional. If query is nil, the entire contents of the sheet will be returned. <br/><br/>
---
--- Query is a string which should be built by calling the various db: expression functions, such as db:eq,
--- db:AND, and such. You may pass a SQL WHERE clause here if you wish, but doing so is very dangerous.
--- If you don't know SQL well, its best to build the expression.<br/><br/>
---
--- Query may also be a table array of such expressions, if so they will be AND'd together implicitly.<br/><br/>
---
--- The results that are returned are not in any guaranteed order, though they are usually the same order
--- as the records were inserted. If you want to rely on the order in any way, you must pass a value to the
--- order_by field. This must be a table array listing the fields you want to sort by.
--- It can be { mydb.kills.area }, or { mydb.kills.area, mydb.kills.name } <br/><br/>
---
--- The results are returned in ascending (smallest to largest) order; to reverse this pass true into the final field.
---
--- @usage The first will fetch all of your enemies, sorted first by the city they reside in and then by their name.
--- <pre>
--- db:fetch(mydb.enemies, nil, {mydb.enemies.city, mydb.enemies.name})
--- </pre>
--- @usage The second will fetch only the enemies which are in San Francisco.
--- <pre>
--- db:fetch(mydb.enemies, db:eq(mydb.enemies.city, "San Francisco"))
--- </pre>
--- @usage The third will fetch all the things you've killed in Undervault which have Drow in their name.
--- <pre>
--- db:fetch(mydb.kills,
--- {
--- db:eq(mydb.kills.area, "Undervault"),
--- db:like(mydb.kills.name, "%Drow%")
--- }
--- )
--- </pre>
---
--- @see db:fetch_sql
-- Builds and runs a SELECT over the sheet: `query` is a WHERE expression
-- (string) or a table of expressions AND'd together; `order_by` is a table
-- array of field references; `descending` reverses the sort order.
function db:fetch(sheet, query, order_by, descending)
  local sql = "SELECT * FROM " .. sheet._sht_name
  if query then
    if type(query) == "table" then
      sql = sql .. " WHERE " .. db:AND(unpack(query))
    else
      sql = sql .. " WHERE " .. query
    end
  end
  if order_by then
    local ordering = {}
    for _, field in ipairs(order_by) do
      assert(field.name, "You must pass field instances (as obtained from yourdb.yoursheet.yourfield) to sort.")
      ordering[#ordering + 1] = field.name
      if descending then
        ordering[#ordering + 1] = "DESC"
      end
    end
    sql = sql .. " ORDER BY " .. db:_sql_columns(ordering)
  end
  return db:fetch_sql(sheet, sql)
end
--- Returns the result of calling the specified aggregate function on the field and its sheet. <br/><br/>
---
--- The supported aggregate functions are:
--- <pre>
--- COUNT - Returns the total number of records that are in the sheet or match the query.
--- AVG - Returns the average of all the numbers in the specified field.
--- MAX - Returns the highest number in the specified field.
--- MIN - Returns the lowest number in the specified field.
--- TOTAL - Returns the value of adding all the contents of the specified field.
--- </pre>
---
--- @param query optional
---
--- @usage Example:
--- <pre>
--- local mydb = db:get_database("my database")
--- echo(db:aggregate(mydb.enemies.name, "count"))
--- </pre>
-- Runs a SQL aggregate (COUNT/AVG/MAX/MIN/TOTAL) over one field of a sheet,
-- optionally filtered by `query` and/or restricted to DISTINCT values, and
-- coerces the result to the field's type. Returns 0 when the statement
-- fails or is not a query.
function db:aggregate(field, fn, query, distinct)
  -- Validate the field reference before dereferencing it, so bad arguments
  -- raise the intended assertion message instead of an index error.
  assert(type(field) == "table", "Field must be a field reference.")
  assert(field.name, "Field must be a real field reference.")
  local db_name = field.database
  local s_name = field.sheet
  local conn = db.__conn[db_name]
  local sql_chunks = {"SELECT", fn, "(", distinct and "DISTINCT" or "", field.name, ")", "AS", fn, "FROM", s_name}
  if query then
    sql_chunks[#sql_chunks+1] = "WHERE"
    if type(query) == "table" then
      sql_chunks[#sql_chunks+1] = db:AND(unpack(query))
    else
      sql_chunks[#sql_chunks+1] = query
    end
  end
  local sql = table.concat(sql_chunks, " ")
  db:echo_sql(sql)
  local cur = conn:execute(sql)
  -- BUGFIX: previously only `cur ~= 0` was checked, so a nil cursor (SQL
  -- error) crashed on cur:fetch. Now guarded the same way as db:fetch_sql.
  if cur and cur ~= 0 then
    local row = cur:fetch({}, "a")
    local count = row[fn]
    cur:close()
    -- give back the correct data type. see http://www.sqlite.org/lang_aggfunc.html
    if (fn:upper() ~= "MIN" and fn:upper() ~= "MAX") or field.type == "number" then
      return tonumber(count)
    end
    if field.type == "string" then
      return count
    end
    -- Only datetime left: `count` is a UTC timestamp string; parse it in the
    -- local context, then shift back to UTC before wrapping in a Timestamp.
    local localtime = datetime:parse(count, nil, true)
    count = db:Timestamp(localtime + datetime:calculate_UTCdiff(localtime))
    return count
  else
    return 0
  end
end
--- Deletes rows from the specified sheet. The argument for query tries to be intelligent: <br/>
--- * if it is a simple number, it deletes a specific row by _row_id <br/>
--- * if it is a table that contains a _row_id (e.g., a table returned by db:get) it deletes just that record. <br/>
--- * Otherwise, it deletes every record which matches the query pattern which is specified as with db:get. <br/>
--- * If the query is simply true, then it will truncate the entire contents of the sheet. <br/>
---
--- @usage When passed an actual result table that was obtained from db:fetch, it will delete the record for that table.
--- <pre>
--- enemies = db:fetch(mydb.enemies)
--- db:delete(mydb.enemies, enemies[1])
--- </pre>
--- @usage When passed a number, will delete the record for that _row_id. This example shows getting the row id from a table.
--- <pre>
--- enemies = db:fetch(mydb.enemies)
--- db:delete(mydb.enemies, enemies[1]._row_id)
--- </pre>
--- @usage As above, but this example just passes in the row id directly.
--- <pre>
--- db:delete(mydb.enemies, 5)
--- </pre>
--- @usage Here, we will delete anything which matches the same kind of query as db:fetch uses - namely,