Commit

Add some untracked misc files
nick8325 committed Jul 2, 2021
1 parent 048b74b commit 881dc4e
Showing 5 changed files with 177 additions and 0 deletions.
111 changes: 111 additions & 0 deletions misc/magic.py
@@ -0,0 +1,111 @@
import re
import tensorflow as tf
from tensorflow.keras import *
from tensorflow.keras.layers import *
from tensorflow.keras.metrics import *
from numpy.random import shuffle
from pprint import pprint
from collections import namedtuple
from glob import glob

dim = 30

model = Sequential()
model.add(Bidirectional(LSTM(dim, dropout = 0.1, input_shape=(1, None), return_sequences = True)))
model.add(Bidirectional(LSTM(dim, dropout = 0.1, return_sequences = True)))
model.add(Bidirectional(LSTM(dim, dropout = 0.1)))
model.add(Dense(1))
model.add(Activation('sigmoid'))
model.compile(loss='binary_crossentropy', optimizer='adam',
              metrics=[BinaryAccuracy(), Precision(), Recall()])

problem = namedtuple('problem', ['vars', 'funcs', 'goal', 'axioms', 'rules'])

def read_problem(file):
    # Read one trace in the format produced by print_trace.pl.
    vars = None
    funcs = None
    goal = None
    axioms = []
    rules = []

    for line in open(file):
        line = line[:-1]
        prefix, rest = line.split(",", 1)
        if prefix == "vars":
            vars = rest.replace("[","").replace("]","").split(",")
        elif prefix == "funcs":
            # Each entry is a function name followed by "/"-separated numeric info.
            funcs_list = rest.replace(" ", "").replace("[","").replace("]","").split(",")
            funcs = {func: list(map(int, info)) for func_arity in funcs_list for func, *info in [func_arity.split("/")]}
        elif prefix == "goal":
            goal = rest
        elif prefix == "axiom":
            axioms.append(rest)
        elif prefix == "false":
            rules.append((rest, False))
        elif prefix == "true":
            rules.append((rest, True))
        else:
            raise ValueError(prefix)

    return problem(vars, funcs, goal, axioms, rules)

problems = [read_problem(file) for file in glob("traces-out/modRNG*")]
rules = [(rule, used, problem) for problem in problems if problem.goal is not None for rule, used in problem.rules]
used = [(rule, problem) for rule, used, problem in rules if used]
unused = [(rule, problem) for rule, used, problem in rules if not used]
print(len(used), "used,", len(unused), "unused")

shuffle(unused)

def mask(info = [-1, -1, -1]):
    return tf.concat([tf.constant(list(map(float, info))), tf.random.uniform([dim])], 0)

static = [x for x in "!;(,)=X"]
static_emb = {x: mask() for x in static}

def tokenise(s):
    return [x for x in re.split("([ !;(),=])", s) if x.strip()]

def embed(emb, s):
    return tf.stack([lookup(x, emb) for x in tokenise(s)])

def lookup(x, emb):
    result = static_emb.get(x, emb.get(x))
    if result is None:
        raise ValueError(x)
    else:
        return result

def train_on(rule, problem, used):
    print(used, rule)
    if used:
        weight = 1 #2
    else:
        weight = 1
    emb = {x: mask(info) for x, info in problem.funcs.items()}
    for x in problem.vars:
        emb[x] = static_emb['X'] + mask()
    # One training example: goal, axioms and the candidate rule joined into a single token sequence.
    batch = tf.expand_dims(embed(emb, ";".join([problem.goal, *problem.axioms, "!" + rule])), 0)
    info = model.train_on_batch(batch,
                                tf.constant([[float(used)]]), return_dict=True,
                                reset_metrics=reset_metrics, sample_weight=[float(weight)])
    #info['prediction'] = model(batch)[(0,0)].numpy()
    print(info)
    print()

used_reservoir = []
for epoch in range(10):
    print("Epoch " + str(epoch) + "!!!")
    reset_metrics = True
    for rule, problem in unused:
        train_on(rule, problem, False)

        # After each unused (negative) example, also train on one used (positive)
        # example drawn from a shuffled reservoir.
        for _ in range(1):
            if not used_reservoir:
                used_reservoir = used
                shuffle(used_reservoir)
            rule, problem = used_reservoir[0]
            train_on(rule, problem, True)
            used_reservoir = used_reservoir[1:]

        reset_metrics = False
50 changes: 50 additions & 0 deletions misc/print_trace.pl
@@ -0,0 +1,50 @@
% Enumerate the rules added during a run; Used is true iff a variant of
% the rule also appears as a lemma.
rules(Module, Rule, Used) :-
    %(Module:step(add(rule(_, Rule))); Module:step(add(rule(_, Rule, _, _)))),
    Module:step(add(rule(_, Rule, _, _))),
    (find_lemma(Module, Rule) -> Used=true; Used=false).

find_lemma(Module, Rule) :-
    copy_term(Rule, GroundRule), numbervars(GroundRule),
    clause(Module:lemma(GroundRule), true, Ref),
    clause(Module:lemma(Lemma), true, Ref),
    Rule =@= Lemma.

anywhere(Module, T) :-
    Module:goal(T); Module:axiom(T); rules(Module, T, _).

module_var(Module, X) :-
    anywhere(Module, T),
    term_variables(T, Xs),
    numbervars(Xs),
    member(X, Xs).

all_vars(Module, S) :-
    setof(X, module_var(Module, X), S).

module_func(Where, X/N) :-
    call(Where, T),
    sub_term(U, T),
    nonvar(U),
    functor(U, X, N),
    X \= '='.

module_func_with_count(Module, F, AxiomCount, GoalCount) :-
    module_func(anywhere(Module), F),
    once(setof(_, module_func(Module:axiom, F), S1); S1=[]),
    once(setof(_, module_func(Module:goal, F), S2); S2=[]),
    length(S1, AxiomCount),
    length(S2, GoalCount).

all_funcs(Module, S) :-
    setof(X/AxiomCount/GoalCount, module_func_with_count(Module, X, AxiomCount, GoalCount), S).

% Dump the trace in the comma-prefixed format read by misc/magic.py.
print_trace(Module) :-
    all_vars(Module, S),
    all_funcs(Module, T),
    writeln((vars, S)),
    writeln((funcs, T)),
    forall(Module:goal(Goal), (numbervars(Goal), writeln((goal, Goal)))),
    forall(Module:axiom(Axiom), (numbervars(Axiom), writeln((axiom, Axiom)))),
    forall(rules(Module, Rule, Used), (numbervars(Rule), writeln((Used, Rule)))).

init :- writeln(hello).
8 changes: 8 additions & 0 deletions misc/starexec/bin/parallel-twee
@@ -0,0 +1,8 @@
#!/bin/bash
# Number of physical cores: distinct (socket, core) pairs reported by lscpu.
cpus=$(lscpu -p=SOCKET,CORE | grep -v '^#' | sort | uniq | wc -l)
# ulimit -v is in kilobytes; assume roughly 100 GB if no limit is set.
vmem=$(ulimit -v)
[[ $vmem = unlimited ]] && vmem=100000000
# cpus+2: one for Twee master process, and one for luck
vmem=$((vmem/(cpus+2)))
ulimit -v $vmem
exec ./twee $cpus --quiet "$@"
2 changes: 2 additions & 0 deletions misc/starexec/bin/starexec_run_twee
@@ -0,0 +1,2 @@
#!/bin/bash
exec ./parallel-twee "$1" --tstp --conditional-encoding if --smaller --drop-non-horn --give-up-on-saturation --explain-encoding
6 changes: 6 additions & 0 deletions notes/ac plus
@@ -0,0 +1,6 @@
- order axioms by complexity (in frontend?)
- label each rule with which axioms it comes from
- can also compute for each critical pair
- now find "max axiom" for each queued cp and compute min
- no queued cp has max axiom less than this => axioms [1..min-1] are complete
- take rules having max axiom <= min-1 (see the sketch below)
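
A minimal Python sketch of this check, not from the commit: complete_prefix and
completed_rules are hypothetical names, and the "axioms" field is assumed to hold
the indices (in complexity order) of the axioms a rule or queued critical pair was
derived from.

def complete_prefix(queued_cps):
    # Largest k such that axioms 1..k are complete: every queued
    # critical pair involves some axiom with index > k.
    if not queued_cps:
        return None  # nothing queued at all
    return min(max(cp.axioms) for cp in queued_cps) - 1

def completed_rules(rules, queued_cps):
    # Rules derived only from the completed prefix of axioms.
    k = complete_prefix(queued_cps)
    if k is None:
        return list(rules)
    return [rule for rule in rules if max(rule.axioms) <= k]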
