Merge pull request #16 from sybila/regulations
Regulations
xtrojak authored Jun 24, 2021
2 parents 3267fbb + c68f159 commit d46b153
Showing 31 changed files with 799 additions and 212 deletions.
11 changes: 8 additions & 3 deletions Callables/GenerateTS.py
@@ -11,7 +11,7 @@
from Errors.RatesNotSpecifiedError import RatesNotSpecifiedError

"""
usage: GenerateTS.py [-h] --model MODEL --output OUTPUT
usage: GenerateTS.py [-h] --model MODEL --output OUTPUT --direct DIRECT
[--transition_file TRANSITION_FILE] [--max_time MAX_TIME]
[--max_size MAX_SIZE] [--bound BOUND]
@@ -20,6 +20,7 @@
required arguments:
--model MODEL
--output OUTPUT
--direct DIRECT
optional arguments:
--transition_file TRANSITION_FILE
@@ -36,6 +37,7 @@

required.add_argument('--model', type=str, required=True)
required.add_argument('--output', type=str, required=True)
required.add_argument('--direct', required=True)

optional.add_argument('--transition_file')
optional.add_argument('--max_time', type=float, default=np.inf)
@@ -57,8 +59,11 @@
if not model.data.all_rates:
raise RatesNotSpecifiedError

vm = model.data.to_vector_model(args.bound)
ts = vm.generate_transition_system(ts, args.max_time, args.max_size)
if eval(args.direct):
ts = model.data.generate_direct_transition_system(args.max_time, args.max_size, args.bound)
else:
vm = model.data.to_vector_model(args.bound)
ts = vm.generate_transition_system(ts, args.max_time, args.max_size)
ts.save_to_json(args.output)
else:
if "error" in model.data:
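The new required --direct switch chooses between the direct transition system generation and the original vector-model route; its value is passed through eval(), so a Python literal such as True or False is expected. A minimal invocation sketch, assuming hypothetical model and output file names:

import subprocess

# Hypothetical invocation of the updated script; the file names are illustrative only.
# "--direct True" selects generate_direct_transition_system, "--direct False" keeps
# the vector-model path (to_vector_model + generate_transition_system).
subprocess.run([
    "python", "Callables/GenerateTS.py",
    "--model", "model.bcsl",   # input model file (hypothetical name)
    "--output", "ts.json",     # output transition system as JSON (hypothetical name)
    "--direct", "True",        # evaluated with eval(), so a Python literal is expected
], check=True)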
73 changes: 51 additions & 22 deletions Core/Model.py
@@ -14,7 +14,7 @@
from Core.Complex import Complex
from Core.Side import Side
from TS.DirectTS import DirectTS
from TS.State import DirectState
from TS.State import FullMemoryState, OneStepMemoryState, MultisetState
from TS.TSworker import DirectTSworker
from TS.TransitionSystem import TransitionSystem
from TS.VectorModel import VectorModel, handle_number_of_threads
@@ -23,12 +23,13 @@


class Model:
def __init__(self, rules: set, init: collections.Counter, definitions: dict, params: set):
def __init__(self, rules: set, init: collections.Counter, definitions: dict, params: set, regulation=None):
self.rules = rules # set of Rules
self.init = init # Counter: Complex -> int
self.definitions = definitions # dict str -> float
self.params = params # set of str
self.all_rates = True # indicates whether model is quantitative
self.regulation = regulation # used to rules filtering, can be unspecified (None)

# autocomplete
self.atomic_signature, self.structure_signature = self.extract_signatures()
@@ -38,7 +39,7 @@ def __eq__(self, other: 'Model') -> bool:

def __str__(self):
return "Model:\n" + "\n".join(map(str, self.rules)) + "\n\n" + str(self.init) + "\n\n" + str(self.definitions) \
+ "\n\n" + str(self.atomic_signature) + "\n" + str(self.structure_signature)
+ "\n\n" + str(self.atomic_signature) + "\n" + str(self.structure_signature) + "\n" + str(self.regulation)

def __repr__(self):
return "#! rules\n" + "\n".join(map(str, self.rules)) + \
@@ -269,19 +270,18 @@ def create_AP_labels(self, APs: list, ts: TransitionSystem, bound: int):
return state_labels, AP_lables

def network_free_simulation(self, max_time: float):
# TODO include regulations
state = copy.deepcopy(self.init)
state = FullMemoryState(copy.deepcopy(self.init))
for rule in self.rules:
# precompute complexes for each rule
rule.lhs, _ = rule.create_complexes()
rule.rate_agents, _ = rule.rate.get_params_and_agents()

history = dict()
collected_agents = set(state)
collected_agents = set(state.multiset)
time = 0.0
history[time] = state
history[time] = state.multiset
used_rules = []
while time < max_time:
print('TIME', time)
candidate_rules = pd.DataFrame(data=[(rule,
rule.evaluate_rate(state, self.definitions),
rule.match(state)) for rule in self.rules],
@@ -290,6 +290,12 @@ def network_free_simulation(self, max_time: float):
# drop rules which cannot be actually used (do not pass stoichiometry check)
candidate_rules = candidate_rules.dropna()

if self.regulation:
rules = {item: None for item in candidate_rules['rule']}
state.used_rules = used_rules
applicable_rules = self.regulation.filter(state, rules)
candidate_rules = candidate_rules[candidate_rules['rule'].isin(applicable_rules)]

if not candidate_rules.empty:
rates_sum = candidate_rules['rate'].sum()
sorted_candidates = candidate_rules.sort_values(by=["rate"])
@@ -301,17 +307,21 @@

# apply chosen rule to matched agents
match = sorted_candidates.iloc[0]["match"]
produced_agents = sorted_candidates.iloc[0]["rule"].replace(match)
rule = sorted_candidates.iloc[0]["rule"]
produced_agents = rule.replace(match)

# update state based on match & replace operation
state = update_state(state, match, produced_agents)
match = rule.reconstruct_complexes_from_match(match)
state = FullMemoryState(update_state(state.multiset, match, produced_agents))
if self.regulation:
used_rules.append(rule.label)
else:
rates_sum = random.uniform(0.5, 0.9)

# update time
time += random.expovariate(rates_sum)
collected_agents = collected_agents.union(set(state))
history[time] = state
collected_agents = collected_agents.union(set(state.multiset))
history[time] = state.multiset

# create pandas DataFrame
ordered_agents = list(collected_agents)
@@ -325,13 +335,34 @@ def generate_direct_transition_system(self, ts=None, max_time: float = np.inf, m
df.reset_index(inplace=True)
return df

def generate_direct_transition_system(self, ts=None, max_time: float = np.inf, max_size: float = np.inf):
if not ts:
ts = DirectTS()
ts.unprocessed = {DirectState(self.init)}
def compute_bound(self):
bound = 0
for rule in self.rules:
bound = max(rule.lhs.most_frequent(), rule.rhs.most_frequent())
return max(bound, Side(self.init).most_frequent())

def generate_direct_transition_system(self, max_time: float = np.inf, max_size: float = np.inf, bound=None):
ts = DirectTS()
if self.regulation:
if self.regulation.memory == 0:
ts.init = MultisetState(self.init)
elif self.regulation.memory == 1:
ts.init = OneStepMemoryState(self.init)
else:
ts.init = FullMemoryState(self.init)
else:
pass
# TODO: if a TS is given, extract all the data
ts.init = MultisetState(self.init)
ts.unprocessed = {ts.init}
ts.unique_complexes.update(set(ts.init.multiset))

for rule in self.rules:
# precompute complexes for each rule
rule.lhs, rule.rhs = rule.create_complexes()
rule.rate_agents, _ = rule.rate.get_params_and_agents()

if not bound:
bound = self.compute_bound()
self.bound = bound

workers = [DirectTSworker(ts, self) for _ in range(multiprocessing.cpu_count())]
for worker in workers:
@@ -343,7 +374,7 @@ def generate_direct_transition_system(self, ts=None, max_time: float = np.inf, m
try:
while any([worker.work.is_set() for worker in workers]) \
and time.time() - start_time < max_time \
and len(ts.processed) + len(ts.states_encoding) < max_size:
and len(ts.processed) < max_size:
handle_number_of_threads(len(ts.unprocessed), workers)
time.sleep(1)
except (KeyboardInterrupt, EOFError) as e:
@@ -355,9 +386,7 @@ def generate_direct_transition_system(self, ts=None, max_time: float = np.inf, m
while any([worker.is_alive() for worker in workers]):
time.sleep(1)

# TODO: transform to classic TS (vectors)
normal_ts = ts.to_TS(self.init)
return normal_ts
return ts


def call_storm(command: str, files: list, storm_local: bool):
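The Model changes above only consume a regulation object: its memory attribute picks the state class (MultisetState, OneStepMemoryState, or FullMemoryState), and its filter(state, rules) method returns the subset of candidate rules allowed in the current state. The regulation classes themselves are introduced elsewhere in this pull request; the following is only a sketch of that interface, assuming a simple successor-based regulation and a state that exposes used_rules (as FullMemoryState does in network_free_simulation):

class SuccessorRegulationSketch:
    """Illustrative stand-in for a regulation: a rule may fire only if its label
    is an allowed successor of the label of the last applied rule."""

    def __init__(self, successors):
        self.successors = successors  # dict: rule label -> set of labels allowed next
        self.memory = 1               # 0 -> MultisetState, 1 -> OneStepMemoryState, else FullMemoryState

    def filter(self, state, rules):
        # rules is a dict keyed by Rule objects, as built in network_free_simulation
        if not state.used_rules:
            return set(rules)
        allowed = self.successors.get(state.used_rules[-1], set())
        return {rule for rule in rules if rule.label in allowed}

How the real regulations in this PR behave is not visible in this excerpt; the sketch only mirrors the calls made from Model.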
2 changes: 1 addition & 1 deletion Core/Rate.py
@@ -136,7 +136,7 @@ def agent(self, complex):
if complex.compatible(self.ordering[i]):
result[i] = 1

result = TS.State.State(result)
result = TS.State.MemorylessState(result)
self.visited.append(result)
return Tree("agent", [result])

25 changes: 20 additions & 5 deletions Core/Rule.py
@@ -13,7 +13,7 @@ def column(lst, index):


class Rule:
def __init__(self, agents: tuple, mid: int, compartments: list, complexes: list, pairs: list, rate: Rate):
def __init__(self, agents: tuple, mid: int, compartments: list, complexes: list, pairs: list, rate: Rate, label=None):
"""
Class to represent BCSL rule
@@ -30,6 +30,7 @@ def __init__(self, agents: tuple, mid: int, compartments: list, complexes: list,
self.complexes = complexes
self.pairs = pairs
self.rate = rate
self.label = label
self.comment = (False, [])

def __eq__(self, other: 'Rule'):
@@ -48,8 +49,10 @@ def __str__(self):
pre_comment = comment + "// " if self.comment[0] else ""
post_comment = " " + comment if not self.comment[0] else ""

return pre_comment + " + ".join(lhs.to_list_of_strings()) + " => " + " + ".join(rhs.to_list_of_strings()) \
+ rate + post_comment
label = str(self.label) + " ~ " if self.label else ""

return label + pre_comment + " + ".join(lhs.to_list_of_strings()) + \
" => " + " + ".join(rhs.to_list_of_strings()) + rate + post_comment

def __lt__(self, other):
return str(self) < str(other)
@@ -179,7 +182,7 @@ def evaluate_rate(self, state, params):
@return: a real number of the rate
"""
values = dict()
for (state_complex, count) in state.items():
for (state_complex, count) in state.multiset.items():
for agent in self.rate_agents:
if agent.compatible(state_complex):
values[agent] = values.get(agent, 0) + count
@@ -193,7 +196,7 @@ def match(self, state, all=False):
@param all: bool to indicate if choose one matching randomly or return all of them
@return: random match/all matches
"""
state = deepcopy(state)
state = deepcopy(state.multiset)
matches = find_all_matches(self.lhs.agents, state)
matches = [sum(match, []) for match in matches]

@@ -225,6 +228,18 @@ def replace(self, aligned_match):

return output_complexes

def reconstruct_complexes_from_match(self, match):
"""
Create complexes from agents matched to the LHS
@param match: sequence of
@return:
"""
output_complexes = []
for (f, t) in list(filter(lambda item: item[1] < self.mid, self.complexes)):
output_complexes.append(Complex(match[f:t + 1], self.compartments[f]))
return output_complexes


def find_all_matches(lhs_agents, state):
"""
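The new reconstruct_complexes_from_match helper groups the flat sequence of matched agents back into LHS complexes, using the rule's (from, to) index pairs and their compartments. A self-contained toy illustration of that index bookkeeping, with plain strings standing in for agents and the concrete values made up:

def reconstruct_sketch(match, complexes, compartments, mid):
    # Keep only LHS complexes (those ending before the mid index), then slice the
    # matched agents back into per-complex groups tagged with their compartment.
    output = []
    for (f, t) in [pair for pair in complexes if pair[1] < mid]:
        output.append((match[f:t + 1], compartments[f]))
    return output

# Two LHS complexes (agents 0-1 and agent 2) and one RHS complex; mid = 3 marks the LHS/RHS split.
print(reconstruct_sketch(['A', 'B', 'C'],
                         complexes=[(0, 1), (2, 2), (3, 3)],
                         compartments=['cyt', 'cyt', 'nuc', 'nuc'],
                         mid=3))
# -> [(['A', 'B'], 'cyt'), (['C'], 'nuc')]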
15 changes: 10 additions & 5 deletions Core/Side.py
@@ -3,7 +3,7 @@
from sortedcontainers import SortedList

from Core.Complex import Complex
from TS.State import State
from TS.State import MemorylessState


class Side:
@@ -39,18 +39,23 @@ def to_list_of_strings(self):
def to_counter(self):
return collections.Counter(self.agents)

def to_vector(self, ordering: SortedList) -> State:
def most_frequent(self):
if self.agents:
return self.to_counter().most_common(1)[0][1]
return 0

def to_vector(self, ordering: SortedList) -> MemorylessState:
"""
Convert the Side to a State accoring to given ordering.
Convert the Side to a MemorylessState accoring to given ordering.
:param ordering: sequence of complex agents
:return: State representing vector
:return: MemorylessState representing vector
"""
vector = np.zeros(len(ordering), dtype=int)
multiset = self.to_counter()
for agent in list(multiset):
vector[ordering.index(agent)] = multiset[agent]
return State(vector)
return MemorylessState(vector)

def compatible(self, other: 'Side') -> bool:
"""
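Side.most_frequent returns the multiplicity of the most common complex on a side; Model.compute_bound (in the Core/Model.py diff above) uses it to derive a default bound from the rule sides and the initial state. A tiny standalone illustration of the underlying Counter logic, with placeholder complex names:

import collections

def most_frequent_sketch(agents):
    # Mirrors Side.most_frequent: highest multiplicity of a single complex, or 0 for an empty side.
    return collections.Counter(agents).most_common(1)[0][1] if agents else 0

lhs = ['A', 'A', 'B']          # a rule side with 2 x A + 1 x B
init = ['A', 'B', 'B', 'B']    # an initial state with 1 x A + 3 x B
print(max(most_frequent_sketch(lhs), most_frequent_sketch(init)))  # -> 3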
