refactor filtering
KulaginVladimir committed Oct 6, 2024
1 parent 35dc3a7 commit 72648b2
Showing 4 changed files with 125 additions and 104 deletions.
festim/exports/exports.py (2 additions & 2 deletions)
@@ -71,7 +71,7 @@ def _validate_export(self, value):
return value
raise TypeError("festim.Exports must be a list of festim.Export")

def write(self, label_to_function, dx, materials, chemical_pot):
def write(self, label_to_function, dx):
"""writes to file
Args:
@@ -129,7 +129,7 @@ def write(self, label_to_function, dx, materials, chemical_pot):
label_to_function[export.field], self.V_DG1
)
export.function = label_to_function[export.field]
export.write(self.t, self.final_time, materials, chemical_pot)
export.write(self.t, self.final_time)
self.nb_iterations += 1

def initialise_derived_quantities(self, dx, ds, materials):
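After this change, the caller no longer threads materials or chemical_pot through Exports.write. A hedged sketch of the trimmed interface follows (the container name is illustrative; in FESTIM this call is made by the Simulation during post-processing, and label_to_function and the dx measure come from a running simulation, not from user code):

import festim

# An Exports container is still built from a list of festim.Export objects;
# after this commit its write() only needs the label-to-function map and dx.
my_exports = festim.Exports([])

# new call pattern, shown as a comment since label_to_function and mesh.dx
# only exist inside a running Simulation:
# my_exports.write(label_to_function, mesh.dx)
# which forwards to each export as export.write(self.t, self.final_time)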
festim/exports/txt_export.py (68 additions & 57 deletions)
@@ -13,11 +13,20 @@ class TXTExport(festim.Export):
field (str): the exported field ("solute", "1", "retention",
"T"...)
filename (str): the filename (must end with .txt).
write_at_last (bool): if True, the data will be exported at
the last export time. Otherwise, the data will be exported
at each export time. Defaults to False.
times (list, optional): if provided, the field will be
exported at these timesteps. Otherwise exports at all
timesteps. Defaults to None.
header_format (str, optional): the format of column headers.
Defaults to ".2e".
Attributes:
data (np.array): the data array of the exported field. The first column
contains the mesh vertices. Each subsequent column is the field profile at a
specific export time.
header (str): the header of the exported file.
"""

def __init__(
@@ -31,10 +40,11 @@ def __init__(
self.filename = filename
self.write_at_last = write_at_last
self.header_format = header_format
self._first_time = True

self.data = None
self.header = None
self._unique_indices = None
self._V = None

@property
def filename(self):
@@ -71,80 +81,81 @@ def is_last(self, current_time, final_time):
return True
return False

def filter_duplicates(self, data, materials):
x = data[:, 0]

# Collect all borders
borders = []
for material in materials:
for border in material.borders:
borders.append(border)
borders = np.unique(borders)

# Find indices of the closest duplicates to interfaces
border_indx = []
for border in borders:
closest_indx = np.abs(x - border).argmin()
closest_x = x[closest_indx]
for ind in np.where(x == closest_x)[0]:
border_indx.append(ind)
def initialise_TXTExport(self, mesh, project_to_DG=False, materials=None):

# Find indices of first elements in duplicated pairs and mesh borders
_, unique_indx = np.unique(x, return_index=True)

# Combine both arrays of indices
combined_indx = np.concatenate([border_indx, unique_indx])
if project_to_DG:
self._V = f.FunctionSpace(mesh, "DG", 1)
else:
self._V = f.FunctionSpace(mesh, "CG", 1)

x = f.interpolate(f.Expression("x[0]", degree=1), self._V)
x_column = np.transpose([x.vector()[:]])

# if chemical_pot is True or traps_element_type is DG (project_to_DG is True), keep the indices of
# duplicated dofs at material interfaces plus the first dof of every other duplicated pair;
# otherwise, keep the indices of all dofs sorted by x
if project_to_DG:
# Collect all borders
borders = []
for material in materials:
if material.borders:
for border in material.borders:
borders.append(border)
borders = np.unique(borders)

# Find indices of the closest duplicates to interfaces
border_indices = []
for border in borders:
closest_indx = np.abs(x_column - border).argmin()
closest_x = x_column[closest_indx]
for ind in np.where(x_column == closest_x)[0]:
border_indices.append(ind)

# Find indices of first elements in duplicated pairs and mesh borders
_, mesh_indices = np.unique(x_column, return_index=True)

# Get unique indices from both arrays preserving the order in unsorted x-array
unique_indices = []
for indx in np.argsort(x_column, axis=0)[:, 0]:
if (indx in mesh_indices) or (indx in border_indices):
unique_indices.append(indx)

self._unique_indices = np.array(unique_indices)

# Sort unique indices to return a slice
combined_indx = sorted(np.unique(combined_indx))
else:
# Get list of unique indices as integers
self._unique_indices = np.argsort(x_column, axis=0)[:, 0]

return data[combined_indx, :]
self.data = x_column[self._unique_indices]
self.header = "x"

def write(self, current_time, final_time, materials, chemical_pot):
# create a DG1 functionspace if chemical_pot is True
# else create a CG1 functionspace
if chemical_pot:
V = f.FunctionSpace(self.function.function_space().mesh(), "DG", 1)
else:
V = f.FunctionSpace(self.function.function_space().mesh(), "CG", 1)
def write(self, current_time, final_time):

solution = f.project(self.function, V)
solution_column = np.transpose(solution.vector()[:])
if self.is_it_time_to_export(current_time):
solution = f.project(self.function, self._V)
solution_column = np.transpose(solution.vector()[:])

# if the directory doesn't exist
# create it
dirname = os.path.dirname(self.filename)
if not os.path.exists(dirname):
os.makedirs(dirname, exist_ok=True)

# create header if steady or it is the first time to export
# else append new column to the existing file
if final_time is None or self._first_time:
if final_time is None:
self.header = "x,t=steady"
else:
self.header = f"x,t={format(current_time, self.header_format)}s"

x = f.interpolate(f.Expression("x[0]", degree=1), V)
x_column = np.transpose([x.vector()[:]])
self.data = np.column_stack([x_column, solution_column])
self._first_time = False
# if steady, add the corresponding label
# else append new export time to the header
steady = final_time is None
if steady:
self.header += ",t=steady"
else:
# Update the header
self.header += f",t={format(current_time, self.header_format)}s"
# Add new column
self.data = np.column_stack([self.data, solution_column])

# Add new column of filtered and sorted data
self.data = np.column_stack(
[self.data, solution_column[self._unique_indices]]
)

if (
self.write_at_last and self.is_last(current_time, final_time)
) or not self.write_at_last:
if self.is_last(current_time, final_time):
# Sort data by the x-column before the last export time
self.data = self.data[self.data[:, 0].argsort()]

# Filter duplicates if chemical_pot is True
if chemical_pot:
self.data = self.filter_duplicates(self.data, materials)

# Write data
np.savetxt(
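The write() logic above grows the export table by one column per export time. A rough standalone sketch of that bookkeeping with plain numpy and made-up values (the real method first projects the FEniCS function onto the cached space and applies _unique_indices; the delimiter and file name here are assumptions):

import numpy as np

# data starts as the filtered x-column built in initialise_TXTExport
data = np.array([[0.0], [0.5], [1.0]])
header = "x"
header_format = ".2e"

# each export time appends one header entry and one solution column
for t, solution in [(1.0, np.array([0.1, 0.2, 0.3])),
                    (2.0, np.array([0.2, 0.4, 0.6]))]:
    header += f",t={format(t, header_format)}s"
    data = np.column_stack([data, solution])

# with write_at_last=True this write happens only once, at the last export time
np.savetxt("mobile.txt", data, header=header, delimiter=",", comments="")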
festim/generic_simulation.py (33 additions & 25 deletions)
@@ -357,11 +357,11 @@ def initialise(self):

self.h_transport_problem.initialise(self.mesh, self.materials, self.dt)

# raise warning if the derived quantities don't match the type of mesh
# eg. SurfaceFlux is used with cylindrical mesh
for export in self.exports:
if isinstance(export, festim.DerivedQuantities):
for q in export:
# raise warning if the derived quantities don't match the type of mesh
# eg. SurfaceFlux is used with cylindrical mesh
if self.mesh.type not in q.allowed_meshes:
warnings.warn(
f"{type(q)} may not work as intended for {self.mesh.type} meshes"
@@ -381,31 +381,41 @@ def initialise(self):
f"SurfaceKinetics boundary condition must be defined on surface {q.surface} to export data with festim.AdsorbedHydrogen"
)

self.exports.initialise_derived_quantities(
self.mesh.dx, self.mesh.ds, self.materials
)

# needed to ensure that data is actually exported at TXTExport.times
# see issue 675
for export in self.exports:
if isinstance(export, festim.TXTExport) and export.times:
if not self.dt.milestones:
self.dt.milestones = []
for time in export.times:
if time not in self.dt.milestones:
msg = "To ensure that TXTExport exports data at the desired times "
msg += "TXTExport.times are added to milestones"
warnings.warn(msg)
self.dt.milestones.append(time)
self.dt.milestones.sort()

# set Soret to True for SurfaceFlux quantities
if isinstance(export, festim.DerivedQuantities):
for q in export:
# set Soret to True for SurfaceFlux quantities
if isinstance(q, festim.SurfaceFlux):
q.soret = self.settings.soret
q.T = self.T.T

if isinstance(export, festim.TXTExport):
# pre-process data depending on the chemical potential flag, trap element type,
# and material borders
project_to_DG = (
self.settings.chemical_pot
or self.settings.traps_element_type == "DG"
)
export.initialise_TXTExport(
self.mesh.mesh,
project_to_DG,
self.materials,
)

# needed to ensure that data is actually exported at TXTExport.times
# see issue 675
if export.times:
if not self.dt.milestones:
self.dt.milestones = []
for time in export.times:
if time not in self.dt.milestones:
msg = "To ensure that TXTExport exports data at the desired times "
msg += "TXTExport.times are added to milestones"
warnings.warn(msg)
self.dt.milestones.append(time)
self.dt.milestones.sort()

self.exports.initialise_derived_quantities(
self.mesh.dx, self.mesh.ds, self.materials
)
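For clarity, a condensed, self-contained sketch of the two decisions made per TXTExport in the block above (plain Python with illustrative settings values, mirroring but not reusing the Simulation attributes): whether to project onto a DG1 space, and which export times must be appended to the stepsize milestones (see issue 675).

# illustrative values standing in for Simulation.settings and dt.milestones
chemical_pot = True
traps_element_type = "CG"
export_times = [10.0, 50.0, 100.0]
milestones = [50.0]

# project to DG1 if chemical potential conservation is on or traps use DG elements
project_to_DG = chemical_pot or traps_element_type == "DG"

# make sure every export time is also a stepsize milestone
for time in export_times:
    if time not in milestones:
        milestones.append(time)  # the real code also warns when doing this
milestones.sort()

print(project_to_DG)  # True
print(milestones)     # [10.0, 50.0, 100.0]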

def run(self, completion_tone=False):
"""Runs the model.
@@ -506,8 +516,6 @@ def run_post_processing(self):
self.exports.write(
self.label_to_function,
self.mesh.dx,
self.materials,
self.settings.chemical_pot,
)

def update_post_processing_solutions(self):