def double_from_str(svalue: str) -> float:
    """
    Casts a string to a double, supporting Nastran-style real formats.

    Parameters
    ----------
    svalue : str
        the string to cast; supports standard Python floats
        ('1.0', '1.0E+3'), Fortran-style doubles ('1.0D+3', '1D-3'),
        and Nastran shorthand exponents ('1+3' -> 1000.0, '1-3' -> 0.001)

    Returns
    -------
    value : float
        the parsed value

    Raises
    ------
    SyntaxError
        if the string cannot be interpreted as a float

    """
    try:
        # 1.0, 1.0E+3, 1.0E-3
        return float(svalue)
    except TypeError:
        # bug fix: this branch previously referenced undefined names
        # (fieldname/ifield/card/end), which raised a NameError instead
        # of the intended SyntaxError
        dtype = _get_dtype(svalue)
        raise SyntaxError(f'svalue={svalue!r} must be a float (not {dtype}).')
    except ValueError:
        pass

    # 1D+3, 1D-3, 1-3
    try:
        svalue = svalue.upper()
        if 'D' in svalue:
            # 1.0D+3, 1.0D-3 -> Fortran 'D' exponent is spelled 'E' in Python
            return float(svalue.replace('D', 'E'))

        # 1.0+3, 1.0-3: an embedded sign is an implied exponent;
        # peel off a leading sign first so it is not mistaken for one
        sign = ''
        if svalue[0] in ('+', '-'):
            sign = svalue[0]
            svalue = svalue[1:]
        if '+' in svalue:
            svalue = sign + svalue.replace('+', 'E+')
        elif '-' in svalue:
            svalue = sign + svalue.replace('-', 'E-')
        value = float(svalue)
    except ValueError:
        dtype = _get_dtype(svalue)
        raise SyntaxError(f'field = {svalue} on card must be a float (not {dtype}).')
    return value
from pyNastran.dev.bdf_vectorized3.types import TextIOLike from pyNastran.dev.bdf_vectorized3.bdf import BDF + from ..materials import MAT1 class BEAMOR(BaseCard): @@ -104,15 +106,16 @@ def add(self, eid: int, pid: int, nids: list[int], offt: str='GGG', bit=None, pa: int=0, pb: int=0, wa=None, wb=None, - sa: int=0, sb: int=0, comment: str='') -> None: + sa: int=0, sb: int=0, comment: str='') -> int: if wa is None: wa = [0., 0., 0.] if wb is None: wb = [0., 0., 0.] self.cards.append((eid, pid, nids, g0, x, offt, [pa, pb], wa, wb, sa, sb, comment)) self.n += 1 + return self.n - def add_card(self, card: BDFCard, comment: str='') -> None: + def add_card(self, card: BDFCard, comment: str='') -> int: PROPERTY_ID_DEFAULT = 0 OFFT_DEFAULT = '' eid = integer(card, 1, 'eid') @@ -141,6 +144,7 @@ def add_card(self, card: BDFCard, comment: str='') -> None: assert len(card) <= 19, f'len(CBEAM card) = {len(card):d}\ncard={card}' self.cards.append((eid, pid, [ga, gb], g0, x, offt, [pa, pb], wa, wb, sa, sb, comment)) self.n += 1 + return self.n def parse_cards(self): if self.n == 0: @@ -226,12 +230,12 @@ def geom_check(self, missing: dict[str, np.ndarray]): node=(nid, self.nodes), property_id=(pids, self.property_id)) - def write(self, size: int=8, is_double: bool=False, - write_card_header: bool=False) -> str: + def write_file(self, bdf_file: TextIOLike, + size: int=8, is_double: bool=False, + write_card_header: bool=False) -> None: if len(self.element_id) == 0: - return '' + return - lines = [] element_ids = array_str(self.element_id, size=size) property_ids = array_str(self.property_id, size=size) nodes_ = array_str(self.nodes, size=size) @@ -260,8 +264,8 @@ def write(self, size: int=8, is_double: bool=False, list_fields = ['CBEAM', eid, pid, n1, n2, x1, x2, x3, offt, pa, pb, w1a, w2a, w3a, w1b, w2b, w3b] - lines.append(print_card_8(list_fields)) - return ''.join(lines) + bdf_file.write(print_card_8(list_fields)) + return @property def all_properties(self): @@ -950,11 +954,11 @@ 
def geom_check(self, missing: dict[str, np.ndarray]): material_id=(mids, self.material_id)) @property - def all_materials(self) -> list[Any]: + def all_materials(self) -> list[MAT1]: return [self.model.mat1] @property - def allowed_materials(self) -> list[Any]: + def allowed_materials(self) -> list[MAT1]: all_materials = self.all_materials materials = [mat for mat in all_materials if mat.n > 0] assert len(materials) > 0, f'{self.type}: all_allowed_materials={all_materials}\nall_materials={self.model.materials}' @@ -981,16 +985,17 @@ def mass_per_length(self) -> np.ndarray: return mass_per_length @property - def is_small_field(self): + def is_small_field(self) -> bool: return max(self.property_id.max(), self.material_id.max(), self.s1.max(), self.s2.max()) < 99_999_999 - def write(self, size: int=8, is_double: bool=False, - write_card_header: bool=False) -> str: + def write_file(self, bdf_file: TextIOLike, + size: int=8, is_double: bool=False, + write_card_header: bool=False) -> None: if len(self.property_id) == 0: - return '' - lines = [] + return + if size == 8 and self.is_small_field: print_card = print_card_8 else: @@ -1118,8 +1123,8 @@ def write(self, size: int=8, is_double: bool=False, m1a, m2a, m1b, m2b, n1a, n2a, n1b, n2b] if footer != [None] * len(footer): list_fields += footer - lines.append(print_card(list_fields)) - return ''.join(lines) + bdf_file.write(print_card(list_fields)) + return @property def istation(self) -> np.ndarray: @@ -1147,8 +1152,9 @@ def to_old_card(self) -> list[Any]: for icard in range(self.n): card_obj = self.slice_card_by_index(icard) card_lines = card_obj.write(size=16).split('\n') - bdf_card = model.add_card(card_lines, card_name, comment='', - ifile=None, is_list=False, has_none=False) + unused_bdf_card = model.add_card( + card_lines, card_name, comment='', + ifile=None, is_list=False, has_none=False) eid = self.property_id[icard] card = dicti[eid] cards.append(card) @@ -1211,7 +1217,7 @@ def slice_card_by_property_id(self, 
property_id: np.ndarray) -> PBEAML: def add(self, pid: int, mid: int, beam_type: str, xxb, dims, so=None, nsm=None, - group: str='MSCBML0', comment: str='') -> None: + group: str='MSCBML0', comment: str='') -> int: nxxb = len(xxb) if so is None: so = ['YES'] * nxxb @@ -1225,8 +1231,9 @@ def add(self, pid: int, mid: int, beam_type: str, ndim = self.valid_types[beam_type] self.cards.append((pid, mid, beam_type, group, xxb, so, nsm, ndim, dims, comment)) self.n += 1 + return self.n - def add_card(self, card: BDFCard, comment: str='') -> None: + def add_card(self, card: BDFCard, comment: str='') -> int: pid = integer(card, 1, 'pid') mid = integer(card, 2, 'mid') group = string_or_blank(card, 3, 'group', default='MSCBML0') @@ -1236,17 +1243,17 @@ def add_card(self, card: BDFCard, comment: str='') -> None: ndim = self.valid_types[beam_type] #: dimension list - dims = [] - dim = [] + dims: list[list[float]] = [] + dim: list[float] = [] #: Section position - xxb = [0.] + xxb: list[float] = [0.] #: Output flag - so = ['YES'] # station 0 + so: list[str] = ['YES'] # station 0 #: non-structural mass :math:`nsm` - nsm = [] + nsm: list[float] = [] ioffset = 9 n = 0 @@ -1314,8 +1321,9 @@ def add_card(self, card: BDFCard, comment: str='') -> None: assert len(card) > 5, card self.cards.append((pid, mid, beam_type, group, xxb, so, nsm, ndim, dims, comment)) self.n += 1 + return self.n - def parse_cards(self): + def parse_cards(self) -> None: if self.n == 0: return ncards = len(self.cards) @@ -1361,6 +1369,8 @@ def parse_cards(self): group[icard] = groupi Type[icard] = beam_type ndim[icard] = ndimi + #assert nstationi >= 2, f'pid={pid} mid={mid} beam_type={beam_type!r} xxb={xxbi} dims={dims}' + print(f'pid={pid} mid={mid} beam_type={beam_type!r} xxb={xxbi} dims={dims}') idim[icard, :] = [idim0, idim1] istation[icard, :] = [istation0, istation1] @@ -1374,8 +1384,12 @@ def parse_cards(self): so = np.array(all_so, dtype='|U4') nsm = np.array(all_nsm, dtype='float64') #ndim_total = 
self.ndim.sum() - self._save(property_id, material_id, idim, ndim, istation, nstation, Type, group, - xxb, dims, so, nsm) + self._save( + property_id, material_id, + idim, ndim, + istation, nstation, + Type, group, + xxb, dims, so, nsm) nstation_total = self.nstation.sum() self.sort() @@ -1385,8 +1399,13 @@ def parse_cards(self): assert len(self.nsm) == nstation_total self.cards = [] - def _save(self, property_id, material_id, idim, ndim, istation, nstation, Type, group, - xxb, dims, so, nsm): + def _save(self, property_id: np.ndarray, material_id: np.ndarray, + idim: np.ndarray, ndim: np.ndarray, + istation: np.ndarray, nstation: np.ndarray, + Type: np.ndarray, + group: np.ndarray, + xxb: np.ndarray, dims: np.ndarray, + so: np.ndarray, nsm: np.ndarray): self.property_id = property_id self.material_id = material_id @@ -1398,7 +1417,8 @@ def _save(self, property_id, material_id, idim, ndim, istation, nstation, Type, assert istation.ndim == 2, istation.shape assert istation.min() == 0, istation - assert nstation.min() >= 2, nstation + #assert nstation.min() >= 2, nstation + assert nstation.min() >= 1, nstation self.istation = istation self.nstation = nstation @@ -1449,11 +1469,11 @@ def geom_check(self, missing: dict[str, np.ndarray]): missing, material_id=(mids, self.material_id)) - def write(self, size: int=8, is_double: bool=False, - write_card_header: bool=False) -> str: + def write_file(self, bdf_file: TextIOLike, + size: int=8, is_double: bool=False, + write_card_header: bool=False) -> None: if len(self.property_id) == 0: - return '' - lines = [] + return print_card = get_print_card_8_16(size) assert len(self.property_id) == len(self.material_id) @@ -1499,9 +1519,8 @@ def write(self, size: int=8, is_double: bool=False, list_fields += dimi.tolist() + [nsmi] else: list_fields += [soi, xxbi] + dimi.tolist() + [nsmi] - lines.append(print_card(list_fields)) - assert len(lines) > 0, lines - return ''.join(lines) + bdf_file.write(print_card(list_fields)) + return def 
area(self) -> np.ndarray: nproperties = len(self.property_id) @@ -1520,22 +1539,53 @@ def area(self) -> np.ndarray: #A, I1, I2, I12 = A_I1_I2_I12(prop, beam_type, dim) areasi = [] for dim in dims: - areai = _bar_areaL('PBEAML', beam_type, dim, self) - areasi.append(areai) + area_i1_i2_i12 = _bar_areaL('PBEAML', beam_type, dim, self) + areasi.append(area_i1_i2_i12[0]) + if len(xxb) == 1: + areaii = areasi[i] + xxb = [0., 1.] + areasi = [areaii, areaii] A = integrate_positive_unit_line(xxb, areasi) area[i] = A return area + def nsm_func(self) -> np.ndarray: + nproperties = len(self.property_id) + nsm = np.zeros(nproperties, dtype='float64') + for i, beam_type, ndim, idim, nstation, istation in zip(count(), self.Type, + self.ndim, self.idim, + self.nstation, self.istation,): + #idim0, idim1 = idim + istation0, istation1 = istation + xxb = self.xxb[istation0:istation1] + nsms = self.nsm[istation0:istation1] + #so = self.so[istation0:istation1] + #dims = self.dims[idim0 : idim1].reshape(nstation, ndim) + + #prop = pbarl(self.property_id[i], self.material_id[i], beam_type, dim) + #A, I1, I2, I12 = A_I1_I2_I12(prop, beam_type, dim) + #areasi = [] + #for dim in dims: + #area_i1_i2_i12 = _bar_areaL('PBEAML', beam_type, dim, self) + #areasi.append(area_i1_i2_i12[0]) + if len(xxb) == 1: + nsmi = nsms[i] + xxb = [0., 1.] 
+ nsms = [nsmi, nsmi] + nsmi = integrate_positive_unit_line(xxb, nsms) + nsm[i] = nsmi + return nsm + def rho(self) -> np.ndarray: rho = get_density_from_material(self.material_id, self.allowed_materials) return rho @property - def all_materials(self) -> list[Any]: + def all_materials(self) -> list[MAT1]: return [self.model.mat1] @property - def allowed_materials(self) -> list[Any]: + def allowed_materials(self) -> list[MAT1]: all_materials = self.all_materials materials = [mat for mat in all_materials if mat.n > 0] assert len(materials) > 0, f'{self.type}: all_allowed_materials={all_materials}\nall_materials={self.model.materials}' @@ -1545,7 +1595,7 @@ def mass_per_length(self): #nproperties = len(self.property_id) rho = get_density_from_material(self.material_id, self.allowed_materials) A = self.area() - nsm = 0. + nsm = self.nsm_func() return rho * A + nsm @@ -1592,7 +1642,7 @@ def add(self, pid: int, mid: int, y: list[float], z: list[float], k1: float=1.0, k2: float=1.0, m1: float=0.0, m2: float=0.0, n1: float=0.0, n2: float=0.0, - symopt: int=0, comment: str='') -> PBCOMP: + symopt: int=0, comment: str='') -> int: """ Creates a PBCOMP card @@ -1638,8 +1688,9 @@ def add(self, pid: int, mid: int, y: list[float], z: list[float], k1, k2, m1, m2, n1, n2, symopt, y, z, c, mids, comment)) self.n += 1 + return self.n - def add_card(self, card: BDFCard, comment: str='') -> None: + def add_card(self, card: BDFCard, comment: str='') -> int: pid = integer(card, 1, 'pid') mid = integer(card, 2, 'mid') area = double_or_blank(card, 3, 'Area', default=0.0) @@ -1685,8 +1736,9 @@ def add_card(self, card: BDFCard, comment: str='') -> None: k1, k2, m1, m2, n1, n2, symopt, y, z, c, mids, comment)) self.n += 1 + return self.n - def parse_cards(self): + def parse_cards(self) -> None: if self.n == 0: return ncards = len(self.cards) @@ -1829,15 +1881,12 @@ def istation(self) -> np.ndarray: idim = make_idim(self.n, self.nstation) return idim - def write(self, size: int=8): + def 
write_file(self, bdf_file: TextIOLike, + size: int=8, is_double: bool=False, + write_card_header: bool=False) -> None: if len(self.property_id) == 0: - return '' - lines = [] - if size == 8: - print_card = print_card_8 - else: - print_card = print_card_16 - + return + print_card = get_print_card_8_16(size) property_ids = array_str(self.property_id, size=size) material_ids = array_str(self.material_id, size=size) @@ -1875,8 +1924,8 @@ def write(self, size: int=8): ci = set_blank_if_default(ci, 0.0) list_fields += [yi, zi, ci, mid, None, None, None, None] #assert len(y) > 0, list_fields - lines.append(print_card(list_fields)) - return ''.join(lines) + bdf_file.write(print_card(list_fields)) + return @property def istation(self) -> np.ndarray: diff --git a/pyNastran/dev/bdf_vectorized3/cards/test/test_vector_coords.py b/pyNastran/dev/bdf_vectorized3/cards/test/test_vector_coords.py new file mode 100644 index 000000000..4cb485000 --- /dev/null +++ b/pyNastran/dev/bdf_vectorized3/cards/test/test_vector_coords.py @@ -0,0 +1,793 @@ +import unittest +from io import StringIO +import numpy as np +#from numpy import array, allclose + +from pyNastran.bdf.bdf import BDF as BDF_Old +from pyNastran.dev.bdf_vectorized3.bdf import BDF + + +class TestCoords(unittest.TestCase): + def test_same(self): # passes + grids = [ + [1, 0, 0., 0., 1.], + [2, 0, 0., 1., 0.], + [3, 0, 1., 0., 0.], + [4, 0, 1., 1., 1.], + [5, 0, 1., 1., 0.], + ] + grids_expected = grids + coords = [] + _get_nodes(grids, grids_expected, coords) + + def test_shift(self): # passes + grids = [ + #n cp x y z + [1, 1, 0., 0., 1.], + [2, 1, 0., 1., 0.], + [3, 1, 1., 0., 0.], + [4, 1, 1., 1., 1.], + [5, 1, 1., 1., 0.], + ] + grids_expected = [ + #n cp x y z + [1, 1, 1., 1., 2.], + [2, 1, 1., 2., 1.], + [3, 1, 2., 1., 1.], + [4, 1, 2., 2., 2.], + [5, 1, 2., 2., 1.], + ] + + coords = [ + # cid,rid, origin, zaxis, xaxis + [1, 0, [1., 1., 1.], [1., 1., 2.], [2., 1., 1.]], + ] + _get_nodes(grids, grids_expected, coords) + + 
def test_rotate(self): # passes + grids = [ + [1, 1, 0., 0., 1.], + [2, 1, 0., 1., 0.], + [3, 1, 1., 0., 0.], + [4, 1, 1., 1., 1.], + [5, 1, 1., 1., 0.], + + ] + grids_expected = [ + # y z x + [1, 1., 1., 0., 0.], + [2, 1., 0., -1., 0.], + [3, 1., 0., 0., 1.], + [4, 1., 1., -1., 1.], + [5, 1., 0., -1., 1.], + ] + + coords = [ + # cid, rid, origin, zaxis, xaxis + [1, 0, [0., 0., 0.], [1., 0., 0.], [0., 0., 1.]], + ] + _get_nodes(grids, grids_expected, coords) + + def test_rotate2(self): # passes + grids = [ + [1, 1, 0., 0., 1.], # nid, cid, x,y,z + [2, 1, 0., 1., 0.], + [3, 1, 1., 0., 0.], + [4, 1, 1., 1., 1.], + [5, 1, 1., 1., 0.], + ] + grids_expected = [ + [1, 1, 0., 0., -1.], + [2, 1, 0., -1., 0.], + [3, 1, 1., 0., 0.], + [4, 1, 1., -1., -1.], + [5, 1, 1., -1., 0.], + ] + + coords = [ + # cid, rid, origin, zaxis xaxis + [1, 0, [0., 0., 0.], [0., 0., -1.], [1., 0., 0.]], + ] + _get_nodes(grids, grids_expected, coords) + + def test_rotate3(self): # passes + grids = [ + [1, 1, 0., 0., 1.], + [2, 1, 0., 1., 0.], + [3, 1, 1., 0., 0.], + [4, 1, 1., 1., 1.], + [5, 1, 1., 1., 0.], + ] + grids_expected = [ + [1, 1, 0., 0., -1.], + [2, 1, 0., 1., 0.], + [3, 1, -1., 0., 0.], + [4, 1, -1., 1., -1.], + [5, 1, -1., 1., 0.], + ] + + coords = [ + # cid, rid, origin, zaxis xaxis + [1, 0, [0., 0., 0.], [0., 0., -1.], [-1., 0., 0.]], + ] + _get_nodes(grids, grids_expected, coords) + + def test_rid_1(self): + grids = [ + [1, 2, 10., 5., 3.], # nid, cid, x,y,z + [2, 3, 10., 5., 3.], + ] + grids_expected = [ + [1, 1, 11., 6., 4.], + [2, 1, 11., 6., 4.], + ] + + coords = [ + # cid, rid, origin, zaxis xaxis + [1, 0, [0., 0., 0.], [0., 0., 1.], [1., 0., 0.]], # cid=1 + [2, 1, [1., 1., 1.], [1., 1., 2.], [2., 1., 1.]], # cid=2 + #[2, 1, [0., 0., 0.], [0., 0., 1.], [1., 0., 0.]], # cid=2,equiv + + [3, 0, [1., 1., 1.], [1., 1., 2.], [2., 1., 1.]], # cid=3 + #[3, 0, [0., 0., 0.], [0., 0., 1.], [1., 0., 0.]], # cid=3,equiv + ] + _get_nodes(grids, grids_expected, coords) + + def 
test_cord1r_01(self): + #lines = ['cord1r,2,1,4,3'] + + model = BDF(debug=True) + model.add_cord1r(2, 1, 4, 3) + model.add_grid(4, [0., 0., 0.]) + model.add_grid(3, [0., 0., 1.]) + model.add_grid(1, [0., 1., 0.]) + model.log.info('running setup...') + #grids = [ + #['GRID', 4, 0, 0.0, 0., 0.], + #['GRID', 3, 0, 0.0, 0., 1.], + #['GRID', 1, 0, 0.0, 1., 0.], + #] + + #model.add_card(lines, 'CORD1R', is_list=False) + #for grid in grids: + #model.add_card(grid, 'GRID', is_list=True) + model.setup() + + #size = 8 + coord = model.coord + icoord = coord.index(2) + self.assertEqual(coord.coord_id[icoord], 2) + self.assertEqual(coord.ref_coord_id[icoord], -1) + + bdf_file = StringIO() + #coord.write_card(bdf_file, size=8, is_double=False) + coord.write(size=8) + + def test_cord2c_01(self): + lines_a = [ + 'CORD2C* 3 0 0. 0.', + '* 0. 0. 0. 1.*', + '* 1. 0. 1.' + ] + lines_b = [ + 'CORD2R 4 3 10. 0. 5. 10. 90. 5.', + ' 10. 0. 6.' + ] + card_count = { + 'CORD2C' : 1, + 'CORD2R' : 1, + } + + model = BDF(debug=False) + cards = { + 'CORD2C' : ['', lines_a], + 'CORD2R' : ['', lines_b], + } + #add_cards(model, cards, card_count, is_list=False) + #model.allocate(cards, card_count) + card = model.add_card(lines_a, 'CORD2C', is_list=False) + card = model.add_card(lines_b, 'CORD2R', is_list=False) + model.setup() + + cord2r = model.coord.slice_card_by_coord_id(3) + #print(type(cord2r)) + self.assertEqual(cord2r.coord_id[0], 3) + self.assertEqual(cord2r.ref_coord_id[0], 0) + + cord2r = model.coord.slice_card_by_coord_id(4) + #print(type(cord2r)) + icoord = cord2r.index(4) + self.assertEqual(cord2r.coord_id[icoord], 4) + self.assertEqual(cord2r.ref_coord_id[icoord], 3) + + i = model.coord.index(4) + T = model.coord.T[i, :, :].reshape(3, 3) + Ti = T[0, :] + Tj = T[1, :] + Tk = T[2, :] + msg = 'i=%s expected=(0., 0., 1.)' % Ti + self.assertTrue(np.allclose(Ti, np.array([0., 0., 1.])), msg) + delta = Tj - np.array([1., 1., 0.]) / 2**0.5 + self.assertTrue(np.allclose(Tj, np.array([1., 1., 
0.]) / 2**0.5), str(delta)) + delta = Tk - np.array([-1., 1., 0.]) / 2**0.5 + self.assertTrue(np.allclose(Tk, np.array([-1., 1., 0.]) / 2**0.5), str(delta)) + + def test_grid_01(self): + model = BDF(debug=False) + model_old = BDF_Old(debug=False) + card_lines = [ + #['CORD1R', 1, 1, 2, 3], # fails on self.k + ['GRID', 1, 0, 0., 0., 0.], + ['GRID', 2, 0, 1., 0., 0.], + ['GRID', 4, 0, 1., 2., 3.], + ] + #card_count = { + #'GRID': 3, + #} + cards, card_count = add_cards_lines(model, model_old, card_lines) + #model.allocate(card_count, cards) + for card in cards: + model.add_card(card, card[0], comment='comment', is_list=True) + model.setup() + #+------+-----+----+----+----+----+----+----+------+ + #| 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | + #+======+=====+====+====+====+====+====+====+======+ + #| GRID | NID | CP | X1 | X2 | X3 | CD | PS | SEID | + #+------+-----+----+----+----+----+----+----+------+ + node = model.grid.slice_card_by_node_id(4) + #self.assertEqual(node.get_field(1), 4) + #self.assertEqual(node.get_field(2), 0) + #self.assertEqual(node.get_field(3), 1.) + #self.assertEqual(node.get_field(4), 2.) + #self.assertEqual(node.get_field(5), 3.) + + #node.update_field(1, 5) + #node.update_field(2, 6) + #node.update_field(3, 7.) + #node.update_field(4, 8.) + #node.update_field(5, 9.) + #with self.assertRaises(KeyError): + #node.update_field(9, 'dummy') + + #self.assertEqual(node.get_field(1), 5) + #self.assertEqual(node.get_field(2), 6) + #self.assertEqual(node.get_field(3), 7.) + #self.assertEqual(node.get_field(4), 8.) + #self.assertEqual(node.get_field(5), 9.) 
+ #with self.assertRaises(KeyError): + #node.get_field(9) + + def test_cord1r_02(self): + model = BDF(debug=False) + unused_card_count = { + 'CORD1R' : 1, + 'GRID' : 3, + } + #model.allocate(card_count) + cards = [ + ['CORD1R', 1, 1, 2, 3], # fails on self.k + ['GRID', 1, 0, 0., 0., 0.], + ['GRID', 2, 0, 1., 0., 0.], + ['GRID', 3, 0, 1., 1., 0.], + ] + for card in cards: + model.add_card(card, card[0], comment='comment\n', is_list=True) + #model.setup() + #unused_c1 = model.coord.slice_by_coord_id(1) + + def test_cord2r_bad_01(self): + model = BDF(debug=True) + model_old = BDF_Old(debug=True) + + card_count = { + 'GRID' : 4, + 'CORD2R' : 3, + } + cards = {'GRID' : [], 'CORD2R' : []} + grids = [ + # nid cp x y z + ['GRID', 1, 0, 0., 0., 0.], + ['GRID', 20, 0, 0., 0., 0.], + ['GRID', 30, 0, 0., 0., 0.], + ['GRID', 11, 5, 0., 0., 0.], + ] + for grid in grids: + cards['GRID'].append(('', grid)) + + coord_cards = [ + #[ + #'CORD2R', 1, 0, 0., 0., 0., + #0., 0., 0., + #0., 0., 0.], # fails on self.k + #[ + #'CORD2R', 2, 0, 0., 0., 0., + #1., 0., 0., + #0., 0., 0.], # fails on normalize self.j + #[ + #'CORD2R', 3, 0, 0., 0., 0., + #1., 0., 0., + #1., 1., 0.], # passes + [ + 'CORD2R', 4, 0, 0., 1., 0., + 1., 0., 0., + 1., 1., 0.], # passes + [ + 'CORD2R', 5, 4, 0., 1., 0., + 1., 0., 0., + 1., 1., 0.], # passes + ] + for card in coord_cards: + card_name = card[0] + cid = card[1] + if cid in [1, 2]: + #with self.assertRaises(RuntimeError): + cards[card_name].append(('', card)) + else: + cards[card_name].append(('', card)) + add_cards(model, model_old, cards, card_count, is_list=True, setup=False) + #model.setup() + model_old.cross_reference() + model.setup() + compare_coords(model, model_old) + expected_a = old_transform_node_to_global_xyz(model_old, [30]) # nid + expected_aa = old_transform_node_to_global_xyz(model_old, [30, 30]) # nid + expected_b = old_transform_node_to_global_xyz(model_old, [1, 11, 30]) # [nid, nid, nid] + expected_c = 
old_transform_node_to_local_coord_id(model_old, [1, 11, 30], 4) # [nid, ...], cp_goal + expected_d = old_transform_node_to_local_coord_id(model_old, [1, 11, 30], 0) # [nid, ...], cp_goal + + expected_d1 = old_transform_node_to_local_coord_id(model_old, [1], 0) # [nid, ...], cp_goal + expected_d2 = old_transform_node_to_local_coord_id(model_old, [11], 0) # [nid, ...], cp_goal + expected_d3 = old_transform_node_to_local_coord_id(model_old, [30], 0) # [nid, ...], cp_goal + + # this runs because it's got rid=0 + coord = model.coord #.slice_by_coord_id(4) + actual_a = coord.transform_node_to_global_xyz(30) # nid + actual_aa = coord.transform_node_to_global_xyz([30, 30]) # nid + actual_b = coord.transform_node_to_global_xyz([1, 11, 30]) # [nid, nid, nid] + actual_c = coord.transform_node_to_local_coord_id([1, 11, 30], 4) # [nid, ...], cp_goal + actual_d = coord.transform_node_to_local_coord_id([1, 11, 30], 0) # [nid, ...], cp_goal + + actual_d1 = coord.transform_node_to_local_coord_id([1], 0) # [nid, ...], cp_goal + actual_d2 = coord.transform_node_to_local_coord_id([11], 0) # [nid, ...], cp_goal + actual_d3 = coord.transform_node_to_local_coord_id([30], 0) # [nid, ...], cp_goal + + assert np.allclose(expected_a, actual_a) + assert np.allclose(expected_b, actual_b) + assert np.allclose(expected_c, actual_c) + assert np.allclose(expected_d, actual_d) + + assert np.allclose(expected_d1, actual_d1), f'D1: cid={cid} expected={expected_d1} actual={actual_d1}' + assert np.allclose(expected_d2, actual_d2), f'D2: cid={cid} expected={expected_d2} actual={actual_d2}' + assert np.allclose(expected_d3, actual_d3), f'D3: cid={cid} expected={expected_d3} actual={actual_d3}' + + assert np.allclose(expected_d, actual_d), f'D: cid={cid} expected={expected_d} actual={actual_d}' + + xyz = [0., 0., 0.] 
+ coord.transform_local_xyz_to_global(xyz, 4) # [xyz], cp_initial + xyz = [ + [0., 0., 0.], + [1., 1., 1.], + ] + coord.transform_local_xyz_to_global(xyz, 4) # [xyz], cp_initial + + # global from global + coord.transform_local_xyz_to_global(xyz, 0) # [xyz], cp_initial + + # this doesn't run because rid != 0 + with self.assertRaises(KeyError): + # cp=0 doesn't exist + coord.transform_local_xyz_to_global(xyz, 2) + + def test_cord2_rcs_01(self): + """ + all points are located at <30,40,50> + """ + model = BDF(debug=False) + model_old = BDF_Old(debug=False) + card_count = { + 'GRID' : 3, + 'CORD2R' : 1, + 'CORD2C' : 1, + 'CORD2S' : 1, + } + cards = [] + card_lines = [ + [ + #'$ Femap with NX Nastran Coordinate System 10 : rectangular defined in a rectangular', + 'CORD2R* 10 0 10. 5.', + '* 3. 10.3420201433 4.53015368961 3.81379768136* ', + '* 10.7198463104 5.68767171433 3.09449287122',], + [ + #'$ Femap with NX Nastran Coordinate System 11 : cylindrical defined in rectangular', + 'CORD2C* 11 0 7. 3.', + '* 9. 7.64278760969 2.73799736977 9.71984631039* ', + '* 7.75440650673 3.37968226211 8.46454486422',], + [ + #'$ Femap with NX Nastran Coordinate System 12 : spherical defined in rectangular', + 'CORD2S* 12 0 12. 8.', + '* 5. 
12.6427876097 7.86697777844 5.75440650673* ', + '* 12.6634139482 8.58906867688 4.53861076379',], + + [ + 'GRID* 10 10 42.9066011565 34.2422137135', + '* 28.6442730262 0',], + [ + 'GRID* 11 11 48.8014631871 78.8394787869', + '* 34.6037164304 0',], + [ + 'GRID* 12 12 58.0775343829 44.7276544324', + '* 75.7955331161 0',], + ] + cards, card_count = add_cards_lines(model, model_old, card_lines) + #model.allocate(card_count, cards) + #model.setup() + + for nid in model.grid.node_id: + a = np.array([30., 40., 50.]) + b = model.grid.get_position_by_node_id(nid) + self.assertTrue(np.allclose(a, b), str(a - b)) + + def test_cord2_rcs_02(self): + """ + all points are located at <30,40,50> + """ + model = BDF(debug=False) + model_old = BDF_Old(debug=False) + #card_count = { + #'GRID' : 3, + #'CORD2R' : 1, + #'CORD2C' : 2, + #'CORD2S' : 1, + #} + #model.allocate(card_count) + card_lines = [ + [ + 'CORD2C* 1 0 0. 0.', + '* 0. 0. 0. 1.* ', + '* 1. 0. 1.',], + [ + #'$ Femap with NX Nastran Coordinate System 20 : rectangular defined in cylindrical', + 'CORD2R* 20 1 7. 20.', + '* -6. 7.07106781187 28.1301023542 -6.* ', + '* 7.70710678119 20. -5.29289321881',], + [ + #'$ Femap with NX Nastran Coordinate System 21 : cylindrical defined in cylindrical', + 'CORD2C* 21 1 15. -30.', + '* 12. 14.6565766735 -30.3177805524 12.9355733712* ', + '* 14.6234241583 -26.4257323272 11.9304419665',], + [ + #'$ Femap with NX Nastran Coordinate System 22 : spherical defined in cylindrical', + 'CORD2S* 22 1 5. -75.', + '* 20. 
5.66032384035 -82.9319986389 19.8502545865* ', + '* 4.88876051026 -73.8006653677 19.0116094889',], + [ + 'GRID* 20 20 64.2559135157 -14.9400459772', + '* 27.3271005317 0',], + [ + 'GRID* 21 21 52.8328862418 -28.8729017195', + '* 34.615939507 0',], + [ + 'GRID* 22 22 61.1042111232 158.773483595', + '* -167.4951724 0',], + ] + cards, card_count = add_cards_lines(model, model_old, card_lines) + #model.allocate(card_count, cards) + #for lines in cards: + #card = model.add(lines) + #model.add_card(card, card[0]) + #model.setup() + for nid in model.grid.node_id: + a = np.array([30., 40., 50.]) + b = model.grid.get_position_by_node_id(nid) + self.assertTrue(np.allclose(a, b), str(a - b)) + + def test_cord2_rcs_03(self): + """ + all points are located at <30,40,50> + """ + model = BDF(debug=False) + model_old = BDF_Old(debug=False) + #card_count = { + #'GRID' : 3, + #'CORD2R' : 1, + #'CORD2C' : 1, + #'CORD2S' : 2, + #} + + card_lines = [ + [ + 'CORD2S* 2 0 0. 0.', + '* 0. 0. 0. 1.* ', + '* 1. 0. 1.',], + [ + #'$ Femap with NX Nastran Coordinate System 30 : rectangular in spherical', + 'CORD2R* 30 2 14. 30.', + '* 70. 13.431863852 32.1458443949 75.2107442927* ', + '* 14.4583462334 33.4569982885 68.2297989286',], + [ + #'$ Femap with NX Nastran Coordinate System 31 : cylindrical in spherical', + 'CORD2C* 31 2 3. 42.', + '* -173. 2.86526881213 45.5425615252 159.180363517* ', + '* 3.65222385965 29.2536614627 -178.631312271',], + [ + #'$ Femap with NX Nastran Coordinate System 32 : spherical in spherical', + 'CORD2S* 32 2 22. 14.', + '* 85. 
22.1243073983 11.9537753718 77.9978191005* ', + '* 21.0997242967 13.1806120497 88.4824763008',], + [ + 'GRID* 30 30 40.7437952957 -23.6254877994', + '* -33.09784854 0',], + [ + 'GRID* 31 31 62.9378078196 15.9774797923', + '* 31.0484428362 0',], + [ + 'GRID* 32 32 53.8270847449 95.8215692632', + '* 159.097767463 0',], + ] + cards, card_count = add_cards_lines(model, model_old, card_lines) + #model.allocate(card_count, cards) + #model.setup() + + for nid in model.grid.node_id: + a = np.array([30., 40., 50.]) + b = model.grid.get_position_by_node_id(nid) + self.assertTrue(np.allclose(a, b), str(a - b)) + + def test_cord1c_01(self): + lines = ['cord1c,2,1,4,3'] + grids = [ + ['GRID', 4, 0, 0.0, 0., 0.], + ['GRID', 3, 0, 0.0, 0., 1.], + ['GRID', 1, 0, 0.0, 1., 0.], + ] + + card_count = { + 'CORD1C' : 1, + 'GRID' : 3, + } + + model = BDF(debug=False) + #model.allocate(card_count) + model.add_card(lines, 'CORD1C', is_list=False) + for grid in grids: + model.add_card(grid, grid[0], is_list=True) + model.setup() + + #size = 8 + bdf_file = StringIO() + card = model.coord.slice_card_by_coord_id(2) + self.assertEqual(card.coord_id, 2) + self.assertEqual(card.ref_coord_id, -1) + #card.write_card(bdf_file, size=8, is_double=False) + card.write(size=8) + + def test_cord1s_01(self): + cord1s = ['cord1s',2, 1,4,3] + grids = [ + ['GRID', 4, 0, 0.0, 0., 0.], + ['GRID', 3, 0, 0.0, 0., 1.], + ['GRID', 1, 0, 0.0, 1., 0.], + ] + card_count = { + 'CORD1S' : 1, + 'GRID' : 3, + } + model = BDF(debug=False) + model_old = BDF_Old(debug=False) + cards = { + 'GRID' : [ + ('', grids[0]), + ('', grids[1]), + ('', grids[2]), + ], + 'CORD1S' : [('', cord1s)] + } + add_cards(model, model_old, cards, card_count) + #model.setup() + + #size = 8 + bdf_file = StringIO() + card = model.coord.slice_card_by_coord_id(2) + self.assertEqual(card.coord_id[0], 2) + self.assertEqual(card.ref_coord_id[0], -1) + card.write(size=8) + #card.write_card(bdf_file, size=8, is_double=False) + #card.raw_fields() + + def 
test_cord2r_02(self): + grid = ['GRID 20143 7 -9.31-4 .11841 .028296'] + coord = [ + 'CORD2R 7 1.135 .089237 -.0676 .135 .089237 -.0676', + ' 1.135 .089237 .9324'] + + model = BDF(debug=False) + model_old = BDF_Old(debug=False) + card_count = { + 'GRID' : 1, + 'CORD2R' : 1, + } + cards = { + 'CORD2R' : [('', coord)], + 'GRID' : [('', grid)], + } + model.grid.debug = True + add_cards_lines(model, model_old, cards) + model.setup() + + g = model.grid.slice_card_by_node_id(20143) + #xyz = g.get_position() + xyz_cid0 = g.xyz_cid0() + inid = model.grid.index(20143) + xyz = xyz_cid0[inid, :] + #xyz = model.coord.get_global_position_by_node_id(20143, g.cp[0])[0] + + # by running it through Patran... + #GRID 20143 1.1067 .207647 -.068531 + expected = np.array([1.106704, .207647, -0.068531]) + diff = xyz - expected + + msg = '\nexpected=%s \nactual =%s \ndiff =%s' % (expected, xyz, diff) + assert np.allclose(diff, 0.), msg + coord = model.coord.slice_card_by_coord_id(7) + T = coord.T[0, :, :] + #self.assertTrue(array_equal(T, coord.beta_n(2))) + +def _get_nodes(grids, grids_expected, coords): + model = BDF(debug=False) + model_old = BDF_Old(debug=False) + + card_count = { + 'GRID' : len(grids), + 'CORD2R' : len(coords), + } + cards = {'GRID' : [], 'CORD2R' : []} + for grid in grids: + nid, cid, x, y, z = grid + card = ['GRID', nid, cid, x, y, z] + cards['GRID'].append(('', card)) + + for coord in coords: + cid, rid, x, y, z = coord + card = ['CORD2R', cid, rid] + x + y + z + cards['CORD2R'].append(('', card)) + #coordObj = model.coords.slice_by_coord_id(cid) + add_cards(model, model_old, cards, card_count) + #model.setup() + + #i = 0 + coord = model.coord + model_old.cross_reference() + for cid, coord_obj in sorted(model_old.coords.items()): + print(f'cid={cid}') + i = cid + #single_coord = coord.slice_card(cid) + #print(coord_obj) + #print(single_coord.write(size=8)) + assert cid == coord.coord_id[i] + assert np.allclose(coord_obj.origin, coord.origin[i, :]) + assert 
np.allclose(coord_obj.i, coord.i[i, :]) + assert np.allclose(coord_obj.j, coord.j[i, :]) + assert np.allclose(coord_obj.k, coord.k[i, :]) + + nodes = model.grid + xyz_cid0 = nodes.xyz_cid0() + for (i, grid) in enumerate(grids_expected): + nid, cid, x, y, z = grid + pos = nodes.get_position_by_node_id([nid])[0] + n = np.array([x, y, z]) + msg = 'i=%s expected=%s actual=%s\n' % (i, n, pos) + #print(msg) + assert np.allclose(n, pos), msg + +def add_cards(model: BDF, model_old: BDF_Old, cards, card_count, + is_list=True, setup=True): + for card_name, card_group in cards.items(): + #print(card_name, card_group) + for comment_card in card_group: + #print(comment_card) + comment, card = comment_card + #print(card) + model.add_card(card, card_name, comment='', ifile=None, is_list=is_list, has_none=True) + if setup: + model.setup() + + for card_name, card_group in cards.items(): + #print(card_name, card_group) + for comment_card in card_group: + #print(comment_card) + comment, card = comment_card + #print(card) + model_old.add_card(card, card_name, comment='', ifile=None, is_list=is_list, has_none=True) + +def add_cards_lines(model: BDF, model_old: BDF_Old, cards_dict): + cards = [] + if isinstance(cards_dict, list): + for card_lines in cards_dict: + line0 = card_lines[0] + field0 = line0[0:8] + card_name = field0.rstrip('* ').upper() + assert card_name in ['GRID', 'CORD2R', 'CORD2C', 'CORD2S'], card_name + if line0 == card_name: + model.add_card(card_lines, card_name, comment='', ifile=None, is_list=True, has_none=True) + model_old.add_card(card_lines, card_name, comment='', ifile=None, is_list=True, has_none=True) + else: + model.add_card_lines(card_lines, card_name, comment='', has_none=True) + model_old.add_card_lines(card_lines, card_name, comment='', has_none=True) + + elif isinstance(cards_dict, dict): + for card_name, comment_cards in cards_dict.items(): + for comment_card in comment_cards: + comment, card_lines = comment_card + #card_name = card[0][0:8].rstrip('* 
').upper() + #print('card_name') + assert card_name in ['GRID', 'CORD2R', 'CORD2C', 'CORD2S'], card_name + model.add_card_lines(card_lines, card_name, comment='', has_none=True) + else: + asfd + return cards, model.card_count + +def compare_coords(model: BDF, model_old: BDF_Old): + nodes = np.array([ + [0., 0., 0.], + + [1., 0., 0.], + [0., 1., 0.], + [0., 0., 1.], + + # ------------------------ + # add 1 to each axis + [2., 0., 0.], + [1., 1., 0.], + [1., 0., 1.], + + [1., 1., 0.], + [0., 2., 0.], + [0., 1., 1.], + + [1., 0., 1.], + [0., 1., 1.], + [0., 0., 2.], + + # ------------------------ + # add 1 to each axis + [5., 7., 89.], + [3., 44., 192.], + [57., 52., 13.], + ]) + assert len(model.coord.coord_id) == len(model_old.coords) + coord = model.coord + for i, cid in enumerate(coord.coord_id): + coord_old = model_old.coords[cid] # type: CORD2R + assert np.allclose(coord.k[i, :], coord_old.k), f'k: cid={cid} actual={coord.k[i,:]}; expected={coord_old.k}' + assert np.allclose(coord.j[i, :], coord_old.j), f'j: cid={cid} actual={coord.j[i,:]}; expected={coord_old.j}' + assert np.allclose(coord.i[i, :], coord_old.i), f'i: cid={cid} actual={coord.i[i,:]}; expected={coord_old.i}' + assert np.allclose(coord.origin[i, :], coord_old.origin), f'cid={cid} origin: actual={coord.origin[i,:]}; expected={coord_old.origin}' + for node in nodes: + global_node = coord.transform_local_xyz_to_global(node, cid) + local_node = coord.transform_global_xyz_to_local_coord_id(node, cid) + + local_node_old = coord_old.transform_node_to_local(node) + global_node_old = coord_old.transform_node_to_global(node) + + global_node = coord.transform_local_xyz_to_global(node, cid) + local_node = coord.transform_global_xyz_to_local_coord_id(node, cid) + + assert np.allclose(global_node, global_node_old), f'cid={cid} global xyz (old)={global_node_old}; new={global_node}' + assert np.allclose(global_node, global_node_old), f'cid={cid} local xyz (old)={local_node_old}; new={local_node}' + +def 
old_transform_node_to_global_xyz(model: BDF_Old, node_ids: list[int]): + xyz = [] + for nid in node_ids: + xyzi = model.nodes[nid].get_position() + xyz.append(xyzi) + return xyz + +def old_transform_node_to_local_coord_id(model: BDF_Old, node_ids: list[int], local_coord_id: int): + xyz = [] + for nid in node_ids: + xyzi = model.nodes[nid].get_position_wrt(model, local_coord_id) + xyz.append(xyzi) + return xyz + +if __name__ == '__main__': # pragma: no cover + unittest.main() diff --git a/pyNastran/dev/bdf_vectorized3/solver/__init__.py b/pyNastran/dev/bdf_vectorized3/solver/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/pyNastran/dev/bdf_vectorized3/solver/build_stiffness.py b/pyNastran/dev/bdf_vectorized3/solver/build_stiffness.py new file mode 100644 index 000000000..10e9311be --- /dev/null +++ b/pyNastran/dev/bdf_vectorized3/solver/build_stiffness.py @@ -0,0 +1,619 @@ +from __future__ import annotations +import copy +from typing import Union, Optional, Any, TYPE_CHECKING + +import numpy as np +import scipy.sparse as sci_sparse + +#from pyNastran.dev.solver.stiffness.shells import build_kbb_cquad4, build_kbb_cquad8 +from .utils import lambda1d, DOF_MAP +#from pyNastran.bdf.cards.elements.bars import get_bar_vector, get_bar_yz_transform +if TYPE_CHECKING: # pragma: no cover + from pyNastran.nptyping_interface import NDArrayNNfloat + from pyNastran.dev.bdf_vectorized3.bdf import ( + BDF, + #CELAS1, CELAS2, CELAS3, CELAS4, + #CBAR, PBAR, PBARL, PBEAM, PBEAML, # , CBEAM + #MAT1, + ) + + +def build_Kgg(model: BDF, dof_map: DOF_MAP, + ndof: int, + ngrid: int, + ndof_per_grid: int, + idtype: str='int32', fdtype: str='float32') -> tuple[NDArrayNNfloat, Any]: + """[K] = d{P}/dx""" + model.log.debug(f'starting build_Kgg') + Kbb = sci_sparse.dok_matrix((ndof, ndof), dtype=fdtype) + #print(dof_map) + + #_get_loadid_ndof(model, subcase_id) + out = model.get_xyz_in_coord_array(cid=0, fdtype=fdtype, idtype=idtype) + nid_cp_cd, xyz_cid0, xyz_cp, 
unused_icd_transform, unused_icp_transform = out + all_nids = nid_cp_cd[:, 0] + del xyz_cp, nid_cp_cd + all_nids + + nelements = 0 + # TODO: I think these need to be in the global frame + nelements += _build_kbb_celas1(model, Kbb, dof_map) + nelements += _build_kbb_celas2(model, Kbb, dof_map) + nelements += _build_kbb_celas3(model, Kbb, dof_map) + nelements += _build_kbb_celas4(model, Kbb, dof_map) + + nelements += _build_kbb_conrod(model, Kbb, dof_map) + nelements += _build_kbb_crod(model, Kbb, dof_map) + nelements += _build_kbb_ctube(model, Kbb, dof_map) + nelements += _build_kbb_cbar(model, Kbb, dof_map) + nelements += _build_kbb_cbeam(model, Kbb, dof_map, + all_nids, xyz_cid0, idtype='int32', fdtype='float64') + #nelements += build_kbb_cquad4(model, Kbb, dof_map, + #all_nids, xyz_cid0, idtype='int32', fdtype='float64') + #nelements += build_kbb_cquad8(model, Kbb, dof_map, + #all_nids, xyz_cid0, idtype='int32', fdtype='float64') + assert nelements > 0, [elem for elem in model.elements if elem.n] + Kbb2 = Kbb.tocsc() + + #Kgg = Kbb_to_Kgg(model, Kbb, ngrid, ndof_per_grid, inplace=False) + Kgg = Kbb_to_Kgg(model, Kbb2, ngrid, ndof_per_grid) + + model.log.debug(f'end of build_Kgg') + return Kgg + + +def _build_kbb_celas1(model: BDF, Kbb, dof_map: DOF_MAP) -> None: + """fill the CELAS1 Kbb matrix""" + celas = model.celas1 + pelas = model.pelas + if celas.n == 0: + return celas.n + pelas = pelas.slice_card_by_id(celas.property_id, assume_sorted=True) + nids1 = celas.nodes[:, 0] + nids2 = celas.nodes[:, 1] + c1s = celas.components[:, 0] + c2s = celas.components[:, 1] + #pelas = model.celas2 + ks = pelas.k + for nid1, nid2, c1, c2, ki in zip(nids1, nids2, c1s, c2s, ks): + i = dof_map[(nid1, c1)] + j = dof_map[(nid2, c2)] + k = ki * np.array([[1, -1,], + [-1, 1]]) + ibe = [ + (i, 0), + (j, 1), + ] + for ib1, ie1 in ibe: + for ib2, ie2 in ibe: + Kbb[ib1, ib2] += k[ie1, ie2] + return celas.n + +def _build_kbb_celas2(model: BDF, Kbb, dof_map: DOF_MAP) -> int: + """fill the 
CELAS2 Kbb matrix""" + celas = model.celas2 + if celas.n == 0: + return celas.n + pelas = celas + nids1 = celas.nodes[:, 0] + nids2 = celas.nodes[:, 1] + c1s = celas.components[:, 0] + c2s = celas.components[:, 1] + pelas = model.celas2 + ks = pelas.k + for nid1, nid2, c1, c2, ki in zip(nids1, nids2, c1s, c2s, ks): + i = dof_map[(nid1, c1)] + j = dof_map[(nid2, c2)] + k = ki * np.array([[1, -1,], + [-1, 1]]) + ibe = [ + (i, 0), + (j, 1), + ] + for ib1, ie1 in ibe: + for ib2, ie2 in ibe: + Kbb[ib1, ib2] += k[ie1, ie2] + return celas.n + +def _build_kbb_celas3(model: BDF, Kbb, dof_map: DOF_MAP) -> None: + """fill the CELAS3 Kbb matrix""" + celas = model.celas3 + if celas.n == 0: + return celas.n + eids = model._type_to_id_map['CELAS3'] + for eid in eids: + elem = model.elements[eid] + ki = elem.K() + #print(elem, ki) + #print(elem.get_stats()) + _build_kbbi_celas34(Kbb, dof_map, elem, ki) + return len(eids) + +def _build_kbb_celas4(model: BDF, Kbb, dof_map: DOF_MAP) -> None: + """fill the CELAS4 Kbb matrix""" + celas = model.celas4 + if celas.n == 0: + return celas.n + eids = model._type_to_id_map['CELAS4'] + for eid in eids: + elem = model.elements[eid] + ki = elem.K() + #print(elem, ki) + #print(elem.get_stats()) + _build_kbbi_celas34(Kbb, dof_map, elem, ki) + return len(eids) + +def _build_kbbi_celas12(Kbb, dof_map: DOF_MAP, + elem: Union[CELAS1, CELAS2], ki: float) -> None: + """fill the CELASx Kbb matrix""" + nid1, nid2 = elem.nodes + c1, c2 = elem.c1, elem.c2 + i = dof_map[(nid1, c1)] + j = dof_map[(nid2, c2)] + k = ki * np.array([[1, -1,], + [-1, 1]]) + ibe = [ + (i, 0), + (j, 1), + ] + for ib1, ie1 in ibe: + for ib2, ie2 in ibe: + Kbb[ib1, ib2] += k[ie1, ie2] + #Kbb[j, i] += ki + #Kbb[i, j] += ki + #del i, j, ki, nid1, nid2, c1, c2 + +def _build_kbbi_celas34(Kbb, dof_map: DOF_MAP, + elem: Union[CELAS3, CELAS4], ki: float) -> None: + """fill the CELASx Kbb matrix""" + nid1, nid2 = elem.nodes + #print(dof_map) + i = dof_map[(nid1, 0)] + j = dof_map[(nid2, 0)] + 
k = ki * np.array([[1, -1,], + [-1, 1]]) + ibe = [ + (i, 0), + (j, 1), + ] + for ib1, ie1 in ibe: + for ib2, ie2 in ibe: + Kbb[ib1, ib2] += k[ie1, ie2] + #Kbb[j, i] += ki + #Kbb[i, j] += ki + #del i, j, ki, nid1, nid2, c1, c2 + +def _build_kbb_cbar(model, Kbb, dof_map: DOF_MAP, fdtype: str='float64') -> int: + """fill the CBAR Kbb matrix using an Euler-Bernoulli beam""" + eids = model._type_to_id_map['CBAR'] + nelements = len(eids) + if nelements == 0: + return nelements + + for eid in eids: + elem = model.elements[eid] # type: CBAR + nid1, nid2 = elem.nodes + is_passed, K = ke_cbar(model, elem, fdtype=fdtype) + assert is_passed + + i1 = dof_map[(nid1, 1)] + i2 = dof_map[(nid2, 1)] + n_ijv = [ + i1, + i1 + 1, i1 + 2, + i1 + 3, i1 + 4, i1 + 5, + + i2, + i2 + 1, i2 + 2, + i2 + 3, i2 + 4, i2 + 5, + ] + for i, i1 in enumerate(n_ijv): + for j, i2 in enumerate(n_ijv): + ki = K[i, j] + if abs(ki) > 0.: + Kbb[i1, i2] += ki + return nelements + +def ke_cbar(model: BDF, elem: CBAR, fdtype: str='float64'): + """get the elemental stiffness matrix in the basic frame""" + pid_ref = elem.pid_ref + mat = pid_ref.mid_ref + + #is_passed, (wa, wb, ihat, jhat, khat) = elem.get_axes(model) + #T = np.vstack([ihat, jhat, khat]) + #z = np.zeros((3, 3), dtype='float64') + prop = elem.pid_ref + mat = prop.mid_ref + I1 = prop.I11() + I2 = prop.I22() + unused_I12 = prop.I12() + pa = elem.pa + pb = elem.pb + #J = prop.J() + #E = mat.E() + #G = mat.G() + z = np.zeros((3, 3), dtype='float64') + T = z + #unused_Teb = np.block([ + #[T, z], + #[z, T], + #]) + is_failed, (wa, wb, ihat, jhat, khat) = elem.get_axes(model) + assert is_failed is False + #print(wa, wb) + xyz1 = elem.nodes_ref[0].get_position() + wa + xyz2 = elem.nodes_ref[1].get_position() + wb + dxyz = xyz2 - xyz1 + L = np.linalg.norm(dxyz) + pid_ref = elem.pid_ref + mat = pid_ref.mid_ref + T = np.vstack([ihat, jhat, khat]) + z = np.zeros((3, 3), dtype=fdtype) + Teb = np.block([ + [T, z, z, z], + [z, T, z, z], + [z, z, T, z], + [z, z, 
z, T], + ]) + k1 = pid_ref.k1 + k2 = pid_ref.k2 + Ke = _beami_stiffness(pid_ref, mat, L, I1, I2, k1=k1, k2=k2, pa=pa, pb=pb) + K = Teb.T @ Ke @ Teb + is_passed = not is_failed + return is_passed, K + +def _build_kbb_crod(model: BDF, Kbb, dof_map: DOF_MAP) -> None: + """fill the CROD Kbb matrix""" + elem = model.crod + if elem.n == 0: + return elem.n + prop = model.prod + mat = model.mat1 + + xyz_cid0 = model.grid.xyz_cid0() + nodes = elem.nodes + pids = elem.property_id + inid = model.grid.index(nodes) + inid1 = inid[:, 0] + inid2 = inid[:, 1] + xyz1 = xyz_cid0[inid1, :] + xyz2 = xyz_cid0[inid2, :] + dxyz = xyz2 - xyz1 + length = np.linalg.norm(dxyz, axis=1) + assert len(length) == elem.n + + prop2 = prop.slice_card_by_id(pids, assume_sorted=True) + area = prop2.area() + Js = prop2.J + material_id = prop2.material_id + mat1 = mat.slice_card_by_material_id(material_id) + assert elem.n == prop.n + assert elem.n == mat1.n + for nodes, dxyzi, L, A, E, G, J in zip(elem.nodes, dxyz, length, area, mat1.E, mat1.G, Js): + _build_kbbi_conrod_crod(Kbb, dof_map, nodes, dxyzi, L, A, E, G, J) + return len(elem) + +def _build_kbb_ctube(model: BDF, Kbb, dof_map: DOF_MAP) -> None: + """fill the CTUBE Kbb matrix""" + elem = model.ctube + if elem.n == 0: + return elem.n + prop = model.ptube + mat = model.mat1 + + xyz_cid0 = model.grid.xyz_cid0() + nodes = elem.nodes + pids = elem.property_id + inid = model.grid.index(nodes) + inid1 = inid[:, 0] + inid2 = inid[:, 1] + xyz1 = xyz_cid0[inid1, :] + xyz2 = xyz_cid0[inid2, :] + dxyz = xyz2 - xyz1 + length = np.linalg.norm(dxyz, axis=1) + assert len(length) == elem.n + + prop2 = prop.slice_card_by_id(pids, assume_sorted=True) + area = prop2.area() + Js = area * 0. 
+ material_id = prop2.material_id + mat1 = mat.slice_card_by_material_id(material_id) + assert elem.n == prop.n + assert elem.n == mat1.n + for nodes, dxyzi, L, A, E, G, J in zip(elem.nodes, dxyz, length, area, mat1.E, mat1.G, Js): + _build_kbbi_conrod_crod(Kbb, dof_map, nodes, dxyzi, L, A, E, G, J) + return len(elem) + +def _build_kbb_conrod(model: BDF, Kbb, dof_map: DOF_MAP) -> int: + """fill the CONROD Kbb matrix""" + elem = model.conrod + if elem.n == 0: + return elem.n + prop = model.conrod + mat = model.mat1 + + xyz_cid0 = model.grid.xyz_cid0() + nodes = elem.nodes + inid = model.grid.index(nodes) + inid1 = inid[:, 0] + inid2 = inid[:, 1] + xyz1 = xyz_cid0[inid1, :] + xyz2 = xyz_cid0[inid2, :] + dxyz = xyz2 - xyz1 + length = np.linalg.norm(dxyz, axis=1) + assert len(length) == elem.n + + area = prop.area() + Js = prop.J + material_id = prop.material_id + mat1 = mat.slice_card_by_material_id(material_id) + assert elem.n == prop.n + assert elem.n == mat1.n + for nodes, dxyzi, L, A, E, G, J in zip(elem.nodes, dxyz, length, area, mat1.E, mat1.G, Js): + _build_kbbi_conrod_crod(Kbb, dof_map, nodes, dxyzi, L, A, E, G, J) + return len(elem) + +def _build_kbbi_conrod_crod(Kbb, dof_map: DOF_MAP, nodes, + dxyz12, L, A, E, G, J, fdtype='float64') -> None: + """fill the ith rod Kbb matrix""" + nid1, nid2 = nodes + #mat = elem.mid_ref + #xyz1 = elem.nodes_ref[0].get_position() + #xyz2 = elem.nodes_ref[1].get_position() + #dxyz12 = xyz1 - xyz2 + #L = np.linalg.norm(dxyz12) + #E = mat.E + #G = mat.G() + #J = elem.J() + #A = elem.Area() + #print(f'A = {A}') + #L = elem.Length() + k_axial = A * E / L + k_torsion = G * J / L + + assert isinstance(k_axial, float), k_axial + assert isinstance(k_torsion, float), k_torsion + #Kbb[i, i] += ki[0, 0] + #Kbb[i, j] += ki[0, 1] + #Kbb[j, i] = ki[1, 0] + #Kbb[j, j] = ki[1, 1] + k = np.array([[1., -1.], + [-1., 1.]]) # 1D rod + Lambda = lambda1d(dxyz12, debug=False) + K = Lambda.T @ k @ Lambda + #i11 = dof_map[(n1, 1)] + #i12 = 
dof_map[(n1, 2)] + + #i21 = dof_map[(n2, 1)] + #i22 = dof_map[(n2, 2)] + + nki, nkj = K.shape + K2 = np.zeros((nki*2, nkj*2), dtype=fdtype) + + ni1 = dof_map[(nid1, 1)] + nj1 = dof_map[(nid2, 1)] + + i1 = 0 + i2 = 3 # dof_map[(n1, 2)] + if k_torsion == 0.0: # axial; 2D or 3D + K2 = K * k_axial + n_ijv = [ + # axial + ni1, ni1 + 1, ni1 + 2, # node 1 + nj1, nj1 + 1, nj1 + 2, # node 2 + ] + dofs = np.array([ + i1, i1+1, i1+2, + i2, i2+1, i2+2, + ], dtype='int32') + elif k_axial == 0.0: # torsion; assume 3D + K2 = K * k_torsion + n_ijv = [ + # torsion + ni1 + 3, ni1 + 4, ni1 + 5, # node 1 + nj1 + 3, nj1 + 4, nj1 + 5, # node 2 + ] + dofs = np.array([ + i1, i1+1, i1+2, + i2, i2+1, i2+2, + ], dtype='int32') + else: # axial + torsion; assume 3D + # u1fx, u1fy, u1fz, u2fx, u2fy, u2fz + K2[:nki, :nki] = K * k_axial + + # u1mx, u1my, u1mz, u2mx, u2my, u2mz + K2[nki:, nki:] = K * k_torsion + + dofs = np.array([ + i1, i1+1, i1+2, + i2, i2+1, i2+2, + + i1+3, i1+4, i1+5, + i2+3, i2+4, i2+5, + ], dtype='int32') + n_ijv = [ + # axial + ni1, ni1 + 1, ni1 + 2, # node 1 + nj1, nj1 + 1, nj1 + 2, # node 2 + + # torsion + ni1 + 3, ni1 + 4, ni1 + 5, # node 1 + nj1 + 3, nj1 + 4, nj1 + 5, # node 2 + ] + for dof1, i1 in zip(dofs, n_ijv): + for dof2, i2 in zip(dofs, n_ijv): + ki = K2[dof1, dof2] + if abs(ki) > 0.: + #print(nij1, nij2, f'({i1}, {i2});', (dof1, dof2), ki) + Kbb[i1, i2] += ki + #print(K2) + #print(Kbb) + return + +def _build_kbb_cbeam(model: BDF, Kbb, dof_map: DOF_MAP, + all_nids, xyz_cid0, idtype='int32', fdtype='float64') -> int: + """TODO: Timoshenko beam, warping, I12""" + str(all_nids) + str(xyz_cid0) + eids = np.array(model._type_to_id_map['CBEAM'], dtype=idtype) + nelements = len(eids) + if nelements == 0: + return nelements + + for eid in eids: + elem = model.elements[eid] + nid1, nid2 = elem.nodes + xyz1 = elem.nodes_ref[0].get_position() + xyz2 = elem.nodes_ref[1].get_position() + dxyz = xyz2 - xyz1 + L = np.linalg.norm(dxyz) + pid_ref = elem.pid_ref + mat = 
pid_ref.mid_ref + is_failed, (wa, wb, ihat, jhat, khat) = elem.get_axes(model) + #print(wa, wb, ihat, jhat, khat) + assert is_failed is False + T = np.vstack([ihat, jhat, khat]) + z = np.zeros((3, 3), dtype=fdtype) + Teb = np.block([ + [T, z, z, z], + [z, T, z, z], + [z, z, T, z], + [z, z, z, T], + ]) + Iy = pid_ref.I11() + Iz = pid_ref.I22() + k1 = pid_ref.k1 + k2 = pid_ref.k2 + pa = elem.pa + pb = elem.pb + Ke = _beami_stiffness(pid_ref, mat, L, Iy, Iz, pa, pb, k1=k1, k2=k2) + K = Teb.T @ Ke @ Teb + i1 = dof_map[(nid1, 1)] + j1 = dof_map[(nid2, 1)] + n_ijv = [ + i1, i1 + 1, i1 + 2, i1 + 3, i1 + 4, i1 + 5, # node 1 + j1, j1 + 1, j1 + 2, j1 + 3, j1 + 4, j1 + 5, # node 2 + ] + for i, i1 in enumerate(n_ijv): + for j, i2 in enumerate(n_ijv): + ki = K[i, j] + if abs(ki) > 0.: + Kbb[i1, i2] += ki + return nelements + +def _beami_stiffness(prop: Union[PBAR, PBARL, PBEAM, PBEAML], + mat: MAT1, + L: float, Iy: float, Iz: float, + pa: int, pb: int, + k1: Optional[float]=None, + k2: Optional[float]=None): + """gets the ith Euler-Bernoulli beam stiffness""" + E = mat.E() + G = mat.G() + A = prop.Area() + J = prop.J() + + kaxial = E * A / L + ktorsion = G * J / L + + L2 = L * L + if k1 is not None: + phiy = 12. * E * Iy / (k1 * G * A * L2) + if k2 is not None: + phiz = 12. * E * Iy / (k2 * G * A * L2) + + phiy = 1.0 + phiz = 1.0 + ky = E * Iy / (L * phiy) + kz = E * Iz / (L * phiz) + + K = np.zeros((12, 12), dtype='float64') + # axial + K[0, 0] = K[6, 6] = kaxial + K[6, 0] = K[0, 6] = -kaxial + + # torsion + K[3, 3] = K[9, 9] = ktorsion + K[9, 3] = K[3, 9] = -ktorsion + + #Fx - 0, 6 + #Fy - 1, 7** + #Fz - 2, 8 + #Mx - 3, 9 + #My - 4, 10 + #Mz - 5, 11** + # bending z (Fy/1/7, Mz/5/11) + # 1 5 7 11 + # 1 [12 & 6L & -12 & 6L + # 5 [6L & 4L^2 & -6L & 2L^2 + # 7 [-12 &-6L & 12 & -6L + # 11 [6L & 2L^2 & -6L & 4L^2 + K[1, 1] = K[7, 7] = 12. * kz + K[1, 7] = K[1, 7] = -12. * kz + K[1, 5] = K[5, 1] = K[11, 1] = K[1, 11] = 6. * L * kz + + K[5, 7] = K[7, 5] = K[7, 11] = K[11, 7] = -6. 
* L * kz + K[5, 11] = K[11, 5] = 2. * L2 * kz * (2 - phiz) + K[5, 5] = K[11, 11] = 4. * L2 * kz * (4 + phiz) + + #Fx - 0, 6 + #Fy - 1, 7 + #Fz - 2, 8** + #Mx - 3, 9 + #My - 4, 10** + #Mz - 5, 11 + # bending y (Fz/2/8, My/4/10) + # 2 4 8 10 + # 2 [12 & 6L & -12 & 6L + # 4 [6L & 4L^2 & -6L & 2L^2 + # 8 [-12 &-6L & 12 & -6L + # 10 [6L & 2L^2 & -6L & 4L^2 + K[2, 2] = K[8, 8] = 12. * ky + K[2, 8] = K[2, 8] = -12. * ky + K[2, 4] = K[4, 2] = K[10, 2] = K[2, 10] = 6. * L * ky + + K[4, 8] = K[8, 4] = K[8, 10] = K[10, 8] = -6. * L * ky + K[4, 10] = K[10, 4] = 2. * L * L * ky * (2. - phiy) + K[4, 4] = K[10, 10] = 4. * L * L * ky * (4. + phiy) + + if pa != 0: + assert pa > 0 + for pas in str(pa): # 123456 + pai = int(pas) - 1 # 012345 (0-5) + K[pai, :] = 0 + K[:, pai] = 0 + if pb != 0: + assert pb > 0 + for pbs in str(pb): # 123456 + pbi = int(pbs) + 5 # (6 - 11) + K[pbi, :] = 0 + K[:, pbi] = 0 + return K + +def Kbb_to_Kgg(model: BDF, Kbb: NDArrayNNfloat, + ngrid: int, ndof_per_grid: int, inplace=True) -> NDArrayNNfloat: + """does an in-place transformation""" + assert isinstance(Kbb, (np.ndarray, sci_sparse.csc.csc_matrix)), type(Kbb) + #assert isinstance(Kbb, (np.ndarray, sci_sparse.csc.csc_matrix, sci_sparse.dok.dok_matrix)), type(Kbb) + if not isinstance(Kbb, np.ndarray): + Kbb = Kbb.tolil() + + ndof = Kbb.shape[0] + assert ndof > 0, f'ngrid={ngrid} card_count={model.card_count}' + nids = model._type_to_id_map['GRID'] + + Kgg = Kbb + if not inplace: + Kgg = copy.deepcopy(Kgg) + + for i, nid in enumerate(nids): + node = model.nodes[nid] + if node.cd: + model.log.debug(f'node {nid} has a CD={node.cd}') + cd_ref = node.cd_ref + T = cd_ref.beta_n(n=2) + i1 = i * ndof_per_grid + i2 = (i+1) * ndof_per_grid + Ki = Kbb[i1:i2, i1:i2] + Kgg[i1:i2, i1:i2] = T.T @ Ki @ T + return Kgg diff --git a/pyNastran/dev/bdf_vectorized3/solver/recover/__init__.py b/pyNastran/dev/bdf_vectorized3/solver/recover/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git 
a/pyNastran/dev/bdf_vectorized3/solver/recover/utils.py b/pyNastran/dev/bdf_vectorized3/solver/recover/utils.py new file mode 100644 index 000000000..d25ff8639 --- /dev/null +++ b/pyNastran/dev/bdf_vectorized3/solver/recover/utils.py @@ -0,0 +1,25 @@ +from __future__ import annotations +from typing import TYPE_CHECKING + +if TYPE_CHECKING: # pragma: no cover + from pyNastran.bdf.bdf import Subcase + +def get_plot_request(subcase: Subcase, request: str) -> tuple[str, bool, bool, bool]: + """ + request = 'SPCFORCES' + """ + if request not in subcase: + return '', False, False, True + + value, options = subcase.get_parameter(request) + write_f06 = False + write_f06 = True + if 'PRINT' in options: + write_f06 = True + if 'PLOT' in options: + write_op2 = True + if not(write_f06 or write_op2): + write_op2 = True + quick_return = not write_f06 and not write_op2 + nids_write = value + return nids_write, write_f06, write_op2, quick_return diff --git a/pyNastran/dev/bdf_vectorized3/solver/solver.py b/pyNastran/dev/bdf_vectorized3/solver/solver.py new file mode 100644 index 000000000..e50415297 --- /dev/null +++ b/pyNastran/dev/bdf_vectorized3/solver/solver.py @@ -0,0 +1,2027 @@ +import os +import copy +from datetime import date +from collections import defaultdict +from itertools import count +from typing import Union, Any + +import numpy as np +import scipy as sp +import scipy.sparse as sci_sparse + +import pyNastran +from pyNastran.nptyping_interface import ( + NDArrayNbool, NDArrayNint, NDArrayN2int, NDArrayNfloat, NDArrayNNfloat) +#from pyNastran.bdf.bdf import BDF, Subcase +from pyNastran.dev.bdf_vectorized3.bdf import BDF, Subcase + +from pyNastran.f06.f06_writer import make_end +from pyNastran.f06.tables.oload_resultant import Resultant + +from pyNastran.op2.op2 import OP2 +from pyNastran.op2.op2_interface.op2_classes import ( + RealDisplacementArray, RealSPCForcesArray, RealLoadVectorArray, + RealEigenvalues) +from pyNastran.op2.result_objects.grid_point_weight 
import make_grid_point_weight +#from pyNastran.bdf.mesh_utils.loads import get_ndof + +from .recover.static_force import recover_force_101 +#from .recover.static_stress import recover_stress_101 +#from .recover.static_strain import recover_strain_101 +#from .recover.strain_energy import recover_strain_energy_101 +from .recover.utils import get_plot_request +from .build_stiffness import build_Kgg, DOF_MAP, Kbb_to_Kgg +from pyNastran.dev.bdf_vectorized3.bdf_interface.breakdowns import NO_MASS # , NO_VOLUME + +class Solver: + """defines the Nastran knockoff class""" + def __init__(self, model: BDF): + self.model = model + self.superelement_id = 0 + self.op2 = OP2(log=model.log, mode='nx') + self.log = model.log + + d = date.today() + # the date stamp used in the F06 + self.op2.date = (d.month, d.day, d.year) + + # the "as solved" a-set displacement (after AUTOSPC is applied) + self.xa_ = None + self.Kaa = None + self.Kgg = None + self.aset = None + self.sset = None + + base_name = os.path.splitext(model.bdf_filename)[0] + self.f06_filename = base_name + '.solver.f06' + self.op2_filename = base_name + '.solver.op2' + + def run(self): + page_num = 1 + model = self.model + model.write_bdf('junk.bdf') + sol = model.sol + solmap = { + 101 : self.run_sol_101, # static + 103 : self.run_sol_103, # modes + } + model.cross_reference() + self._update_card_count() + + title = '' + title = f'pyNastran {pyNastran.__version__}' + for subcase in model.subcases.values(): + if 'TITLE' in subcase: + title = subcase.get_parameter('TITLE') + break + + today = None + page_stamp = self.op2.make_stamp(title, today) # + '\n' + with open(self.f06_filename, 'w') as f06_file: + f06_file.write(self.op2.make_f06_header()) + self.op2._write_summary(f06_file, card_count=model.card_count) + f06_file.write('\n') + if sol in [101, 103, 105, 107, 109, 111, 112]: + for subcase_id, subcase in sorted(model.subcases.items()): + if subcase_id == 0: + continue + self.log.debug(f'subcase_id={subcase_id}') + 
#isubcase = subcase.id + subtitle = f'SUBCASE {subcase_id}' + label = '' + if 'SUBTITLE' in subcase: + subtitle = subcase.get_parameter('SUBTITLE') + if 'LABEL' in subcase: + label = subcase.get_parameter('LABEL') + + runner = solmap[sol] + end_options = runner( + subcase, f06_file, page_stamp, + title=title, subtitle=subtitle, label=label, + page_num=page_num, + idtype='int32', fdtype='float64') + else: + raise NotImplementedError(sol) + end_flag = True + f06_file.write(make_end(end_flag, end_options)) + + def _update_card_count(self) -> None: + for card_type, values in self.model._type_to_id_map.items(): + self.model.card_count[card_type] = len(values) + + def build_xg(self, dof_map: DOF_MAP, + ndof: int, subcase: Subcase) -> NDArrayNfloat: + """ + Builds the {xg} vector, which has all SPCs in the analysis (cd) frame + (called global g by NASTRAN) + + {s} = {sb} + {sg} + {sb} = SPC set on SPC/SPC1/SPCADD cards (boundary) + {sg} = SPCs from PS field on GRID card (grid) + + """ + model = self.model + xspc = np.full(ndof, np.nan, dtype='float64') + #get_parameter(self, param_name, msg='', obj=False) + spc_id, unused_options = subcase['SPC'] + if 'SPC' not in subcase: + model.log.warning(f'no spcs...{spc_id}') + model.log.warning(str(subcase)) + return xspc + spc_id, unused_options = subcase['SPC'] + spcs = [] + #for spc in model.spcs: + #if spc.n == 0: + #continue + #spci = spc.slice_card_by_id(spc_id) + #spcs.append(spci) + spcs = [spc.slice_card_by_id(spc_id) for spc in model.spcs + if spc.n > 0] + model.spc1 + #spcs = model.get_reduced_spcs(spc_id, consider_spcadd=True, stop_on_failure=True) + + spc_set = [] + sset = np.zeros(ndof, dtype='bool') + for spc in spcs: + if spc.type == 'SPC1': + #print(spc.get_stats()) + dofs_missed = [] + for comp, (inode0, inode1) in zip(spc.components, spc.inode): + for dof in str(comp): + dofi = int(dof) + spc_nodes = spc.node_id[inode0:inode1] + for nid in spc_nodes: + try: + idof = dof_map[(nid, dofi)] + except Exception: + 
dofs_missed.append((nid, dofi)) + #print('dof_map =', dof_map) + #print((nid, dofi)) + continue + sset[idof] = True + spc_set.append(idof) + xspc[idof] = 0. + if dofs_missed: + dof_str = ', '.join(str(dofi) for dofi in dofs_missed) + self.log.warning(f'Missing (nid,dof) pairs:') + self.log.warning(f' {dof_str}\n{spc.rstrip()}') + #=({nid},{dofi}) doesn't exist...skipping + del dofs_missed + elif spc.type == 'SPC': + for nid, components, enforcedi in zip(spc.nodes, spc.components, spc.enforced): + for component in components: + dofi = int(component) + idof = dof_map[(nid, dofi)] + sset[idof] = True + spc_set.append(idof) + xspc[idof] = enforcedi + else: + raise NotImplementedError(spc) + spc_set = np.array(spc_set, dtype='int32') + #print('spc_set =', spc_set, xspc) + return spc_set, sset, xspc + + + def build_Fb(self, xg: NDArrayNfloat, sset_b, + dof_map: DOF_MAP, + ndof: int, subcase: Subcase) -> NDArrayNfloat: + model = self.model + model.log.info('starting build_Fb') + + Fb = np.zeros(ndof, dtype='float32') + if 'LOAD' not in subcase: + return Fb + + load_id, unused_options = subcase['LOAD'] + #print('load_id =', load_id) + reduced_loads = model.get_reduced_static_load() + + #loads, scales, is_grav = model.get_reduced_loads( + #load_id, scale=1., consider_load_combinations=True, + #skip_scale_factor0=False, stop_on_failure=True, msg='') + loads = reduced_loads[load_id] + #loads : list[loads] + #a series of load objects + #scale_factors : list[float] + #the associated scale factors + #is_grav : bool + #is there a gravity card + for (scale, load) in loads: + if load.type == 'SLOAD': + #print(load.get_stats()) + for mag, nid in zip(load.mags, load.nodes): + i = dof_map[(nid, 0)] # TODO: wrong... 
+ Fb[i] = mag * scale + elif load.type == 'FORCE': + # TODO: wrong because it doesn't handle SPOINTs + fxyz_myz = load.sum_forces_moments() + fxyz = fxyz_myz[:, :3] + nids = load.node_id + self.log.debug(f' FORCE nids={nids} Fxyz={fxyz}') + for fxyzi, nid in zip(fxyz, nids): + assert len(fxyzi) == 3 + fi = dof_map[(nid, 1)] + Fb[fi:fi+3] = fxyzi + + elif load.type == 'MOMENT': + fxyz_myz = load.sum_forces_moments() + mxyz = fxyz_myz[:, 3:] + nids = load.node_id + self.log.debug(f' MOMENT nid={nids} Mxyz={mxyz}') + for mxyzi, nid in zip(mxyz, nids): + fi = dof_map[(nid, 4)] + assert len(mxyzi) == 3 + Fb[fi:fi+3] = mxyzi + elif load.type == 'SPCD': + for nid, components, enforced in zip(load.nodes, load.components, load.enforced): + for component in components: + dof = int(component) + fi = dof_map[(nid, dof)] + #print(f'(nid, dof) = ({nid}, {dof}) => {fi}') + xg[fi] = enforced + Fb[fi] = np.nan + sset_b[fi] = True + else: + print(load.get_stats()) + raise NotImplementedError(load) + #print(subcase) + model.log.info('end of build_Fb') + return Fb + + def get_mpc_constraints(self, subcase: Subcase, + dof_map: DOF_MAP, + fdtype: str='float64') -> Any: + ndof = len(dof_map) + model = self.model + #Fb = np.zeros(ndof, dtype='float32') + constraints = [] + if 'MPC' not in subcase: + return constraints + + log = model.log + mpc_id, unused_options = subcase['MPC'] + mpcs = model.get_reduced_mpcs(mpc_id, consider_mpcadd=True, stop_on_failure=True) + #print('mpc_id =', mpc_id) + + ieq = 0 + ieqs_list = [] + dofs_list = [] + coefficients_list = [] + dependents_list = [] + independents_list = [] + independents_eq = defaultdict(list) + for element in model.rigid_elements.values(): + #etype = element.type + #ieq += 1 + raise NotImplementedError(element.get_stats()) + + nequations = len(mpcs) + Cmpc = sci_sparse.dok_matrix((nequations, ndof), dtype=fdtype) + for j, mpc in enumerate(mpcs): + mpc_type = mpc.type + if mpc_type == 'MPC': + print(mpc) + # The first degree-of-freedom 
(G1, C1) in the sequence is defined to + # be the dependent DOF. + # A dependent DOF assigned by one MPC entry cannot be assigned + # dependent by another MPC entry or by a rigid element. + + #: Component number. (Any one of the Integers 1 through 6 for grid + #: points; blank or zero for scalar points.) + #self.components = components + + #: Coefficient. (Real; Default = 0.0 except A1 must be nonzero.) + #self.coefficients = coefficients + + for i, nid, component, coeff in zip(count(), mpc.nodes, mpc.components, mpc.coefficients): + self.log.debug(f'ieq={ieq} (g,c)={(nid, component)} coeff={coeff}') + assert isinstance(component, int), component + idof = dof_map[(nid, component)] + ieqs_list.append(i) + Cmpc[j, idof] = coeff + + dofs_list.append(idof) + coefficients_list.append(coeff) + if i == 0: + dependents_list.append(idof) + else: + independents_list.append(idof) + independents_eq[ieq].append(idof) + else: + raise NotImplementedError(mpc.get_stats()) + ieq += 1 + + #print(f'Cmpc = {Cmpc}') + dofs = np.array(dofs_list, dtype='int32') + ieqs = np.array(ieqs_list, dtype='int32') + independents = np.array(independents_list, dtype='int32') + dependents = np.array(dependents_list, dtype='int32') + independents_dependents = np.hstack([independents, dependents]) + coefficients = np.array(coefficients_list, dtype='float64') + #print(f'ieqs = {ieqs}') + #print(f'dofs = {dofs}') + #print(f'coefficients = {coefficients}') + #print(f'dependents = {dependents}') + #print(f'independents = {independents}') + + #remove_dependents() + udependents = np.unique(dependents) + assert len(udependents) == len(dependents) + uindependents = np.unique(independents) + dofs = independents + dependents + udofs, idofs = np.unique(independents_dependents, return_counts=True) + + while 1: + for udof in uindependents: + n_dof = np.where(udof == udofs)[0][0] + log.debug(f'dof={udof} N={n_dof}') + if n_dof == 1: + break + else: + raise RuntimeError('cannot find a DOF to remove') + 
#np.where(condition) + + break + #for udof, in udofs: + + assert nequations > 0, 'No MPC equations despite MPCs being found' + + CmpcI = sci_sparse.dok_matrix((nequations, ndof*2), dtype=fdtype) + CmpcI[:, :ndof] = Cmpc + for row in range(nequations): + CmpcI[row, ndof+row] = 1 + log.info(f'CmpcI = {CmpcI}') + + log.info(f'Cmpc = {Cmpc}') + print('rows:') + for row in range(nequations): + dense_row = Cmpc[row, :].toarray() + abs_row = abs_row = np.abs(dense_row) + max_val = abs_row.max() + imax = np.where(dense_row == max_val)[0][0] + if max_val == 0.: + continue + dense_row = CmpcI[row, :].toarray() + for row2 in range(row + 1, nequations): + val = CmpcI[row2, imax] + print(row, row2, imax, val) + CmpcI[row2, :] -= dense_row * (val / max_val) + + print(dense_row) + print(f'max={max_val} imax={imax}') + print(CmpcI) + #self.log.info(constraints) + + #uindependents, nicount = np.unique(independents, return_counts=True) + #print(udofs, idofs) + return constraints + + def run_sol_101(self, subcase: Subcase, + f06_file, + page_stamp: str, title: str='', subtitle: str='', label: str='', + page_num: int=1, + idtype: str='int32', fdtype: str='float64') -> Any: + """ + Runs a SOL 101 + + Analysis (ASET): This set contains all boundary DOFs of the superelement. + It is considered fixed by default. + Fixed Boundary (BSET): This subset of the ASET contains all fixed boundary DOFs. + Free Boundary (CSET): This subset of the ASET contains all free boundary DOFs. + + SOL 101 Sets + ------------ + b = DOFs fixed during component mode analysis or dynamic reduction. + c = DOFs that are free during component mode synthesis or dynamic reduction. + lm = Lagrange multiplier DOFs created by the rigid elements + r = Reference DOFs used to determine free body motion. 
+ l = b + c + lm + t = l + r + q = Generalized DOFs assigned to component modes and residual vectors + a = t + q + #------------------------- + {s} = {sb} + {sg} # done + f = + f = a + o unconstrained (free) structural DOFs + n = f + s all DOFs not constrained by multipoint constraints + ne = n + e all DOFs not constrained by multipoint constraints plus extra DOFs + m = mp + mr all DOFs eliminated by multipoint constraints + g = n + m all DOFs including scalar DOFs + + Per Mystran + ----------- + n = g - m all DOFs not constrained by multipoint constraints + f = n - s unconstrained (free structural DOFs) + a = f - o ananlysis??? set + L = a - r + + My Simplification + ----------------- + g = n + (mp + mr) + g = f + s + (mp + mr) + g = f + (sb + sg) + (mp + mr) + g = (a + o) + (sb + sg) + (mp + mr) + a + o = g - (sb + sg) + (mp + mr) + a = g - o - (sb + sg) + (mp + mr) + + l = b + c # left over dofs + t = l + r # total set of DOFs for super + a = t + q + d = a + e # dynamic analysis + f = a + o + n = f + s + g = n + m + p = g + e + ps = p + sa + pa = ps + k + fe = f + e + ne = n + e + fr = f - q - r + v = o + c + r + + New eqs + f = n - s + a = f - o + t = a - q + l = t - r + b = l - c ??? + c = l - b ??? 
+ """ + self.log.debug(f'run_sol_101') + end_options = [ + 'SEMG', # STIFFNESS AND MASS MATRIX GENERATION STEP + 'SEMR', # MASS MATRIX REDUCTION STEP (INCLUDES EIGENVALUE SOLUTION FOR MODES) + 'SEKR', # STIFFNESS MATRIX REDUCTION STEP + 'SELG', # LOAD MATRIX GENERATION STEP + 'SELR', # LOAD MATRIX REDUCTION STEP + ] + #basic + itime = 0 + ntimes = 1 # static + isubcase = subcase.id + #----------------------------------------------------------------------- + model = self.model + model.setup(run_geom_check=True) + + log = model.log + + dof_map, ps = _get_dof_map(model) + + node_gridtype = _get_node_gridtype(model, idtype=idtype) + ngrid, ndof_per_grid, ndof = get_ndof(model, subcase) + + gset_b = ps_to_sg_set(ndof, ps) + Kgg = build_Kgg(model, dof_map, + ndof, ngrid, ndof_per_grid, + idtype='int32', fdtype=fdtype) + Mbb = build_Mbb(model, subcase, dof_map, ndof, fdtype=fdtype) + #print(self.op2.grid_point_weight) + reference_point, MO = grid_point_weight(model, Mbb, dof_map, ndof) + weight = make_grid_point_weight( + reference_point, MO, + approach_code=1, table_code=13, + title=title, subtitle=subtitle, label=label, + superelement_adaptivity_index='') + self.op2.grid_point_weight[label] = weight + page_num = weight.write_f06(f06_file, page_stamp, page_num) + + self.get_mpc_constraints(subcase, dof_map) + + Mgg = Kbb_to_Kgg(model, Mbb, ngrid, ndof_per_grid, inplace=False) + del Mbb, Mgg + + gset = np.arange(ndof, dtype=idtype) + #gset_b = np.ones(ndof, dtype='bool') + sset, sset_b, xg = self.build_xg(dof_map, ndof, subcase) + #sset_b = np.union1d(sset_b, sset_g) + #print(sset_g) + #print(sset_b) + #print(sset) + Fb = self.build_Fb(xg, sset_b, dof_map, ndof, subcase) + + # Constrained set + # sset = sb_set | sg_set + + # free structural DOFs + #fset = ~sset; # & ~mset + fset_b = gset_b & sset_b # g-b + #self.log.debug(f'gset_b = {gset_b}') + #self.log.debug(f'sset_b = {sset_b}') + #self.log.debug(f'fset_b = {fset_b}') + aset, tset = get_residual_structure(model, 
dof_map, fset_b) + + asetmap = get_aset(model) + if asetmap: + aset = apply_dof_map_to_set(asetmap, dof_map, idtype=idtype) + else: + aset = np.setdiff1d(gset, sset) # a = g-s + naset = aset.sum() + nsset = sset.sum() + if naset == 0 and nsset == 0: + raise RuntimeError('no residual structure found') + # The a-set and o-set are created in the following ways: + # 1. If only OMITi entries are present, then the o-set consists + # of DOFs listed explicitly on OMITi entries. The remaining + # f-set DOFs are placed in the b-set, which is a subset of + # the a-set. + # 2. If ASETi or QSETi entries are present, then the a-set consists + # of all DOFs listed on ASETi entries and any entries listing its + # subsets, such as QSETi, SUPORTi, CSETi, and BSETi entries. Any + # OMITi entries are redundant. The remaining f-set DOFs are placed + # in the o-set. + # 3. If there are no ASETi, QSETi, or OMITi entries present but there + # are SUPORTi, BSETi, or CSETi entries present, then the entire + # f-set is placed in the a-set and the o-set is not created. + + page_num = write_oload(Fb, dof_map, isubcase, ngrid, ndof_per_grid, + f06_file, page_stamp, page_num, log) + + Fg = Fb + + # aset - analysis set + # sset - SPC set + #print('aset = ', aset) + #print('sset = ', sset) + + # u1 = Kaa^-1 * (F1k - Kas*u2k) + abs_xg = np.abs(xg) + finite_xg = np.any(np.isfinite(abs_xg)) + if finite_xg and np.nanmax(abs_xg) > 0.: + self.log.info(f'SPCD found') + self.log.info(f' xg = {xg}') + self.log.info(f' Fg = {Fg}') + set0 = xg == 0. 
+ set0_ = np.where(set0) + + #sset = np.array([]) + sset = np.setdiff1d(sset, set0_) + #self.log.info(f' set0 = {set0}') + self.log.info(f' set0_ = {set0_}') + self.log.info(f' aset = {aset}') + self.log.info(f' sset = {sset}') + + #self.log.info(f' Fa_solve = {Fa_solve}') + #Fa = Fa.reshape(len(Fa), 1) + #xa = Fa.reshape(len(xa), 1) + #print(f'Fa.shape = {Fa.shape}') + #print(f'xa.shape = {xa.shape}') + #print(f'Kas.shape = {Kas.shape}') + #Kas_xa = Ksa @ xa + #print(f'Kas @ xa.shape = {Kas_xa.shape}') + #Fa_solve = Fa - Kas @ xa + #Kspc = Kas[ixa, :][:, ixa] + #print(Kspc.shape) + else: + set0 = sset + sset = [] + #self.sset = sset + self.sset = sset + self.set0 = set0 + self.aset = aset + + Fa, Fs = partition_vector2(Fb, [['a', aset], ['s', sset]]) + del Fb, Fs + + xa, xs, x0 = partition_vector3(xg, [['a', aset], ['s', sset], ['0', set0]]) + #self.log.info(f'xg = {xg}') + del xg + #self.log.info(f'xa = {xa}') + #self.log.info(f'xs = {xs}') + #self.log.info(f'x0 = {x0}') + del x0 + + #print(Kgg) + self.Kgg = Kgg.toarray() + K = partition_matrix(Kgg, [('a', aset), ('s', sset), ('0', set0)]) + Kaa = K['aa'] + Kss = K['ss'] + #Kas = K['as'] + Ksa = K['sa'] + #Ks = partition_matrix(Kggs, [['a', aset], ['s', sset], ['0', set0]]) + #Kaas = Ks['aa'] + #assert Kaa.shape == Kaas.shape + + self.Kaa = Kaa + + #M = partition_matrix(Mgg, [['a', aset], ['s', sset]]) + #Maa = M['aa'] + #Kss = K['ss'] + Kas = K['as'] + Ksa = K['sa'] + K0a = K['0a'] + K0s = K['0s'] + #self.Maa = Maa + #[Kaa]{xa} + [Kas]{xs} = {Fa} + #[Ksa]{xa} + [Kss]{xs} = {Fs} + + #{xa} = [Kaa]^-1 * ({Fa} - [Kas]{xs}) + #{Fs} = [Ksa]{xa} + [Kss]{xs} + + #print(Kaa) + #print(Kas) + #print(Kss) + Fa_solve = Fa + is_sset = len(sset) + is_set0 = len(set0) + is_aset = len(aset) + if is_sset: + Fa_solve = Fa - Kas @ xs + self.log.info(f' Fa_solve = {Fa_solve}') + + Fg_oload = Fg.copy() + if is_aset: + xa_, ipositive, inegative = solve(Kaa, Fa_solve, aset, log, idtype=idtype) + Fa_ = Fa[ipositive] + + log.info(f'aset_ 
= {ipositive}') + log.info(f'xa_ = {xa_}') + log.info(f'Fa_ = {Fa_}') + + xa[ipositive] = xa_ + xa[inegative] = 0. + self.xa_ = xa_ + self.Fa_ = Fa_ + else: + self.log.warning('A-set is empty; all DOFs are constrained') + self.xa_ = [] + self.Fa_ = [] + + + xg = np.full(ndof, np.nan, dtype=fdtype) + #print('aset =', aset) + #print('sset =', sset) + #print('set0 =', set0) + xg[aset] = xa + xg[sset] = xs + xg[set0] = 0. + Fg[aset] = Fa + #print(xg) + self.xg = xg + self.Fg = Fg + + fspc = np.full(ndof, 0., dtype=fdtype) + + if is_sset: + log.info(f'fspc_s recovery') + log.debug(f' aset = {aset}') + log.debug(f' sset = {sset}') + log.debug(f' xa = {xa}') + log.debug(f' xs = {xs}') + fspc_s = Ksa @ xa + Kss @ xs + log.debug(f' fspc_s = {fspc_s}') + log.debug(f' Ksa @ xa = {Ksa @ xa}') + log.debug(f' Kss @ xs = {Kss @ xs}') + log.debug(f' sum = {Ksa @ xa + Kss @ xs}') + Fg[sset] = fspc_s + fspc[sset] = fspc_s + if is_set0: + log.info(f'fspc_0 recovery') + fspc_0 = K0a @ xa + K0s @ xs + log.debug(f' fspc_0 = {fspc_0}') + Fg[set0] = fspc_0 + fspc[set0] = fspc_0 + + log.debug(f'xa = {xa}') + log.debug(f'xs = {xs}') + log.debug(f'fspc = {fspc}') + + xb = xg_to_xb(model, xg, ngrid, ndof_per_grid) + Fb = xg_to_xb(model, Fg, ngrid, ndof_per_grid) + + #log.debug(f'Fs = {Fs}') + #log.debug(f'Fb = {Fb}') + #log.debug(f'xb = {xb}') + + self._save_displacment( + f06_file, + subcase, itime, ntimes, + node_gridtype, xg, + ngrid, ndof_per_grid, + title=title, subtitle=subtitle, label=label, + fdtype=fdtype, page_num=page_num, page_stamp=page_stamp) + + self._save_applied_load( + f06_file, + subcase, itime, ntimes, + node_gridtype, Fg_oload, + ngrid, ndof_per_grid, + title=title, subtitle=subtitle, label=label, + fdtype=fdtype, page_num=page_num, page_stamp=page_stamp) + + self._save_spc_forces( + f06_file, + subcase, itime, ntimes, + node_gridtype, fspc, + ngrid, ndof_per_grid, + title=title, subtitle=subtitle, label=label, + fdtype=fdtype, page_num=page_num, page_stamp=page_stamp) + 
+ op2 = self.op2 + page_stamp += '\n' + if 'FORCE' in subcase: + recover_force_101(f06_file, op2, self.model, dof_map, subcase, xb, + title=title, subtitle=subtitle, label=label, + page_stamp=page_stamp) + + #if 'STRAIN' in subcase: + #recover_strain_101(f06_file, op2, self.model, dof_map, subcase, xb, + #title=title, subtitle=subtitle, label=label, + #page_stamp=page_stamp) + #if 'STRESS' in subcase: + #recover_stress_101(f06_file, op2, self.model, dof_map, subcase, xb, + #title=title, subtitle=subtitle, label=label, + #page_stamp=page_stamp) + #if 'ESE' in subcase: + #recover_strain_energy_101(f06_file, op2, self.model, dof_map, subcase, xb, + #title=title, subtitle=subtitle, label=label, + #page_stamp=page_stamp) + #Fg[sz_set] = -1 + #xg[sz_set] = -1 + op2.write_op2(self.op2_filename, post=-1, endian=b'<', skips=None, nastran_format='nx') + self.log.info('finished') + return end_options + + def _save_displacment(self, f06_file, + subcase: Subcase, itime: int, ntimes: int, + node_gridtype: NDArrayN2int, + xg: NDArrayNfloat, + ngrid: int, ndof_per_grid: int, + title: str='', subtitle: str='', label: str='', + fdtype: str='float32', + page_num: int=1, page_stamp: str='PAGE %s') -> int: + f06_request_name = 'DISPLACEMENT' + table_name = 'OUGV1' + #self.log.debug(f'xg = {xg}') + page_num = self._save_static_table( + f06_file, + subcase, itime, ntimes, + node_gridtype, xg, + RealDisplacementArray, f06_request_name, table_name, self.op2.displacements, + ngrid, ndof_per_grid, + title=title, subtitle=subtitle, label=label, + fdtype=fdtype, page_num=page_num, page_stamp=page_stamp) + return page_num + + def _save_spc_forces(self, f06_file, + subcase: Subcase, itime: int, ntimes: int, + node_gridtype: NDArrayN2int, fspc: NDArrayNfloat, + ngrid: int, ndof_per_grid: int, + title: str='', subtitle: str='', label: str='', + fdtype: str='float32', + page_num: int=1, page_stamp: str='PAGE %s') -> int: + f06_request_name = 'SPCFORCES' + table_name = 'OQG1' + #self.log.debug(f'Fg 
= {Fg}') + page_num = self._save_static_table( + f06_file, + subcase, itime, ntimes, + node_gridtype, fspc, + RealSPCForcesArray, f06_request_name, table_name, self.op2.spc_forces, + ngrid, ndof_per_grid, + title=title, subtitle=subtitle, label=label, + fdtype=fdtype, page_num=page_num, page_stamp=page_stamp) + return page_num + + def _save_applied_load(self, f06_file, + subcase: Subcase, itime: int, ntimes: int, + node_gridtype: NDArrayN2int, + Fg: NDArrayNfloat, + ngrid: int, ndof_per_grid: int, + title: str='', subtitle: str='', label: str='', + fdtype: str='float32', + page_num: int=1, page_stamp: str='PAGE %s') -> int: + f06_request_name = 'OLOAD' + table_name = 'OPG1' + #self.log.debug(f'Fg = {Fg}') + page_num = self._save_static_table( + f06_file, + subcase, itime, ntimes, + node_gridtype, Fg, + RealLoadVectorArray, f06_request_name, table_name, self.op2.load_vectors, + ngrid, ndof_per_grid, + title=title, subtitle=subtitle, label=label, + fdtype=fdtype, page_num=page_num, page_stamp=page_stamp) + return page_num + + def _recast_data(self, idtype: str, fdtype: str): + idtype = 'int32' + fdtype = 'float32' + return idtype, fdtype + + def _save_static_table(self, f06_file, + subcase: Subcase, itime: int, ntimes: int, + node_gridtype: NDArrayN2int, Fg: NDArrayNfloat, + obj: Union[RealSPCForcesArray], + f06_request_name: str, + table_name: str, slot: dict[Any, RealSPCForcesArray], + ngrid: int, ndof_per_grid: int, + title: str='', subtitle: str='', label: str='', + idtype: str='int32', fdtype: str='float32', + page_num: int=1, page_stamp: str='PAGE %s') -> int: + idtype, fdtype = self._recast_data(idtype, fdtype) + isubcase = subcase.id + #self.log.debug(f'saving {f06_request_name} -> {table_name}') + unused_nids_write, write_f06, write_op2, quick_return = get_plot_request( + subcase, f06_request_name) + if quick_return: + return page_num + nnodes = node_gridtype.shape[0] + data = np.zeros((ntimes, nnodes, 6), dtype=fdtype) + ngrid_dofs = ngrid * ndof_per_grid + 
if ndof_per_grid == 6: + _fgi = Fg[:ngrid_dofs].reshape(ngrid, ndof_per_grid) + data[itime, :ngrid, :] = _fgi + else: + raise NotImplementedError(ndof_per_grid) + data[itime, ngrid:, 0] = Fg[ngrid_dofs:] + + spc_forces = obj.add_static_case( + table_name, node_gridtype, data, isubcase, + is_sort1=True, is_random=False, is_msc=True, + random_code=0, title=title, subtitle=subtitle, label=label) + if write_f06: + page_num = spc_forces.write_f06( + f06_file, header=None, + page_stamp=page_stamp, page_num=page_num, + is_mag_phase=False, is_sort1=True) + f06_file.write('\n') + if write_op2: + slot[isubcase] = spc_forces + return page_num + + def run_sol_103(self, subcase: Subcase, f06_file, + page_stamp: str, title: str='', subtitle: str='', label: str='', + page_num: int=1, + idtype: str='int32', fdtype: str='float64'): + """ + [M]{xdd} + [C]{xd} + [K]{x} = {F} + [M]{xdd} + [K]{x} = {F} + -[M]{xdd}λ^2 + [K]{x} = {0} + {X}(λ^2 - [M]^-1[K]) = {0} + λ^2 - [M]^-1[K] = {0} + λ^2 = [M]^-1[K] + [A][X] = [X]λ^2 + """ + self.log.debug(f'run_sol_101') + end_options = [ + 'SEMR', # MASS MATRIX REDUCTION STEP (INCLUDES EIGENVALUE SOLUTION FOR MODES) + 'SEKR', # STIFFNESS MATRIX REDUCTION STEP + ] + model = self.model + op2 = self.op2 + #log = model.log + #write_f06 = True + + dof_map, ps = _get_dof_map(model) + node_gridtype = _get_node_gridtype(model, idtype=idtype) + ngrid, ndof_per_grid, ndof = get_ndof(self.model, subcase) + Kgg = build_Kgg(model, dof_map, + ndof, ngrid, + ndof_per_grid, + idtype='int32', fdtype=fdtype) + + Mbb = build_Mbb(model, subcase, dof_map, ndof, fdtype=fdtype) + + Mgg = Kbb_to_Kgg(model, Mbb, ngrid, ndof_per_grid) + del Mbb + + gset = np.arange(ndof, dtype=idtype) + sset, sset_b, xg = self.build_xg(dof_map, ndof, subcase) + aset = np.setdiff1d(gset, sset) # a = g-s + + # aset - analysis set + # sset - SPC set + xa, xs = partition_vector2(xg, [['a', aset], ['s', sset]]) + del xg + #print(f'xa = {xa}') + #print(f'xs = {xs}') + #print(Kgg) + M = 
partition_matrix(Mgg, [['a', aset], ['s', sset]]) + Maa = M['aa'] + + K = partition_matrix(Kgg, [['a', aset], ['s', sset]]) + Kaa = K['aa'] + #Kss = K['ss'] + #Kas = K['as'] + #Ksa = K['sa'] + + #[Kaa]{xa} + [Kas]{xs} = {Fa} + #[Ksa]{xa} + [Kss]{xs} = {Fs} + + #{xa} = [Kaa]^-1 * ({Fa} - [Kas]{xs}) + #{Fs} = [Ksa]{xa} + [Kss]{xs} + + # TODO: apply AUTOSPCs correctly + #print(Kaa) + #print(Kas) + #print(Kss) + #Maa_, ipositive, inegative, unused_sz_set = remove_rows(Maa, aset) + Kaa_, ipositive, unused_inegative, unused_sz_set = remove_rows(Kaa, aset) + Maa_ = Maa[ipositive, :][:, ipositive] + #Fs = np.zeros(ndof, dtype=fdtype) + #print(f'Fg = {Fg}') + #print(f'Fa = {Fa}') + #print(f'Fs = {Fs}') + #Fa_ = Fa[ipositive] + # [A]{x} = {b} + # [Kaa]{x} = {F} + # {x} = [Kaa][F] + #print(f'Kaa:\n{Kaa}') + #print(f'Fa: {Fa}') + + #print(f'Kaa_:\n{Kaa_}') + #print(f'Fa_: {Fa_}') + #na = Kaa_.shape[0] + ndof_ = Kaa_.shape[0] + neigenvalues = 10 + if ndof_ < neigenvalues: + eigenvalues, xa_ = sp.linalg.eigh(Kaa_.toarray(), Maa_) + else: + #If M is specified, solves ``A * x[i] = w[i] * M * x[i]`` + eigenvalues, xa_ = sp.sparse.linalg.eigsh( + Kaa_, k=neigenvalues, M=Maa_, + sigma=None, which='LM', v0=None, ncv=None, maxiter=None, tol=0, + return_eigenvectors=True, Minv=None, OPinv=None, mode='normal') + nmodes = len(eigenvalues) + model.log.debug(f'eigenvalues = {eigenvalues}') + #print(f'xa_ = {xa_} {xa_.shape}') + #xa2 = xa_.reshape(nmodes, na, na) + + xg_out = np.full((nmodes, ndof), np.nan, dtype=fdtype) + xa_out = np.full((nmodes, len(xa)), np.nan, dtype=fdtype) + #xa[ipositive] = xa_ + xa_out[:, ipositive] = xa_ + xg = np.arange(ndof, dtype=fdtype) + #xg[aset] = xa + #xg[sset] = xs + xg_out[:, aset] = xa + xg_out[:, sset] = xs + + isubcase = subcase.id + mode_cycles = eigenvalues + unused_eigenvalues = RealEigenvalues(title, 'LAMA', nmodes=0) + #op2.eigenvalues[title] = eigenvalues + + nnodes = node_gridtype.shape[0] + data = xg_out.reshape((nmodes, nnodes, 6)) + 
table_name = 'OUGV1' + modes = np.arange(1, nmodes + 1, dtype=idtype) + unused_eigenvectors = RealDisplacementArray.add_modal_case( + table_name, node_gridtype, data, isubcase, modes, eigenvalues, mode_cycles, + is_sort1=True, is_random=False, is_msc=True, random_code=0, + title=title, subtitle=subtitle, label=label) + #op2.eigenvectors[isubcase] = eigenvectors + + write_f06 = True + if write_f06: + str(page_num) + #page_num = eigenvectors.write_f06( + #f06_file, header=None, + #page_stamp=page_stamp, page_num=page_num, + #is_mag_phase=False, is_sort1=True) + #f06_file.write('\n') + #fspc = Ksa @ xa + Kss @ xs + #Fs[ipositive] = Fsi + + #Fg[aset] = Fa + #Fg[sset] = fspc + #log.debug(f'xa = {xa}') + #log.debug(f'Fs = {Fs}') + #log.debug(f'xg = {xg}') + #log.debug(f'Fg = {Fg}') + + str(f06_file) + str(page_stamp) + op2.write_op2(self.op2_filename, post=-1, endian=b'<', skips=None, nastran_format='nx') + return end_options + #raise NotImplementedError(subcase) + + def run_sol_111(self, subcase: Subcase, f06_file, + idtype: str='int32', fdtype: str='float64'): + """ + frequency + [M]{xdd} + [C]{xd} + [K]{x} = {F} + {φ} ([M]s^2 + [C]{s} + [K]) = {F} + {φ} ([M]s^2 + [C]{s} + [K]) = {F} + {φ} ([M]s^2 + [K]) = {F} + + {d}*sin(wt) ([M]s^2 + [K]) = {0} + [M]{xdd} + [K]{x} = {0} + x = A*sin(wt) + xd = dx/dt = A*w*cos(wt) + xdd = -A*w^2 * sin(wt) + -w^2[M]{A}*sin(wt) + [K]{A}*sin(wt) = {0} + (-w^2[M] + [K]){A} = {0} + -w^2[M] + [K] = 0 + w^2 = [M]^-1 * [K] + + -w^2[M]{A}*sin(wt) + [K]{A}*sin(wt) = {F}sin(wt) + (-w^2[M] + [K]){A} = {F} + {A} = (-w^2[M] + [K])^-1 * {F} + {A} = ([K] - w^2[M])^-1 * {F} + + {φ}^T[M]{φ}{qdd} + {φ}^T[B]{φ}{qd} {φ}^T[K]{φ}{q} = {φ}^T{P(t)} + {qdd} + 2*zeta*w_n{qd} w_n^2{q} = 1/M_n {φ}^T{P(t)} + {u_total} = {u_dynamic} + {u_static} = [PHI]{q} + {u_static} + + """ + str(subcase) + str(f06_file) + str(fdtype) + str(idtype) + + +def partition_matrix(matrix, sets: list[tuple[str, np.ndarray]]) -> dict[tuple[str, str], NDArrayNNfloat]: + """partitions a 
matrix""" + matrices = {} + for aname, aset in sets: + for bname, bset in sets: + matrices[aname + bname] = matrix[aset, :][:, bset] + return matrices + +def partition_vector(vector, sets, fdtype: str='float64') -> list[NDArrayNfloat]: # pragma: no cover + """partitions a vector""" + vectors = [] + for unused_aname, aset in sets: + if len(aset) == 0: + vectors.append(np.array([], dtype=fdtype)) + continue + vectori = vector[aset] + vectors.append(vectori) + return vectors + +def partition_vector2(vector, sets, + fdtype: str='float64') -> tuple[NDArrayNfloat, NDArrayNfloat]: + """partitions a vector""" + assert len(sets) == 2, sets + #vectors = partition_vector(vector, sets, fdtype=fdtype) + #return tuple(vectors) + (unused_name0, set0), (unused_name1, set1) = sets + vectors = (vector[set0], vector[set1]) + return vectors + +def partition_vector3(vector, sets, + fdtype: str='float64') -> tuple[NDArrayNfloat, NDArrayNfloat, NDArrayNfloat]: + """partitions a vector""" + assert len(sets) == 3, sets + #vectors = partition_vector(vector, sets, fdtype=fdtype) + (unused_name0, set0), (unused_name1, set1), (unused_name2, set2) = sets + vectors = (vector[set0], vector[set1], vector[set2]) + return tuple(vectors) + +def remove_rows(Kgg: NDArrayNNfloat, aset: NDArrayNint, idtype='int32') -> NDArrayNNfloat: + """ + Applies AUTOSPC to the model (sz) + + mp DOFs eliminated by multipoint constraints. + mr DOFs eliminated by multipoint constraints created by the rigid + elements using the LGELIM method on the Case Control command RIGID. + sb* DOFs eliminated by single-point constraints that are included + in boundary condition changes and by the AUTOSPC feature. + (See the sz set) + sg* DOFs eliminated by single-point constraints that are specified + on the PS field on GRID Bulk Data entries. + sz DOFs eliminated by the AUTOSPC feature. + o DOFs omitted by structural matrix partitioning. + q Generalized DOFs assigned to component modes and residual vectors. 
+ r Reference DOFs used to determine free body motion. + c DOFs that are free during component mode synthesis or dynamic reduction. + b DOFs fixed during component mode analysis or dynamic reduction. + lm Lagrange multiplier DOFs created by the rigid elements + using the LAGR method on the Case Control command, RIGID. + e Extra DOFs introduced in dynamic analysis. + sa Permanently constrained aerodynamic DOFs. + k Aerodynamic mesh point set for forces and displacements on the aero mesh. + j Aerodynamic mesh collocation point set (exact physical + interpretation is dependent on the aerodynamic theory). + + + Input Sets + ---------- + mp DOFs eliminated by multipoint constraints. + mr DOFs eliminated by multipoint constraints created by the rigid + elements using the LGELIM method on the Case Control command RIGID. + g all DOFs including scalar DOFs + o Omit set + s SPC sets (sb + sg) + + Source of Sets + -------------- + g 6-DOFs per GRID, 1-DOF SPOINTs, + Nmodes-DOF EPOINTs (Extra Dynamic/Modal Point; ), + nRBE-DOF LPOINTs ("Lagrange" Point; e.g., RBE3), + nHarmonic-DOF HPOINTs (Harmonic Point; e.g., RINGFL) + m MPC, MPCADD, RBAR/RBAR1, RBE1/RBE2/RBE3, RROD, RSPLINE, + RJOINT, RSSCON, RTRPLT/RTRPLT1, GMBC, GMSPC* + r SUPORT/SUPORT1, SUPAX + s SPC/SPC1, SPCADD, BNDGRID, AUTOSPC + sb SPC/SPC1, SPCADD, SPCAX, FLSYM, GMSPC*, BNDGRID, PARAM,AUTOSPC,YES + sz PARAM,AUTOSPC,YES + sg GRID, GRIDB, GRDSET (PS field) + o OMIT/OMIT1, GRID (SEID field), SESET + q QSET/QSET1 + sa CAEROi + a ASET/ASET1, CSUPEXT, superelement exterior DOFs + c CSET/CSET1, BNDREE/BNDFRE1 + b BSET/BSET1, BNDFIX/BNDFIX1 + ap ACCSPT + rv RVDOF/RVDOF1 + u1-u6 USET/USET1, SEUSET/SEUSET1 + + * placed in the "m" set only if constraints are not specified in + the basic coordinate system + + + Set Name Description + -------- ----------- + s = sb + sg all DOFs eliminated by single point constraints + l = b + c + lm the DOFs remaining after the reference DOFs are removed (DOF left over) + t = l + r the total 
set of physical boundary DOF for superelements + a = t + q the analysis set used in eigensolution + d = a + e the set used in dynamic analysis by the direct method + f = a + o unconstrained (free) structural DOFs + fe = f + e free DOFs plus extra DOFs + n = f + s all DOFs not constrained by multipoint constraints + ne = n + e all DOFs not constrained by multipoint constraints plus extra DOFs + m = mp + mr all DOFs eliminated by multipoint constraints + g = n + m all DOFs including scalar DOFs + p = g + e all physical DOFs including extra point DOFs + ks = k + sa the union of k and the re-used s-set (6 dof per grid) + js = j + sa the union of j and the re-used s-set (6 dof per grid) + fr = o + l statically independent set minus the statically determinate supports (fr = f – q – r) + v = o + c + r the set free to vibrate in dynamic reduction and component mode synthesis + al = a – lm a-set without Lagrange multiplier DOFs + dl = d – lm d-set without Lagrange multiplier DOFs + gl = g – lm g-set without Lagrange multiplier DOFs + ll = l – lm l-set without Lagrange multiplier DOFs + nf = ne – lm ne-set without Lagrange multiplier DOFs + pl = p – lm p-set without Lagrange multiplier DOFs + tl = t – lm t-set without Lagrange multiplier DOFs + nl = n – lm n-set without Lagrange multiplier DOFs + fl = f – lm f-set without Lagrange multiplier DOFs + ff = fe – lm fe-set without Lagrange multiplier DOFs + + + Per MSC Nastran + --------------- + js = j + sa the union of j and the re-used s-set (6 dof per grid) + ks = k + sa the union of k and the re-used s-set (6 dof per grid) + + [K]{x} = {F} + a - active + s - SPC + [Kaa Kas]{xa} = {Fa} + [Ksa Kss]{xs} {Fs} + """ + abs_kgg = np.abs(Kgg) + col_kgg = abs_kgg.max(axis=0) + row_kgg = abs_kgg.max(axis=1) + #print(abs_kgg) + #print(col_kgg) + #print(row_kgg) + #izero = np.where((col_kgg == 0.) & (row_kgg == 0))[0] + if isinstance(Kgg, np.ndarray): + ipositive = np.where((col_kgg > 0.) 
| (row_kgg > 0))[0] + elif isinstance(Kgg, (sci_sparse.csc.csc_matrix, sci_sparse.lil.lil_matrix)): + ipositive1 = col_kgg.todense().nonzero()[1] + ipositive2 = row_kgg.todense().T.nonzero()[1] + ipositive = np.union1d(ipositive1, ipositive2) + if isinstance(Kgg, sci_sparse.lil.lil_matrix): + Kgg = Kgg.tocsc() + else: + raise NotImplementedError(type(Kgg)) + all_rows = np.arange(Kgg.shape[0], dtype=idtype) + inegative = np.setdiff1d(all_rows, ipositive) + apositive = aset[ipositive] + sz_set = np.setdiff1d(aset, apositive) + Kaa = Kgg[ipositive, :][:, ipositive] + return Kaa, ipositive, inegative, sz_set + +def guyan_reduction(matrix, set1, set2): + """ + https://en.wikipedia.org/wiki/Guyan_reduction + + [A11 A12]{d1} = {F1} + [A21 A22]{d2} = {0} + [A11]{d1} + [A12]{d2} = {F} + [A21]{d1} + [A22]{d2} = {0} + {d2} = -[A22]^-1[A21]{d1} + [A11]{d1} + [A12](-[A22]^-1[A21]{d1}) = {F} + ([A11] + ([A12]-[A22]^-1[A21]){d1} = {F} + + (AB)^-1 = B^-1 A^-1 + (cA)^-1 = A^-1/c + (A)^-n = (A^-1)^n + (A^-1)^T = (A^T)^-1 + + https://math.stackexchange.com/questions/17776/inverse-of-the-sum-of-matrices + (A + B)^-1 = A^-1 - 1/(1+g)A^-1 B A^-1; g=trace(B A^-1) where g != -1 + = A^-1 - A^-1*B*(A+B^-1) + (A + B)^-1 = A^-1 + X + (A^-1 + X)(A + B) = I + A^-1 A + X A + A^-1 B + X B = I + X(A + B) = A^-1 B + X = -A^-1 B (A^-1 + B)^-1 + X = -A^-1 B (A^-1 + X) + (I + A^-1 B) X = A^-1 B A^-1 + X = -(I + A^-1 B)^-1 (A^-1 B A^-1) + """ + sets = [ + ['1', set1], + ['2', set2], + ] + A = partition_matrix(matrix, sets) + A11 = A['11'] + A22 = A['22'] + #A12 = A['12'] + A21 = A['21'] + nA11 = A11.shape[0] + + # ([A11] + ([A12]-[A22]^-1[A21]){d1} = {F} + A22m1 = np.linalg.inv(A22) + #A12_A22m1_A21 = np.linalg.multi_dot([A11, A22m1, A21]) + T = np.vstack([np.eye(nA11), A22m1 @ A21]) + return np.linalg.multi_dot([T.T, A, T]) + #return A11 + A12_A22m1_A21 + +def _get_node_gridtype(model: BDF, idtype: str='int32') -> NDArrayN2int: + """ + Helper method for results post-processing + + Point type (per NX 
10; OUG table; p.5-663): + =1, GRID Point + =2, Scalar Point + =3, Extra Point + =4, Modal + =5, p-elements, 0-DOF + -6, p-elements, number of DOF + + """ + node_gridtype = [] + if model.grid.n: + nodes = model.grid.node_id + ones = np.ones(model.grid.n, nodes.dtype) + node_gridtype.append(np.column_stack([nodes, ones])) + + if model.spoint.n: + #raise NotImplementedError() + spoint_id = model.spoint.spoint_id + spoint_id.sort() + twos = np.ones(model.spoint.n, nodes.dtype) * 2 + node_gridtype.append(np.column_stack([spoint_id, twos])) + + assert len(node_gridtype) > 0 + #print(node_gridtype) + node_gridtype_array = np.vstack(node_gridtype) + return node_gridtype_array + +def get_aset(model: BDF) -> set[tuple[int, int]]: + aset_map = set() + return aset_map + for aset in model.asets: + if aset.type == 'ASET1': + comp = aset.components + for nid in aset.ids: + for compi in comp: + aset_map.add((nid, int(compi))) + elif aset.type == 'ASET': + for nid, comp in zip(aset.ids, aset.components): + for compi in comp: + aset_map.add((nid, int(compi))) + else: + raise NotImplementedError(aset) + return aset_map + +def get_bset(model: BDF) -> set[tuple[int, int]]: + """creates the b-set""" + bset_map = set() + return bset_map + for bset in model.bsets: + if bset.type == 'BSET1': + comp = bset.components + for nid in bset.ids: + for compi in comp: + bset_map.add((nid, int(compi))) + elif bset.type == 'BSET': + for nid, comp in zip(bset.ids, bset.components): + for compi in comp: + bset_map.add((nid, int(compi))) + else: + raise NotImplementedError(bset) + return bset_map + +def get_cset(model: BDF) -> set[tuple[int, int]]: + """creates the c-set""" + cset_map = set() + return cset_map + for cset in model.csets: + if cset.type == 'CSET1': + comp = cset.components + for nid in cset.ids: + for compi in comp: + cset_map.add((nid, int(compi))) + elif cset.type == 'CSET': + for nid, comp in zip(cset.ids, cset.components): + for compi in comp: + cset_map.add((nid, int(compi))) + 
else: + raise NotImplementedError(cset) + return cset_map + +def get_omit_set(model: BDF) -> set[tuple[int, int]]: + """creates the o-set""" + omit_set_map = set() + return omit_set_map + for omit in model.omits: + if omit.type == 'OMIT1': + comp = omit.components + for nid in omit.ids: + for compi in comp: + omit_set_map.add((nid, int(compi))) + elif omit.type == 'OMIT': + for nid, comp in zip(omit.ids, omit.components): + for compi in comp: + omit_set_map.add((nid, int(compi))) + else: + raise NotImplementedError(omit) + return omit_set_map + +def get_rset(model: BDF) -> set[tuple[int, int]]: + """creates the r-set""" + rset_map = set() + return rset_map + for rset in model.suport: + for nid, comp in zip(rset.ids, rset.components): + for compi in comp: + rset_map.add((nid, int(compi))) + + for suport in model.suport1: + comp = suport.components + for nid in suport.ids: + for compi in comp: + rset_map.add((nid, int(compi))) + return rset_map + +def get_qset(model: BDF) -> set[tuple[int, int]]: + """creates the q-set""" + qset_map = set() + return qset_map + for qset in model.qsets: + if qset.type == 'QSET1': + comp = qset.components + for nid in qset.ids: + for compi in comp: + qset_map.add((nid, int(compi))) + elif qset.type == 'QSET': + for nid, comp in zip(qset.ids, qset.components): + for compi in comp: + qset_map.add((nid, int(compi))) + else: + raise NotImplementedError(qset) + return qset_map + +def get_residual_structure(model: BDF, dof_map: DOF_MAP, + fset: NDArrayNbool, idtype: str='int32') -> NDArrayNbool: + """gets the residual structure dofs""" + asetmap = get_aset(model) + bsetmap = get_bset(model) + csetmap = get_cset(model) + rsetmap = get_rset(model) + qsetmap = get_qset(model) + osetmap = get_omit_set(model) + aset = apply_dof_map_to_set(asetmap, dof_map, idtype=idtype, use_ints=False) + bset = apply_dof_map_to_set(bsetmap, dof_map, idtype=idtype, use_ints=False) + cset = apply_dof_map_to_set(csetmap, dof_map, idtype=idtype, use_ints=False) + 
rset = apply_dof_map_to_set(rsetmap, dof_map, idtype=idtype, use_ints=False) + oset = apply_dof_map_to_set(osetmap, dof_map, idtype=idtype, use_ints=False) + qset = apply_dof_map_to_set(qsetmap, dof_map, idtype=idtype, use_ints=False) + + #ndof = len(dof_map) + #print('aset =', aset) + #print('qset =', qset) + #print('fset =', fset) + #print('oset =', oset) + + aqo_set = np.vstack([aset, qset, oset]) + bcr_set = np.vstack([bset, cset, rset]) + # The a-set and o-set are created in the following ways: + # 1. If only OMITi entries are present, then the o-set consists + # of degrees-of-freedom listed explicitly on OMITi entries. + # The remaining f-set degrees-of-freedom are placed in the + # b-set, which is a subset of the a-set. + if np.any(oset) and ~np.any([aset, bset, cset, qset, rset]): + # b = f - o + bset = np.setdiff1d(fset, oset) + aset = bset + # 2. If ASETi or QSETi entries are present, then the a-set + # consists of all degrees-of-freedom listed on ASETi entries + # and any entries listing its subsets, such as QSETi, SUPORTi + # CSETi, and BSETi entries. Any OMITi entries are redundant. + # The remaining f-set degrees-of-freedom are placed in the + # o-set. + elif np.any([aset, qset]): + abcqr_set = np.vstack([aset, bset, cset, qset, rset]) + #print(abcqr_set) + aset = np.sum(abcqr_set, axis=0).astype('bool') # dim=2 -> row + if np.any(oset): # check no overlap with O set + ao_set = aset | oset + if np.any(ao_set): + raise RuntimeError('OMITi entries cannot overlap with ASETi entries ' + 'or any ASET subsets, such as QSETi, ' + 'SUPORTi, CSETi, and BSETi entries.') + + oset = fset & ~aset # assign remaining to O set + # 3. If there are no ASETi, QSETi, or OMITi entries present but + # there are SUPORTi, BSETi, or CSETi entries present, then + # the entire f-set is placed in the a-set and the o-set is + # not created. + elif (not np.any(aqo_set) and np.any(bcr_set)): + aset = fset + # 4. 
There must be at least one explicit ASETi, QSETi, or OMITi + # entry for the o-set to exist, even if the ASETi, QSETi, or + # OMITi entry is redundant. (related to item 3) + else: + # No model reduction - same as previous option + aset = fset + # Add ASET to residual structure TSET + tset = aset & ~qset + + # Exclusive Degrees-of-freedom sets + # --------------------------------- + # m # ([nGdof,1] logical) Degrees-of-freedom eliminated by multiple constraints + # sb # ([nGdof,numSID] logical) Degrees-of-freedom eliminated by single-point constraints that are included in boundary conditions + # sg # ([nGdof,1] logical) Degrees-of-freedom eliminated by single-point constraints that are specified on the PS field on node entries + # o # ([nGdof,1] logical) Degrees-of-freedom omitted by structural matrix partitioning + # q # ([nGdof,1] logical) Generalized degrees-of-freedom for dynamic reduction or component mode synthesis + # r # ([nGdof,1] logical) Reference degrees-of-freedom used to determine free body motion + # c # ([nGdof,1] logical) Degrees-of-freedom that are free during component mode synthesis or dynamic reduction + # b # ([nGdof,1] logical) Degrees-of-freedom fixed during component mode analysis or dynamic reduction + # e # ([nGdof,1] logical) extra degrees-of-freedom introduced in dynamic analysis + # sa # Permanently constrained aerodynamic degrees-of-freedom + # k # Aerodynamic degrees-of-freedom + + # Nonexclusive Degrees-of-freedom sets + # ------------------------------------ + # s # ([nGdof,numSID] logical) [sb + sg] Degrees-of-freedom eliminated by single point constraints + # l # ([nGdof,1] logical) [b + c] Structural degrees-of-freedom remaining after the reference degrees-of-freedom are removed (degrees-of-freedom left over) + # # ([nGdof,1] logical) [l + r] Total set of physical boundary degrees-of-freedom for superelements + # # ([nGdof,1] logical) [t + q] Set assembled in superelement analysis + # d # ([nGdof,1] logical) [a + e] Set used in 
dynamic analysis by the direct method + # # ([nGdof,1] logical) [a + o] Unconstrained (free) structural degrees-of-freedom + # fe # ([nGdof,1] logical) [f + e] Free structural degrees-of-freedom plus extra degrees-of-freedom + # # ([nGdof,1] logical) [f + s] Degrees-of-freedom not constrained by multipoint constraints + # ne % ([nGdof,1] logical) [n + e] Structural degrees-of-freedom not constrained by multipoint constraints plus extra degrees-of-freedom + # g = true(nGdof,1) [n + m] All structural degrees-of-freedom including scalar degrees-of-freedom + # p = [g + e] Physical degrees-of-freedom + # ps = [p + sa] Physical and constrained (SPCi) aerodynamic degrees-of-freedom + # pa = [ps + k] Physical set for aerodynamics + # fr = [f ? q ? r] Statically independent set minus the statically determinate supports + # v = [o + c + r] Set free to vibrate in dynamic reduction and component mode synthesis + return aset, tset + +def apply_dof_map_to_set(set_map, dof_map: DOF_MAP, idtype: str='int32', use_ints: bool=True) -> NDArrayNbool: + """changes a set defined in terms of (nid, comp) into an array of integers""" + if use_ints: + ndof = len(set_map) + aset = np.full(ndof, 0, dtype=idtype) + for i, dofi in enumerate(set_map): + aset[i] = dof_map[dofi] + aset.sort() + else: + ndof = len(dof_map) + aset = np.full(ndof, 0, dtype='bool') + for dofi in set_map: + i = dof_map[dofi] + aset[i] = True + return aset + +def xg_to_xb(model, xg: NDArrayNfloat, ngrid: int, ndof_per_grid: int, + inplace: bool=True) -> NDArrayNfloat: + assert isinstance(xg, np.ndarray) + str(ngrid) + + xb = xg + if not inplace: + xb = copy.deepcopy(xb) + + nids = model._type_to_id_map['GRID'] + for i, nid in enumerate(nids): + node = model.nodes[nid] + if node.cd: + model.log.debug(f'node {nid} has a CD={node.cd}') + cd_ref = node.cd_ref + T = cd_ref.beta_n(n=2) + i1 = i * ndof_per_grid + i2 = (i+1) * ndof_per_grid + xi = xg[i1:i2] + xb[i1:i2] = xi @ T # TODO: verify the transform; I think it's right + 
return xb + +def write_oload(Fb: NDArrayNfloat, + dof_map: DOF_MAP, + isubcase: int, ngrid: int, ndof_per_grid: int, + f06_file, page_stamp: str, page_num: int, log) -> int: + """writes the OLOAD RESULTANT table""" + str(ngrid) + str(dof_map) + fxyz_mxyz = Fb[:ngrid*ndof_per_grid].reshape(ngrid, ndof_per_grid) + fxyz_mxyz_sum = fxyz_mxyz.sum(axis=0) + + f06_file.write( + ' *** USER INFORMATION MESSAGE 7310 (VECPRN)\n' + ' ORIGIN OF SUPERELEMENT BASIC COORDINATE SYSTEM WILL BE USED AS REFERENCE LOCATION.\n' + ' RESULTANTS ABOUT ORIGIN OF SUPERELEMENT BASIC COORDINATE SYSTEM IN ' + 'SUPERELEMENT BASIC SYSTEM COORDINATES.\n' + ) + oload = Resultant('OLOAD', fxyz_mxyz_sum, isubcase) + log.info(f'OLOAD {fxyz_mxyz_sum}') + page_num = oload.write_f06(f06_file, page_stamp, page_num) + return page_num + 1 + +def solve(Kaa, Fa_solve, aset, log, idtype='int32'): + """solves [K]{u} = {F}""" + log.info("starting solve") + Kaa_, ipositive, inegative, unused_sz_set = remove_rows(Kaa, aset, idtype=idtype) + + #if np.linalg.det(Kaa) == 0.: + #log.error('singular Kaa') + + isolve = len(ipositive) + if isolve == 0: + log.error(f' ipositive = {ipositive}') + log.error(f' Kaa_ = {Kaa_}') + #print(Kaa_) + raise RuntimeError('no residual structure found') + + #Maa_ = Maa[ipositive, :][:, ipositive] + + #print(f'Fg = {Fg}') + #print(f'Fa = {Fa}') + #print(f'Fs = {Fs}') + + Fa_ = Fa_solve[ipositive] + # [A]{x} = {b} + # [Kaa]{x} = {F} + # {x} = [Kaa][F] + #print(f'Kaa:\n{Kaa}') + #print(f'Fa: {Fa}') + + log.debug(f' Kaas_:\n{Kaa_.toarray()}') + log.debug(f' Kaa_:\n{Kaa_}') + log.debug(f' Fa_: {Fa_}') + xa_ = np.linalg.solve(Kaa_.toarray(), Fa_) + xas_ = sci_sparse.linalg.spsolve(Kaa_, Fa_) + #xas_ = np.linalg.solve(Kaas_.toarray(), Fa_) + sparse_error = np.linalg.norm(xa_ - xas_) + if sparse_error > 1e-12: + log.warning(f' sparse_error = {sparse_error}') + log.info("finished solve") + return xas_, ipositive, inegative + +def build_Mbb(model: BDF, + subcase: Subcase, + dof_map: DOF_MAP, + 
ndof: int, fdtype='float64') -> NDArrayNNfloat: + """builds the mass matrix in the basic frame, [Mbb]""" + log = model.log + log.info('starting build_Mbb') + wtmass = 1. + #wtmass = model.get_param('WTMASS', 1.0) + #Mbb = np.eye(ndof, dtype=fdtype) + Mbb = np.zeros((ndof, ndof), dtype=fdtype) + #Mbb = np.eye(ndof, dtype='int32') + str(model) + str(subcase) + #no_mass = { + #'CELAS1', 'CELAS2', 'CELAS3', 'CELAS4', + #'CDAMP1', 'CDAMP2', 'CDAMP3', 'CDAMP4', + #} + + mass_rod_2x2 = np.array([ + [2, 0, 1, 0], + [0, 2, 0, 1], + [1, 0, 2, 0], + [0, 1, 0, 2], + ], dtype='float64') / 3. + mass_tri = np.array([ + [4, 0, 2, 0, 1, 0], + [0, 4, 0, 2, 0, 1], + [2, 0, 4, 0, 2, 0], + [0, 2, 0, 4, 0, 2], + [1, 0, 2, 0, 4, 0], + [0, 1, 0, 2, 0, 4], + ], dtype='float64') / 6. + #mass_quad_1x1 = np.array([ + #[1, 0, 1, 0, 1, 0, 1, 0], + #[0, 1, 0, 1, 0, 1, 0, 1], + #[1, 0, 1, 0, 1, 0, 1, 0], + #[0, 1, 0, 1, 0, 1, 0, 1], + #[1, 0, 1, 0, 1, 0, 1, 0], + #[0, 1, 0, 1, 0, 1, 0, 1], + #[1, 0, 1, 0, 1, 0, 1, 0], + #[0, 1, 0, 1, 0, 1, 0, 1], + #], dtype='float64') / 36. + + mass_quad_2x2 = np.array([ + [4, 0, 2, 0, 1, 0, 2, 0], + [0, 4, 0, 2, 0, 1, 0, 2], + [2, 0, 4, 0, 2, 0, 1, 0], + [0, 2, 0, 4, 0, 2, 0, 1], + [1, 0, 2, 0, 4, 0, 2, 0], + [0, 1, 0, 2, 0, 4, 0, 2], + [2, 0, 1, 0, 2, 0, 4, 0], + [0, 2, 0, 1, 0, 2, 0, 4], + ], dtype='float64') / 36. + + mass_total = 0. 
+ if model.conm1: + nid = elem.nid + nid_ref = elem.nid_ref + if nid_ref.type == 'GRID': + i1 = dof_map[(nid, 1)] + + # TODO: support CID + if nid_ref.cd != elem.cid: + log.warning(f' CONM1 eid={eid} nid={nid} CD={nid_ref.cd} to cid={elem.cid} is not supported') + else: # pragma: no cover + print(elem.get_stats()) + raise NotImplementedError(elem) + Mbb[i1:i1+6, i1:i1+6] += elem.mass_matrix + + # has possibility of mass + has_mass = False + + for elem in model.elements: + etype = elem.type + if etype in NO_MASS: + continue + if elem.n == 0: + continue + + has_mass = True + if etype == 'CONM2': + mass_total = conm2_fill_Mbb(model, mass_total, Mbb, dof_map) + + elif etype in ['CROD', 'CONROD', 'CTUBE']: + # verified + mass = elem.mass() + if mass == 0.0: + log.warning(f' no mass for {etype} eid={eid}') + continue + + nids1 = elem.nodes[:, 0] + nids2 = elem.nodes[:, 1] + for nid1, nid2 in zip(nids1, nids2): + i1 = dof_map[(nid1, 1)] + j1 = dof_map[(nid2, 1)] + ii = [i1, i1 + 1, + j1, j1 + 1] + #Mbb[i1, i1] = Mbb[i1+1, i1+1] = \ + #Mbb[j1, j1] = Mbb[j1+1, j1+1] = mass / 3 + + #Mbb[i1, j1] = Mbb[j1, i1] = \ + #Mbb[i1+1, j1+1] = Mbb[j1+1, i1+1] = mass / 6 + iii, jjj = np.meshgrid(ii, ii) + Mbb[iii, jjj] += mass_rod_2x2 * mass + #ii = [i1, i1 + 1, j1, j1 + 1] + #print(Mbb[ii, :][:, ii]) + elif etype in ['CBAR', 'CBEAM']: + # TODO: verify + # TODO: add rotary inertia + mass = elem.Mass() + if mass == 0.0: + log.warning(f' no mass for {etype} eid={eid}') + continue + nid1, nid2 = elem.nodes + i1 = dof_map[(nid1, 1)] + j1 = dof_map[(nid2, 1)] + + #Mbb[i1, i1] = Mbb[i1+1, i1+1] = \ + #Mbb[j1, j1] = Mbb[j1+1, j1+1] = mass / 3 + + #Mbb[i1, j1] = Mbb[j1, i1] = \ + #Mbb[i1+1, j1+1] = Mbb[j1+1, i1+1] = mass / 6 + ii = [i1, i1 + 1, + j1, j1 + 1] + iii, jjj = np.meshgrid(ii, ii) + Mbb[iii, jjj] += mass_rod_2x2 * mass + elif etype == 'CTRIA3': + # TODO: verify + # TODO: add rotary inertia + mass = elem.Mass() + nid1, nid2, nid3 = elem.nodes + i1 = dof_map[(nid1, 1)] + i2 = 
dof_map[(nid2, 1)] + i3 = dof_map[(nid3, 1)] + ii = [ + i1, i1 + 1, + i2, i2 + 1, + i3, i3 + 1, + ] + iii, jjj = np.meshgrid(ii, ii) + Mbb[iii, jjj] += mass_tri * mass + #Mbb[i1, i1] = Mbb[i1+1, i1+1] = Mbb[i1+2, i1+2] = \ + #Mbb[i2, i2] = Mbb[i2+1, i2+1] = Mbb[i2+2, i2+2] = \ + #Mbb[i3, i3] = Mbb[i3+1, i3+1] = Mbb[i3+2, i3+2] = mass / 3 + elif etype == 'CQUAD4': + # TODO: verify + # TODO: add rotary inertia + try: + mass = elem.Mass() + except AttributeError: + pid_ref = elem.pid_ref + if pid_ref.mid1 is None and pid_ref.mid2 is None: + log.warning(f' no mass for CQUAD4 eid={eid}') + continue + #raise + #mid_ref = elem.mid_ref + #rho = mid_ref.Rho() + nid1, nid2, nid3, nid4 = elem.nodes + i1 = dof_map[(nid1, 1)] + i2 = dof_map[(nid2, 1)] + i3 = dof_map[(nid3, 1)] + i4 = dof_map[(nid4, 1)] + if mass == 0.: + pid_ref = elem.pid_ref + ptype = pid_ref.type + if ptype == 'PSHELL': + mid_ref = pid_ref.mid_ref + rho = mid_ref.rho + log.warning(f' no mass for CQUAD4 eid={eid} ptype={ptype} rho={rho}') + else: + log.warning(f' no mass for CQUAD4 eid={eid} ptype={ptype}') + else: + pid_ref = elem.pid_ref + ptype = pid_ref.type + log.info(f' mass={mass} for CQUAD4 eid={eid} ptype={ptype}') + + ii = [ + i1, i1 + 1, + i2, i2 + 1, + i3, i3 + 1, + i4, i4 + 1, + ] + iii, jjj = np.meshgrid(ii, ii) + Mbb[iii, jjj] += mass_quad_2x2 * mass + #if 0: # pragma: no cover + #mass4 = mass / 9. # 4/36 + #mass2 = mass / 18. # 2/36 + #mass1 = mass / 36. 
+ #print(mass1, mass2, mass4) + #Mbb[i1, i1] += mass4 + #Mbb[i1+1, i1+1] += mass4 + #Mbb[i2, i2] += mass4 + #Mbb[i2+1, i2+1] += mass4 + #Mbb[i3, i3] += mass4 + #Mbb[i3+1, i3+1] += mass4 + #Mbb[i4, i4] += mass4 + #Mbb[i4+1, i4+1] += mass4 + + #Mbb[i1, i3] += mass1 + #Mbb[i1, i2] += mass2 + #Mbb[i1, i4] += mass2 + #Mbb[i3, i1] += mass1 + #Mbb[i2, i1] += mass2 + #Mbb[i4, i1] += mass2 + + #Mbb[i1+1, i3+1] += mass1 + #Mbb[i1+1, i2+1] += mass1 + #Mbb[i1+1, i4+1] += mass2 + #Mbb[i3+1, i1+1] += mass1 + #Mbb[i2+1, i1+1] += mass2 + #Mbb[i4+1, i1+1] += mass2 + + #Mbb[i2, i4] += mass1 + #Mbb[i2+1, i4+1] += mass1 + #Mbb[i2, i3] += mass2 + #Mbb[i2+1, i3+1] += mass2 + + #Mbb[i4, i2] += mass1 + #Mbb[i4+1, i2+1] += mass1 + #Mbb[i3, i2] += mass2 + #Mbb[i3+1, i2+1] += mass2 + + #Mbb[i3, i4] += mass2 + #Mbb[i3+1, i4+1] = mass2 + #Mbb[i4, i3] += mass2 + #Mbb[i4+1, i3+1] += mass2 + + #Mbb[i2+1, i3+1] = 1 + #Mbb[i2+1, i2+1] = Mbb[i1+1, i4+1] = 2 + + #Mbb[i2, i1+1] = Mbb[i3, i1+1] = Mbb[i4, i1+1] = 1 + #Mbb[i1, i2+1] = Mbb[i3, i2+1] = Mbb[i4, i2+1] = 1 + #Mbb[i1, i3+1] = Mbb[i2, i3+1] = Mbb[i4, i3+1] = 1 + #Mbb[i1, i4+1] = Mbb[i2, i4+1] = Mbb[i3, i4+1] = 1 + #print(Mbb) + #print(Mbb[ii, :][:, ii]) + else: # pragma: no cover + print(elem.get_stats()) + raise NotImplementedError(elem) + + if wtmass != 1.0: + Mbb *= wtmass + + # TODO: working on a hack to NOTt fake the mass when there are no SPOINTs + # as mass can be easily added to a spring model + # + # TODO: non-zero mass case doesn't handle SPOINTs + # + has_special_points = 'SPOINT' in model.card_count or 'EPOINT' in model.card_count + is_all_grids = not(has_special_points) + unused_can_dof_slice = is_all_grids and not has_mass + if Mbb.sum() != 0.0: + #if Mbb.sum() != 0.0 or can_dof_slice: + #print(f'is_all_grids={is_all_grids} has_mass={has_mass}; can_dof_slice={can_dof_slice} Mbb.shape={Mbb.shape}') + i = np.arange(0, ndof).reshape(ndof//6, 6)[:, :3].ravel() + #print(Mbb[i, i]) + massi = Mbb[i, i].sum() + log.info(f'finished 
build_Mbb; M={massi:.6g}; mass_total={mass_total:.6g}') + else: + Mbb = np.eye(ndof, dtype=fdtype) + log.error(f'finished build_Mbb; faking mass; M={Mbb.sum()} ndof={ndof}') + return Mbb + +def grid_point_weight(model: BDF, Mbb, dof_map: DOF_MAP, ndof: int): + str(dof_map) + str(ndof) + z = np.zeros((3, 3), dtype='float64') + nnodes = model.grid.n + nspoints = model.spoint.n + #nepoints = len(model.epoints) + ndof_grid = 6 * nnodes + ndof2 = 6 * nnodes + nspoints + #print(f'ndof={ndof} ndof2={ndof2}') + D = np.zeros((ndof2, 6), dtype='float64') + D[ndof_grid:, 0] = 1 + #print('D.shape ', D.shape) + inid = 0 + #print(model.nodes) + #print(f'D.shape = {D.shape}') + reference_point = 0 # model.get_param('GRDPNT', 0) + if reference_point == 0: + dxyz = np.zeros(3, dtype=Mbb.dtype) + else: + dxyz = model.nodes[reference_point].get_position() + + coord = model.coord + xyz_cid0 = model.grid.xyz_cid0() + cds = model.grid.cd + cids = coord.coord_id + #icds = coord.index(cds, assume_sorted=False, inverse=False) + beta = coord.j + for xyz0, cd in zip(xyz_cid0, cds): + #coord = model.coord.s + #print(f'nid={nid}') + + # TODO: what about otuput coordinate frames; specifically cylindrical and spherical frames? 
+ xi, yi, zi = xyz0 - dxyz + Tr = np.array([ + [0, zi, -yi], + [-zi, 0, xi], + [yi, -xi, 0], + ], dtype='float64') + #cd_ref = node.cd_ref + beta = coord.xyz_to_global_transform[cd] + Ti = beta + TiT_Tr = Ti.T @ Tr + d = np.block([ + [Ti.T, TiT_Tr], + [z, Ti.T], + ]) + #print(f'd.shape={d.T.shape}') + #print(f'd={d}') + #print(f'D[:, {inid*6}:{(inid+1)*6}]') + D[inid*6:(inid+1)*6, :] = d.T + inid += 1 + + # Location Basic System (CP Fields) Mass Global System (CD Fields) + # Grid ID xb yb zb xCD yCD zCD + # 1 0 0 0 2 3 5 + # 2 1 0 0 2 3 5 + # 3 0.5 1 0 2 3 5 + # 4 0.5 0 1 2 3 5 + #print(f'Dt.shape = {D.T.shape}') + #print(f'Mbb.shape = {Mbb.shape}') + #print(f'D.shape = {D.shape}') + #print(f'D.T =\n{D.T}') + M0 = D.T @ Mbb @ D + return reference_point, M0 + +def dof_map_to_tr_set(dof_map, ndof: int) -> tuple[NDArrayNbool, NDArrayNbool]: + """creates the translation/rotation sets""" + trans_set = np.zeros(ndof, dtype='bool') + rot_set = np.zeros(ndof, dtype='bool') + for (unused_nid, dof), idof in dof_map.items(): + if dof in [0, 1, 2, 3]: + trans_set[idof] = True + else: + rot_set[idof] = True + return trans_set, rot_set + +def ps_to_sg_set(ndof: int, ps: list[int]): + """creates the SPC on the GRID (PS-field) set, {sg}""" + # all DOFs are initially assumed to be active + sg_set = np.ones(ndof, dtype='bool') + + # False means it's constrained + sg_set[ps] = False + return sg_set + + +def _get_dof_map(model: BDF) -> tuple[DOF_MAP, list[int]]: + """helper method for ``get_static_force_vector_from_subcase_id``""" + i = 0 + dof_map = {} + spoints = [] + ps = [] + for nid, psi in zip(model.grid.node_id, model.grid.ps): + for dof in range(1, 7): + dof_map[(nid, dof)] = i + i += 1 + if psi != 0: + asf + for psii in str(psi): + nid_dof = (nid, int(psii)) + j = dof_map[nid_dof] + ps.append(j) + + # we want the GRID points to be first + assert len(spoints) == 0, spoints + + if model.spoint.n: + for nid in model.spoint.spoint_id: + key = (nid, 0) + if key not in dof_map: + 
dof_map[key] = i + i += 1 + assert len(dof_map) > 0 + return dof_map, ps + +def get_ndof(model: BDF, subcase: Subcase) -> tuple[int, int, int]: + ndof_per_grid = 6 + if 'HEAT' in subcase: + ndof_per_grid = 1 + ngrid = model.grid.n + nspoint = model.spoint.n + #nepoint = model.epoint.n + nepoint = 0 + ndof = ngrid * ndof_per_grid + nspoint + nepoint + #print(f'ngrid={ngrid} nspoint={nspoint}') + assert ndof > 0, model.card_count + return ngrid, ndof_per_grid, ndof + +def conm2_fill_Mbb(model: BDF, + mass_total: float, + Mbb, + dof_map: dict[tuple[int, int], int]) -> float: + eye3 = np.eye(3, dtype='float64') + conm2 = model.conm2 + log = model.log + #mass = elem.Mass() + #nid = elem.nid + #nid_ref = elem.nid_ref + inid = model.grid.index(conm2.node_id) + cds = model.grid.cd[inid] + for eid, nid, cd, cid, mass, elem_x, elem_i in zip( + conm2.element_id, conm2.node_id, cds, conm2.coord_id, conm2.mass(), + conm2.xyz_offset, conm2.inertia): + i1 = dof_map[(nid, 1)] + if cd != cid: + log.warning(f' CONM2 eid={eid} nid={nid} CD={cd} to CONM2 cid={cid} is not supported') + #Mbb[i1, i1] = mass + #Mbb[i1+1, i1+1] = mass + #Mbb[i1+2, i1+2] = mass + # TODO: support CID + I11, I21, I22, I31, I32, I33 = elem_i + x1, x2, x3 = elem_x + mxx = np.array([ + [x1 * x1, -x1 * x2, -x1 * x3], + [-x2 * x1, x2 * x2, -x2 * x3], + [-x3 * x1, x3 * x2, x3 * x3], + ]) * mass + Tr = np.array([ + [0, x3, -x2], + [-x3, 0, x1], + [x2, -x1, 0], + ], dtype='float64') + mx = Tr * mass + I = np.array([ + [I11, -I21, I31], + [-I21, I22, -I32], + [-I31, -I32, I33], + ]) + mxx + + #[mass, 01, 02, 03, mass * X3, -mass * X2] + #[10, mass, 12, -mass * X3, 14, mass * X1] + #[20, 21, mass, mass * X2, -mass * X1, 25] + #[30, -mass * X3, mass * X2, I11 + mass * X2 * X2 + mass * X3 * X3, -I21 - mass * X2 * X1, -I31 - mass * X3 * X1] + #[mass * X3, 41, -mass * X1, -I21 - mass * X2 * X1, I22 + mass * X1 * X1 + mass * X3 * X3, -I32 - mass * X3 * X2] + #[-mass * X2, mass * X1, 52, -I31 - mass * X3 * X1, -I32 - mass * 
X3 * X2, I33 + mass * X2 * X2 + mass * X1 * X1] + + Mbb[i1:i1+3, i1:i1+3] += eye3 * mass + Mbb[i1:i1+3, i1+3:i1+6] += mx + Mbb[i1+3:i1+6, i1:i1+3] += mx.T + mass_total += mass + Mbb[i1+3:i1+6, i1+3:i1+6] += I + return mass_total diff --git a/pyNastran/dev/bdf_vectorized3/solver/test_solver_springs.py b/pyNastran/dev/bdf_vectorized3/solver/test_solver_springs.py new file mode 100644 index 000000000..15374a78b --- /dev/null +++ b/pyNastran/dev/bdf_vectorized3/solver/test_solver_springs.py @@ -0,0 +1,1066 @@ +import os +import pathlib +import unittest +import numpy as np +from cpylog import SimpleLogger +import pyNastran +from pyNastran.dev.solver.solver import Solver, BDF +from pyNastran.dev.solver.solver import Solver as SolverOld, BDF as BDFold +#from .solver import Solver, BDF +from pyNastran.bdf.case_control_deck import CaseControlDeck + +PKG_PATH = pathlib.Path(pyNastran.__path__[0]) +TEST_DIR = PKG_PATH / 'dev' / 'solver' + +class TestSolverSpring(unittest.TestCase): + def test_conm2(self): + """Tests a CMASS1/PMASS""" + log = SimpleLogger(level='warning', encoding='utf-8') + model = BDF(log=log, mode='msc') + model.bdf_filename = TEST_DIR / 'celas1.bdf' + model.add_grid(1, [0., 0., 0.], cd=1) + model.add_grid(2, [1., 0., 0.]) + model.add_grid(3, [0.5, 1., 0.], cd=3) + model.add_grid(4, [0.5, 0., 1.]) + + origin = [0., 0., 0.] + zaxis = [0., 0., 1] + xzplane = [1., 1., 0.] + model.add_cord2r(1, origin, zaxis, xzplane, rid=0, setup=True, comment='') + + origin = [0.5, 1., 0.] + zaxis = [0., 0., 1] + xzplane = [0.5, 2., 0.] + model.add_cord2r(3, origin, zaxis, xzplane, rid=0, setup=True, comment='') + + model.add_conm2(1, 1, mass=2.0, cid=0, X=None, I=None, comment='') + model.add_conm2(2, 2, mass=3.0, cid=0, X=None, I=None, comment='') + model.add_conm2(3, 3, mass=3.0, cid=0, X=None, I=None, comment='') + model.add_conm2(4, 4, mass=5.0, cid=0, X=None, I=None, comment='') + + nids = [1, 2] + eid = 1 + k = 1000. 
+ model.add_celas2(eid, k, nids, c1=1, c2=2, ge=0., s=0., comment='') + + load_id = 2 + spc_id = 3 + # model.add_sload(load_id, 2, 20.) + fxyz = [1., 0., 0.] + mag = 20. + model.add_force(load_id, 2, mag, fxyz, cid=0, comment='') + + components = 123456 + nodes = 1 + model.add_spc1(spc_id, components, nodes, comment='') + setup_case_control(model) + + solver = Solver(model) + model.sol = 101 + solver.run() + + def test_celas1(self): + """Tests a CELAS1/PELAS""" + log = SimpleLogger(level='warning', encoding='utf-8') + model = BDF(log=log, mode='msc') + model.bdf_filename = TEST_DIR / 'celas1.bdf' + model.add_grid(1, [0., 0., 0.]) + model.add_grid(2, [0., 0., 0.]) + nids = [1, 2] + eid = 1 + pid = 2 + model.add_celas1(eid, pid, nids, c1=1, c2=1, comment='') + k = 1000. + + load_id = 2 + spc_id = 3 + model.add_pelas(pid, k, ge=0., s=0., comment='') + # model.add_sload(load_id, 2, 20.) + fxyz = [1., 0., 0.] + mag = 20. + model.add_force(load_id, 2, mag, fxyz, cid=0, comment='') + + components = 123456 + nodes = 1 + model.add_spc1(spc_id, components, nodes, comment='') + setup_case_control(model) + + solver = Solver(model) + model.sol = 101 + solver.run() + + eid = 2 + nid = 2 + mass = 1. + model.add_conm2(eid, nid, mass, cid=0, X=None, I=None, comment='') + model.sol = 103 + solver.run() + + # F = k * d + # d = F / k + # 20=1000.*d + d = mag / k + assert np.allclose(solver.xa_[0], d) + os.remove(solver.f06_filename) + os.remove(solver.op2_filename) + + def test_celas2_cd(self): + """Tests a CELAS2""" + log = SimpleLogger(level='warning', encoding='utf-8') + model_old = BDFold(log=log, mode='msc') + modelv = BDF(log=log, mode='msc') + models = [model_old, modelv] + for model in models: + model.bdf_filename = TEST_DIR / 'celas2.bdf' + model.add_grid(1, [0., 0., 0.]) + cd = 100 + model.add_grid(2, [0., 0., 0.], cd=cd) + origin = [0., 0., 0.] + zaxis = [0., 0., 1.] + xzplane = [0., 1., 0.] 
+ model.add_cord2r(cd, origin, zaxis, xzplane, rid=0, setup=True, comment='') + nids = [1, 2] + eid = 1 + k = 1000. + model.add_celas2(eid, k, nids, c1=1, c2=2, ge=0., s=0., comment='') + + load_id = 2 + spc_id = 3 + # model.add_sload(load_id, 2, 20.) + fxyz = [1., 0., 0.] + mag = 20. + model.add_force(load_id, 2, mag, fxyz, cid=0, comment='') + + components = 123456 + nodes = 1 + model.add_spc1(spc_id, components, nodes, comment='') + setup_case_control(model) + + solver_old = SolverOld(model_old) + solver_old.run() + + solverv = Solver(modelv) + solverv.run() + + # F = k * d + d = mag / k + Fg1 = solver_old.Fg + Fg2 = solverv.Fg + + Kgg1 = solver_old.Kgg + Kgg2 = solverv.Kgg + + Kaa1 = solver_old.Kaa.toarray() + Kaa2 = solverv.Kaa.toarray() + assert np.allclose(Fg1, Fg2) + assert np.allclose(Kgg1, Kgg2) + assert np.allclose(Kaa1, Kaa2) + assert np.allclose(solver_old.xa_[0], d) + assert np.allclose(solverv.xa_[0], d) + + def test_celas3(self): + """Tests a CELAS3/PELAS""" + log = SimpleLogger(level='warning', encoding='utf-8') + model = BDF(log=log, mode='msc') + model.bdf_filename = TEST_DIR / 'celas3.bdf' + #model.add_grid(1, [0., 0., 0.]) + #model.add_grid(2, [0., 0., 0.]) + model.add_spoint([1, 2]) + nids = [1, 2] + eid = 1 + pid = 2 + model.add_celas3(eid, pid, nids, comment='') + k = 1000. + + load_id = 2 + spc_id = 3 + model.add_pelas(pid, k, ge=0., s=0., comment='') + # model.add_sload(load_id, 2, 20.) + #fxyz = [1., 0., 0.] + mag = 20. 
+ # model.add_force(load_id, 2, mag, fxyz, cid=0, comment='') + nids = 2 + mags = mag + model.add_sload(load_id, nids, mags, comment='') + + components = 0 + nodes = 1 + model.add_spc1(spc_id, components, nodes, comment='') + setup_case_control(model) + + solver = Solver(model) + #model.sol = 103 + #solver.run() + + model.sol = 101 + solver.run() + + # F = k * d + # d = F / k + # 20=1000.*d + d = mag / k + assert np.allclose(solver.xa_[0], d), solver.xa_ + os.remove(solver.f06_filename) + os.remove(solver.op2_filename) + + def test_celas4_cd(self): + """Tests a CELAS4""" + log = SimpleLogger(level='warning', encoding='utf-8') + model = BDF(log=log, mode='msc') + model.bdf_filename = TEST_DIR / 'celas4.bdf' + model.add_spoint([1, 2]) + #origin = [0., 0., 0.] + #zaxis = [0., 0., 1.] + #xzplane = [0., 1., 0.] + #model.add_cord2r(cd, origin, zaxis, xzplane, rid=0, setup=True, comment='') + nids = [1, 2] + eid = 1 + k = 1000. + model.add_celas4(eid, k, nids, comment='') + + load_id = 2 + spc_id = 3 + # model.add_sload(load_id, 2, 20.) + #fxyz = [1., 0., 0.] + mag = 20. + model.add_sload(load_id, 2, mag) + + components = 0 + nodes = 1 + model.add_spc1(spc_id, components, nodes, comment='') + setup_case_control(model) + + solver = Solver(model) + solver.run() + + # F = k * d + d = mag / k + assert np.allclose(solver.xa_[0], d) + + +class TestSolverRod(unittest.TestCase): + """tests the rods""" + def test_crod_axial(self): + """Tests a CROD/PROD""" + log = SimpleLogger(level='warning', encoding='utf-8') + model = BDF(log=log, mode='msc') + model.bdf_filename = TEST_DIR / 'crod_axial.bdf' + model.add_grid(1, [0., 0., 0.]) + model.add_grid(2, [1., 0., 0.]) + nids = [1, 2] + eid = 1 + pid = 2 + mid = 3 + E = 3.0e7 + G = None + nu = 0.3 + model.add_mat1(mid, E, G, nu, rho=0.1, a=0.0, tref=0.0, ge=0.0, St=0.0, + Sc=0.0, Ss=0.0, mcsid=0) + model.add_crod(eid, pid, nids) + model.add_prod(pid, mid, A=1.0, j=0., c=0., nsm=0.) + + load_id = 2 + spc_id = 3 + nid = 2 + mag = 1. 
+ fxyz = [1., 0., 0.] + model.add_force(load_id, nid, mag, fxyz, cid=0) + + components = 123456 + nodes = 1 + model.add_spc1(spc_id, components, nodes, comment='') + setup_case_control(model) + solver = Solver(model) + #with self.assertRaises(RuntimeError): + solver.run() + + def test_crod_torsion(self): + """Tests a CROD/PROD""" + log = SimpleLogger(level='warning', encoding='utf-8') + model = BDF(log=log, mode='msc') + model.bdf_filename = TEST_DIR / 'crod_torsion.bdf' + model.add_grid(1, [0., 0., 0.]) + model.add_grid(2, [1., 0., 0.]) + nids = [1, 2] + eid = 1 + pid = 2 + mid = 3 + E = 3.0e7 + G = None + nu = 0.3 + model.add_mat1(mid, E, G, nu, rho=0.1, a=0.0, tref=0.0, ge=0.0, St=0.0, + Sc=0.0, Ss=0.0, mcsid=0) + model.add_crod(eid, pid, nids) + model.add_prod(pid, mid, A=0.0, j=2., c=0., nsm=1.) + + load_id = 2 + spc_id = 3 + nid = 2 + mag = 1. + fxyz = [1., 0., 0.] + model.add_moment(load_id, nid, mag, fxyz, cid=0) + + components = 123456 + nodes = 1 + model.add_spc1(spc_id, components, nodes, comment='') + setup_case_control(model) + solver = Solver(model) + solver.run() + + def test_crod_spcd(self): + """Tests a CROD/PROD with an SPCD and no free DOFs""" + log = SimpleLogger(level='warning', encoding='utf-8') + model = BDF(log=log, mode='msc') + model.bdf_filename = TEST_DIR / 'crod_spcd.bdf' + model.add_grid(1, [0., 0., 0.]) + model.add_grid(2, [1., 0., 0.]) + nids = [1, 2] + eid = 1 + pid = 2 + mid = 3 + E = 42. + G = None + nu = 0.3 + model.add_mat1(mid, E, G, nu, rho=0.1, a=0.0, tref=0.0, ge=0.0, St=0.0, + Sc=0.0, Ss=0.0, mcsid=0) + model.add_crod(eid, pid, nids) + model.add_prod(pid, mid, A=1.0, j=0., c=0., nsm=0.) + + load_id = 2 + spc_id = 3 + nid = 2 + mag = 1. + fxyz = [0., 0., 0.] 
+ model.add_force(load_id, nid, mag, fxyz, cid=0) + + nodes = 1 + components = 123456 + #model.add_spc1(spc_id, components, nodes, comment='') + model.add_spc(spc_id, nodes, components, 0., comment='') + + #components = 123456 + #nodes = 1 + #enforced = 0.1 + #model.add_spc(spc_id, nodes, components, enforced, comment='') + nodes = 2 + components = 1 + enforced = 0.1 + model.add_spcd(load_id, nodes, components, enforced, comment='') + + components = '1' + model.add_spcd(9999999, nodes, components, enforced, comment='') + + components = 123456 + #model.add_spc1(spc_id, components, nodes, comment='') + model.add_spc(spc_id, nodes, components, 0., comment='') + setup_case_control(model) + solver = Solver(model) + solver.run() + # F = kx + dx = enforced + k = E + F = k * dx + assert len(solver.xa_) == 0, solver.xa_ + assert len(solver.Fa_) == 0, solver.Fa_ + assert np.allclose(solver.xg[6], dx), solver.xa_ + assert np.allclose(solver.Fg[6], F), solver.xa_ + #assert np.allclose(solver.Fa_[0], 0.), solver.Fa_ + + + def test_crod(self): + """Tests a CROD/PROD""" + log = SimpleLogger(level='warning', encoding='utf-8') + model = BDF(log=log, mode='msc') + model.bdf_filename = TEST_DIR / 'crod.bdf' + model.add_grid(1, [0., 0., 0.]) + model.add_grid(2, [1., 0., 0.]) + nids = [1, 2] + eid = 1 + pid = 2 + mid = 3 + E = 3.0e7 + G = None + nu = 0.3 + model.add_mat1(mid, E, G, nu, rho=0.1, a=0.0, tref=0.0, ge=0.0, St=0.0, + Sc=0.0, Ss=0.0, mcsid=0) + model.add_crod(eid, pid, nids) + model.add_prod(pid, mid, A=1.0, j=2., c=0., nsm=0.) + + load_id = 2 + spc_id = 3 + nid = 2 + mag = 1. + fxyz = [1., 0., 0.] 
+ model.add_force(load_id, nid, mag, fxyz, cid=0) + + components = 123456 + nodes = 1 + model.add_spc1(spc_id, components, nodes, comment='') + setup_case_control(model) + solver = Solver(model) + solver.run() + + def test_crod_aset(self): + """ + Tests a CROD/PROD using an ASET + + same answer as ``test_crod`` + """ + log = SimpleLogger(level='warning', encoding='utf-8') + model = BDF(log=log, mode='msc') + model.bdf_filename = TEST_DIR / 'crod_aset.bdf' + model.add_grid(1, [0., 0., 0.]) + model.add_grid(2, [1., 0., 0.]) + nids = [1, 2] + eid = 1 + pid = 2 + mid = 3 + E = 3.0e7 + G = None + nu = 0.3 + model.add_mat1(mid, E, G, nu, rho=0.1, a=0.0, tref=0.0, ge=0.0, St=0.0, + Sc=0.0, Ss=0.0, mcsid=0) + model.add_crod(eid, pid, nids) + model.add_prod(pid, mid, A=1.0, j=2., c=0., nsm=0.) + + load_id = 2 + spc_id = 3 + nid = 2 + mag = 1. + fxyz = [1., 0., 0.] + model.add_force(load_id, nid, mag, fxyz, cid=0) + + components = 123456 + nodes = 1 + model.add_spc1(spc_id, components, nodes, comment='') + + aset_ids = 2 + aset_components = '456' + model.add_aset(aset_ids, aset_components) + + aset_ids = [2, 2] + aset_components = ['456', '123'] + model.add_aset1(aset_ids, aset_components, comment='') + + setup_case_control(model) + solver = Solver(model) + solver.run() + + def test_crod_mpc(self): + """Tests a CROD/PROD""" + log = SimpleLogger(level='warning', encoding='utf-8') + model = BDF(log=log, mode='msc') + model.bdf_filename = TEST_DIR / 'crod_mpc.bdf' + model.add_grid(1, [0., 0., 0.]) + model.add_grid(2, [1., 0., 0.]) + model.add_grid(3, [1., 0., 0.]) + nids = [1, 2] + eid = 1 + pid = 2 + mid = 3 + E = 3.0e7 + G = None + nu = 0.3 + model.add_mat1(mid, E, G, nu, rho=0.1, a=0.0, tref=0.0, ge=0.0, St=0.0, + Sc=0.0, Ss=0.0, mcsid=0) + model.add_crod(eid, pid, nids) + model.add_prod(pid, mid, A=1.0, j=2., c=0., nsm=0.) + mpc_id = 10 + nodes = [2, 3] + components = [1, 3] + coefficients = [1., -1.] 
+ model.add_mpc(mpc_id, nodes, components, coefficients) + + load_id = 2 + spc_id = 3 + nid = 3 + mag = 1. + fxyz = [0., 0., 1.] + model.add_force(load_id, nid, mag, fxyz, cid=0) + + components = 123456 + nodes = 1 + model.add_spc1(spc_id, components, nodes, comment='') + setup_case_control(model, extra_case_lines=['MPC=10']) + solver = Solver(model) + solver.run() + + def test_ctube(self): + """Tests a CTUBE/PTUBE""" + log = SimpleLogger(level='warning', encoding='utf-8') + model = BDF(log=log, mode='msc') + model.bdf_filename = TEST_DIR / 'ctube.bdf' + model.add_grid(1, [0., 0., 0.]) + model.add_grid(2, [1., 0., 0.]) + nids = [1, 2] + eid = 1 + pid = 2 + mid = 3 + E = 3.0e7 + G = None + nu = 0.3 + model.add_mat1(mid, E, G, nu, rho=0.1, a=0.0, tref=0.0, ge=0.0, St=0.0, + Sc=0.0, Ss=0.0, mcsid=0) + model.add_ctube(eid, pid, nids) + OD1 = 1.0 + t = 0.1 + model.add_ptube(pid, mid, OD1, t=t, nsm=0., OD2=None, comment='') + + load_id = 2 + spc_id = 3 + nid = 2 + mag = 1. + fxyz = [1., 0., 0.] + model.add_force(load_id, nid, mag, fxyz, cid=0) + + components = 123456 + nodes = 1 + model.add_spc1(spc_id, components, nodes, comment='') + setup_case_control(model) + solver = Solver(model) + solver.run() + + ID = OD1 - 2 * t + # F = k * x + # x = F / k + L = 1.0 + A = np.pi * (OD1 ** 2 - ID ** 2) / 4 + kaxial = A * E / L + F = 1.0 + dx = F / kaxial + assert np.allclose(solver.xg[6], dx), f'dx={dx} xg[6]={solver.xg[6]}' + assert np.allclose(solver.Fg[6], F), solver.Fg + + def test_conrod(self): + """Tests a CONROD""" + log = SimpleLogger(level='warning', encoding='utf-8') + model = BDF(log=log, mode='msc') + model.bdf_filename = TEST_DIR / 'conrod.bdf' + L = 1. + model.add_grid(1, [0., 0., 0.]) + model.add_grid(2, [L, 0., 0.]) + + nids = [1, 2] + eid = 1 + mid = 3 + E = 200. 
+ G = None + nu = 0.3 + model.add_mat1(mid, E, G, nu, rho=0.1, a=0.0, tref=0.0, ge=0.0, St=0.0, + Sc=0.0, Ss=0.0, mcsid=0) + A = 1.0 + J = 2.0 + model.add_conrod(eid, mid, nids, A=A, j=J, c=0.0, nsm=0.0) + + load_id = 2 + spc_id = 3 + nid = 2 + mag_axial = 10000. + fxyz = [1., 0., 0.] + model.add_force(load_id, nid, mag_axial, fxyz, cid=0) + + mag_torsion = 20000. + fxyz = [1., 0., 0.] + model.add_moment(load_id, nid, mag_torsion, fxyz, cid=0) + + components = 123456 + nodes = 1 + model.add_spc1(spc_id, components, nodes, comment='') + + #nodes = 2 + #model.add_spc1(spc_id, 23456, nodes, comment='') + setup_case_control(model) + solver = Solver(model) + solver.run() + + # TODO: why is this a torsional result??? + G = E / (2 * (1 + nu)) + kaxial = A * E / L + ktorsion = G * J / L + # F = k * d + daxial = mag_axial / kaxial + dtorsion = mag_torsion / ktorsion + #print(solver.xa_) + assert np.allclose(solver.xa_[0], daxial), f'daxial={daxial} kaxial={kaxial} xa_={solver.xa_}' + assert np.allclose(solver.xa_[1], dtorsion), f'dtorsion={dtorsion} ktorsion={ktorsion} xa_={solver.xa_}' + + #L = 1.0 + #A = np.pi * (OD1 ** 2 - ID ** 2) / 4 + #kaxial = A * E / L + #F = 1.0 + #dx = F / kaxial + assert np.allclose(solver.xg[6], daxial), f'dx={daxial} xg[6]={solver.xg[6]}' + assert np.allclose(solver.xg[9], dtorsion), f'dx={dtorsion} xg[6]={solver.xg[9]}' + assert np.allclose(solver.Fg[6], mag_axial), f'F={mag_axial} Fg[6]={solver.Fg[6]}' + assert np.allclose(solver.Fg[9], mag_torsion), f'F={mag_torsion} Fg[9]={solver.Fg[9]}' + + +class TestSolverBar(unittest.TestCase): + """tests the CBARs""" + def test_cbar(self): + """Tests a CBAR/PBAR""" + log = SimpleLogger(level='debug', encoding='utf-8') + model = BDF(log=log, mode='msc') + model.bdf_filename = TEST_DIR / 'cbar.bdf' + model.add_grid(1, [0., 0., 0.]) + model.add_grid(2, [1., 0., 0.]) + L = 1.0 + + nids = [1, 2] + eid = 1 + pid = 2 + mid = 3 + E = 3.0e7 + G = None + nu = 0.3 + model.add_mat1(mid, E, G, nu, rho=0.0, a=0.0, 
tref=0.0, ge=0.0, St=0.0, + Sc=0.0, Ss=0.0, mcsid=0) + + x = [0., 1., 0.] + g0 = None + model.add_cbar(eid, pid, nids, x, g0, + offt='GGG', pa=0, pb=0, wa=None, wb=None, comment='') + + A = 1. + k_axial = A * E / L + model.add_pbar(pid, mid, + A=A, i1=1., i2=1., i12=1., j=1., + nsm=0., + c1=0., c2=0., d1=0., d2=0., + e1=0., e2=0., f1=0., f2=0., + k1=1.e8, k2=1.e8, comment='') + load_id = 2 + spc_id = 3 + nid = 2 + mag = 1. + fxyz = [1., 0., 0.] + model.add_force(load_id, nid, mag, fxyz, cid=0) + + components = 123456 + nodes = 1 + model.add_spc1(spc_id, components, nodes, comment='') + setup_case_control(model) + solver = Solver(model) + solver.run() + + # F = kx + fmag = 1.0 + dx = fmag / k_axial + assert dx == solver.xg[6], f'dx={dx} xg={solver.xg}' + + def test_cbar2(self): + """Tests a CBAR/PBAR""" + log = SimpleLogger(level='debug', encoding='utf-8') + model = BDF(log=log, mode='msc') + model.bdf_filename = TEST_DIR / 'cbar.bdf' + model.add_grid(1, [0., 0., 0.]) + model.add_grid(2, [0.5, 0., 0.]) + model.add_grid(3, [1., 0., 0.]) + L = 1.0 + pid = 2 + mid = 3 + E = 3.0e7 + G = None + nu = 0.3 + model.add_mat1(mid, E, G, nu, rho=0.0, a=0.0, tref=0.0, ge=0.0, St=0.0, + Sc=0.0, Ss=0.0, mcsid=0) + + eid = 1 + x = [0., 1., 0.] + g0 = None + nids = [1, 2] + model.add_cbar(eid, pid, nids, x, g0, + offt='GGG', pa=0, pb=0, wa=None, wb=None, comment='') + + eid = 2 + nids = [2, 3] + model.add_cbar(eid, pid, nids, x, g0, + offt='GGG', pa=0, pb=0, wa=None, wb=None, comment='') + + A = 1. + k_axial = A * E / L + model.add_pbar(pid, mid, + A=A, i1=1., i2=1., i12=1., j=1., + nsm=0., + c1=0., c2=0., d1=0., d2=0., + e1=0., e2=0., f1=0., f2=0., + k1=1.e8, k2=1.e8, comment='') + load_id = 2 + spc_id = 3 + nid = 3 + mag = 1. + fxyz = [1., 0., 0.]
+ model.add_force(load_id, nid, mag, fxyz, cid=0) + + components = 123456 + nodes = 1 + model.add_spc1(spc_id, components, nodes, comment='') + setup_case_control(model) + solver = Solver(model) + solver.run() + + # F = kx + fmag = 1.0 + dx = fmag / k_axial + assert dx == solver.xg[6*2], f'dx={dx} xg={solver.xg}' + + def test_cbeam(self): + """Tests a CBEAM/PBEAM""" + log = SimpleLogger(level='debug', encoding='utf-8') + model = BDF(log=log, mode='msc') + model.bdf_filename = TEST_DIR / 'cbeam.bdf' + model.add_grid(1, [0., 0., 0.]) + model.add_grid(2, [1., 0., 0.]) + L = 1.0 + nids = [1, 2] + eid = 1 + pid = 2 + mid = 3 + E = 3.0e7 + G = None + nu = 0.3 + model.add_mat1(mid, E, G, nu, rho=0.0, a=0.0, tref=0.0, ge=0.0, St=0.0, + Sc=0.0, Ss=0.0, mcsid=0) + + x = [0., 1., 0.] + g0 = None + model.add_cbeam(eid, pid, nids, x, g0, offt='GGG', bit=None, + pa=0, pb=0, wa=None, wb=None, sa=0, sb=0, comment='') + #beam_type = 'BAR' + xxb = [0.] + #dims = [[1., 2.]] + #model.add_pbeaml(pid, mid, beam_type, xxb, dims, + #so=None, nsm=None, group='MSCBML0', comment='') + A = 1.0 + so = ['YES'] + area = [1.0] + i1 = [1.] + i2 = [2.] + i12 = [0.] + j = [2.] + model.add_pbeam(pid, mid, xxb, so, area, i1, i2, i12, j, nsm=None, + c1=None, c2=None, d1=None, d2=None, e1=None, e2=None, f1=None, f2=None, + k1=1., k2=1., s1=0., s2=0., nsia=0., nsib=None, cwa=0., cwb=None, + m1a=0., m2a=0., m1b=None, m2b=None, n1a=0., n2a=0., n1b=None, n2b=None, + comment='') + load_id = 2 + spc_id = 3 + nid = 2 + mag = 1. + fxyz = [1., 0., 0.]
+ model.add_force(load_id, nid, mag, fxyz, cid=0) + + components = 123456 + nodes = 1 + model.add_spc1(spc_id, components, nodes, comment='') + setup_case_control(model) + solver = Solver(model) + solver.run() + + # F = kx + k_axial = A * E / L + fmag = 1.0 + dx = fmag / k_axial + assert dx == solver.xg[6], f'dx={dx} xg={solver.xg}' + + + def test_cbeam2(self): + """Tests a CBEAM/PBEAM""" + model = BDF(debug=True, log=None, mode='msc') + model.bdf_filename = TEST_DIR / 'cbeam.bdf' + model.add_grid(1, [0., 0., 0.]) + model.add_grid(2, [.5, 0., 0.]) + model.add_grid(3, [1., 0., 0.]) + L = 1.0 + pid = 2 + mid = 3 + E = 3.0e7 + G = None + nu = 0.3 + model.add_mat1(mid, E, G, nu, rho=0.0, a=0.0, tref=0.0, ge=0.0, St=0.0, + Sc=0.0, Ss=0.0, mcsid=0) + + eid = 1 + nids = [1, 2] + x = [0., 1., 0.] + g0 = None + model.add_cbeam(eid, pid, nids, x, g0, offt='GGG', bit=None, + pa=0, pb=0, wa=None, wb=None, sa=0, sb=0, comment='') + + eid = 2 + nids = [2, 3] + x = [0., 1., 0.] + g0 = None + model.add_cbeam(eid, pid, nids, x, g0, offt='GGG', bit=None, + pa=0, pb=0, wa=None, wb=None, sa=0, sb=0, comment='') + + + #beam_type = 'BAR' + xxb = [0.] + #dims = [[1., 2.]] + #model.add_pbeaml(pid, mid, beam_type, xxb, dims, + #so=None, nsm=None, group='MSCBML0', comment='') + A = 1.0 + so = ['YES'] + area = [1.0] + i1 = [1.] + i2 = [2.] + i12 = [0.] + j = [2.] + model.add_pbeam(pid, mid, xxb, so, area, i1, i2, i12, j, nsm=None, + c1=None, c2=None, d1=None, d2=None, e1=None, e2=None, f1=None, f2=None, + k1=1., k2=1., s1=0., s2=0., nsia=0., nsib=None, cwa=0., cwb=None, + m1a=0., m2a=0., m1b=None, m2b=None, n1a=0., n2a=0., n1b=None, n2b=None, + comment='') + load_id = 2 + spc_id = 3 + nid = 3 + mag = 1. + fxyz = [1., 0., 0.]
+ model.add_force(load_id, nid, mag, fxyz, cid=0) + + components = 123456 + nodes = 1 + model.add_spc1(spc_id, components, nodes, comment='') + setup_case_control(model) + solver = Solver(model) + solver.run() + + # F = kx + k_axial = A * E / L + fmag = 1.0 + dx = fmag / k_axial + assert dx == solver.xg[6*2], f'dx={dx} xg={solver.xg}' + + + +def setup_case_control(model, extra_case_lines=None): + lines = [ + 'STRESS(PLOT,PRINT) = ALL', + 'STRAIN(PLOT,PRINT) = ALL', + 'FORCE(PLOT,PRINT) = ALL', + 'DISP(PLOT,PRINT) = ALL', + 'GPFORCE(PLOT,PRINT) = ALL', + 'SPCFORCE(PLOT,PRINT) = ALL', + 'MPCFORCE(PLOT,PRINT) = ALL', + 'OLOAD(PLOT,PRINT) = ALL', + 'ESE(PLOT,PRINT) = ALL', + 'SUBCASE 1', + ' LOAD = 2', + ' SPC = 3', + ] + if extra_case_lines is not None: + lines += extra_case_lines + cc = CaseControlDeck(lines, log=model.log) + model.sol = 101 + model.case_control_deck = cc + +class TestSolverShell(unittest.TestCase): + """tests the shells""" + def test_cquad4_bad_normal(self): + """test that the code crashes with a bad normal""" + model = BDF(debug=None, log=None, mode='msc') + model.bdf_filename = TEST_DIR / 'cquad4_bad_normal.bdf' + mid = 3 + model.add_grid(1, [0., 0., 0.]) + model.add_grid(3, [1., 0., 0.]) + model.add_grid(2, [1., 0., 2.]) + model.add_grid(4, [0., 0., 2.]) + nids = [1, 2, 3, 4] + model.add_cquad4(5, 5, nids, theta_mcid=0.0, zoffset=0., tflag=0, + T1=None, T2=None, T3=None, T4=None, comment='') + model.add_pcomp(5, [mid], [0.1], thetas=None, + souts=None, nsm=0., sb=0., ft=None, tref=0., ge=0., lam=None, z0=None, comment='') + + E = 1.0E7 + G = None + nu = 0.3 + model.add_mat1(mid, E, G, nu, rho=0.0, a=0.0, tref=0.0, ge=0.0, + St=0.0, Sc=0.0, Ss=0.0, mcsid=0, comment='') + spc_id = 3 + load_id = 2 + components = '123456' + nodes = [1, 2] + model.add_spc1(spc_id, components, nodes, comment='') + model.add_force(load_id, 3, 1.0, [0., 0., 1.], cid=0, comment='') + model.add_force(load_id, 4, 1.0, [0., 0., 1.], cid=0, comment='') + + setup_case_control(model)
+ solver = Solver(model) + + with self.assertRaises(RuntimeError): + # invalid normal vector + solver.run() + #os.remove(model.bdf_filename) + + def test_cquad4_bad_jacobian(self): + """ + Tests that the code crashes with a really terrible CQUAD4 + + The Jacobian is defined between [-1, 1] + """ + model = BDF(debug=None, log=None, mode='msc') + model.bdf_filename = TEST_DIR / 'cquad4_bad_jacobian.bdf' + mid = 3 + model.add_grid(1, [0., 0., 0.]) + model.add_grid(2, [0.5, 100., 0.]) + model.add_grid(3, [1., 0., 0.]) + model.add_grid(4, [0.5, 1., 0.]) + nids = [1, 2, 3, 4] + model.add_cquad4(5, 5, nids, theta_mcid=0.0, zoffset=0., tflag=0, + T1=None, T2=None, T3=None, T4=None, comment='') + model.add_pcomp(5, [mid], [0.1], thetas=None, + souts=None, nsm=0., sb=0., ft=None, tref=0., ge=0., lam=None, z0=None, comment='') + + E = 1.0E7 + G = None + nu = 0.3 + model.add_mat1(mid, E, G, nu, rho=0.0, a=0.0, tref=0.0, ge=0.0, + St=0.0, Sc=0.0, Ss=0.0, mcsid=0, comment='') + spc_id = 3 + load_id = 2 + components = '123456' + nodes = [1, 2] + model.add_spc1(spc_id, components, nodes, comment='') + model.add_force(load_id, 3, 1.0, [0., 0., 1.], cid=0, comment='') + model.add_force(load_id, 4, 1.0, [0., 0., 1.], cid=0, comment='') + + setup_case_control(model) + solver = Solver(model) + with self.assertRaises(RuntimeError): + solver.run() + #os.remove(model.bdf_filename) + + def test_cquad4_pshell_mat1(self): + """Tests a CQUAD4/PSHELL/MAT1""" + model = BDF(debug=None, log=None, mode='msc') + model.bdf_filename = TEST_DIR / 'cquad4_pshell_mat1.bdf' + model.add_grid(1, [0., 0., 0., ]) + model.add_grid(2, [1., 0., 0., ]) + model.add_grid(3, [1., 0., 2., ]) + model.add_grid(4, [0., 0., 2., ]) + area = 2.0 + nsm_area = 0.0 + thickness = 0.3 + rho = 0.1 + mass = area * (thickness * rho + nsm_area) + mid = 3 + nids = [1, 2, 3, 4] + model.add_cquad4(1, 1, nids, theta_mcid=0.0, zoffset=0., tflag=0, + T1=None, T2=None, T3=None, T4=None, comment='') + model.add_cquad4(2, 2, nids, 
theta_mcid=0.0, zoffset=0., tflag=0, + T1=None, T2=None, T3=None, T4=None, comment='') + model.add_cquad4(3, 3, nids, theta_mcid=0.0, zoffset=0., tflag=0, + T1=None, T2=None, T3=None, T4=None, comment='') + model.add_cquad4(4, 4, nids, theta_mcid=0.0, zoffset=0., tflag=0, + T1=None, T2=None, T3=None, T4=None, comment='') + model.add_cquad4(5, 5, nids, theta_mcid=0.0, zoffset=0., tflag=0, + T1=None, T2=None, T3=None, T4=None, comment='') + + model.add_pshell(1, mid1=mid, t=thickness, mid2=None, twelveIt3=1.0, + mid3=None, tst=0.833333, nsm=0.0, z1=None, z2=None, + mid4=None, comment='') + model.add_pshell(2, mid1=None, t=thickness, mid2=mid, twelveIt3=1.0, + mid3=None, tst=0.833333, nsm=0.0, z1=None, z2=None, + mid4=None, comment='') + model.add_pshell(3, mid1=None, t=thickness, mid2=None, twelveIt3=1.0, + mid3=mid, tst=0.833333, nsm=0.0, z1=None, z2=None, + mid4=None, comment='') + model.add_pshell(4, mid1=None, t=thickness, mid2=None, twelveIt3=1.0, + mid3=None, tst=0.833333, nsm=0.0, z1=None, z2=None, + mid4=mid, comment='') + model.add_pcomp(5, [mid], [thickness], thetas=None, + souts=None, nsm=0., sb=0., ft=None, tref=0., ge=0., lam=None, z0=None, comment='') + + E = 1.0E7 + G = None + nu = 0.3 + model.add_mat1(mid, E, G, nu, rho=1.0, a=0.0, tref=0.0, ge=0.0, + St=0.0, Sc=0.0, Ss=0.0, mcsid=0, comment='') + spc_id = 3 + load_id = 2 + components = '123456' + nodes = [1, 2] + model.add_spc1(spc_id, components, nodes, comment='') + model.add_force(load_id, 3, 1.0, [0., 0., 1.], cid=0, comment='') + model.add_force(load_id, 4, 1.0, [0., 0., 1.], cid=0, comment='') + + setup_case_control(model) + solver = Solver(model) + #with self.assertRaises(RuntimeError): + solver.run() + #print('total_mass =', mass * 5) + #os.remove(model.bdf_filename) + #os.remove(solver.f06_filename) + #os.remove(solver.op2_filename) + + def test_cquad8_pshell_mat1(self): + """Tests a CQUAD8/PSHELL/MAT1""" + # 4--7--3 + # | | + # 8 6 + # | | + # 1--5--2 + model = BDF(debug=None, log=None, 
mode='msc') + model.bdf_filename = TEST_DIR / 'cquad8_pshell_mat1.bdf' + model.add_grid(1, [0., 0., 0.]) + model.add_grid(2, [1., 0., 0.]) + model.add_grid(3, [1., 0., 2.]) + model.add_grid(4, [0., 0., 2.]) + + model.add_grid(5, [0.5, 0., 0.]) + model.add_grid(6, [1., 0., 1.]) + model.add_grid(7, [0.5, 0., 2.]) + model.add_grid(8, [0., 0., 1.]) + + mid = 3 + nids = [1, 2, 3, 4, 5, 6, 7, 8] + model.add_cquad8(1, 1, nids, theta_mcid=0.0, zoffset=0., tflag=0, + T1=None, T2=None, T3=None, T4=None, comment='') + model.add_cquad8(2, 2, nids, theta_mcid=0.0, zoffset=0., tflag=0, + T1=None, T2=None, T3=None, T4=None, comment='') + model.add_cquad8(3, 3, nids, theta_mcid=0.0, zoffset=0., tflag=0, + T1=None, T2=None, T3=None, T4=None, comment='') + model.add_cquad8(4, 4, nids, theta_mcid=0.0, zoffset=0., tflag=0, + T1=None, T2=None, T3=None, T4=None, comment='') + model.add_cquad8(5, 5, nids, theta_mcid=0.0, zoffset=0., tflag=0, + T1=None, T2=None, T3=None, T4=None, comment='') + + model.add_pshell(1, mid1=mid, t=0.3, mid2=None, twelveIt3=1.0, + mid3=None, tst=0.833333, nsm=0.0, z1=None, z2=None, + mid4=None, comment='') + model.add_pshell(2, mid1=None, t=0.3, mid2=mid, twelveIt3=1.0, + mid3=None, tst=0.833333, nsm=0.0, z1=None, z2=None, + mid4=None, comment='') + model.add_pshell(3, mid1=None, t=0.3, mid2=None, twelveIt3=1.0, + mid3=mid, tst=0.833333, nsm=0.0, z1=None, z2=None, + mid4=None, comment='') + model.add_pshell(4, mid1=None, t=0.3, mid2=None, twelveIt3=1.0, + mid3=None, tst=0.833333, nsm=0.0, z1=None, z2=None, + mid4=mid, comment='') + model.add_pcomp(5, [mid], [0.1], thetas=None, + souts=None, nsm=0., sb=0., ft=None, tref=0., ge=0., lam=None, z0=None, comment='') + + E = 1.0E7 + G = None + nu = 0.3 + model.add_mat1(mid, E, G, nu, rho=0.0, a=0.0, tref=0.0, ge=0.0, + St=0.0, Sc=0.0, Ss=0.0, mcsid=0, comment='') + spc_id = 3 + load_id = 2 + components = '123456' + nodes = [1, 2, 5] + model.add_spc1(spc_id, components, nodes, comment='') + model.add_force(load_id, 3, 
1.0, [0., 0., 1.], cid=0, comment='') + model.add_force(load_id, 4, 1.0, [0., 0., 1.], cid=0, comment='') + + setup_case_control(model) + solver = Solver(model) + with self.assertRaises(RuntimeError): + solver.run() + #os.remove(model.bdf_filename) + #os.remove(solver.f06_filename) + #os.remove(solver.op2_filename) + +if __name__ == '__main__': # pragma: no cover + unittest.main() diff --git a/pyNastran/dev/bdf_vectorized3/test/all_tests.py b/pyNastran/dev/bdf_vectorized3/test/all_tests.py index 03185c2ae..a18fba691 100644 --- a/pyNastran/dev/bdf_vectorized3/test/all_tests.py +++ b/pyNastran/dev/bdf_vectorized3/test/all_tests.py @@ -4,13 +4,13 @@ from pyNastran.dev.bdf_vectorized3.test.test_numpy_utils import * #from pyNastran.dev.bdf_vectorized3.cards.test.test_vector_aero import * -#from pyNastran.dev.bdf_vectorized3.cards.test.test_vector_beams import * +from pyNastran.dev.bdf_vectorized3.cards.test.test_vector_beams import * #from pyNastran.dev.bdf_vectorized3.cards.test.test_vector_loads import * # good from pyNastran.dev.bdf_vectorized3.cards.test.test_vector_dmig import * from pyNastran.dev.bdf_vectorized3.cards.test.test_vector_bars import * -#from pyNastran.dev.bdf_vectorized3.cards.test.test_vector_coords import * +from pyNastran.dev.bdf_vectorized3.cards.test.test_vector_coords import * from pyNastran.dev.bdf_vectorized3.cards.test.test_vector_shells import TestShells from pyNastran.dev.bdf_vectorized3.cards.test.test_vector_solids import *