Commit

LALALALALALA
Lilaa3 committed Jan 14, 2024
1 parent 5d53022 commit 7aca431
Showing 6 changed files with 408 additions and 1,582 deletions.
92 changes: 48 additions & 44 deletions fast64_internal/sm64/animation/c_parser.py
@@ -1,4 +1,5 @@
from dataclasses import dataclass, field
import dataclasses
import re
from typing import List, Union

@@ -13,8 +14,8 @@ class IfDefMacro:

@dataclass
class ParsedValue:
value: Union[str, int, float]
if_def: IfDefMacro
value: Union[str, int, float] = 0
if_def: IfDefMacro = IfDefMacro()

def set_or_add(self, value):
if isinstance(self.value, list):
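
A note on the new defaults above: `if_def: IfDefMacro = IfDefMacro()` is evaluated once at class-definition time, so every `ParsedValue` built without an explicit `if_def` shares that single instance, and Python 3.11+ rejects such unhashable instance defaults outright (a regular, unfrozen dataclass is unhashable). A sketch of the portable spelling, assuming `IfDefMacro` is a plain dataclass whose two fields are the macro type and its condition:

```python
from dataclasses import dataclass, field
from typing import Union

@dataclass
class IfDefMacro:
    macro_type: str = ""   # assumed field names; the real ones are defined above
    condition: str = ""

@dataclass
class ParsedValue:
    value: Union[str, int, float] = 0
    # default_factory builds a fresh IfDefMacro per instance instead of
    # sharing one object across every ParsedValue
    if_def: IfDefMacro = field(default_factory=IfDefMacro)
```
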
@@ -41,6 +42,8 @@ class Include:
@dataclass
class Initialization(ParsedValue):
keywords: List[str] = field(default_factory=list)
name: str = ""

is_extern: bool = False
is_static: bool = False
is_const: bool = False
@@ -84,7 +87,8 @@ def array_to_struct_dict(self, struct_definition: list[str]):
'"',
"'",
"|",
"\\" "/",
"\\",
"/",
"%",
"*",
".",
@@ -94,29 +98,43 @@ def array_to_struct_dict(self, struct_definition: list[str]):
"<",
)

delimiters_pattern = "|".join(map(re.escape, delimiters))
DELIMITERS_PATTERN = "|".join(map(re.escape, delimiters))

token_pattern = re.compile(
r"""
rf"""
(?: # Non-capturing group for alternatives
[^{delimiters_pattern}\s"']+ # Match characters that are not delimiters or whitespace or quotes
[^{DELIMITERS_PATTERN}\s"']+ # Match characters that are not delimiters or whitespace or quotes
|
"[^"]*" # Match double-quoted strings
|
'[^']*' # Match single-quoted strings
)
|
[{delimiters_pattern}] # Match any of the delimiters
""".format(
delimiters_pattern=re.escape(delimiters_pattern)
),
[{DELIMITERS_PATTERN}] # Match any of the delimiters
""",
re.VERBOSE,
)

comment_pattern = re.compile(r"/\*.*?\*/|//.*?$", re.DOTALL | re.MULTILINE)


@dataclass
class CParser:
values: list[Initialization] = dataclasses.field(default_factory=list)
values_by_name: dict[str, Initialization] = dataclasses.field(default_factory=dict)

cur_initializer: Initialization = Initialization()
reading_array_size: bool = False
reading_keywords: bool = True
reading_function: bool = False # Used for stack stuff, functions are not supported
reading_macro: bool = False

stack: list[ParsedValue] = dataclasses.field(default_factory=list)
accumulated_tokens: list[str] = dataclasses.field(default_factory=list)
accumulated_macro_tokens: list[str] = dataclasses.field(default_factory=list)
if_defs: list[IfDefMacro] = dataclasses.field(default_factory=list)
origin_path: str = ""

def get_tabs(self):
return "\t" * len(self.stack)

@@ -154,7 +172,6 @@ def read_macro(self, prev_token: str, cur_token: str):
self.stack[-1].set_or_add(Include(self.accumulated_macro_tokens[1]))
elif macro_type in {"ifdef", "if", "ifndef"}:
self.if_defs.append(IfDefMacro(macro_type, " ".join(self.accumulated_macro_tokens[1:])))
pass
elif macro_type in {"elif", "else"}:
self.if_defs.pop()
self.if_defs.append(IfDefMacro(macro_type, " ".join(self.accumulated_macro_tokens[1:])))
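
For readers following the preprocessor handling: `if_defs` is a stack of the conditional-compilation directives currently in force, and `elif`/`else` replace the top entry rather than nest. A standalone sketch of that discipline (the `endif` pop is assumed, as it falls outside this hunk):

```python
# Stack discipline for #if/#ifdef/#elif/#else/#endif, sketched with tuples
# in place of IfDefMacro(macro_type, condition).
if_defs: list[tuple[str, str]] = []

def on_directive(macro_type: str, condition: str = ""):
    if macro_type in {"ifdef", "if", "ifndef"}:
        if_defs.append((macro_type, condition))   # open a new conditional block
    elif macro_type in {"elif", "else"}:
        if_defs.pop()                             # leave the previous branch...
        if_defs.append((macro_type, condition))   # ...and enter the new one
    elif macro_type == "endif":                   # assumed; not shown in this hunk
        if_defs.pop()

on_directive("ifdef", "VERSION_JP")
print(if_defs)  # [('ifdef', 'VERSION_JP')]
on_directive("else")
print(if_defs)  # [('else', '')]
on_directive("endif")
print(if_defs)  # []
```
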
@@ -169,7 +186,7 @@ def read_macro(self, prev_token: str, cur_token: str):

self.accumulated_macro_tokens.append(cur_token)

def read_values(self, prev_token: str, cur_token: str):
def read_values(self, cur_token: str):
if cur_token == "=":
designated_value = DesignatedValue(
None, self.if_defs.copy(), "".join(self.accumulated_tokens).strip().replace(".", "", 1)
@@ -188,9 +205,6 @@ def read_values(self, prev_token: str, cur_token: str):

array = ParsedValue([], self.if_defs.copy())

if cur_token == "(":
array.name = prev_token

self.stack[-1].set_or_add(array)
self.stack.append(array)
elif cur_token in {"}", ")"} or (cur_token == ";" and not self.reading_function):
@@ -203,10 +217,10 @@ def read_values(self, prev_token: str, cur_token: str):
if len(self.stack) == 0:
self.reading_function = False
self.reading_keywords = True
self.cur_initializer = Initialization(None, IfDefMacro())
self.cur_initializer = Initialization()
elif isinstance(self.stack[-1], DesignatedValue):
self.stack.pop()
elif cur_token == ";" or cur_token == ",":
elif cur_token in {";", ","}:
self.handle_accumulated_tokens()
else:
self.accumulated_tokens.append(cur_token)
@@ -216,12 +230,10 @@ def read_keywords(self, prev_token: str, cur_token: str):
if cur_token == "]":
self.reading_array_size = False
return
else:
if cur_token == "[":
self.reading_array_size = True
return
if cur_token == "[":
self.reading_array_size = True
return

add_token = False
if cur_token == "static":
self.cur_initializer.is_static = True
elif cur_token == "const":
@@ -234,13 +246,8 @@ def read_keywords(self, prev_token: str, cur_token: str):
self.cur_initializer.is_struct = True
elif cur_token == "*":
self.cur_initializer.pointer_depth += 1
else:
add_token = True

if not add_token:
return

if cur_token in {"=", "{", ";"}:
elif cur_token in {"=", "{", ";"}:
self.values.append(self.cur_initializer)
if prev_token == ")" and cur_token == "{":
self.reading_function = True
@@ -253,20 +260,21 @@ def read_keywords(self, prev_token: str, cur_token: str):
self.cur_initializer.if_def = self.if_defs.copy()
self.reading_keywords = False

elif not cur_token in {"\n"}:
elif not cur_token == "\n":
self.cur_initializer.keywords.append(cur_token)

def read_c_text(self, text: str, origin_path: str = ""):
self.cur_initializer = Initialization(None, IfDefMacro())
self.reading_array_size = False
self.reading_keywords = True
self.reading_function = False # Used for stack stuff, functions are not supported
self.reading_macro = False

self.stack: list[ParsedValue] = []
self.accumulated_tokens: list[str] = []
self.accumulated_macro_tokens: list[str] = []
self.if_defs: list[IfDefMacro] = []
self.cur_initializer = Initialization()
self.reading_array_size, self.reading_keywords, self.reading_function, self.reading_macro = (
False,
True,
False,
False,
)
self.stack.clear()
self.accumulated_tokens.clear()
self.accumulated_macro_tokens.clear()
self.if_defs.clear()

self.origin_path = origin_path

@@ -288,8 +296,4 @@ def read_c_text(self, text: str, origin_path: str = ""):
if cur_token == "=":
continue # HACK!!!
if not self.reading_keywords:
self.read_values(prev_token, cur_token)

def __init__(self) -> None:
self.values: list[Initialization] = []
self.values_by_name: dict[str, Initialization] = {}
self.read_values(cur_token)
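
With the hand-written `__init__` gone, `CParser` is constructed like any other dataclass. A minimal usage sketch (the input snippet and printed fields are illustrative; the path is hypothetical):

```python
parser = CParser()
parser.read_c_text(
    "static const u16 gAnimValues[] = { 0, 1, 2 };",
    origin_path="assets/anims/data.inc.c",  # hypothetical path
)
for init in parser.values:
    print(init.keywords, init.is_static, init.is_const)
```
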
93 changes: 23 additions & 70 deletions fast64_internal/sm64/animation/importing.py
@@ -1,17 +1,16 @@
from collections import OrderedDict
import bpy
from bpy.types import Object, bpy_prop_collection

import dataclasses
from io import BufferedReader
import math
import os
from typing import Optional
from io import BufferedReader
import dataclasses

import bpy
from bpy.types import Object
from mathutils import Euler, Vector, Quaternion

from ...utility import PluginError, decodeSegmentedAddr, path_checks
from ...utility_anim import stashActionInArmature
from ..sm64_utility import import_rom_checks
from ..sm64_level_parser import parseLevelAtPointer
from ..sm64_constants import (
level_pointers,
@@ -55,17 +54,16 @@ def __init__(self):
def read_pairs(self, pairs: list[SM64_AnimPair]):
array: list[int] = []

maxFrame = max([len(pair.values) for pair in pairs])
for frame in range(maxFrame):
max_frame = max(len(pair.values) for pair in pairs)
for frame in range(max_frame):
array.append([x.getFrame(frame) for x in pairs])
return array

def read_translation(self, pairs: list[SM64_AnimPair], scale: float):
translation_frames = self.read_pairs(pairs)

for translation_frame in translation_frames:
scaledTrans = [(1.0 / scale) * x for x in translation_frame]
self.translation.append(scaledTrans)
self.translation.append([(1.0 / scale) * x for x in translation_frame])

def read_rotation(self, pairs: list[SM64_AnimPair]):
rotation_frames: list[Vector] = self.read_pairs(pairs)
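
`read_pairs` transposes per-channel pair values into frame-major rows, padding shorter channels via `getFrame`. A worked illustration with a stand-in for `SM64_AnimPair` (its `getFrame` is assumed to clamp to the last value):

```python
class Pair:  # stand-in for SM64_AnimPair
    def __init__(self, values: list[int]):
        self.values = values

    def getFrame(self, frame: int) -> int:
        # assumed behavior: repeat the last value once the channel runs out
        return self.values[min(frame, len(self.values) - 1)]

pairs = [Pair([10, 20, 30]), Pair([5])]
max_frame = max(len(p.values) for p in pairs)
rows = [[p.getFrame(f) for p in pairs] for f in range(max_frame)]
print(rows)  # [[10, 5], [20, 5], [30, 5]]: one row per frame, one column per pair
```
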
@@ -126,8 +124,8 @@ def animation_data_to_blender(armature_obj: Object, blender_to_sm64_scale: float
index=property_index,
action_group=pose_bone.name,
)
for frame in range(len(bone_data.translation)):
f_curve.keyframe_points.insert(frame, bone_data.translation[frame][property_index])
for frame, translation in enumerate(bone_data.translation):
f_curve.keyframe_points.insert(frame, translation[property_index])
is_root = False

for property_index in range(4):
Expand All @@ -136,8 +134,8 @@ def animation_data_to_blender(armature_obj: Object, blender_to_sm64_scale: float
index=property_index,
action_group=pose_bone.name,
)
for frame in range(len(bone_data.rotation)):
f_curve.keyframe_points.insert(frame, bone_data.rotation[frame][property_index])
for frame, rotation in enumerate(bone_data.rotation):
f_curve.keyframe_points.insert(frame, rotation[property_index])


def import_animation_from_c_header(
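
The `enumerate` form above inserts keyframes one at a time. For long animations, a batch variant can be noticeably faster; a hypothetical sketch using Blender's `foreach_set` (assumes the F-curve starts empty):

```python
from bpy.types import FCurve

def insert_keyframes_batch(f_curve: FCurve, values: list[float]):
    """Insert one keyframe per frame index in a single batch.

    Hypothetical alternative to repeated keyframe_points.insert() calls.
    """
    f_curve.keyframe_points.add(len(values))
    # Flatten to [frame0, value0, frame1, value1, ...] for foreach_set.
    coords = [c for frame, value in enumerate(values) for c in (frame, value)]
    f_curve.keyframe_points.foreach_set("co", coords)
    f_curve.update()  # recalculate handles after the bulk write
```
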
@@ -184,7 +182,7 @@ def import_c_animations(path: str, animations: dict[str, SM64_Anim], table: SM64
print(f"Exception while attempting to parse file {filepath}: {str(e)}")
# Should I raise here?

print(f"All files have been parsed")
print("All files have been parsed")

table_initialization: None | Initialization = None
all_headers: OrderedDict[SM64_AnimHeader] = OrderedDict()
@@ -236,24 +234,24 @@ def import_binary_header(

@dataclasses.dataclass
class DMATableEntrie:
offsetFromTable: int
address: int
offset: int
size: int
address: int


def read_binary_dma_table_entries(rom_data: BufferedReader, address: int) -> list[DMATableEntrie]:
dma_entries: list[DMATableEntrie] = []
def read_binary_dma_table_entries(rom_data: BufferedReader, address: int):
entries: list[DMATableEntrie] = []
dma_table_reader = RomReading(rom_data, address)

numEntries = dma_table_reader.read_value(4)
addrPlaceholder = dma_table_reader.read_value(4)
num_entries = dma_table_reader.read_value(4) # numEntries
dma_table_reader.read_value(4) # addrPlaceholder

for i in range(numEntries):
for _ in range(num_entries):
offset = dma_table_reader.read_value(4)
size = dma_table_reader.read_value(4)
dma_entries.append(DMATableEntrie(offset, address + offset, size))
entries.append(DMATableEntrie(offset, size, address + offset))

return dma_entries
return entries


def import_binary_dma_animation(
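
The reader above implies the DMA table layout: a u32 entry count, a placeholder u32, then `(offset, size)` u32 pairs, with each entry's absolute address computed as table address plus offset. A standalone sketch of that layout using `struct` (big-endian u32s assumed, as on N64; `RomReading` internals are not shown in this diff):

```python
import struct

def parse_dma_table(data: bytes, table_address: int) -> list[tuple[int, int, int]]:
    """Parse (offset, size, address) triples, mirroring read_binary_dma_table_entries."""
    num_entries, _addr_placeholder = struct.unpack_from(">II", data, 0)
    entries = []
    for i in range(num_entries):
        offset, size = struct.unpack_from(">II", data, 8 + i * 8)
        entries.append((offset, size, table_address + offset))
    return entries

# Two fake entries: count=2, placeholder=0, then (offset, size) pairs.
blob = struct.pack(">6I", 2, 0, 0x10, 0x40, 0x50, 0x20)
for offset, size, address in parse_dma_table(blob, 0x4EC000):
    print(hex(offset), hex(size), hex(address))
# 0x10 0x40 0x4ec010
# 0x50 0x20 0x4ec050
```
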
@@ -270,13 +268,12 @@
header = import_binary_header(rom_data, entrie.address, True, None, animations)
table.elements.append(header)
else:
if not (0 <= table_index < len(entries)):
if not 0 <= table_index < len(entries):
raise PluginError("Entrie outside of defined table.")

entrie: DMATableEntrie = entries[table_index]
header = import_binary_header(rom_data, entrie.address, True, None, animations)
table.elements.append(header)
return header


def import_binary_table(
@@ -342,47 +339,3 @@ def import_binary_animations(
import_binary_header(rom_data, address, False, segment_data, animations)
else:
raise PluginError("Unimplemented binary import type.")


def import_animation_to_blender(
armature_obj: Object,
import_type: str,
sm64_to_blender_scale: float,
table_elements: bpy_prop_collection,
c_path: str = "",
import_rom_path: str = "",
address: int = 0,
is_segmented_pointer: bool = True,
level: str = "IC",
binary_import_type: str = "Animation",
read_entire_table: bool = False,
table_index: int = 0,
ignore_null: bool = False,
):
animations: dict[str, SM64_Anim] = {}
table = SM64_AnimTable()

if import_type == "Binary":
import_rom_checks(import_rom_path)
with open(import_rom_path, "rb") as rom_data:
import_binary_animations(
rom_data,
binary_import_type,
is_segmented_pointer,
address,
level,
animations,
read_entire_table,
table_index,
ignore_null,
table,
)
elif import_type == "C":
import_c_animations(c_path, animations, table)
else:
raise PluginError("Unimplemented Import Type.")

for data in animations.values():
animation_data_to_blender(armature_obj, sm64_to_blender_scale, data)

table.to_blender(table_elements)
