fix: changed parser.py with the values to search for

2025-11-08 15:33:21 -01:00
parent 065ecea3b2
commit 81cb6f21d6
4 changed files with 137 additions and 64 deletions

View File

@@ -1,3 +1,5 @@
+# pyright: basic
+
 import pandas as pd
 import parser
 import earthquakes as eq
@@ -46,9 +48,7 @@ def read_table_row(df, event_id, row_number_1):
     info = []
     for (i, col) in enumerate(tableCols):
         info.append(f"{i+1} {col}: {row[col]}")
-    # TODO corrigir numeros acima de 10 arruinando o alinhamento
-    infoString = f"Linha {row_number_1} do evento {event_id}:\n" + "\n".join(info)
-    return infoString
+    return f"Linha {row_number_1:02d} do evento {event_id}:\n" + "\n".join(info)

 def update_table_row(df, event_id, row_number_1, new_data):
     # atualiza uma linha específica da tabela do evento
@@ -133,4 +133,4 @@ if __name__ == "__main__":
     df, msg = insert_table_row(df, 5, 3)
     eq.guardar_csv(df, "dados.csv")
     eq.guardar_df(df, "data.txt")
 '''
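The `:02d` in the new return line is what resolves the removed TODO about two-digit row numbers breaking the alignment: zero-padding keeps one- and two-digit numbers the same width. A quick illustration (not part of the commit):

    for n in (3, 12):
        print(f"Linha {n}")      # old format: "Linha 3" / "Linha 12" (widths differ)
        print(f"Linha {n:02d}")  # new format: "Linha 03" / "Linha 12" (aligned)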

View File

@@ -1,7 +1,23 @@
 #! /usr/bin/env python
-import pandas as pd
+# pyright: basic
 import json
-from parser import parse
+import os
+import pandas as pd
+import parser
+import crud
+
+HEADER = """=== Terramotos ==="""
+MENU ="""[1] Criar a base de dados
+[2] Atualizar uma entrada
+[3] Apagar uma entrada
+[4] Visualizar uma entrada
+[Q] Sair
+"""

 def guardar_df(df: pd.DataFrame, fname: str) -> bool:
     with open(fname, "w") as fp:
@@ -29,9 +45,62 @@ def guardar_csv(df: pd.DataFrame, fname: str):
     return True

 def main():
-    pass
+    isRunning = True
+    db = parser.parse("dados.txt")
+    retInfo = None
+    while isRunning:
+        os.system("cls")
+        print(HEADER + "\n" + MENU)
+        usrIn = input("Opção: ").lower()
+        match usrIn:
+            case "1":
+                os.system("cls")
+                print(HEADER + "\nCRIAR")
+                fname = input("Nome do ficheiro com os dados. (Branco para dados.txt)")
+                if fname == "":
+                    fname = "dados.txt"
+                if _file_exists(fname):
+                    db = parser.parse(fname)
+                else:
+                    retInfo = "Nenhum ficheiro encontrado!"
+                pass
+            case "2":
+                if db is None:
+                    continue
+                pass
+            case "3":
+                if db is None:
+                    continue
+                pass
+            case "4":
+                if db is not None:
+                    ids = crud.read_ids(db)
+                    print(ids)
+                    input()
+                else:
+                    retInfo = "Base de dados não encontrada!"
+            case "q":
+                isRunning = False
+                continue
+            case _:
+                pass
+        if retInfo:
+            print(retInfo)
+            retInfo = None
+            input("Clique Enter para continuar")
+
+def _file_exists(name: str) -> bool:
+    currFiles = os.listdir(os.getcwd())
+    if name in currFiles:
+        return True
+    return False

 if __name__ == '__main__':
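For context, a minimal sketch of the flow the new main() loop relies on. parser.parse returning a pandas DataFrame is visible in the parser.py diff below, while crud.read_ids is only called here, so what it returns is an assumption:

    import parser
    import crud

    db = parser.parse("dados.txt")  # DataFrame of parsed events (see parser.py)
    ids = crud.read_ids(db)         # assumed helper backing menu option [4]
    print(ids)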

BIN
info/descricao.docx Normal file

Binary file not shown.

parser.py (118 changed lines)
View File

@@ -1,3 +1,4 @@
+# pyright: basic
 import io
 import warnings
@@ -6,6 +7,12 @@ from datetime import datetime
 import pandas as pd

+# --- globals ---
+DIST_IND = {"L": "Local", "R": "Regional", "D": "Distante"}
+TYPE = {"Q": "Quake", "V": "Volcanic", "U": "Unknown", "E": "Explosion"}
+
+# --- helper funcs ---
 def is_blank(l: str) -> bool:
     return len(l.strip(" ")) == 0
@@ -32,22 +39,24 @@ def into_dataframe(data) -> pd.DataFrame:
     return pd.DataFrame(data=aux)

-# ------------ principal
-def parse(fname="dados.txt"):
+def _concat(preamble, df: pd.DataFrame):
+    for (k,v) in preamble.items():
+        df.insert(len(df.columns)-1, k, [v for _ in range(len(df))])
+    return df
+
+# --- principal ---
+def parse(fname):
     fp = open(fname)
     data = [l for l in fp.read().split("\n")]
     chunks = boundaries(data)
     df = pd.DataFrame()
     for (idx,c) in enumerate(chunks):
-        a = parse_chunk(data[c[0]:c[1]], idx)
+        a = parse_chunk(data[c[0]:c[1]])
         aux = pd.concat([df, a], axis=0, ignore_index=True)
         df = aux
-    # print(df)
-    aux = df.loc[df["ID"] == 14]
-    # print(aux)
     fp.close()
     return df

 def boundaries(data: list[str]):
@@ -63,23 +72,18 @@ def boundaries(data: list[str]):
            start = None
     return boundaries

-def parse_chunk(chunk_lines: list[str], iD):
+def parse_chunk(chunk_lines: list[str]):
     hIdx = None
     for (idx, l) in enumerate(chunk_lines):
         if l[-1] == "7":
             hIdx = idx
             break
-    headersRet = parse_header(chunk_lines[:hIdx])
-    phaseRet = parse_type_7(chunk_lines[hIdx:])
-    hDF = into_dataframe(headersRet)
-    hDF["ID"] = iD
-    phaseRet["ID"] = iD
-    return pd.concat([hDF, phaseRet])
+    preambleRet = _parse_preamble(chunk_lines[:hIdx])
+    phaseRet = _parse_type_7(chunk_lines[hIdx:])
+    return _concat(preambleRet, phaseRet)

-def parse_header(hLines: list[str]):
+def _parse_preamble(hLines: list[str]):
     aux = defaultdict(list)
     for line in hLines:
@@ -91,13 +95,15 @@ def parse_header(hLines: list[str]):
            case "6":
                aux[6].append(line)
            case "E":
-               aux["E"].append(line)
+               pass
+               # aux["E"].append(line)
            case "I":
                aux["I"].append(line)
            case "F":
-               aux["F"].append(line)
-           case unknown:
-               warnings.warn(f"header type not implemented: {unknown}")
+               pass
+               # aux["F"].append(line)
+           case _:
+               pass

     headerDict = dict()
     for (k,v) in aux.items():
@@ -106,19 +112,7 @@ def parse_header(hLines: list[str]):
     return headerDict

-def parse_mag(line: str):
-    magnitudes = []
-    base = 55
-    while base < 79:
-        m = line[base:base+4]
-        mt = line[base+4]
-        if not is_blank(m):
-            magnitudes.append({"M": m, "T": mt})
-        base += 8
-    return magnitudes
-
-def parse_type_1(data: list[str]):
+def _parse_type_1(data: list[str]):
     aux = data[0]
     y = int(aux[1:5])
     mo = int(aux[6:8])
@@ -129,55 +123,65 @@ def parse_type_1(data: list[str]):
     mil = int(aux[19]) * 10**5
     dt = datetime(y,mo,d,h,m,s,mil)
-    dist_ind = aux[21]
-    eId = aux[22]
+    dist_ind = DIST_IND[aux[21]]
+    ev_type = TYPE[aux[22]]
     lat = float(aux[23:30])
     long = float(aux[30:38])
     depth = float(aux[38:43])
-    rep_ag = aux[45:48]
-    hypo = {"DateTime": dt.isoformat(), "Distance Indicator": dist_ind, "Event ID": eId, "Lat": lat, "Long": long, "Depth": depth, "Agency": rep_ag, "Magnitudes": list()}
+    no_stat = int(aux[48:51])
+    hypo = {"Data": dt.isoformat(), "Distancia": dist_ind, "Event Type": ev_type, "Lat": lat, "Long": long, "Depth": depth, "No. Stations": no_stat, "Magnitudes": list()}
     for l in data:
-        hypo["Magnitudes"] = hypo["Magnitudes"] + parse_mag(l)
+        hypo["Magnitudes"] = hypo["Magnitudes"] + _parse_mag(l)
     return hypo

-def parse_type_3(data: list[str]):
-    comments = []
+def _parse_mag(line: str):
+    magnitudes = []
+    base = 55
+    while base < 79:
+        m = line[base:base+4]
+        mt = line[base+4]
+        if not is_blank(m):
+            magnitudes.append({"Magnitude": m, "Tipo": mt})
+        base += 8
+    return magnitudes
+
+def _parse_type_3(data: list[str]):
+    comments = {}
     for line in data:
-        comments.append(line[:-2].strip())
-    return {"Comments": comments}
+        if line.startswith(" SENTIDO") or line.startswith(" REGIAO"):
+            c, v = line[:-2].strip().split(": ", maxsplit=1)
+            comments[c.capitalize()] = v
+    return comments

-def parse_type_6(data: list[str]):
+def _parse_type_6(data: list[str]):
     waves = []
     for l in data:
         waves.append(l.strip().split(" ")[0])
     return {"Wave": waves}

-def parse_type_7(data: list[str]):
+def _parse_type_7(data: list[str]):
     aux = io.StringIO("\n".join(data))
-    dados = pd.read_fwf(aux, colspecs=[(1,5), (6,8), (9,10), (10,15), (16,17), (18,22), (23,28), (29,33), (34,40), (41,45), (46,50), (51,56), (57,60), (61,63), (64,68), (69,70), (72,75), (76,79)])
+    dados = pd.read_fwf(aux, colspecs=[(1,5), (6,8), (10,15), (18,20), (20,22), (23,28), (34,38)])
     return dados

-def parse_type_e(data: list[str]):
+def _parse_type_e(data: list[str]):
     aux = data[0]
     error = {"Gap": int(aux[5:8]), "Origin": float(aux[14:20]), "Error_lat": float(aux[24:30]), "Error_long": float(aux[32:38]), "Error_depth": float(aux[38:43]), "Cov_xy": float(aux[43:55]), "Cov_xz": float(aux[55:67]), "Cov_yz": float(aux[67:79])}
     return error

-def parse_type_f(data: list[str]):
-    return {}
-
-def parse_type_i(data: list[str]):
+def _parse_type_i(data: list[str]):
     aux = data[0]
-    dt = datetime.strptime(aux[12:26], "%y-%m-%d %H:%M")
-    return {"Action": aux[8:11], "Action Extra": {"Date": dt.isoformat(), "OP": aux[30:35].strip(), "Status": aux[42:57].strip(), "ID":int(aux[60:74])}}
+    return {"ID": int(aux[60:74])}

-FUNCS = {1: parse_type_1, 3: parse_type_3, 6: parse_type_6, "E": parse_type_e, "F": parse_type_f, "I": parse_type_i}
+FUNCS = {1: _parse_type_1, 3: _parse_type_3, 6: _parse_type_6, "E": _parse_type_e, "I": _parse_type_i}

-parse()
+parse("dados.txt")
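The new _concat helper is what replaced the old ID-based concat in parse_chunk: it broadcasts every preamble field as a constant column across the phase table returned by _parse_type_7. A minimal sketch of that behaviour with made-up column names (illustrative only, not the real colspec output):

    import pandas as pd

    def _concat(preamble, df: pd.DataFrame):
        # insert one constant-valued column per preamble key, just before the last column
        for (k, v) in preamble.items():
            df.insert(len(df.columns) - 1, k, [v for _ in range(len(df))])
        return df

    phases = pd.DataFrame({"STAT": ["PVAQ", "PESTR"], "HRMM": ["0153", "0154"]})
    print(_concat({"Data": "2024-01-01T01:52:00", "Lat": 36.9}, phases))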