Compare commits

7 Commits

Author  SHA1  Message  Date

b7719295ab  feat: R and D of the CRUD done  2025-11-08 16:30:24 -01:00

81cb6f21d6  fix: changed parser.py with the values to look for  2025-11-08 15:33:21 -01:00

smolsbs
065ecea3b2  Merge pull request #2 from smolsbs/dev  2025-11-06 20:11:11 -01:00
    CRUD & guardar_csv

aulojor
b30f931e61  feat: CRUD  2025-11-06 09:29:29 -01:00
    I think it is more or less complete, but the returns are still a bit
    inconsistent across the functions: some return a string after completing
    the action, others return a new df to replace the old one, and others both.
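A minimal sketch of one way those returns could later be unified, assuming every CRUD function yields a (DataFrame, message) pair; hypothetical code, not part of the commit:

import pandas as pd

# Hypothetical convention: always return (new_df, message) so that
# call sites can unpack both uniformly.
def delete_event(df: pd.DataFrame, event_id: int) -> tuple[pd.DataFrame, str]:
    new_df = df.drop(df[df["ID"] == event_id].index)
    new_df.loc[new_df["ID"] > event_id, "ID"] -= 1
    return new_df, f"Event {event_id} deleted."

# df, msg = delete_event(df, 3)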
aulojor
3417c59332  feat: guardar_csv  2025-11-06 09:28:47 -01:00
    I added a function to save a CSV because I needed some way to view the
    table while testing the CRUD.

aulojor
6f65c237d3  feat: update requirements.txt  2025-11-05 09:22:36 -01:00
    Add pandas to requirements.txt.

aulojor
0e38283e6f  feat: Read  2025-11-05 09:13:04 -01:00
    Read:
    - the available event IDs;
    - an event's header;
    - an event's phase table;
    - a single row of the table.

    Numbered output for later selection in the text menu, but still with an
    alignment problem when the index is greater than 9.
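A minimal sketch of one common fix for that alignment problem, with hypothetical menu values: pad every index to the width of the largest one.

# Size the index field to the widest index so "9|" and "10|" line up.
ids = list(range(1, 13))
width = len(str(max(ids)))
for i in ids:
    print(f"{i:>{width}}| item {i}")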
5 changed files with 306 additions and 60 deletions

crud.py (new file, 136 lines)
@@ -0,0 +1,136 @@
# pyright: basic
import pandas as pd
import parser
import earthquakes as eq

pd.set_option('display.max_rows', 500)
pd.set_option('display.max_columns', 500)
pd.set_option('display.width', 150)


def read_ids(df):
    # List of the unique IDs in the DataFrame
    return sorted(set(df["ID"]))


def read_header(df, event_id):
    # Header information for the event
    row = df[df["ID"] == event_id].iloc[0]
    cols = list(df.columns)
    headerCols = ["DateTime", "Distance Indicator", "Event ID", "Lat", "Long", "Depth", "Agency", "Magnitudes"]
    # end = cols.index("ID") - 1
    # header_cols = cols[:end]
    # To select every column instead of only some
    info = []
    for (i, col) in enumerate(headerCols):
        info.append(f"{i+1} {col}: {row[col]}")
    infoString = f"Header do evento {event_id}:\n" + "\n".join(info)
    return infoString


def get_table(df, event_id):
    # Returns the event's data table (every row after the header row)
    rows = df[df["ID"] == event_id]
    cols = list(df.columns)
    start = cols.index("ID") + 1
    table = rows[cols[start:]].iloc[1:]
    return table


def read_table_row(df, event_id, row_number_1):
    # Returns a specific (1-indexed) row of the table
    row_number_0 = row_number_1 - 1
    table = get_table(df, event_id)
    if row_number_0 < 0 or row_number_0 >= len(table):
        return f"Linha {row_number_1} não pertence ao evento {event_id}."
    row = table.iloc[row_number_0]
    cols = list(df.columns)
    start = cols.index("STAT")
    tableCols = cols[start:]
    info = []
    for (i, col) in enumerate(tableCols):
        info.append(f"{i+1} {col}: {row[col]}")
    return f"Linha {row_number_1:02d} do evento {event_id}:\n" + "\n".join(info)


def update_table_row(df, event_id, row_number_1, new_data):
    # Updates a specific row of the event's table in place
    row_number_0 = row_number_1 - 1
    table = get_table(df, event_id)
    if row_number_0 < 0 or row_number_0 >= len(table):
        return f"Linha {row_number_1} não pertence ao evento {event_id}."
    for key, value in new_data.items():
        if key in table.columns:
            df.loc[(df["ID"] == event_id) & (df.index == table.index[row_number_0]), key] = value
    return f"Linha {row_number_1} do evento {event_id} atualizada com sucesso."


def update_header(df, event_id, new_data):
    # Updates an event's header (the first row carrying that ID)
    header_idx = df[df["ID"] == event_id].index[0]
    for key, value in new_data.items():
        if key in df.columns:
            df.loc[header_idx, key] = value
    return f"Header do evento {event_id} atualizado com sucesso."


def delete_event(df, event_id):
    # Deletes an entire event (header + table) and renumbers the later IDs
    new_df = df.drop(df[df["ID"] == event_id].index)
    new_df.loc[new_df["ID"] > event_id, "ID"] -= 1
    return new_df


def delete_table_row(df, event_id, row_number_1):
    # Deletes a specific row of the event's table
    row_number_0 = row_number_1 - 1
    table = get_table(df, event_id)
    if row_number_0 < 0 or row_number_0 >= len(table):
        return f"Linha {row_number_1} não pertence ao evento {event_id}."
    new_df = df.drop(table.index[row_number_0])
    return new_df


def create_blank_event(df, event_id):
    # Creates an empty event with a header row and one table row,
    # shifting the IDs of the events after it
    df.loc[df["ID"] >= event_id, "ID"] += 1
    blank_row_df = pd.DataFrame(columns=df.columns, index=[0, 1])
    blank_row_df["ID"] = event_id
    blank_row_df = blank_row_df.astype(df.dtypes)
    new_df = pd.concat([df, blank_row_df], ignore_index=True)
    new_df = new_df.sort_values(by="ID", kind="mergesort").reset_index(drop=True)
    return new_df


def create_table_row(df, event_id, row_number_1):
    # Inserts a blank table row at the given 1-indexed position
    event_rows = df[df["ID"] == event_id]
    if event_rows.empty:
        return df, f"Erro: Evento com ID {event_id} não encontrado."
    header_idx = event_rows.index[0]
    table_size = len(event_rows.index) - 1
    # Validate the position of the new row
    if not (1 <= row_number_1 <= table_size + 1):
        return df, f"Erro: Posição {row_number_1} inválida. Evento {event_id} tem {table_size} linha(s) na tabela."
    insertion_point = header_idx + row_number_1
    new_row_df = pd.DataFrame(columns=df.columns, index=[0])
    new_row_df['ID'] = event_id
    new_row_df = new_row_df.astype(df.dtypes)
    df_before = df.iloc[:insertion_point]
    df_after = df.iloc[insertion_point:]
    new_df = pd.concat([df_before, new_row_df, df_after], ignore_index=True)
    return new_df, f"Linha inserida com sucesso na posição {row_number_1} do evento {event_id}."


''' temporary test while we don't have a menu yet
if __name__ == "__main__":
    df = parser.parse("dados.txt")
    first_id = read_ids(df)[0]
    for i in range(5):
        df = delete_event(df, i)
    for i in range(5):
        df = create_blank_event(df, i+5)
    update_table_row(df, 5, 1, {"Velo": 5.1})
    df, msg = create_table_row(df, 5, 1)
    df, msg = create_table_row(df, 5, 3)
    eq.guardar_csv(df, "dados.csv")
    eq.guardar_df(df, "data.txt")
'''
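As the feat: CRUD commit message notes, the functions still differ in what they return; a short usage sketch under that assumption (assumes a dados.txt file in the working directory):

import parser
import crud

df = parser.parse("dados.txt")                   # build the DataFrame
ids = crud.read_ids(df)                          # unique event IDs
print(crud.read_table_row(df, ids[0], 1))        # returns a formatted string
df, msg = crud.create_table_row(df, ids[0], 1)   # returns a (df, message) pair
print(msg)
df = crud.delete_event(df, ids[0])               # returns only the new DataFrame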

earthquakes.py

@@ -1,7 +1,23 @@
 #! /usr/bin/env python
-import pandas as pd
+# pyright: basic
 import json
-from parser import parse
+import os
+
+import pandas as pd
+import parser
+import crud
+
+HEADER = """=== Terramotos ==="""
+MENU = """[1] Criar a base de dados
+[2] Atualizar uma entrada
+[3] Apagar uma entrada
+[4] Visualizar uma entrada
+[Q] Sair
+"""
 
 def guardar_df(df: pd.DataFrame, fname: str) -> bool:
     with open(fname, "w") as fp:
@@ -20,10 +36,99 @@ def guardar_json(df: pd.DataFrame, fname: str) -> bool:
         return False
     return True
 
+def guardar_csv(df: pd.DataFrame, fname: str):
+    with open(fname, "w") as fp:
+        try:
+            df.to_csv(fp, index=False)
+        except ValueError:
+            return False
+    return True
+
 def main():
-    pass
+    isRunning = True
+    db = parser.parse("dados.txt")
+    retInfo = None
+
+    while isRunning:
+        os.system("cls")
+        print(HEADER + "\n" + MENU)
+        usrIn = input("Opção: ").lower()
+        match usrIn:
+            case "1":
+                os.system("cls")
+                print(HEADER + "\nCRIAR")
+                fname = input("Nome do ficheiro com os dados. (Branco para dados.txt)")
+                if fname == "":
+                    fname = "dados.txt"
+                if _file_exists(fname):
+                    db = parser.parse(fname)
+                else:
+                    retInfo = "Nenhum ficheiro encontrado!"
+            case "2":
+                if db is not None:
+                    continue
+                else:
+                    retInfo = "Base de dados não encontrada!"
+            case "3":
+                if db is not None:
+                    a = _get_uniques(db)
+                    ev_ids = _show_events(a)
+                    _select = input("Qual a entrada a apagar: ")
+                    db = db.drop(db[db["ID"] == ev_ids[_select]].index)
+                else:
+                    retInfo = "Base de dados não encontrada!"
+            case "4":
+                if db is not None:
+                    a = _get_uniques(db)
+                    ev_ids = _show_events(a)
+                    _select = input("Qual a entrada a visualizar: ")
+                    _view_event(db, ev_ids[_select])
+                    input()
+                else:
+                    retInfo = "Base de dados não encontrada!"
+            case "q":
+                isRunning = False
+                continue
+            case _:
+                pass
+
+        if retInfo:
+            print(retInfo)
+            retInfo = None
+            input("Clique Enter para continuar")
+
+def _file_exists(name: str) -> bool:
+    currFiles = os.listdir(os.getcwd())
+    if name in currFiles:
+        return True
+    return False
+
+def _get_uniques(df) -> pd.DataFrame:
+    return df.get(["ID", "Data", "Regiao"]).drop_duplicates(subset="ID", keep="first")
+
+def _show_events(df):
+    events = {}
+    idx = 1
+    for (_, row) in df.iterrows():
+        print(f"{idx:2d}| {row["Regiao"]}")
+        events[str(idx)] = row["ID"]
+        idx += 1
+    return events
+
+def _view_event(df, id):
+    for idx, row in df.loc[df["ID"] == id].iterrows():
+        print(row)
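Note that os.system("cls") clears the screen on Windows only; a hedged cross-platform variant (hypothetical _clear helper, not part of this diff) could look like:

import os

def _clear() -> None:
    # "cls" is the Windows clear command; POSIX terminals use "clear"
    os.system("cls" if os.name == "nt" else "clear")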

info/descricao.docx (new binary file, not shown)

parser.py (118 lines changed)

@@ -1,3 +1,4 @@
+# pyright: basic
 import io
 import warnings
@@ -6,6 +7,12 @@ from datetime import datetime
 import pandas as pd
 
+# --- globals ---
+DIST_IND = {"L": "Local", "R": "Regional", "D": "Distante"}
+TYPE = {"Q": "Quake", "V": "Volcanic", "U": "Unknown", "E": "Explosion"}
+
+# --- helper funcs ---
 def is_blank(l: str) -> bool:
     return len(l.strip(" ")) == 0
@@ -32,22 +39,24 @@ def into_dataframe(data) -> pd.DataFrame:
     return pd.DataFrame(data=aux)
 
-# ------------ principal
+def _concat(preamble, df: pd.DataFrame):
+    for (k,v) in preamble.items():
+        df.insert(len(df.columns)-1, k, [v for _ in range(len(df))])
+    return df
 
-def parse(fname="dados.txt"):
+# --- principal ---
+def parse(fname):
     fp = open(fname)
     data = [l for l in fp.read().split("\n")]
     chunks = boundaries(data)
     df = pd.DataFrame()
     for (idx,c) in enumerate(chunks):
-        a = parse_chunk(data[c[0]:c[1]], idx)
+        a = parse_chunk(data[c[0]:c[1]])
         aux = pd.concat([df, a], axis=0, ignore_index=True)
         df = aux
-    print(df)
-    aux = df.loc[df["ID"] == 14]
-    print(aux)
     fp.close()
     return df
 
 def boundaries(data: list[str]):
@@ -63,23 +72,18 @@ def boundaries(data: list[str]):
             start = None
     return boundaries
 
-def parse_chunk(chunk_lines: list[str], iD):
+def parse_chunk(chunk_lines: list[str]):
     hIdx = None
     for (idx, l) in enumerate(chunk_lines):
         if l[-1] == "7":
             hIdx = idx
             break
-    headersRet = parse_header(chunk_lines[:hIdx])
-    phaseRet = parse_type_7(chunk_lines[hIdx:])
-    hDF = into_dataframe(headersRet)
-    hDF["ID"] = iD
-    phaseRet["ID"] = iD
-    return pd.concat([hDF, phaseRet])
+    preambleRet = _parse_preamble(chunk_lines[:hIdx])
+    phaseRet = _parse_type_7(chunk_lines[hIdx:])
+    return _concat(preambleRet, phaseRet)
 
-def parse_header(hLines: list[str]):
+def _parse_preamble(hLines: list[str]):
     aux = defaultdict(list)
     for line in hLines:
@@ -91,13 +95,15 @@ def parse_header(hLines: list[str]):
             case "6":
                 aux[6].append(line)
             case "E":
-                aux["E"].append(line)
+                pass
+                # aux["E"].append(line)
             case "I":
                 aux["I"].append(line)
             case "F":
-                aux["F"].append(line)
-            case unknown:
-                warnings.warn(f"header type not implemented: {unknown}")
+                pass
+                # aux["F"].append(line)
+            case _:
+                pass
 
     headerDict = dict()
     for (k,v) in aux.items():
@@ -106,19 +112,7 @@ def parse_header(hLines: list[str]):
     return headerDict
 
-def parse_mag(line: str):
-    magnitudes = []
-    base = 55
-    while base < 79:
-        m = line[base:base+4]
-        mt = line[base+4]
-        if not is_blank(m):
-            magnitudes.append({"M": m, "T": mt})
-        base += 8
-    return magnitudes
-
-def parse_type_1(data: list[str]):
+def _parse_type_1(data: list[str]):
     aux = data[0]
     y = int(aux[1:5])
     mo = int(aux[6:8])
@@ -129,55 +123,65 @@ def parse_type_1(data: list[str]):
     mil = int(aux[19]) * 10**5
     dt = datetime(y,mo,d,h,m,s,mil)
 
-    dist_ind = aux[21]
-    eId = aux[22]
+    dist_ind = DIST_IND[aux[21]]
+    ev_type = TYPE[aux[22]]
     lat = float(aux[23:30])
     long = float(aux[30:38])
     depth = float(aux[38:43])
-    rep_ag = aux[45:48]
+    no_stat = int(aux[48:51])
 
-    hypo = {"DateTime": dt.isoformat(), "Distance Indicator": dist_ind, "Event ID": eId, "Lat": lat, "Long": long, "Depth": depth, "Agency": rep_ag, "Magnitudes": list()}
+    hypo = {"Data": dt.isoformat(), "Distancia": dist_ind, "Event Type": ev_type, "Lat": lat, "Long": long, "Depth": depth, "No. Stations": no_stat, "Magnitudes": list()}
     for l in data:
-        hypo["Magnitudes"] = hypo["Magnitudes"] + parse_mag(l)
+        hypo["Magnitudes"] = hypo["Magnitudes"] + _parse_mag(l)
     return hypo
 
-def parse_type_3(data: list[str]):
-    comments = []
+def _parse_mag(line: str):
+    magnitudes = []
+    base = 55
+    while base < 79:
+        m = line[base:base+4]
+        mt = line[base+4]
+        if not is_blank(m):
+            magnitudes.append({"Magnitude": m, "Tipo": mt})
+        base += 8
+    return magnitudes
+
+def _parse_type_3(data: list[str]):
+    comments = {}
     for line in data:
-        comments.append(line[:-2].strip())
-    return {"Comments": comments}
+        if line.startswith(" SENTIDO") or line.startswith(" REGIAO"):
+            c, v = line[:-2].strip().split(": ", maxsplit=1)
+            comments[c.capitalize()] = v
+    return comments
 
-def parse_type_6(data: list[str]):
+def _parse_type_6(data: list[str]):
     waves = []
     for l in data:
         waves.append(l.strip().split(" ")[0])
     return {"Wave": waves}
 
-def parse_type_7(data: list[str]):
+def _parse_type_7(data: list[str]):
     aux = io.StringIO("\n".join(data))
-    dados = pd.read_fwf(aux, colspecs=[(1,5), (6,8), (9,10), (10,15), (16,17), (18,22), (23,28), (29,33), (34,40), (41,45), (46,50), (51,56), (57,60), (61,63), (64,68), (69,70), (72,75), (76,79)])
+    dados = pd.read_fwf(aux, colspecs=[(1,5), (6,8), (10,15), (18,20), (20,22), (23,28), (34,38)])
    return dados
 
-def parse_type_e(data: list[str]):
+def _parse_type_e(data: list[str]):
     aux = data[0]
     error = {"Gap": int(aux[5:8]), "Origin": float(aux[14:20]), "Error_lat": float(aux[24:30]), "Error_long": float(aux[32:38]), "Error_depth": float(aux[38:43]), "Cov_xy": float(aux[43:55]), "Cov_xz": float(aux[55:67]), "Cov_yz": float(aux[67:79])}
     return error
 
-def parse_type_f(data: list[str]):
-    return {}
-
-def parse_type_i(data: list[str]):
+def _parse_type_i(data: list[str]):
     aux = data[0]
-    dt = datetime.strptime(aux[12:26], "%y-%m-%d %H:%M")
-    return {"Action": aux[8:11], "Action Extra": {"Date": dt.isoformat(), "OP": aux[30:35].strip(), "Status": aux[42:57].strip(), "ID":int(aux[60:74])}}
+    return {"ID":int(aux[60:74])}
 
-FUNCS = {1: parse_type_1, 3: parse_type_3, 6: parse_type_6, "E": parse_type_e, "F": parse_type_f, "I": parse_type_i}
+FUNCS = {1: _parse_type_1, 3: _parse_type_3, 6: _parse_type_6, "E": _parse_type_e, "I": _parse_type_i}
 
-parse()
+parse("dados.txt")
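The new _concat helper broadcasts each preamble value (date, coordinates, ID, ...) onto every row of the event's phase table, inserting each new column just before the table's last column. A self-contained sketch of the same idea, with hypothetical sample values:

import pandas as pd

# Each scalar from the event preamble is repeated down the per-event
# phase table as a new column (the station/time values are made up).
phases = pd.DataFrame({"STAT": ["PVAQ", "PMAR"], "HRMM": ["0912", "0913"]})
preamble = {"Data": "2025-11-05T09:13:04", "Lat": 38.7, "ID": 0}
for k, v in preamble.items():
    phases.insert(len(phases.columns) - 1, k, [v for _ in range(len(phases))])
print(phases)  # columns: STAT, Data, Lat, ID, HRMM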

requirements.txt

@@ -1 +1,2 @@
 pytest==8.4.2
+pandas==2.3.3