Merge pull request #3 from Death916/meta

Meta
This commit is contained in:
Death916 2026-01-30 01:58:38 -08:00 committed by GitHub
commit 269bbef1cc
No known key found for this signature in database
GPG key ID: B5690EEEBB952194

View file

@ -19,7 +19,7 @@ class TorrentScrape:
def __init__(self): def __init__(self):
self.url = "https://knaben.org/search/coast%20to%20coast%20am/0/1/date" self.url = "https://knaben.org/search/coast%20to%20coast%20am/0/1/date"
self.episodes = [] self.episodes = []
self.download_amount = 5 self.download_amount: int = 5
self.last_download = None self.last_download = None
self.last_download_link = None self.last_download_link = None
self.headers = { self.headers = {
@ -87,6 +87,184 @@ class TorrentScrape:
logging.info("Returning link to qbit") logging.info("Returning link to qbit")
return links # need to return link later to qbit but need to decide logic return links # need to return link later to qbit but need to decide logic
def generate_nfo_content(self, content):
    """Build audiobook-style NFO text from an episode description .txt.

    Heuristically parses *content* for:
      - title: the first non-empty, non-separator line
      - host: a "Hosted by ..." or "Host: ..." line
      - guests: "Guests:" / "Guest:" lines, plus a continuation line when
        the following line is not a date
      - date: the first line containing both a weekday and a month name
      - description: every line after the date line (or, as a fallback,
        lines past index 3 when no date line was ever found)

    Args:
        content: Raw text of the downloaded episode description file.

    Returns:
        The formatted NFO string.
    """
    weekdays = [
        "monday",
        "tuesday",
        "wednesday",
        "thursday",
        "friday",
        "saturday",
        "sunday",
    ]
    months = [
        "january",
        "february",
        "march",
        "april",
        "may",
        "june",
        "july",
        "august",
        "september",
        "october",
        "november",
        "december",
    ]
    lines = [line.strip() for line in content.splitlines()]
    # Drop blank lines and "-----" separator rules if present
    lines = [line for line in lines if not set(line).issubset({"-"}) and line]
    info = {
        "title": "Unknown Title",
        "host": "Unknown Host",
        "guests": [],
        "date": "Unknown Date",
        "description": "",
    }
    # Heuristic parsing
    if lines:
        info["title"] = lines[0]
    desc_lines = []
    parsing_desc = False
    for i, line in enumerate(lines):
        if i == 0:
            continue  # first line already consumed as the title
        lower_line = line.lower()
        if lower_line.startswith("hosted by"):
            # Slice past the matched prefix so the extraction is
            # case-insensitive. (Splitting on a literal lowercase "by"
            # raised IndexError for lines like "Hosted By George Noory".)
            info["host"] = line[len("hosted by"):].strip()
        elif lower_line.startswith("host:"):
            info["host"] = line.split(":", 1)[1].strip()
        elif lower_line.startswith(("guests:", "guest:")):
            # The prefix check guarantees a ":" is present.
            val = line.split(":", 1)[1].strip()
            if val:
                info["guests"].append(val)
            if i + 1 < len(lines):
                next_line = lines[i + 1]
                # If the next line is not a date, assume it's a guest
                # continuation.
                is_date = any(
                    x in next_line.lower() for x in weekdays + months
                )
                if not is_date:
                    info["guests"].append(next_line)
        elif any(day in lower_line for day in weekdays) and any(
            month in lower_line for month in months
        ):
            info["date"] = line
            parsing_desc = True  # everything after the date is description
        elif parsing_desc:
            desc_lines.append(line)
        # Fallback for simple format: deep in the file with no date line
        # found yet, treat the line as description. (The previous
        # condition was inverted -- it required the date to already be
        # known, which made this branch unreachable.)
        elif i > 3 and info["date"] == "Unknown Date":
            desc_lines.append(line)
    info["description"] = "\n".join(desc_lines)
    # Copyright year extraction from e.g. "Friday, January 10, 2025"
    year = "202X"
    if "," in info["date"]:
        year = info["date"].split(",")[-1].strip()
    nfo_template = f"""General Information
===================
Title: {info["title"]}
Author: Coast to Coast AM
Read By: {info["host"]}
Copyright: (c){year} Premiere Networks
Genre: Talk Radio / Paranormal
Publisher: Coast to Coast AM
Duration: 04:00:00 (Approx)
Media Information
=================
Source Format: MP3
Source Sample Rate: 44100 Hz
Source Channels: 2
Source Bitrate: 64 kbits
Encoded Codec: MP3
Encoded Sample Rate: 44100 Hz
Encoded Channels: 2
Encoded Bitrate: 64 kbits
Book Description
================
{info["description"]}
Guest(s): {", ".join(info["guests"])}
"""
    return nfo_template
def add_nfo(self):
    """Generate a .nfo beside each downloaded episode .txt file.

    Resolves the download directory from the QB_DOWNLOAD_PATH environment
    variable (falling back to self.download_location), then walks it and,
    for every .txt that is not a *_debug.txt and has no .nfo sibling yet,
    renders one via generate_nfo_content(). Failures on individual files
    are logged and skipped.
    """
    # Prefer the path from the environment over the instance attribute.
    target_dir = os.getenv("QB_DOWNLOAD_PATH") or self.download_location
    if not target_dir:
        logging.warning("QB_DOWNLOAD_PATH is not set. Skipping NFO generation.")
        return
    if not os.path.exists(target_dir):
        logging.warning(
            f"Download path {target_dir} does not exist. Skipping NFO generation."
        )
        return
    logging.info(f"Scanning {target_dir} for .txt files to generate .nfo...")
    for dirpath, _subdirs, filenames in os.walk(target_dir):
        for name in filenames:
            # Only plain episode transcripts; skip debug dumps.
            if not name.endswith(".txt") or name.endswith("_debug.txt"):
                continue
            txt_path = os.path.join(dirpath, name)
            nfo_path = os.path.splitext(txt_path)[0] + ".nfo"
            if os.path.exists(nfo_path):
                continue  # already generated on a previous pass
            try:
                with open(txt_path, "r", encoding="utf-8") as handle:
                    raw_text = handle.read()
                rendered = self.generate_nfo_content(raw_text)
                if rendered:
                    with open(nfo_path, "w", encoding="utf-8") as handle:
                        handle.write(rendered)
                    logging.info(f"Created NFO: {nfo_path}")
            except Exception as e:
                logging.error(f"Failed to create NFO for {txt_path}: {e}")
class Qbittorrent: class Qbittorrent:
def __init__(self): def __init__(self):
@ -133,12 +311,12 @@ class Qbittorrent:
for link in links: for link in links:
try: try:
# "/" added for creating subdir so abs finds properly
self.download_path = self.download_path + "/" self.download_path = self.download_path + "/"
torrent.torrents_add(urls=link, save_path=self.download_path)
torrent.torrents_add( torrent.torrents_add(
urls=link, urls=link, save_path=self.download_path, seeding_time_limit=1
save_path=self.download_path,
) )
logging.info(f"Added torrent {link} to qbittorrent") logging.info(f"Added torrent {link} to qbittorrent")
except Exception as e: except Exception as e:
logging.error(f"Error adding torrent {link} to qbittorrent: {e}") logging.error(f"Error adding torrent {link} to qbittorrent: {e}")
@ -155,7 +333,11 @@ if __name__ == "__main__":
link = scraper.get_torrent_link() link = scraper.get_torrent_link()
torrent = Qbittorrent() torrent = Qbittorrent()
torrent.get_credentials() torrent.get_credentials()
torrent.add_torrent(link) if link:
torrent.add_torrent(link)
# Process NFOs for existing downloads
scraper.add_nfo()
""" """
try: try:
while True: while True: