def process_selected_time(time_value):
    """Record the alarm time chosen in the UI and register it with the alarm object.

    Parameters
    ----------
    time_value : str | None
        Time string from the Dash time-input component; falsy when nothing
        has been selected yet.

    Returns
    -------
    str
        Status message for display next to the input.
    """
    if time_value:
        # BUG FIX: the module-level state is named `_alarm_time`; the original
        # declared `global alarm_time` and wrote to that *different* name, so
        # `_alarm_time` stayed None forever and a stray global was created.
        global _alarm_time
        _alarm_time = time_value
        # Register the alarm with its creation timestamp.
        alarm_obj.add_alarm(time_value, datetime.datetime.now())
        return f'Alarm time: {time_value}'
    return 'No time selected yet.'
def check_alarms():
    """Poll the alarm object once and announce a triggered alarm on stdout.

    NOTE(review): this is invoked a single time at module import, not on a
    timer — presumably it was meant to run from a dcc.Interval callback;
    confirm with the author before relying on it.
    """
    if alarm_obj.check_alarm():
        print("ALARM TRIGGERED!")
        # Play alarm sound here using dash audio component
def get_news(self):
    """Fetch up to 30 headlines from the RSS feeds listed in feeds.txt.

    Reads one feed URL per line from ``feeds.txt``, samples at most 10
    entries per feed until 30 have been collected, stores them keyed by
    headline in ``self._news_dict``, and mirrors them to ``news.txt``.

    Returns
    -------
    dict
        Mapping of headline -> entry dict with keys ``title``, ``source``,
        ``publish_date`` and ``summary``. Empty dict on any top-level failure.
    """
    print_time()
    self._news_dict = {}
    self._news_dict_length = 0

    try:
        with open("feeds.txt", "r") as f:
            feeds = [line.strip() for line in f]
    except Exception as e:
        print(f"Error reading feeds.txt: {e}")
        return {}

    all_entries = []
    print("Getting news entries...")

    for feed in feeds:
        try:
            print(f"Fetching from feed: {feed}")  # Debug print
            d = feedparser.parse(feed)

            # Skip feeds that answered with a non-OK HTTP status.
            if hasattr(d, 'status') and d.status != 200:
                print(f"Skip feed {feed}: status {d.status}")
                continue

            feed_entries = []
            for post in d.entries:
                feed_entries.append({
                    'title': post.title,
                    'source': d.feed.title if hasattr(d.feed, 'title') else 'Unknown',
                    'publish_date': post.published if hasattr(post, 'published') else '',
                    'summary': post.summary if hasattr(post, 'summary') else ''
                })

            if feed_entries:
                # At most 10 random entries per feed so one feed can't dominate.
                selected = random.sample(feed_entries, min(10, len(feed_entries)))
                all_entries.extend(selected)
                print(f"Added {len(selected)} entries from {feed}")  # Debug print

            if len(all_entries) >= 30:
                break

        except Exception as e:
            # Best-effort: a single bad feed must not abort the whole refresh.
            print(f"Error processing feed {feed}: {e}")
            continue

    if not all_entries:
        print("No entries collected")
        return {}

    if len(all_entries) > 30:
        all_entries = random.sample(all_entries, 30)

    # Duplicate headlines across feeds collapse into one entry (dict key).
    for entry in all_entries:
        self._news_dict[entry['title']] = entry
    # BUG FIX: keep the length attribute consistent with the dict — the
    # original reset it to 0 and never updated it again.
    self._news_dict_length = len(self._news_dict)

    try:
        with open("news.txt", "w") as f:
            print("Writing news to file...")
            for entry in self._news_dict.values():
                f.write(f"[{entry['publish_date']}] {entry['source']}: {entry['title']}\n")
            # Note: the `with` block already flushes/closes; explicit flush
            # removed as redundant.
    except Exception as e:
        print(f"Error writing to news.txt: {e}")
    return self._news_dict