Refactor news fetching and add alarm sound

This commit is contained in:
Death916 2025-02-09 00:34:01 -08:00
parent 3b57ff62b1
commit 88005c1da1
4 changed files with 88 additions and 38 deletions

View file

@ -13,6 +13,7 @@ weather_obj = Weather()
news_obj = News()
scores_obj = NBAScores()
alarm_obj = alarm.Alarm()
_alarm_time = None
# Uses arbitrary date in the past as initial value
_last_news_update = datetime.datetime(2000, 1, 1)
@ -39,7 +40,8 @@ app.layout = html.Div([
html.Div(id='weather-display')
], id='scores-weather-container'),
]),
html.Div(id='news-ticker', className='ticker'),
# NOTE: className='ticker' was removed here; restore it if the ticker styling/animation breaks
html.Div(id='news-ticker'),
# Intervals
dcc.Interval(id='clock-interval', interval=60000, n_intervals=0),
dcc.Interval(id='weather-interval', interval=150000, n_intervals=0),
@ -77,6 +79,8 @@ def toggle_time_input(n_clicks, current_style):
def process_selected_time(time_value):
    """Record the user-selected alarm time and register it with the alarm object.

    Args:
        time_value: Time string from the time-input component; falsy when no
            time has been chosen yet.

    Returns:
        A status string echoing the selected alarm time, or a prompt string
        when no time has been selected.
    """
    if time_value:
        # Keep the module-level record of the current alarm time in sync.
        # BUG FIX: the original declared `global alarm_time`, which silently
        # created a brand-new global instead of updating the `_alarm_time`
        # variable defined at module scope.
        global _alarm_time
        _alarm_time = time_value
        alarm_obj.add_alarm(time_value, datetime.datetime.now())
        return f'Alarm time: {time_value}'
    return 'No time selected yet.'
@ -108,15 +112,18 @@ def update_news(n):
global _last_news_update, _cached_news, _initial_run
current_time = datetime.datetime.now()
try:
print("UPDATING NEWS...")
headlines_dict = news_obj.get_news()
combined_text = " | ".join(headlines_dict.keys())
text_px = len(combined_text) * 8 # Approximate 8px per character
scroll_speed = 75 # pixels per second
duration = text_px / scroll_speed # seconds required to scroll across
if duration < 20:
duration = 20
# Combine source and headline for each news item
combined_items = " | ".join([f"{data['source']}: {headline}"
for headline, data in headlines_dict.items()])
text_px = len(combined_items) * 8
scroll_speed = 75
duration = max(text_px / scroll_speed, 20)
ticker_style = {"animationDuration": f"{duration}s"}
combined_items = " | ".join([f"{headline}" for headline in headlines_dict.keys()])
_cached_news = html.Div(
html.Span(combined_items, className="news-item", style=ticker_style),
className='ticker'
@ -124,6 +131,7 @@ def update_news(n):
_last_news_update = current_time
_initial_run = False
return _cached_news
except Exception as e:
if _cached_news:
return _cached_news
@ -153,6 +161,15 @@ def update_scores(n):
except Exception as e:
return html.Div("Scores unavailable")
# Check for alarms and play sound if triggered
def check_alarms():
    # Poll the shared Alarm instance once; check_alarm() is truthy when an
    # alarm is due (semantics defined in the alarm module — not visible here).
    trigg = alarm_obj.check_alarm()
    if trigg:
        # Currently only logs; actual audio playback is not implemented yet.
        print("ALARM TRIGGERED!")
        # Play alarm sound here using dash audio component
# NOTE(review): this call runs exactly once, at import time — alarms added
# later via process_selected_time are never re-checked. Presumably this should
# be driven periodically (e.g. from a dcc.Interval callback) — confirm intent.
check_alarms()
# Entry point: serve the Dash app. host='0.0.0.0' binds all interfaces, so
# the dashboard is reachable from other machines on the network; debug
# mode (and its auto-reloader) is deliberately off.
if __name__ == '__main__':
    app.run_server(debug=False, host='0.0.0.0', port=8050)

View file

@ -1,6 +1,8 @@
import feedparser
from time import localtime, strftime
import random
import socket
def print_time():
    """Print the current local time, e.g. "February 09, 12:34 AM"."""
    stamp = strftime("%B %d, %I:%M %p", localtime())
    print(stamp)
@ -8,41 +10,72 @@ class News:
def __init__(self):
    # Cache of headline -> entry metadata, rebuilt on every get_news() call.
    self._news_dict = {}
    # Running count of cached entries (kept alongside the dict).
    self._news_dict_length = 0
    # Set timeout for feed fetching
    # NOTE(review): this changes the process-wide default socket timeout,
    # which affects every socket opened anywhere in the program, not just
    # feedparser's fetches — confirm that is acceptable.
    socket.setdefaulttimeout(10)
# NOTE(review): this span appears to conflate the pre- and post-change
# versions of get_news from a diff whose +/- markers were stripped — several
# statements are duplicated (dict reset, feeds.txt read, post loop, news.txt
# write). The NOTE comments below mark each duplicated pair; reconcile against
# the repository before treating this as runnable code.
def get_news(self):
    # Log when this refresh started.
    print_time()
    feeds = []
    # NOTE(review): duplicate reset — the next two lines are the old and new
    # form of the same statement.
    self._news_dict = {} # Reset dict each time
    self._news_dict = {}
    self._news_dict_length = 0
    # Load RSS feed list
    # NOTE(review): duplicate read of feeds.txt — the bare `with` below and
    # the try/except that follows are old and new versions of the same load.
    with open("feeds.txt", "r") as f:
        feeds = [line.strip() for line in f]
    # Get latest news from each feed
    try:
        with open("feeds.txt", "r") as f:
            feeds = [line.strip() for line in f]
    except Exception as e:
        # Without a feed list there is nothing to fetch; bail out empty.
        print(f"Error reading feeds.txt: {e}")
        return {}
    all_entries = []
    print("Getting news entries...")
    for feed in feeds:
        # NOTE(review): duplicate parse — this parse/shuffle pair is the old
        # code path; the try block below re-parses the same feed.
        d = feedparser.parse(feed)
        # randomly select 20 news items (old code path)
        random.shuffle(d.entries)
        # Old note: the first feed dominated because it alone had >20 posts.
        try:
            feed_entries = []
            print(f"Fetching from feed: {feed}") # Debug print
            d = feedparser.parse(feed)
            # Skip feeds that report a non-OK HTTP status.
            if hasattr(d, 'status') and d.status != 200:
                print(f"Skip feed {feed}: status {d.status}")
                continue
            # NOTE(review): duplicate loop headers — the capped loop is the
            # old version, the uncapped `for post in d.entries:` the new one.
            for post in d.entries[:20]: # Limit to 20 entries per feed
                if self._news_dict_length >= 20: # Max 20 total entries
                    return self._news_dict
            for post in d.entries:
                # Collect normalized entry metadata, tolerating missing
                # attributes on the feedparser post/feed objects.
                feed_entries.append({
                    'title': post.title,
                    'source': d.feed.title if hasattr(d.feed, 'title') else 'Unknown',
                    'publish_date': post.published if hasattr(post, 'published') else '',
                    'summary': post.summary if hasattr(post, 'summary') else ''
                })
                # NOTE(review): old code path — direct insertion into the
                # cache, without the hasattr guards used above.
                self._news_dict[post.title] = {
                    'source': d.feed.title,
                    'publish_date': post.published,
                    'headline': post.title,
                    'summary': post.summary
                }
                self._news_dict_length += 1
            # Store last 20 news items in text file
            # Sample at most 10 entries per feed so no single feed dominates.
            if feed_entries:
                selected = random.sample(feed_entries, min(10, len(feed_entries)))
                all_entries.extend(selected)
                print(f"Added {len(selected)} entries from {feed}") # Debug print
            # Stop fetching once we have gathered enough entries overall.
            if len(all_entries) >= 30:
                break
            # NOTE(review): duplicate news.txt write — old version; the new
            # try/except write appears after the loop below.
            with open("news.txt", "w") as f:
                for headline in list(self._news_dict.keys())[-20:]:
                    f.write(f"{headline}\n")
        except Exception as e:
            # A single bad feed should not abort the whole refresh.
            print(f"Error processing feed {feed}: {e}")
            continue
    if not all_entries:
        print("No entries collected")
        return {}
    # Cap the combined pool at 30 randomly chosen entries.
    if len(all_entries) > 30:
        all_entries = random.sample(all_entries, 30)
    # Rebuild the headline -> entry cache from the sampled pool.
    for entry in all_entries:
        self._news_dict[entry['title']] = entry
    # Persist the refreshed headlines; failure here is non-fatal.
    try:
        with open("news.txt", "w") as f:
            print("Writing news to file...")
            for entry in self._news_dict.values():
                f.write(f"[{entry['publish_date']}] {entry['source']}: {entry['title']}\n")
            f.flush()
    except Exception as e:
        print(f"Error writing to news.txt: {e}")
    return self._news_dict