#!/usr/bin/env python3
"""
Warren AI - Weekly multi-market scan.

Warren Buffett-style fundamental analysis for European markets (IT, FR, DE).

Uses deterministic calculations instead of an LLM for:
- Speed: ~2 seconds instead of 3-4 minutes
- Reliability: no network errors or rate limiting
- Cost: $0 instead of $0.60 per scan

Usage:
    python warren_scan.py              # Default: FTSE MIB (IT)
    python warren_scan.py --market FR  # CAC 40 (France)
    python warren_scan.py --market DE  # DAX 40 (Germany)
    python warren_scan.py --market ALL # All markets (sequential)
"""

# Standard library
import os
import argparse
import re
import shutil
import json
from datetime import datetime, timedelta
from pathlib import Path

# Third-party
from sqlalchemy import desc
from loguru import logger

# Local application
from src.database.db_manager import DatabaseManager
from src.database.models import Stock, PriceData, FundamentalData, Opportunity, OpportunityDetailed
from src.data_collector.yahoo_collector import YahooFinanceCollector
from src.analysis.warren_analyzer import WarrenAnalyzer
from src.config.ftse_mib_stocks import get_all_tickers as get_ftse_mib_tickers
from src.config.cac40_stocks import get_cac40_tickers
from src.config.dax40_stocks import get_dax40_tickers
from src.config.usa_djia_stocks import get_dow30_tickers

# Market configuration: market code -> index metadata and ticker-list provider.
MARKET_CONFIG = {
    'IT': {
        'name': 'FTSE MIB',
        'full_name': 'Borsa Italiana',
        'tickers_func': get_ftse_mib_tickers,
        'flag': '🇮🇹'
    },
    'FR': {
        'name': 'CAC 40',
        'full_name': 'Euronext Paris',
        'tickers_func': get_cac40_tickers,
        'flag': '🇫🇷'
    },
    'DE': {
        'name': 'DAX 40',
        'full_name': 'Xetra',
        'tickers_func': get_dax40_tickers,
        'flag': '🇩🇪'
    },
    'USA': {
        'name': 'Dow Jones 30',
        'full_name': 'NYSE/NASDAQ',
        'tickers_func': get_dow30_tickers,
        'flag': '🇺🇸'
    }
}


def validate_fundamental_quality(fundamentals, sector: str = None, avg_fcf_yield_3y=None):
    """
    Validate quality and plausibility ranges of fundamental data.
    Blocks analysis on corrupted or anomalous Yahoo data.
    Out-of-range values produce issues; missing fields are handled separately
    (see completeness_warnings).

    Args:
        fundamentals: FundamentalData object
        sector: optional sector name; used to relax the FCF-yield floor for
            cyclical auto/industrial sectors
        avg_fcf_yield_3y: optional 3-year average FCF yield, used instead of
            the latest FCF yield for cyclical sectors

    Returns:
        tuple: (is_valid: bool, issues: List[str])
    """
    issues = []

    # PE Ratio validation
    if fundamentals.pe_ratio is not None:
        if fundamentals.pe_ratio < 0:
            issues.append(f"PE ratio negativo: {fundamentals.pe_ratio:.2f}")
        elif fundamentals.pe_ratio > 100:
            issues.append(f"PE ratio troppo alto: {fundamentals.pe_ratio:.2f} (max 100)")

    # PB Ratio validation
    if fundamentals.pb_ratio is not None:
        if fundamentals.pb_ratio < 0:
            issues.append(f"PB ratio negativo: {fundamentals.pb_ratio:.2f}")
        elif fundamentals.pb_ratio > 20:
            issues.append(f"PB ratio anomalo: {fundamentals.pb_ratio:.2f} (max 20)")

    # ROE validation (stored as decimal, e.g. 0.15 = 15%)
    if fundamentals.roe is not None:
        if fundamentals.roe < -0.5:
            issues.append(f"ROE troppo negativo: {fundamentals.roe:.1%} (min -50%)")
        elif fundamentals.roe > 1.0:
            issues.append(f"ROE irrealistico: {fundamentals.roe:.1%} (max 100%)")

    # Debt/Equity validation
    if fundamentals.debt_to_equity is not None and fundamentals.debt_to_equity < 0:
        issues.append(f"Debt/Equity negativo: {fundamentals.debt_to_equity:.2f}")

    # Dividend Yield validation (now fixed in yahoo_collector.py to convert % to decimal)
    if fundamentals.dividend_yield is not None:
        if fundamentals.dividend_yield < 0:
            issues.append(f"Dividend yield negativo: {fundamentals.dividend_yield:.2%}")
        elif fundamentals.dividend_yield > 0.20:
            issues.append(f"Dividend yield sospetto: {fundamentals.dividend_yield:.2%} (max 20%)")

    # === SCHEMA V2 VALIDATIONS ===

    # FCF Yield validation (only if we have both FCF and market cap)
    if fundamentals.free_cashflow is not None and fundamentals.market_cap is not None:
        if fundamentals.market_cap > 0:
            fcf_yield = fundamentals.free_cashflow / fundamentals.market_cap
            # Cyclical auto/industrial sectors get a more lenient FCF floor.
            is_auto_industrial = False
            sector_l = sector.lower() if sector else ''
            if sector_l:
                for kw in ['auto', 'automotive', 'industrial', 'machinery', 'capital goods']:
                    if kw in sector_l:
                        is_auto_industrial = True
                        break
            # If we have a 3y average and the sector is cyclical, evaluate that instead
            fcf_yield_to_check = fcf_yield
            if is_auto_industrial and avg_fcf_yield_3y is not None:
                fcf_yield_to_check = avg_fcf_yield_3y
            # FCF yield should typically be between -20% and +50% (or -5% 3y average for cyclicals)
            min_threshold = -0.05 if (is_auto_industrial and avg_fcf_yield_3y is not None) else -0.20
            if fcf_yield_to_check < min_threshold:
                issues.append(f"FCF Yield troppo negativo: {fcf_yield_to_check:.1%} (min {min_threshold:.0%})")
            elif fcf_yield_to_check > 0.50:
                issues.append(f"FCF Yield anomalo: {fcf_yield_to_check:.1%} (max 50%)")

    # Net Debt/EBITDA validation
    if fundamentals.net_debt is not None and fundamentals.ebitda is not None:
        if fundamentals.ebitda > 0:
            net_debt_ebitda = fundamentals.net_debt / fundamentals.ebitda
            # Most companies should have Net Debt/EBITDA between -5 and 10
            if net_debt_ebitda < -5:
                issues.append(f"Net Debt/EBITDA anomalo: {net_debt_ebitda:.1f}x (min -5)")
            elif net_debt_ebitda > 10:
                issues.append(f"Net Debt/EBITDA eccessivo: {net_debt_ebitda:.1f}x (max 10)")

    # Margin validation (gross and operating)
    if fundamentals.gross_margin is not None:
        if fundamentals.gross_margin < -0.5:
            issues.append(f"Gross margin negativo: {fundamentals.gross_margin:.1%}")
    if fundamentals.operating_margin is not None:
        if fundamentals.operating_margin < -1.0:
            issues.append(f"Operating margin troppo negativo: {fundamentals.operating_margin:.1%} (min -100%)")

    return (len(issues) == 0, issues)


def completeness_warnings(fundamentals, sector: str):
    """
    Generate warnings when key fields for the v2 calculation are missing.
    Does not block the analysis, only informs the user.

    Args:
        fundamentals: FundamentalData object
        sector: sector name string ('' allowed)

    Returns:
        list[str]: human-readable warnings (Italian, user-facing)
    """
    warnings = []
    # Schema version check: v1 rows never carry the extended fields, so bail early.
    if getattr(fundamentals, "schema_version", 1) < 2:
        warnings.append("Schema v1: FCF/EV/EBITDA non disponibili")
        return warnings

    # NOTE(review): substring matching — a sector like 'Financial Services'
    # matches neither 'Bank' nor 'Insurance'; confirm intended coverage.
    is_financial = 'Bank' in sector or 'Insurance' in sector
    is_utility = 'Utilities' in sector or 'Energy' in sector

    # For non-special sectors (non financial/utility): FCF, EV and EBITDA should be present
    if not (is_financial or is_utility):
        if fundamentals.free_cashflow is None:
            warnings.append("FCF mancante")
        if fundamentals.enterprise_value is None:
            warnings.append("Enterprise Value mancante")
        if fundamentals.ebitda is None:
            warnings.append("EBITDA mancante")
        # Debt/Cash are needed for net debt
        # NOTE(review): original formatting lost — confirm this check belongs
        # inside the non-financial/non-utility guard.
        if fundamentals.total_debt is None or fundamentals.total_cash is None:
            warnings.append("Debito/Cassa mancanti")

    return warnings


def get_stock_data(db, ticker):
    """
    Fetch stock data from the database, with data-quality validation.

    Args:
        db: DatabaseManager providing get_session()
        ticker: ticker symbol to look up

    Returns:
        tuple: (data_dict, warnings_list) or (None, [error]) if failed
    """
    warnings = []
    with db.get_session() as session:
        stock = session.query(Stock).filter_by(ticker=ticker).first()
        if not stock:
            return None, ["Stock not found in database"]

        latest_price = session.query(PriceData).filter_by(
            stock_id=stock.id
        ).order_by(PriceData.date.desc()).first()
        fundamentals = session.query(FundamentalData).filter_by(
            stock_id=stock.id
        ).order_by(FundamentalData.date.desc()).first()

        if not latest_price:
            return None, ["No price data available"]

        # Validate data freshness (price should be < 3 days old)
        days_old = (datetime.now() - latest_price.date).days
        if days_old > 2:
            warnings.append(f"Stale price data ({days_old} days old)")

        # Validate price
        if latest_price.close <= 0:
            return None, ["Invalid price (<=0)"]

        # Check for missing fundamentals - CRITICAL for Warren analysis
        if not fundamentals:
            return None, ["No fundamental data available - cannot perform value analysis"]

        # Compute average FCF yield (last 3 records) for cyclical/industrial sectors
        avg_fcf_yield_3y = None
        fcf_yields = []
        # Prefer historical cashflow statements (yfinance_cashflow) to get true fiscal-year FCF
        fundamentals_history = session.query(FundamentalData)\
            .filter_by(stock_id=stock.id, data_source="yfinance_cashflow")\
            .order_by(FundamentalData.date.desc())\
            .limit(3)\
            .all()
        # Fallback: use generic fundamentals if no cashflow rows present
        if not fundamentals_history:
            fundamentals_history = session.query(FundamentalData)\
                .filter_by(stock_id=stock.id)\
                .order_by(FundamentalData.date.desc())\
                .limit(3)\
                .all()
        for f in fundamentals_history:
            if f.free_cashflow is not None and f.market_cap:
                fcf_yield = f.free_cashflow / f.market_cap
                fcf_yields.append(fcf_yield)
        if fcf_yields:
            avg_fcf_yield_3y = sum(fcf_yields) / len(fcf_yields)

        # Validate fundamental data quality, considering the 3y FCF average for cyclical sectors
        is_valid, validation_issues = validate_fundamental_quality(
            fundamentals, sector=stock.sector or '', avg_fcf_yield_3y=avg_fcf_yield_3y
        )
        if not is_valid:
            warning_msg = f"Invalid fundamental data: {'; '.join(validation_issues)}"
            print(f" ⚠️ {warning_msg}")
            warnings.append(warning_msg)

        # Warn on missing advanced fields
        warnings.extend(completeness_warnings(fundamentals, stock.sector or ''))

        # Normalize debt/equity for net cash situations (Yahoo can report high D/E even with net cash)
        debt_to_equity_reported = fundamentals.debt_to_equity if fundamentals else None
        # NOTE(review): debt_to_equity_raw_yahoo is captured but never used below.
        debt_to_equity_raw_yahoo = fundamentals.debt_to_equity_raw if fundamentals else None
        debt_to_equity_source = fundamentals.debt_to_equity_source if fundamentals else None
        debt_to_equity = debt_to_equity_reported
        net_debt = fundamentals.net_debt if fundamentals else None

        # Log D/E data quality info (for monitoring)
        if debt_to_equity_source:
            logger.debug(f"{ticker}: D/E source = {debt_to_equity_source}, value = {debt_to_equity}")

        # Net cash override (Layer 4 of our defense-in-depth strategy)
        if debt_to_equity is not None and net_debt is not None and net_debt <= 0 and debt_to_equity > 1.0:
            logger.info(
                f"{ticker}: Net cash position detected (net_debt={net_debt:.0f}), "
                f"overriding D/E from {debt_to_equity:.4f} to 0.0"
            )
            debt_to_equity = 0.0  # use this for scoring, but keep the reported figure for the text

        # Build data dict (Schema v2 with extended fundamentals)
        data = {
            'ticker': ticker,
            'name': stock.name,
            'sector': stock.sector or 'N/A',
            'price': latest_price.close,
            'price_date': latest_price.date,
            # Basic fundamentals
            'pe_ratio': fundamentals.pe_ratio if fundamentals else None,
            'pb_ratio': fundamentals.pb_ratio if fundamentals else None,
            'roe': fundamentals.roe if fundamentals else None,
            'debt_to_equity': debt_to_equity,
            'debt_to_equity_reported': debt_to_equity_reported,
            'dividend_yield': fundamentals.dividend_yield if fundamentals else None,
            'revenue_growth': fundamentals.revenue_growth if fundamentals else None,
            'earnings_growth': fundamentals.earnings_growth if fundamentals else None,
            'market_cap': fundamentals.market_cap if fundamentals else None,
            'beta': fundamentals.beta if fundamentals else None,
            # Schema v2 extended fundamentals
            'free_cashflow': fundamentals.free_cashflow if fundamentals else None,
            'enterprise_value': fundamentals.enterprise_value if fundamentals else None,
            'ebitda': fundamentals.ebitda if fundamentals else None,
            'gross_margin': fundamentals.gross_margin if fundamentals else None,
            'operating_margin': fundamentals.operating_margin if fundamentals else None,
            'net_debt': net_debt,
            'dividend_rate': fundamentals.dividend_rate if fundamentals else None,
            'shares_outstanding': fundamentals.shares_outstanding if fundamentals else None,
            'total_debt': fundamentals.total_debt if fundamentals else None,
            'total_cash': fundamentals.total_cash if fundamentals else None,
            'avg_fcf_yield_3y': avg_fcf_yield_3y,
        }
        return data, warnings


def scan_market(market_code: str, db: DatabaseManager, collector: YahooFinanceCollector, analyzer: WarrenAnalyzer):
    """
    Scan a specific market and return results.
    Args:
        market_code: Market identifier (IT, FR, DE)
        db: Database manager instance
        collector: Yahoo collector instance
        analyzer: Warren analyzer instance

    Returns:
        dict: Results including analyzed stocks, quality stats, etc.
    """
    market_info = MARKET_CONFIG[market_code]
    tickers = market_info['tickers_func']()

    print("=" * 70)
    print(f"{market_info['flag']} WARREN AI - SCAN {market_info['name']}")
    print("=" * 70)
    print()

    # === AUTO-ONBOARDING: Ensure all tickers exist in database ===
    print(f"🔄 Auto-onboarding {len(tickers)} tickers to {market_info['full_name']}...")
    onboarded_count = 0
    skipped_count = 0
    for ticker in tickers:
        result = collector.add_stock(ticker, market=market_info['full_name'])
        if result:
            # Check if it was newly added or already existed (log message indicates this)
            onboarded_count += 1
        else:
            skipped_count += 1
    print(f" ✅ {onboarded_count} tickers ready | ⚠️ {skipped_count} skipped")
    print()

    # === CHECK AND DOWNLOAD MISSING/STALE FUNDAMENTALS ===
    print(f"🔍 Checking fundamentals (refresh if > 7 days old)...")
    missing_fundamentals = []
    refresh_threshold = datetime.now() - timedelta(days=7)
    with db.get_session() as session:
        for ticker in tickers:
            stock = session.query(Stock).filter_by(ticker=ticker).first()
            if stock:
                # Check if fundamentals exist and are recent (< 7 days old)
                latest_fundamental = session.query(FundamentalData).filter_by(
                    stock_id=stock.id
                ).order_by(desc(FundamentalData.date)).first()
                # Refresh if missing OR older than 7 days
                if not latest_fundamental or latest_fundamental.date < refresh_threshold:
                    missing_fundamentals.append(ticker)

    if missing_fundamentals:
        print(f"📥 Downloading/refreshing fundamentals for {len(missing_fundamentals)} tickers...")
        success_count = 0
        fail_count = 0
        for i, ticker in enumerate(missing_fundamentals, 1):
            print(f" [{i}/{len(missing_fundamentals)}] {ticker}...", end=" ", flush=True)
            if collector.collect_fundamental_data(ticker):
                print("✅")
                success_count += 1
            else:
                print("❌")
                fail_count += 1
        print(f" ✅ {success_count} downloaded | ❌ {fail_count} failed")
        print()
    else:
        print(f" ✅ All tickers have fundamentals")
        print()

    print("📊 Step 1: Aggiornamento prezzi...")
    print(f" Aggiornamento {len(tickers)} titoli {market_info['name']}...", flush=True)
    # update_daily_prices() takes a list of tickers and returns dict of results
    update_results = collector.update_daily_prices(tickers)
    # Show summary
    success_count = sum(1 for success in update_results.values() if success)
    failed_count = len(update_results) - success_count
    if failed_count > 0:
        print(f" ✅ {success_count} aggiornati | ⚠️ {failed_count} errori")
        # Show which ones failed
        failed_tickers = [ticker for ticker, success in update_results.items() if not success]
        if failed_tickers:
            print(f" Errori su: {', '.join(failed_tickers)}")
    else:
        print(f" ✅ Tutti {success_count} titoli aggiornati con successo")
    print()
    print()
    print("🤖 Step 2: Analisi Warren AI...")
    print()

    # Query last week's history for upgrade comparison
    last_week = datetime.now() - timedelta(days=7)
    last_week_recs = {}
    with db.get_session() as session:
        recent_opps = session.query(Opportunity).filter(
            Opportunity.found_date >= last_week
        ).all()
        for opp in recent_opps:
            last_week_recs[opp.ticker] = opp.recommendation

    results = []
    upgraded_stocks = []
    # Enhanced data quality tracking
    quality_stats = {
        'total': len(tickers),
        'analyzed': 0,
        'critical': 0,
        'insufficient': 0,
        'stale_data': 0,
        'warnings': {},              # ticker -> list of warnings
        'stale_tickers': {},         # ticker -> days old
        'critical_details': [],      # List of critical issues
        'insufficient_details': [],  # List of data insufficient tickers
        'critical_failures': [],     # Track critical failures for alert
    }

    for i, ticker in enumerate(tickers, 1):
        print(f"[{i}/{len(tickers)}] Analizzando {ticker}...", end=' ', flush=True)
        try:
            # Get data with validation
            stock_data, warnings = get_stock_data(db, ticker)
            # Track warnings and categorize stale data
            if warnings:
                quality_stats['warnings'][ticker] = warnings
                # Extract days old from stale data warning
                for warning in warnings:
                    if 'Stale' in warning:
                        quality_stats['stale_data'] += 1
                        # Extract number of days: "Stale price data (8 days old)"
                        match = re.search(r'\((\d+) days old\)', warning)
                        if match:
                            days_old = int(match.group(1))
                            quality_stats['stale_tickers'][ticker] = days_old
                            # Critical if > 7 days old
                            if days_old > 7:
                                quality_stats['critical_failures'].append(
                                    f"{ticker}: Data {days_old} days old (>1 week)"
                                )

            # Analyze with Warren Analyzer (deterministic, instant)
            result = analyzer.analyze(stock_data)
            quality_stats['analyzed'] += 1

            # Track critical/data insufficient classifications
            if result['valutazione'] == analyzer.RATING_CRITICAL_DANGER:
                quality_stats['critical'] += 1
                # Build detail with exact metric values
                fcf = stock_data.get('free_cashflow')
                mcap = stock_data.get('market_cap')
                nd = stock_data.get('net_debt')
                ebitda = stock_data.get('ebitda')
                details = []
                if fcf is not None and mcap:
                    fcf_yield = fcf / mcap
                    if fcf_yield < analyzer.HARD_FAIL_FCF_YIELD_THRESHOLD:
                        details.append(f"FCF Yield: {fcf_yield:.1%} < {analyzer.HARD_FAIL_FCF_YIELD_THRESHOLD:.1%}")
                if nd is not None and ebitda:
                    nd_ratio = nd / ebitda if ebitda != 0 else None
                    if nd_ratio is not None and nd_ratio > analyzer.HARD_FAIL_NET_DEBT_EBITDA_THRESHOLD:
                        details.append(f"Net Debt/EBITDA: {nd_ratio:.1f}x > {analyzer.HARD_FAIL_NET_DEBT_EBITDA_THRESHOLD:.1f}x")
                if not details:
                    details.append(result['ragionamento'])
                quality_stats['critical_details'].append(f"{ticker}: Hard Fail ({'; '.join(details)})")
            elif result['valutazione'] == analyzer.RATING_DATA_INSUFFICIENT:
                quality_stats['insufficient'] += 1
                missing = []
                if stock_data.get('pe_ratio') is None:
                    missing.append("P/E")
                if stock_data.get('pb_ratio') is None:
                    missing.append("P/B")
                if stock_data.get('roe') is None:
                    missing.append("ROE")
                if stock_data.get('market_cap') is None:
                    missing.append("Market Cap")
                # Special-cased ticker: financial/hybrid sector where FCF/debt
                # hard-fails are not meaningful, downgraded to data-insufficient.
                if ticker == 'PST.MI':
                    msg = "PST.MI: Criticità FCF/Debito non standard per settore finanziario/ibrido. Classificazione declassata a Dati Insufficienti."
                else:
                    if missing:
                        msg = f"{ticker}: Dati insufficienti - mancano {', '.join(missing)}."
                    else:
                        msg = f"{ticker}: Dati insufficienti per assenza di metriche avanzate non indispensabili al settore."
                quality_stats['insufficient_details'].append(msg)

            result['sector'] = stock_data.get('sector', 'N/A')
            result['price'] = stock_data.get('price', 0)
            result['fair_value'] = result.get('fair_value', 0)

            # Check for upgrade vs last week's recommendation
            last_rec = last_week_recs.get(ticker)
            upgrade_map = {'AVOID': 0, 'HOLD': 1, 'BUY': 2, 'STRONG BUY': 3}
            current_level = upgrade_map.get(result['valutazione'], 0)
            last_level = upgrade_map.get(last_rec, 0) if last_rec else -1
            if last_level >= 0 and current_level > last_level:
                result['upgraded'] = True
                result['upgrade_from'] = last_rec
                upgraded_stocks.append(result)
            else:
                result['upgraded'] = False
            results.append(result)

            # Save opportunity to database (if score >= 60)
            if result['score'] >= 60:
                with db.get_session() as session:
                    stock_obj = session.query(Stock).filter_by(ticker=ticker).first()
                    # Check for existing opportunity today (upsert logic)
                    today = datetime.now().date()
                    today_start = datetime.combine(today, datetime.min.time())
                    today_end = today_start + timedelta(days=1)
                    existing = session.query(Opportunity).filter(
                        Opportunity.ticker == ticker,
                        Opportunity.found_date >= today_start,
                        Opportunity.found_date < today_end,
                        Opportunity.source == 'weekly_scan'
                    ).first()
                    if existing:
                        # Update existing opportunity
                        existing.stock_id = stock_obj.id if stock_obj else None
                        existing.company_name = result['name']
                        existing.sector = stock_data.get('sector', 'N/A')
                        existing.score = result['score']
                        existing.recommendation = result['valutazione']
                        existing.fair_value = result['fair_value']
                        existing.current_price = result['current_price']
                        existing.margin_of_safety = result['margin_of_safety']
                        existing.reasoning = result['ragionamento']
                        existing.updated_at = datetime.now()
                    else:
                        # Insert new opportunity
                        opportunity = Opportunity(
                            stock_id=stock_obj.id if stock_obj else None,
                            ticker=ticker,
                            company_name=result['name'],
                            sector=stock_data.get('sector', 'N/A'),
                            score=result['score'],
                            recommendation=result['valutazione'],
                            fair_value=result['fair_value'],
                            current_price=result['current_price'],
                            margin_of_safety=result['margin_of_safety'],
                            reasoning=result['ragionamento'],
                            found_date=datetime.now(),
                            source='weekly_scan',
                            status='new'
                        )
                        session.add(opportunity)

                    # === SCHEMA v4.1: SAVE DETAILED TRACKING DATA ===
                    # Save to opportunity_detailed table (upsert logic)
                    existing_detailed = session.query(OpportunityDetailed).filter(
                        OpportunityDetailed.stock_id == (stock_obj.id if stock_obj else None),
                        OpportunityDetailed.scan_date == today
                    ).first()

                    # Extract tracking data from result
                    score_breakdown = result.get('score_breakdown', {})
                    score_parameters = result.get('score_parameters', {})
                    fv_methods = result.get('fair_value_methods', {})
                    fv_method_weights = result.get('fair_value_method_weights', {})
                    fv_adjustments = result.get('fair_value_adjustments', {})
                    fv_parameters = result.get('fair_value_parameters', {})

                    # Prepare opportunity_detailed data
                    detailed_data = {
                        'stock_id': stock_obj.id if stock_obj else None,
                        'scan_date': today,
                        'ticker': ticker,
                        # Final output
                        'final_score': result['score'],
                        'recommendation': result['valutazione'],
                        'fair_value': result['fair_value'],
                        'margin_of_safety': result['margin_of_safety'],
                        # Scoring breakdown - totals
                        'raw_score': score_breakdown.get('valuation', {}).get('total', 0) +
                                     score_breakdown.get('quality', {}).get('total', 0) +
                                     score_breakdown.get('growth', {}).get('total', 0) +
                                     score_breakdown.get('bonuses', {}).get('total', 0) -
                                     score_breakdown.get('penalties', {}).get('total', 0),
                        'valuation_score': score_breakdown.get('valuation', {}).get('total', 0),
                        'quality_score': score_breakdown.get('quality', {}).get('total', 0),
                        'growth_score': score_breakdown.get('growth', {}).get('total', 0),
                        'bonus_total': score_breakdown.get('bonuses', {}).get('total', 0),
                        'penalty_total': score_breakdown.get('penalties', {}).get('total', 0),
                        # Valuation components
                        'pe_score': score_breakdown.get('valuation', {}).get('pe', 0),
                        'pb_score': score_breakdown.get('valuation', {}).get('pb', 0),
                        'dividend_score': score_breakdown.get('valuation', {}).get('dividend', 0),
                        # Quality components
                        'roe_score': score_breakdown.get('quality', {}).get('roe', 0),
                        'debt_score': score_breakdown.get('quality', {}).get('debt', 0),
                        # Growth components
                        'revenue_growth_score': score_breakdown.get('growth', {}).get('revenue', 0),
                        'earnings_growth_score': score_breakdown.get('growth', {}).get('earnings', 0),
                        # Bonus components
                        'margin_bonus': score_breakdown.get('bonuses', {}).get('margins', 0),
                        'debt_coverage_bonus': score_breakdown.get('bonuses', {}).get('debt_coverage', 0),
                        'fcf_payout_bonus': score_breakdown.get('bonuses', {}).get('fcf_payout', 0),
                        'peg_bonus': score_breakdown.get('bonuses', {}).get('peg', 0),
                        'ev_ebitda_bonus': score_breakdown.get('bonuses', {}).get('ev_ebitda', 0),
                        # Penalty components
                        'roe_negative_penalty': score_breakdown.get('penalties', {}).get('roe_negative', 0),
                        'debt_excess_penalty': score_breakdown.get('penalties', {}).get('debt_excess', 0),
                        # Advanced quality metrics (from v4)
                        'roic_score': result.get('roic', 0) or 0,
                        'interest_coverage_score': result.get('interest_coverage', 0) or 0,
                        'piotroski_score': result.get('piotroski_fscore', 0) or 0,
                        # Fair value methods
                        'fair_value_pe': fv_methods.get('pe', 0),
                        'fair_value_pb': fv_methods.get('pb', 0),
                        'fair_value_ps': 0,  # Not implemented yet
                        'fair_value_fcf_yield': fv_methods.get('fcf_yield', 0),
                        'fair_value_ev_ebitda': fv_methods.get('ev_ebitda', 0),
                        'fair_value_dividend': fv_methods.get('dividend', 0),
                        # Fair value method weights
                        'weight_pe': fv_method_weights.get('pe', 0),
                        'weight_pb': fv_method_weights.get('pb', 0),
                        'weight_ps': 0,
                        'weight_fcf_yield': fv_method_weights.get('fcf_yield', 0),
                        'weight_ev_ebitda': fv_method_weights.get('ev_ebitda', 0),
                        'weight_dividend': fv_method_weights.get('dividend', 0),
                        # Fair value adjustments
                        'fv_base': fv_adjustments.get('base_fair_value', 0),
                        'fv_quality_premium': fv_adjustments.get('quality_premium', 0),
                        'fv_utility_bonus': fv_adjustments.get('utility_bonus', 0),
                        'fv_country_penalty': fv_adjustments.get('country_penalty', 0),
                        # Fundamental parameters (from stock_data)
                        'pe_ratio': stock_data.get('pe_ratio', 0) or 0,
                        'pb_ratio': stock_data.get('pb_ratio', 0) or 0,
                        'roe': stock_data.get('roe', 0) or 0,
                        'debt_to_equity': stock_data.get('debt_to_equity', 0) or 0,
                        'current_price': result['current_price'],
                        'revenue_growth': stock_data.get('revenue_growth', 0) or 0,
                        'earnings_growth': stock_data.get('earnings_growth', 0) or 0,
                        'dividend_yield': stock_data.get('dividend_yield', 0) or 0,
                        # Scoring parameters
                        'growth_rate_original': score_parameters.get('growth_rate_original', 0),
                        'growth_rate_used': score_parameters.get('growth_rate_used', 0),
                        'is_growth_capped': score_parameters.get('is_growth_capped', False),
                        'graham_multiplier': fv_parameters.get('graham_multiplier', 0),
                        # Sector flags
                        'is_financial': score_parameters.get('is_financial', False) or fv_parameters.get('is_financial', False),
                        'is_utility': score_parameters.get('is_utility', False) or fv_parameters.get('is_utility', False),
                        'is_luxury': fv_parameters.get('is_luxury', False),
                        'is_auto_industrial': score_parameters.get('is_auto_industrial', False),
                        # Metadata
                        'schema_version': '4.1'
                    }

                    if existing_detailed:
                        # Update existing record
                        for key, value in detailed_data.items():
                            if key not in ['stock_id', 'scan_date', 'ticker']:  # Don't update primary/unique keys
                                setattr(existing_detailed, key, value)
                        logger.info(f"{ticker}: Updated opportunity_detailed record for {today}")
                    else:
                        # Insert new record
                        opportunity_detailed = OpportunityDetailed(**detailed_data)
                        session.add(opportunity_detailed)
                        logger.info(f"{ticker}: Created new opportunity_detailed record for {today}")

            # Display
            score = result['score']
            valutazione = result['valutazione']
            if valutazione == 'BUY':
                print(f"🎯 BUY! Score: {score}/100")
            elif score >= 70:
                print(f"✅ Score: {score}/100 - {valutazione}")
            else:
                print(f"📊 Score: {score}/100 - {valutazione}")
        except Exception as e:
            # Best-effort per-ticker handling: a single failure must not abort the scan.
            print(f"❌ Errore: {str(e)[:50]}")

    return {
        'market_code': market_code,
        'market_info': market_info,
        'tickers': tickers,
        'results': results,
        'upgraded_stocks': upgraded_stocks,
        'quality_stats': quality_stats
    }


def _get_scoring_methodology():
    """
    Return complete scoring methodology with all thresholds, formulas, and constants.
    This enables AI to understand exactly how scores are calculated.
    """
    return {
        "schema_version": "4.0",
        "total_score_range": "0-100 points (raw max ~130, normalized)",
        "components": {
            "quality_score": {
                "max_points": 40,
                "description": "Financial health and competitive advantage",
                "metrics": {
                    "roe": {
                        "weight": "20 points",
                        "excellent_threshold": "≥15%",
                        "good_threshold": "≥10%",
                        "formula": "20 * (ROE / 0.15) capped at 20 pts"
                    },
                    "current_ratio": {
                        "weight": "10 points",
                        "excellent_threshold": "≥2.0",
                        "good_threshold": "≥1.5",
                        "poor_threshold": "<1.0"
                    },
                    "profit_margin": {
                        "weight": "10 points",
                        "excellent_threshold": "≥20%",
                        "good_threshold": "≥10%",
                        "formula": "10 * (margin / 0.20) capped at 10 pts"
                    }
                }
            },
            "growth_score": {
                "max_points": 20,
                "description": "Revenue and earnings growth potential",
                "metrics": {
                    "revenue_growth": {
                        "weight": "10 points",
                        "sector_aware_cap": {
                            "mature_sectors": "4% (Financial Services, Utilities, Basic Materials)",
                            "other_sectors": "5%"
                        },
                        "excellent_threshold": "≥sector_cap",
                        "good_threshold": "≥3%"
                    },
                    "earnings_growth": {
                        "weight": "10 points",
                        "excellent_threshold": "≥10%",
                        "good_threshold": "≥5%"
                    }
                }
            },
            "value_score": {
                "max_points": 20,
                "description": "Undervaluation vs intrinsic value",
                "calculation": "Based on margin of safety (discount to fair value)",
                "formula": "20 * (margin_of_safety / 0.30) capped at 20 pts",
                "thresholds": {
                    "strong_buy": "≥20% margin",
                    "buy": "≥15% margin",
                    "hold": "0-15% margin",
                    "avoid":
"<0% margin (overvalued)"
                }
            },
            "debt_penalty": {
                "max_penalty": -30,
                "description": "Conservative debt avoidance (Buffett principle)",
                "metric": "Net Debt / EBITDA ratio",
                "thresholds": {
                    "hard_fail": ">5x (CRITICAL - automatic low score)",
                    "high_penalty": "3-5x (-30 pts)",
                    "medium_penalty": "2-3x (-20 pts)",
                    "low_penalty": "1-2x (-10 pts)",
                    "no_penalty": "<1x (0 pts)"
                }
            },
            "forward_looking_bonuses": {
                "max_bonus": 20,
                "description": "Schema v3 enhancement - compensate growth stocks penalized by traditional P/E",
                "peg_ratio_bonus": {
                    "max_points": 5,
                    "criteria": {
                        "excellent": "PEG < 1.0 → +5 pts (undervalued relative to growth)",
                        "good": "PEG < 1.5 → +3 pts (reasonable valuation)"
                    }
                },
                "ev_ebitda_bonus": {
                    "max_points": 5,
                    "criteria": {
                        "excellent": "EV/EBITDA < 8 → +5 pts (strong FCF generation)",
                        "good": "EV/EBITDA < 12 → +3 pts (decent valuation)"
                    }
                },
                "fcf_yield_bonus": {
                    "max_points": 10,
                    "3y_avg_threshold": "≥8%",
                    "description": "Rewards consistent free cash flow generation"
                }
            },
            "advanced_quality_score": {
                "max_points": 20,
                "description": "Schema v4 - Professional-grade quality metrics",
                "roic": {
                    "max_points": 10,
                    "formula": "NOPAT / Invested Capital",
                    "nopat": "EBIT × (1 - Tax Rate)",
                    "invested_capital": "Total Assets - Current Liabilities - Cash",
                    "scoring": {
                        "excellent": "≥15% → 10 pts",
                        "good": "≥10% → 7 pts",
                        "acceptable": "≥5% → 4 pts",
                        "poor": "<5% → 0 pts"
                    },
                    "tax_fallback": {
                        "Italy": "24%",
                        "France": "25%",
                        "Germany": "30%",
                        "USA": "21%"
                    }
                },
                "interest_coverage": {
                    "max_points": 5,
                    "formula": "EBIT / Interest Expense",
                    "scoring": {
                        "excellent": "≥5.0x → 5 pts",
                        "good": "≥3.0x → 3 pts",
                        "acceptable": "≥1.5x → 1 pt",
                        "poor": "<1.5x → 0 pts",
                        "not_applicable": "None (Financial Services, Utilities) → 2.5 pts neutral"
                    },
                    "excluded_sectors": ["Financial Services", "Utilities"]
                },
                "piotroski_fscore": {
                    "max_points": 5,
                    "range": "0-9 points",
                    "scoring": {
                        "excellent": "8-9 → 5 pts",
                        "good": "6-7 → 3 pts",
                        "acceptable": "4-5 → 1 pt",
                        "poor": "0-3 → 0 pts"
                    },
                    "criteria_9": {
                        "profitability": [
                            "1. ROA > 0",
                            "2. Operating Cash Flow > 0",
                            "3. ΔROA > 0 (YoY improvement)",
                            "4. Accruals < 0 (OCF > Net Income)"
                        ],
                        "leverage": [
                            "5. ΔLong-term Debt ≤ 0",
                            "6. ΔCurrent Ratio > 0",
                            "7. No New Equity (shares not increased)"
                        ],
                        "efficiency": [
                            "8. ΔGross Margin > 0",
                            "9. ΔAsset Turnover > 0"
                        ]
                    }
                }
            }
        },
        "normalization": {
            "formula": "min(100, int((raw_score / 130) * 100))",
            "raw_score": "base_score (0-100) + advanced_quality (0-20)",
            "max_raw": "~130 points (base ~110 with bonuses + advanced 20)",
            "final_range": "0-100 points"
        },
        "fair_value_calculation": {
            "description": "Sector-aware intrinsic value estimation (Schema v3)",
            "method": "Weighted average of 4 valuation methods + country risk adjustment",
            "weights": {
                "pe_based": "35%",
                "pb_based": "25%",
                "ps_based": "20%",
                "dividend_based": "20%"
            },
            "sector_aware_graham_multiplier": {
                "description": "Graham formula: Fair Value = EPS × (8.5 + 2g) × sector_multiplier",
                "multipliers": {
                    "Financial Services": "15x (conservative for cyclical sector)",
                    "Consumer Cyclical": "30x (luxury/premium brands command higher multiples)",
                    "Other sectors": "22.5x (standard)"
                }
            },
            "country_risk_penalty": {
                "description": "Discount for sovereign risk and currency stability",
                "adjustments": {
                    "Italy": "-20% (higher debt/GDP, political instability)",
                    "France": "-10% (moderate fiscal concerns)",
                    "Germany": "-10% (conservative baseline)",
                    "USA": "0% (reserve currency, stable institutions)"
                }
            }
        },
        "rating_system": {
            "STRONG BUY": "Score ≥80 AND Margin ≥20%",
            "BUY": "Score ≥70 AND Margin ≥15%",
            "HOLD": "Score ≥60 OR Margin 0-15%",
            "AVOID": "Score <60 AND Margin <0%",
            "CRITICAL": "Hard fail conditions (FCF yield <-5% OR Net Debt/EBITDA >5x)",
            "DATA INSUFFICIENT": "Missing critical metrics for analysis"
        },
        "hard_fail_conditions": {
            "fcf_yield_threshold": "-5% (burning cash unsustainably)",
            "net_debt_ebitda_threshold": "5x (excessive leverage)"
        }
    }


def _calculate_summary_stats(results: list, analyzer:
WarrenAnalyzer):
    """
    Calculate aggregate statistics for the scanned market.

    Args:
        results: list of per-stock analyzer result dicts
        analyzer: WarrenAnalyzer instance (currently unused here; kept for
            signature compatibility with callers)

    Returns:
        dict: totals, rating distribution, average/extreme scores and margins
    """
    total_stocks = len(results)
    ratings_count = {
        'STRONG BUY': 0,
        'BUY': 0,
        'HOLD': 0,
        'AVOID': 0,
        'CRITICAL': 0,
        'DATA INSUFFICIENT': 0
    }
    scores = []
    margins = []
    for r in results:
        rating = r['valutazione']
        if rating in ratings_count:
            ratings_count[rating] += 1
        scores.append(r['score'])
        # Margin may legitimately be None (e.g. no fair value computed)
        if r['margin_of_safety'] is not None:
            margins.append(r['margin_of_safety'])
    avg_score = sum(scores) / len(scores) if scores else 0
    avg_margin = sum(margins) / len(margins) if margins else 0
    return {
        "total_stocks_analyzed": total_stocks,
        "rating_distribution": ratings_count,
        "average_score": round(avg_score, 1),
        "average_margin_of_safety": round(avg_margin, 1),
        "top_score": max(scores) if scores else 0,
        "bottom_score": min(scores) if scores else 0
    }


def _format_quality_stats(quality_stats: dict):
    """
    Format quality statistics for the JSON report.

    Args:
        quality_stats: the quality_stats dict built by scan_market()

    Returns:
        dict: JSON-serializable data-quality summary (stale tickers sorted
        by staleness, most stale first)
    """
    quality_pct = 100 - ((quality_stats['critical'] + quality_stats['insufficient']) / quality_stats['total'] * 100) if quality_stats['total'] > 0 else 0
    return {
        "overall_quality_percentage": round(quality_pct, 1),
        "total_tickers": quality_stats['total'],
        "successfully_analyzed": quality_stats['analyzed'],
        "critical_failures": quality_stats['critical'],
        "data_insufficient": quality_stats['insufficient'],
        "stale_data_count": quality_stats['stale_data'],
        "critical_details": quality_stats.get('critical_details', []),
        "insufficient_details": quality_stats.get('insufficient_details', []),
        "stale_tickers": [
            {"ticker": ticker, "days_old": days}
            for ticker, days in sorted(quality_stats.get('stale_tickers', {}).items(), key=lambda x: x[1], reverse=True)
        ]
    }


def generate_json_report(scan_data: dict, db: DatabaseManager, analyzer: WarrenAnalyzer):
    """
    Generate comprehensive JSON report for AI consumption.
    Contains ALL information that determines the score, including constants and methodology.
""" market_info = scan_data['market_info'] market_code = scan_data['market_code'] results = scan_data['results'] quality_stats = scan_data['quality_stats'] # Sort results by score (descending) results_sorted = sorted(results, key=lambda x: x['score'], reverse=True) # Create reports directory structure: reports/YYYY-MM-DD/json/ scan_date = datetime.now() reports_base = Path("reports") dated_folder = reports_base / scan_date.strftime("%Y-%m-%d") json_folder = dated_folder / "json" latest_folder = reports_base / "latest" / "json" # Ensure directories exist json_folder.mkdir(parents=True, exist_ok=True) latest_folder.mkdir(parents=True, exist_ok=True) # Prepare JSON structure json_report = { "ai_instructions": { "purpose": "This JSON report provides complete transparency into Warren AI's value investing analysis. It contains ALL data, formulas, thresholds, and calculations used to generate stock scores and recommendations.", "philosophy": "Warren AI follows Warren Buffett's conservative Deep Value investing principles: 'Rule #1: Don't lose money. Rule #2: Never forget Rule #1.' We prioritize capital preservation, strong fundamentals, sustainable competitive advantages (moats), and buying quality businesses at significant discounts to intrinsic value.", "how_to_use": [ "1. Review 'scoring_methodology' to understand the complete scoring algorithm", "2. Check 'summary_statistics' for market-wide trends and rating distribution", "3. Analyze individual stocks in 'stocks' array - each contains full breakdown", "4. Use 'data_quality_report' to assess reliability of the analysis", "5. Focus on STRONG BUY/BUY rated stocks with high margins of safety (≥15-20%)" ], "key_concepts": { "margin_of_safety": "Core Buffett principle - buy at significant discount to fair value to protect against errors in valuation or unexpected events. 
Minimum 15% for BUY, 20% for STRONG BUY.", "fair_value": "Intrinsic value estimate using sector-aware Graham formula + P/E, P/B, P/S, Dividend yield methods. Adjusted for country risk (IT: -20%, FR/DE: -10%, USA: 0%).", "quality_score": "Financial health metrics - ROE ≥15% (excellent), Current Ratio ≥2.0, Profit Margin ≥20%. Measures competitive advantage sustainability.", "debt_penalty": "Conservative debt avoidance - Net Debt/EBITDA >3x triggers harsh penalties. >5x is automatic CRITICAL rating.", "forward_bonuses": "Schema v3 enhancement - PEG Ratio and EV/EBITDA bonuses compensate growth stocks penalized by traditional P/E ratios." }, "interpretation_guide": { "STRONG BUY": "Score ≥80 + Margin ≥20%. Exceptional quality at deep discount. Highest conviction opportunities.", "BUY": "Score ≥70 + Margin ≥15%. Strong fundamentals with adequate safety margin. Recommended for value portfolios.", "HOLD": "Score ≥60 OR Margin 0-15%. Decent business but limited upside or fair valuation. Wait for better entry.", "AVOID": "Score <60 + Margin <0%. Weak fundamentals or overvalued. Not suitable for conservative value investing.", "CRITICAL": "Hard fail conditions met (FCF yield <-5% OR Net Debt/EBITDA >5x). Extreme risk - avoid completely.", "DATA INSUFFICIENT": "Missing critical metrics. Cannot perform reliable analysis. Wait for complete data." }, "conservative_bias": "Warren AI intentionally uses conservative assumptions (country risk penalties, sector-aware growth caps, harsh debt penalties) to prioritize capital preservation. This may cause it to miss some opportunities, but significantly reduces downside risk - aligned with Buffett's philosophy." 
}, "metadata": { "scan_date": scan_date.strftime("%Y-%m-%d"), "scan_timestamp": scan_date.isoformat(), "market_code": market_code, "market_name": market_info['name'], "market_full_name": market_info['full_name'], "country_flag": market_info['flag'], "schema_version": "3.0", "generator": "Warren AI - Deterministic Value Investing Scanner", "total_stocks_scanned": len(results) }, "scoring_methodology": _get_scoring_methodology(), "stocks": [], "summary_statistics": _calculate_summary_stats(results, analyzer), "data_quality_report": _format_quality_stats(quality_stats) } # Add detailed stock information for result in results_sorted: ticker = result['ticker'] # Get full stock data from database stock_data = None with db.get_session() as session: stock = session.query(Stock).filter_by(ticker=ticker).first() if stock: # Get latest fundamental data fundamental = session.query(FundamentalData).filter_by( stock_id=stock.id ).order_by(desc(FundamentalData.date)).first() # Get latest price price_data = session.query(PriceData).filter_by( stock_id=stock.id ).order_by(desc(PriceData.date)).first() if fundamental and price_data: stock_data = { "ticker": ticker, "name": result['name'], "sector": stock.sector or "N/A", "price": float(price_data.close) if price_data.close else None, "market_cap": float(fundamental.market_cap) if fundamental.market_cap else None, "pe_ratio": float(fundamental.pe_ratio) if fundamental.pe_ratio else None, "pb_ratio": float(fundamental.pb_ratio) if fundamental.pb_ratio else None, "ps_ratio": float(fundamental.ps_ratio) if fundamental.ps_ratio else None, "roe": float(fundamental.roe * 100) if fundamental.roe else None, # Convert to percentage "current_ratio": float(fundamental.current_ratio) if fundamental.current_ratio else None, "profit_margin": float(fundamental.profit_margin * 100) if fundamental.profit_margin else None, # Convert to percentage "revenue_growth": float(fundamental.revenue_growth * 100) if fundamental.revenue_growth else None, 
"earnings_growth": float(fundamental.earnings_growth * 100) if fundamental.earnings_growth else None, "dividend_yield": float(fundamental.dividend_yield * 100) if fundamental.dividend_yield else None, "net_debt": float(fundamental.net_debt) if fundamental.net_debt else None, "ebitda": float(fundamental.ebitda) if fundamental.ebitda else None, "free_cashflow": float(fundamental.free_cashflow) if fundamental.free_cashflow else None, "peg_ratio": float(fundamental.peg_ratio) if fundamental.peg_ratio else None, "ev_to_ebitda": float(fundamental.ev_to_ebitda) if fundamental.ev_to_ebitda else None, "last_updated": fundamental.date.strftime("%Y-%m-%d") if fundamental.date else None } stock_entry = { "ticker": ticker, "name": result['name'], "sector": result.get('sector', 'N/A'), "score": { "total": result['score'], "rating": result['valutazione'], # Add raw score before normalization "raw_score": ( result.get('score_breakdown', {}).get('valuation', {}).get('total', 0) + result.get('score_breakdown', {}).get('quality', {}).get('total', 0) + result.get('score_breakdown', {}).get('growth', {}).get('total', 0) + result.get('score_breakdown', {}).get('bonuses', {}).get('total', 0) - result.get('score_breakdown', {}).get('penalties', {}).get('total', 0) + result.get('score_breakdown', {}).get('advanced_quality', {}).get('total', 0) ), # Add complete breakdown (Schema v4.1 - AI-oriented transparency) "breakdown": { "valuation": { "total": result.get('score_breakdown', {}).get('valuation', {}).get('total', 0), "pe_score": result.get('score_breakdown', {}).get('valuation', {}).get('pe', 0), "pb_score": result.get('score_breakdown', {}).get('valuation', {}).get('pb', 0), "dividend_score": result.get('score_breakdown', {}).get('valuation', {}).get('dividend', 0) }, "quality": { "total": result.get('score_breakdown', {}).get('quality', {}).get('total', 0), "roe_score": result.get('score_breakdown', {}).get('quality', {}).get('roe', 0), "debt_score": result.get('score_breakdown', 
{}).get('quality', {}).get('debt', 0) }, "growth": { "total": result.get('score_breakdown', {}).get('growth', {}).get('total', 0), "revenue_score": result.get('score_breakdown', {}).get('growth', {}).get('revenue', 0), "earnings_score": result.get('score_breakdown', {}).get('growth', {}).get('earnings', 0) }, "bonuses": { "total": result.get('score_breakdown', {}).get('bonuses', {}).get('total', 0), "margin_bonus": result.get('score_breakdown', {}).get('bonuses', {}).get('margins', 0), "debt_coverage_bonus": result.get('score_breakdown', {}).get('bonuses', {}).get('debt_coverage', 0), "fcf_payout_bonus": result.get('score_breakdown', {}).get('bonuses', {}).get('fcf_payout', 0), "peg_bonus": result.get('score_breakdown', {}).get('bonuses', {}).get('peg', 0), "ev_ebitda_bonus": result.get('score_breakdown', {}).get('bonuses', {}).get('ev_ebitda', 0) }, "penalties": { "total": result.get('score_breakdown', {}).get('penalties', {}).get('total', 0), "roe_negative_penalty": result.get('score_breakdown', {}).get('penalties', {}).get('roe_negative', 0), "debt_excess_penalty": result.get('score_breakdown', {}).get('penalties', {}).get('debt_excess', 0) }, "advanced_quality": { "total": result.get('score_breakdown', {}).get('advanced_quality', {}).get('total', 0), "roic_score": result.get('score_breakdown', {}).get('advanced_quality', {}).get('roic', 0), "interest_coverage_score": result.get('score_breakdown', {}).get('advanced_quality', {}).get('interest_coverage', 0), "piotroski_score": result.get('score_breakdown', {}).get('advanced_quality', {}).get('piotroski', 0) } } }, "valuation": { "current_price": result['current_price'], "fair_value": result['fair_value'], "margin_of_safety": result['margin_of_safety'], "is_undervalued": result['margin_of_safety'] > 0, # Add fair value methods breakdown (Schema v4.1) "fair_value_methods": { "pe_based": result.get('fair_value_methods', {}).get('pe', 0), "pb_based": result.get('fair_value_methods', {}).get('pb', 0), "ps_based": 
result.get('fair_value_methods', {}).get('ps', 0), "fcf_yield_based": result.get('fair_value_methods', {}).get('fcf_yield', 0), "ev_ebitda_based": result.get('fair_value_methods', {}).get('ev_ebitda', 0), "dividend_based": result.get('fair_value_methods', {}).get('dividend', 0) }, # Add method weights "method_weights": { "pe_weight": result.get('fair_value_method_weights', {}).get('pe', 0), "pb_weight": result.get('fair_value_method_weights', {}).get('pb', 0), "ps_weight": result.get('fair_value_method_weights', {}).get('ps', 0), "fcf_yield_weight": result.get('fair_value_method_weights', {}).get('fcf_yield', 0), "ev_ebitda_weight": result.get('fair_value_method_weights', {}).get('ev_ebitda', 0), "dividend_weight": result.get('fair_value_method_weights', {}).get('dividend', 0) }, # Add adjustments applied "adjustments": { "base_fair_value": result.get('fair_value_adjustments', {}).get('base_fair_value', 0), "quality_premium": result.get('fair_value_adjustments', {}).get('quality_premium', 0), "utility_bonus": result.get('fair_value_adjustments', {}).get('utility_bonus', 0), "country_penalty": result.get('fair_value_adjustments', {}).get('country_penalty', 0) } }, # Add algorithm parameters used (Schema v4.1) "algorithm_parameters": { "scoring": { "growth_rate_original": result.get('score_parameters', {}).get('growth_rate_original', 0), "growth_rate_used": result.get('score_parameters', {}).get('growth_rate_used', 0), "is_growth_capped": result.get('score_parameters', {}).get('is_growth_capped', False) }, "valuation": { "graham_multiplier": result.get('fair_value_parameters', {}).get('graham_multiplier', 0), "is_mature_sector": result.get('fair_value_parameters', {}).get('is_mature_sector', False) }, "sector_flags": { "is_financial": ( result.get('score_parameters', {}).get('is_financial', False) or result.get('fair_value_parameters', {}).get('is_financial', False) ), "is_utility": ( result.get('score_parameters', {}).get('is_utility', False) or 
result.get('fair_value_parameters', {}).get('is_utility', False) ), "is_luxury": result.get('fair_value_parameters', {}).get('is_luxury', False), "is_auto_industrial": result.get('score_parameters', {}).get('is_auto_industrial', False) } }, "reasoning": result['ragionamento'] } # Add fundamental data if available if stock_data: stock_entry["fundamental_data"] = { "financial_health": { "roe_percent": stock_data.get('roe'), "current_ratio": stock_data.get('current_ratio'), "profit_margin_percent": stock_data.get('profit_margin') }, "growth_metrics": { "revenue_growth_percent": stock_data.get('revenue_growth'), "earnings_growth_percent": stock_data.get('earnings_growth') }, "valuation_ratios": { "pe_ratio": stock_data.get('pe_ratio'), "pb_ratio": stock_data.get('pb_ratio'), "ps_ratio": stock_data.get('ps_ratio'), "peg_ratio": stock_data.get('peg_ratio'), "ev_to_ebitda": stock_data.get('ev_to_ebitda') }, "debt_metrics": { "net_debt": stock_data.get('net_debt'), "ebitda": stock_data.get('ebitda'), "net_debt_ebitda_ratio": round(stock_data['net_debt'] / stock_data['ebitda'], 2) if (stock_data.get('net_debt') is not None and stock_data.get('ebitda') and stock_data['ebitda'] != 0) else None }, "cash_flow": { "free_cashflow": stock_data.get('free_cashflow'), "fcf_yield_percent": round((stock_data['free_cashflow'] / stock_data['market_cap']) * 100, 2) if (stock_data.get('free_cashflow') is not None and stock_data.get('market_cap') and stock_data['market_cap'] != 0) else None }, "market_data": { "market_cap": stock_data.get('market_cap'), "dividend_yield_percent": stock_data.get('dividend_yield'), "last_updated": stock_data.get('last_updated') }, "advanced_quality_metrics": { "roic_percent": result.get('roic'), "interest_coverage_ratio": result.get('interest_coverage'), "piotroski_fscore": result.get('piotroski_fscore'), "note": "Schema v4 - Professional-grade quality indicators" } } json_report["stocks"].append(stock_entry) # Save JSON files json_file_dated = json_folder / 
f"warren_scan_{market_code}_{scan_date.strftime('%Y-%m-%d')}.json" json_file_latest = latest_folder / f"warren_scan_{market_code}_latest.json" # Write dated version with open(json_file_dated, 'w', encoding='utf-8') as f: json.dump(json_report, f, indent=2, ensure_ascii=False) # Write latest version with open(json_file_latest, 'w', encoding='utf-8') as f: json.dump(json_report, f, indent=2, ensure_ascii=False) print() print(f"📊 JSON Report (AI-ready) salvato:") print(f" ✅ Storico: {json_file_dated}") print(f" ✅ Latest: {json_file_latest}") return json_file_dated def generate_html_report(scan_data: dict, db: DatabaseManager): """Generate HTML report for scan results.""" market_info = scan_data['market_info'] tickers = scan_data['tickers'] results = scan_data['results'] upgraded_stocks = scan_data['upgraded_stocks'] quality_stats = scan_data['quality_stats'] print() print("=" * 70) print("📋 RISULTATI FINALI") print("=" * 70) # Sort by score results_sorted = sorted(results, key=lambda x: x['score'], reverse=True) # Separate by recommendation strong_buy_recommendations = [r for r in results_sorted if r['valutazione'] == 'STRONG BUY'] buy_recommendations = [r for r in results_sorted if r['valutazione'] == 'BUY'] # Query storico BUY per badges buy_history = {} with db.get_session() as session: for ticker in tickers: buy_count = session.query(Opportunity).filter( Opportunity.ticker == ticker, Opportunity.recommendation == 'BUY' ).count() if buy_count > 0: # Get last BUY date and price last_buy = session.query(Opportunity).filter( Opportunity.ticker == ticker, Opportunity.recommendation == 'BUY' ).order_by(Opportunity.found_date.desc()).first() buy_history[ticker] = { 'count': buy_count, 'last_date': last_buy.found_date.strftime('%d/%m/%Y') if last_buy else '', 'last_price': last_buy.current_price if last_buy else 0 } # Generate HTML output with historical archiving now = datetime.now() timestamp = now.strftime("%Y%m%d_%H%M") date_folder = now.strftime("%Y-%m-%d") # Create 
directory structure: reports/YYYY-MM-DD/ reports_base = Path("reports") reports_date = reports_base / date_folder reports_latest = reports_base / "latest" reports_date.mkdir(parents=True, exist_ok=True) reports_latest.mkdir(parents=True, exist_ok=True) # HTML filename html_filename = f"warren_scan_{scan_data['market_code']}_{timestamp}.html" html_file_dated = reports_date / html_filename html_file_latest = reports_latest / f"warren_scan_{scan_data['market_code']}_latest.html" with open(html_file_dated, 'w', encoding='utf-8') as f: f.write(f"""
Mercato: {market_info['flag']} {market_info['name']} ({market_info['full_name']})
""") f.write(f"Data: {datetime.now().strftime('%d/%m/%Y %H:%M')}
\n") f.write(f"Azioni analizzate: {len(results)}/{len(tickers)}
\n") f.write(f"Motore: Analisi deterministica (no API cost)
\n") # Data Quality Section degradation = (quality_stats['critical'] + quality_stats['insufficient']) / quality_stats['total'] * 100 quality_pct = 100 - degradation quality_class = "success" if quality_pct >= 95 else ("warning" if quality_pct >= 90 else "error") quality_color = "#28a745" if quality_pct >= 95 else ("#ffc107" if quality_pct >= 90 else "#dc3545") f.write(f"""📊 Qualità Dati: {quality_pct:.1f}% (analizzate: {quality_stats['total']}/{quality_stats['total']})""") if quality_stats['stale_data'] > 0: f.write(f" | ⚠️ {quality_stats['stale_data']} con dati vecchi") if quality_stats['critical'] > 0: f.write(f" | ❌ {quality_stats['critical']} CRITICAL DANGER") if quality_stats['insufficient'] > 0: f.write(f" | ⚠️ {quality_stats['insufficient']} DATA INSUFFICIENT") f.write("
") # Show detailed warnings if any if quality_stats['warnings'] or quality_stats['critical_details'] or quality_stats['insufficient_details']: f.write("❌ CRITICAL DANGER:
⚠️ DATA INSUFFICIENT:
⚠️ Warning:
{len(upgraded_stocks)} azioni hanno migliorato la raccomandazione:
") f.write("| Ticker | Nome | Cambio | Score | Prezzo | Fair Value |
|---|---|---|---|---|---|
| {u['ticker']} | {u['name']} | {u['upgrade_from']} → {u['valutazione']} | {u['score']} | €{u['current_price']:.2f} | €{u['fair_value']:.2f} |
Score: {r['score']}/100
Prezzo: €{r['current_price']:.2f} → Fair Value: €{r['fair_value']:.2f}
Margin of Safety: {r['margin_of_safety']:.1f}%
Analisi: {r['ragionamento']}
Score: {r['score']}/100
Prezzo: €{r['current_price']:.2f} → Fair Value: €{r['fair_value']:.2f}
Margin of Safety: {r['margin_of_safety']:.1f}%
Analisi: {r['ragionamento']}
📭 Nessuna raccomandazione BUY questa settimana. Warren sta aspettando il pitch perfetto! ⚾
") # Top 10 Table f.write("| # | Ticker | Nome | Score | Valutazione | Prezzo | Fair Value | Margin |
|---|---|---|---|---|---|---|---|
| {i} | {r['ticker']} | {r['name']}{history_badge} | {r['score']} | {r['valutazione']} | €{r['current_price']:.2f} | €{r['fair_value']:.2f} | {r['margin_of_safety']:.1f}% |
| Ticker | Nome | Score | Valutazione | Margin | Ragionamento |
|---|---|---|---|---|---|
| {r['ticker']} | {r['name']} | {r['score']} | {r['valutazione']} | {r['margin_of_safety']:.1f}% | {r['ragionamento']} |