main.py
# Online Python compiler (interpreter) to run Python online.
# Write Python 3 code in this online editor and run it.

import re
import statistics
from collections import Counter, defaultdict

import requests
from bs4 import BeautifulSoup


# -------------------------
# 1. Romeo and Juliet - Top 10 Frequent Words
# -------------------------
def romeo_and_juliet_top_words():
    url = 'http://www.gutenberg.org/files/1112/1112.txt'
    text = requests.get(url).text.lower()
    words = re.findall(r'\b[a-z]+\b', text)
    return Counter(words).most_common(10)


# -------------------------
# 2. Cats API - Weight, Lifespan, Country/Breed Table
# -------------------------
def analyze_cats_api():
    url = 'https://api.thecatapi.com/v1/breeds'
    cats = requests.get(url).json()

    weights, lifespans = [], []
    country_breeds = defaultdict(list)

    for cat in cats:
        # Weight (metric): a "min - max" string in kg; use the midpoint
        try:
            w = list(map(float, cat['weight']['metric'].split(' - ')))
            weights.append(sum(w) / len(w))
        except (KeyError, ValueError):
            continue

        # Lifespan: also a "min - max" string, in years
        try:
            years = list(map(float, cat['life_span'].split(' - ')))
            lifespans.append(sum(years) / len(years))
        except (KeyError, ValueError):
            continue

        # Country and breed
        country = cat.get('origin', 'Unknown')
        breed = cat.get('name', 'Unknown')
        country_breeds[country].append(breed)

    # Stats helper
    def stats(arr):
        return {
            'min': min(arr),
            'max': max(arr),
            'mean': round(statistics.mean(arr), 2),
            'median': statistics.median(arr),
            'stdev': round(statistics.stdev(arr), 2)
        }

    return {
        'weight_stats': stats(weights),
        'lifespan_stats': stats(lifespans),
        'country_breeds': {k: len(v) for k, v in country_breeds.items()}
    }


# -------------------------
# 3. Countries API - Largest Countries, Spoken Languages
# -------------------------
def analyze_countries_api():
    url = 'https://restcountries.com/v3.1/all'
    countries = requests.get(url).json()

    # Largest countries by area
    largest = sorted(countries, key=lambda c: c.get('area', 0), reverse=True)[:10]
    largest_countries = [(c['name']['common'], c['area']) for c in largest]

    # Language stats: count how many countries list each language
    language_count = defaultdict(int)
    all_languages = set()
    for c in countries:
        langs = c.get('languages', {})
        for lang in langs.values():
            language_count[lang] += 1
            all_languages.add(lang)

    top_languages = sorted(language_count.items(), key=lambda x: x[1], reverse=True)[:10]
    total_languages = len(all_languages)

    return {
        'largest_countries': largest_countries,
        'top_languages': top_languages,
        'total_languages': total_languages
    }


# -------------------------
# 4. UCI Machine Learning Datasets
# -------------------------
def fetch_uci_datasets():
    url = 'https://archive.ics.uci.edu/ml/datasets.php'
    soup = BeautifulSoup(requests.get(url).text, 'html.parser')
    datasets = [a.text.strip() for a in soup.find_all('a')
                if a.get('href', '').startswith('datasets/')]
    return datasets[:20]  # return first 20 for brevity


# -------------------------
# Main Execution
# -------------------------
if __name__ == "__main__":
    # 1. Romeo and Juliet
    print("\n🔤 Top 10 Frequent Words in Romeo and Juliet:")
    for word, count in romeo_and_juliet_top_words():
        print(f"{word}: {count}")

    # 2. Cats API Analysis
    cat_data = analyze_cats_api()
    print("\n🐱 Cat Weight Stats (kg):", cat_data['weight_stats'])
    print("🐱 Cat Lifespan Stats (years):", cat_data['lifespan_stats'])
    print("\n📊 Country vs Number of Cat Breeds:")
    for country, count in sorted(cat_data['country_breeds'].items(), key=lambda x: -x[1]):
        print(f"{country}: {count}")

    # 3. Countries API
    country_data = analyze_countries_api()
    print("\n🌍 10 Largest Countries by Area:")
    for name, area in country_data['largest_countries']:
        print(f"{name}: {area} km²")
    print("\n🗣️ Top 10 Most Spoken Languages:")
    for lang, count in country_data['top_languages']:
        print(f"{lang}: {count} countries")
    print(f"\n🧮 Total Unique Languages: {country_data['total_languages']}")

    # 4. UCI Datasets
    print("\n📚 Sample UCI Datasets:")
    for name in fetch_uci_datasets():
        print("-", name)
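One caveat when running this in an online editor: every requests.get() call above is made without a timeout or status check, so a slow or failing endpoint can stall the run or raise deep inside a function. Below is a minimal hardening sketch, not part of the original assignment code; the helper names fetch_text and fetch_json and the 10-second timeout are assumptions.

# Optional hardening sketch (hypothetical helpers, not in the original script).
import requests

def fetch_text(url, timeout=10):
    """GET a URL and return the body as text, raising on HTTP errors."""
    response = requests.get(url, timeout=timeout)
    response.raise_for_status()  # turn 4xx/5xx responses into exceptions early
    return response.text

def fetch_json(url, timeout=10):
    """GET a URL and decode its JSON body, raising on HTTP errors."""
    response = requests.get(url, timeout=timeout)
    response.raise_for_status()
    return response.json()

# Example: inside analyze_cats_api(), the breeds call could become
# cats = fetch_json('https://api.thecatapi.com/v1/breeds')

With helpers like these, a dead endpoint fails fast with a clear exception instead of hanging the whole script.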