cl1p.net - The internet clipboard
Login/Sign Up
cl1p.net/123
cl1p.net/123
Login/Sign Up
This cl1p will be deleted in 27 days.
Copy
# =====================================================================
# 1. Automation Program Using the `schedule` Library
# ---------------------------------------------------------------------
# Standalone script: prints a break reminder every 5 seconds until the
# user interrupts with Ctrl+C.
# =====================================================================
import schedule
import time


def job():
    """Print a reminder to take a short break."""
    print("Reminder: Take a short break and stretch!")


schedule.every(5).seconds.do(job)

print("Scheduler started. Press Ctrl+C to stop.")
while True:
    schedule.run_pending()
    time.sleep(1)


# =====================================================================
# 2. Robotic Process Automation — scrape all book titles and prices
#    from books.toscrape.com (50 catalogue pages) into a CSV.
# =====================================================================
import requests
from bs4 import BeautifulSoup
import pandas as pd

all_books = []
# Loop-invariant: build the headers dict once, not on every page.
headers = {"User-Agent": "Mozilla/5.0"}

for page in range(1, 51):
    url = f"https://books.toscrape.com/catalogue/page-{page}.html"
    response = requests.get(url, headers=headers)
    if response.status_code != 200:
        print(f"⚠️ Failed to fetch page {page}. Status code: {response.status_code}")
        break

    soup = BeautifulSoup(response.text, "html.parser")
    books = soup.find_all("h3")
    prices = soup.find_all("p", class_="price_color")
    for book, price in zip(books, prices):
        all_books.append({
            "Title": book.a['title'],
            "Price": price.text
        })

df = pd.DataFrame(all_books)
df.to_csv("books_data.csv", index=False, encoding='utf-8')
print("Scraping complete!")
print(f"Total books collected: {len(df)}")
print("\nSample output:")
print(df.head())


# =====================================================================
# 3. Ex.No: 4 — Process Automation (Excel -> automated absence email)
# =====================================================================
import pandas as pd
import smtplib
from email.mime.text import MIMEText

# STEP 1: Read data from Excel (expected columns: Name, Email, Status)
data = pd.read_excel("/content/drive/MyDrive/employee.xlsx")

# STEP 2: Process data (filter rows where Status == 'Absent')
pending = data[data['Status'] == 'Absent']

# STEP 3: Send an automated email to each absent employee.
# SECURITY FIX: the original hard-coded a real Gmail address and app
# password in source. Never commit credentials — supply them via an
# environment variable or a secrets store.
sender_email = "your_email@gmail.com"
password = "your_app_password"  # Gmail app password, NOT the account password

# Setup SMTP (STARTTLS on port 587 upgrades the connection to TLS)
server = smtplib.SMTP("smtp.gmail.com", 587)
server.starttls()
server.login(sender_email, password)

# BUG FIX: the original iterated `Absent.iterrows()` — `Absent` was
# never defined; the filtered DataFrame is named `pending`.
for _, row in pending.iterrows():
    name = row['Name']
    recipient = row['Email']
    message = f"Hello {name},\n\nYou are Absent Today for Intelligent Automation Subject."
    # BUG FIX: the original wrote MIMEText(“ABSENT”) with smart quotes
    # (a SyntaxError) and discarded the composed `message`; send the
    # personalised body instead.
    msg = MIMEText(message)
    msg['Subject'] = "Absent Message"
    msg['From'] = sender_email
    msg['To'] = recipient
    server.sendmail(sender_email, recipient, msg.as_string())
    print(f"Email sent to {name}")

server.quit()
print("Automation completed!")


# =====================================================================
# 4. COGNITIVE AUTOMATION USING NLP: EMAIL CLASSIFICATION
# ---------------------------------------------------------------------
# Trains a TF-IDF + Multinomial Naive Bayes pipeline on a tiny labelled
# sample and classifies an unseen email.
# =====================================================================
import pandas as pd
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.naive_bayes import MultinomialNB
from sklearn.pipeline import make_pipeline
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report

# Sample dataset of emails (text + label)
data = {
    'email': [
        'I want a refund, the product is defective',
        'Your service is excellent, thank you!',
        'Where is my order? It’s late again!',
        'Please cancel my subscription immediately',
        'This is spam. Stop sending these emails!',
        'I need help with my account login issue',
        'Thanks for the quick response, great support',
        'You guys are scammers, I lost my money!',
        'My package was damaged during delivery',
        'Congratulations! You won a free iPhone!'
    ],
    'label': [
        'complaint', 'praise', 'complaint', 'support_request', 'spam',
        'support_request', 'praise', 'complaint', 'complaint', 'spam'
    ]
}

# Convert to DataFrame
df = pd.DataFrame(data)

# Split into training and test sets (fixed seed for reproducibility)
X_train, X_test, y_train, y_test = train_test_split(
    df['email'], df['label'], test_size=0.3, random_state=42
)

# Build a simple NLP pipeline: TF-IDF features -> Naive Bayes classifier
model = make_pipeline(TfidfVectorizer(), MultinomialNB())

# Train the model
model.fit(X_train, y_train)

# Predict on test set
y_pred = model.predict(X_test)

# Print evaluation
print("Classification Report:")
print(classification_report(y_test, y_pred))

# Test on a new email (cognitive task)
new_email = "Why did you charge me twice for the same item?"
predicted_label = model.predict([new_email])[0]
print(f"\nNew Email: \"{new_email}\"\nPredicted Category: {predicted_label}")


# =====================================================================
# 5. Ex.No: 5 — Web Scraping (Hacker News top headlines)
# =====================================================================
import requests
from bs4 import BeautifulSoup

url = "https://news.ycombinator.com/"
headers = {"User-Agent": "Mozilla/5.0"}  # Present as a browser request

response = requests.get(url, headers=headers)
if response.status_code == 200:
    soup = BeautifulSoup(response.text, "html.parser")
    title_spans = soup.find_all("span", class_="titleline")
    print("Top Headlines:")
    for i, span in enumerate(title_spans[:5], start=1):
        link = span.find("a")
        print(f"{i}. {link.text}")
else:
    print("Failed to fetch page. Status code:", response.status_code)


# =====================================================================
# EX.NO: 6 — Web Scraping 2 (books.toscrape.com category listing)
# =====================================================================
import requests
from bs4 import BeautifulSoup

url = "https://books.toscrape.com/catalogue/category/books_1/index.html"
headers = {"User-Agent": "Mozilla/5.0"}

response = requests.get(url, headers=headers)
if response.status_code == 200:
    soup = BeautifulSoup(response.text, "html.parser")
    books = soup.find_all("h3")
    prices = soup.find_all("p", class_="price_color")
    for i, (book, price) in enumerate(zip(books, prices), start=1):
        print(f"{i}. {book.a['title']} – {price.text}")
else:
    print(f"Failed to fetch page. Status code: {response.status_code}")


# =====================================================================
# 6.2 WEATHER DATA COLLECTION (wttr.in one-line format)
# =====================================================================
import requests


def get_weather(city):
    """Fetch and print a one-line weather summary for `city` via wttr.in."""
    url = f"https://wttr.in/{city}?format=3"
    try:
        response = requests.get(url)
        if response.status_code == 200:
            print(f"Current weather in {city}: {response.text}")
        else:
            print("Failed to get weather info")
    except Exception as e:
        # Best-effort script: report network errors instead of crashing.
        print("Error:", e)


if __name__ == "__main__":
    city = "Chennai"  # Change city here
    print(f"--- Weather Information for {city} ---")
    get_weather(city)


# =====================================================================
# 7. Automated data preprocessing (clean an Excel sheet, export CSV)
# =====================================================================
import pandas as pd

# BUG FIX: the original path ended in ".xlxs" — a typo for the Excel
# extension ".xlsx" that guarantees FileNotFoundError.
df = pd.read_excel("C:/Users/User/Desktop/bike.xlsx")
print("Original Data:")
print(df.head())

# Drop exact duplicate rows.
df = df.drop_duplicates()

# Normalise text columns: strip surrounding whitespace, then lowercase.
for col in df.select_dtypes(include=['object']).columns:
    df[col] = df[col].str.strip()
for col in df.select_dtypes(include=['object']).columns:
    df[col] = df[col].str.lower()

# Impute missing ages with the column mean.
df['age'] = df['age'].fillna(df['age'].mean())

# Parse dates; unparseable values become NaT and those rows are dropped.
df['date'] = pd.to_datetime(df['date'], errors='coerce')
df = df.dropna(subset=['date'])

print("\nCleaned Data:")
print(df.head())
df.to_csv("data_cleaned.csv", index=False)


# =====================================================================
# 8. Predictive analytics — predict purchase intent, email an offer
# =====================================================================
import pandas as pd
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split
import smtplib
from email.mime.text import MIMEText
from email.mime.multipart import MIMEMultipart

# Sample data
data = {
    'age': [22, 35, 58, 45, 25, 33, 52, 40, 60, 48],
    'browsing_time': [5, 10, 2, 8, 7, 12, 1, 4, 3, 9],
    'previous_purchases': [0, 3, 1, 5, 0, 2, 0, 1, 0, 4],
    'made_purchase': [0, 1, 0, 1, 0, 1, 1, 1, 1, 1]
}
df = pd.DataFrame(data)

# Features and target
X = df[['age', 'browsing_time', 'previous_purchases']]
y = df['made_purchase']

# Train/test split
X_train, X_test, y_train, y_test = train_test_split(
    X, y, test_size=0.3, random_state=42
)

# Train model
model = RandomForestClassifier(n_estimators=100, random_state=42)
model.fit(X_train, y_train)

# New customer data to predict
new_customer = {
    'age': 35,
    'browsing_time': 10,
    'previous_purchases': 3,
    'email': 'customer@example.com',
}
new_df = pd.DataFrame([new_customer])

# Predict
prediction = model.predict(new_df[['age', 'browsing_time', 'previous_purchases']])

if prediction[0] == 1:
    # SECURITY FIX: credentials were hard-coded (real app password in
    # source). Use placeholders; load real values from the environment.
    sender_email = "your_email@gmail.com"
    sender_password = "your_app_password"
    receiver_email = new_customer['email']

    message = MIMEMultipart()
    message["From"] = sender_email
    message["To"] = receiver_email
    message["Subject"] = "Special Offer Just for You!"

    body = f"""
Hi there,

We noticed you're interested in our products. Here's a special offer just for you!

Thanks for visiting us.

Best regards,
Your Company
"""
    message.attach(MIMEText(body, "plain"))

    # BUG FIX: the original's `finally: server.quit()` raised NameError
    # when smtplib.SMTP(...) itself failed, because `server` was never
    # bound. Initialise to None and guard the cleanup.
    server = None
    try:
        server = smtplib.SMTP("smtp.gmail.com", 587)
        server.starttls()
        server.login(sender_email, sender_password)
        server.sendmail(sender_email, receiver_email, message.as_string())
        print("Email sent successfully to", receiver_email)
    except Exception as e:
        print("Failed to send email:", e)
    finally:
        if server is not None:
            server.quit()
else:
    print("Customer unlikely to purchase — no email sent.")