Posts

Saving a Web dataset into a file, Pickling that file, then Unpickling it

import pickle
import requests

# getting the iris dataset from a web address
resp = requests.get("https://archive.ics.uci.edu/ml/machine-learning-databases/iris/iris.data")

# saving the web data into a text file
with open("iris.txt", "w") as f:
    f.write(resp.text)

# reading the created text file containing the web data
with open("iris.txt", "r") as f:
    dataset = f.read()

# splitting the web data in the text file
listof = dataset.split("\n")

# converting the list into a list of lists
lol = [[item] for item in listof]

# pickling the data
pickleed = open("irislists.pkl", "wb")
pickle.dump(file=pickleed, obj=lol)
pickleed.close()

# unpickling the data
unpickleed = open("irislists.pkl", "rb")
pythonobj = pickle.load(file=unpickleed)
print(pythonobj)
unpickleed.close()
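The list of lists built above wraps each raw CSV line in a single-element list. If the goal is one record per flower with its fields separated out, a minimal sketch of that variation (reusing the iris.txt written above; the filename irisrows.pkl is just an illustration):

import pickle

with open("iris.txt", "r") as f:
    # split each non-empty line on commas, e.g.
    # "5.1,3.5,1.4,0.2,Iris-setosa" -> ["5.1", "3.5", "1.4", "0.2", "Iris-setosa"]
    rows = [line.split(",") for line in f.read().split("\n") if line]

with open("irisrows.pkl", "wb") as f:
    pickle.dump(rows, f)

with open("irisrows.pkl", "rb") as f:
    print(pickle.load(f)[:3])  # first three parsed records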

Pickle Module in Python

import pickle

# pickling a python object
mylist = ["waqas", "sheeraz", {"sheeraz": "haroon", "sheer": "veer"}, "haroon", "farrukh"]
file_object = open("mylist.pkl", "wb")  # write as binary file
pickle.dump(file=file_object, obj=mylist)
file_object.close()

# unpickling into a python object
fileb = open("mylist.pkl", "rb")  # read binary file
python_obj = pickle.load(file=fileb)
print(python_obj)
fileb.close()
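The same round trip is often written with context managers, which close the files automatically even if an error occurs mid-write; pickle.dumps and pickle.loads do the identical conversion in memory without touching disk. A minimal sketch:

import pickle

mylist = ["waqas", "sheeraz", {"sheeraz": "haroon"}, "haroon", "farrukh"]

# with-blocks close the file automatically
with open("mylist.pkl", "wb") as f:
    pickle.dump(mylist, f)

with open("mylist.pkl", "rb") as f:
    print(pickle.load(f))

# dumps/loads round-trip the object through a bytes blob
blob = pickle.dumps(mylist)
print(pickle.loads(blob))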

News API Client in Python

from newsapi import NewsApiClient

news_api = NewsApiClient(api_key="YOURAPIKEY")  # get YOURAPIKEY from https://newsapi.org/ by making an account first

top_headlines = news_api.get_top_headlines(q="amazon")  # q represents keywords or phrases that the news must contain
print(top_headlines)

all_articles = news_api.get_everything(q="amazon")
print(all_articles)

sources = news_api.get_sources()
print(sources)
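Each client call returns a plain Python dict, with the matching articles under the "articles" key. A small loop that prints only the headline titles from the top_headlines response above:

# top_headlines is a dict like {"status": ..., "totalResults": ..., "articles": [...]}
for article in top_headlines["articles"]:
    print(article["title"])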

Get and Read top NEWS Headlines in Python

import json
import requests

def newsreader(string):
    from win32com.client import Dispatch
    speak_it = Dispatch("SAPI.SpVoice")
    speak_it.Speak(string)

response = requests.get("https://newsapi.org/v2/top-headlines?country=in&category=general&apiKey=YOURAPIKEY")  # get YOURAPIKEY from https://newsapi.org/ by making an account first
news = json.loads(response.text)
# print(news)

for i in range(5):
    breaking_news = news["articles"][i]["description"]
    print(breaking_news)
    if i == 0:
        newsreader("Welcome to Breaking news for this hour.")
        newsreader("The first breaking news for this hour is, ")
        newsreader(breaking_news)
    elif i == 4:
        newsreader("The last breaking news for this hour is, ")
        newsreader(breaking_news)
        newsreader("Thanks for listening. Please come again for the next round of updates.")
    else:
        newsreader("The next breaking news for this hour is, ")
        newsreader(breaking_news)
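NewsAPI can return articles whose description is null, which becomes None in Python and may make Speak fail; a hedged guard, falling back to the article title, could be added inside the loop above:

for i in range(5):
    breaking_news = news["articles"][i]["description"]
    if not breaking_news:  # "description" can be null in the API response
        breaking_news = news["articles"][i]["title"]  # fall back to the headline
    print(breaking_news)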

Text to Speech conversion in Python

from win32com.client import Dispatch

speak_it = Dispatch("SAPI.SpVoice")
string = input("Enter the text and we will speak it: ")
speak_it.Speak(string)
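SAPI.SpVoice is Windows-only. On other platforms, one alternative (an assumption, not part of the original post) is the third-party pyttsx3 package, which offers a similar speak-and-wait interface:

import pyttsx3  # assumes: pip install pyttsx3

engine = pyttsx3.init()           # picks the platform's speech driver
text = input("Enter the text and we will speak it: ")
engine.say(text)                  # queue the utterance
engine.runAndWait()               # block until speaking finishes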

JSON module in Python

import json

# converting a python object and writing it into a json file
data = {
    "name": "Satyam kumar",
    "place": "patna",
    "skills": ["Raspberry pi", "Machine Learning", "Web Development"],
    "email": "xyz@gmail.com",
    "projects": ["Python Data Mining", "Python Data Science"],
    "player": True,
}
with open("data2.json", "w") as f:
    json.dump(data, f)

# converting a python object into a json string
res = json.dumps(data)
print(res)

# loading the json file created above
with open("data2.json", "r") as f:
    print(json.load(f))

# a json string, after parsing, becomes a python dict
data = """{
    "name": "abdur",
    "nature": "space",
    "favterm": "infinity",
    "love": true
}"""
pasa = json.loads(data)
print(pasa)
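json.dumps also accepts formatting arguments; a short sketch that pretty-prints the same kind of dict with indentation and sorted keys:

import json

data = {"name": "Satyam kumar", "place": "patna", "player": True}

# indent and sort_keys make the output human-readable and stable
print(json.dumps(data, indent=4, sort_keys=True))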

Coroutines in Python

from urllib.request import urlopen
from bs4 import BeautifulSoup

def searcher():
    url = "https://en.wikipedia.org/wiki/Kabul#Toponymy_and_etymology"
    page = urlopen(url)
    html = page.read().decode("utf-8")
    soup = BeautifulSoup(html, "html.parser")
    article1 = soup.get_text()

    url = "https://en.wikipedia.org/wiki/Python_(programming_language)"
    page = urlopen(url)
    html = page.read().decode("utf-8")
    soup = BeautifulSoup(html, "html.parser")
    article2 = soup.get_text()

    url = "https://en.wikipedia.org/wiki/Amoeba_(operating_system)"
    page = urlopen(url)
    html = page.read().decode("utf-8")
    soup = BeautifulSoup(html, "html.parser")
    article3 = soup.get_text()

    article = article1 + article2 + article3
    while True:
        word = (yield)  # receives each word sent into the coroutine
        # the original snippet was cut off here; an assumed completion
        # of the search loop: report whether the word occurs in the articles
        if word in article:
            print(word, "is in the articles")
        else:
            print(word, "is not in the articles")
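A coroutine like searcher must be primed with next() before values can be sent into it; a minimal usage sketch:

search = searcher()    # creating the generator object runs no code yet
next(search)           # prime it: runs up to the first (yield), fetching the pages
search.send("Kabul")   # resumes the loop with word = "Kabul"
search.send("Guido")   # each send() checks another word
search.close()         # shut the coroutine down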