-
Notifications
You must be signed in to change notification settings - Fork 0
/
EmailScrapper.py
47 lines (35 loc) · 1.44 KB
/
EmailScrapper.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
# -*- coding: utf-8 -*-
"""
Python script to bulk scrape websites for email addresses
Author: Jaya Kumar
"""
import csv
import os
import re
import urllib.error
import urllib.request

import pandas as pd
# 1: Get the input CSV path from the user. The file must contain a
#    'website' column listing the URLs to scrape.
user_input = input("Enter the path of your file: ")
if os.path.exists(user_input):
    # 2. Read the list of websites.
    df = pd.read_csv(user_input)
    # 3. Open the output file ONCE for the whole run (the original
    #    wrote the header in 'w' mode and then re-opened the file in
    #    'a' mode on every loop iteration).
    with open('EmailID.csv', mode='w', newline='') as file:
        csv_writer = csv.writer(file, delimiter=',')
        csv_writer.writerow(['Website', 'EmailID'])
        # 4. Visit each website in turn.
        for site in list(df['website']):
            print(site)
            req = urllib.request.Request(site, headers={'User-Agent': "Magic Browser"})
            # 5. Scrape email addresses. A single dead/unreachable site
            #    must not abort the whole run, so trap network errors
            #    (URLError covers HTTPError too) and malformed URLs.
            try:
                with urllib.request.urlopen(req) as url:
                    # errors='replace' keeps non-UTF-8 pages from
                    # raising UnicodeDecodeError mid-run.
                    s = url.read().decode('utf-8', errors='replace')
            except (urllib.error.URLError, ValueError) as err:
                print(f"Skipping {site}: {err}")
                continue
            # {2,} instead of the original {2,4}: modern TLDs
            # (.email, .online, ...) can be longer than 4 characters.
            email = re.findall(r"[A-Za-z0-9._%+-]+@[A-Za-z0-9.-]+\.[A-Za-z]{2,}", s)
            print(email)
            # 6. Write one row per (site, email) pair found on the page.
            csv_writer.writerows([site, item] for item in email)
# If the input file doesn't exist, report it and do nothing else.
else:
    print("File not found, verify the location - ", str(user_input))