# job-scrapper/lib/scrap_jobs.py
#
# Work in progress: extract posting dates from "vor X Tagen" ("X days ago")
# strings, where date = today - X.
import requests
from bs4 import BeautifulSoup

from helpers import *  # project helpers: item, finder, arrayToClass, ...

DEBUG = True


def log(*s):
    if DEBUG:
        print(*s)  # unpack so arguments print space-separated, not as a tuple
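

# --- assumption, not in the original file: `helpers` is a project-local
# module whose source is not shown here. From the call sites below it
# presumably exposes at least:
#   item(tag, class_or_attrs, index, label="")  -> a selector description
#   finder(results, item, **flags)              -> list of extracted strings
#       flags seen in this file: ATTRS, LINK, BASEURL, DEFAULT, GETCHILDREN,
#       LOCATION_CLEANUP, CLEANDATE, SWAPDATE
#   arrayToClass(titles, companies, locations, dates, links, tag)
#       -> the job records returned by every scraper below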
def scrap_indeed_com(url, entry, session):
    log("[scrap_indeed_com] url:", url)
    # Session handling is currently hard-coded; the commented branch reused the
    # caller's session instead of opening a fresh one per request.
    # if session == 0:
    with requests.Session() as session:
        session.headers = {
            "User-Agent": "Mozilla/5.0 (X11; Linux x86_64; rv:127.0) Gecko/20100101 Firefox/127.0"
        }
        page = session.get(url)
        log(page)
    # else:
    #     page = session.get(url)
    #     log(page)
    soup = BeautifulSoup(page.content, "html.parser")
    # print(soup.prettify())
    results = soup.find_all("li", class_="css-5lfssm eu4oa1w0")  # top-level list element
    location = item("div", {"data-testid": "text-location"}, 0, "indeed location")
    ar_location = finder(results, location, ATTRS=1, LOCATION_CLEANUP=1)
    company = item("span", {"data-testid": "company-name"}, 0, "indeed company")
    ar_company = finder(results, company, ATTRS=1)
    title = item("a", "jcs-JobTitle", 0, "indeed title")
    ar_title = finder(results, title, GETCHILDREN="span")
    date = item("span", {"data-testid": "myJobsStateDate"}, 0, "indeed date")
    ar_date = finder(results, date, ATTRS=1)
    link = item("a", "jcs-JobTitle", 0, "link")
    ar_link = finder(results, link, LINK=1, BASEURL="https://ch.indeed.com")
    tag = entry.tag  # taken from the config entry
    return arrayToClass(ar_title, ar_company, ar_location, ar_date, ar_link, tag)
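

# --- hedged sketch, not part of the original file: the header note describes
# deriving posting dates from strings like "vor 3 Tagen" ("3 days ago") as
# date = today - X. The real conversion presumably lives in helpers (see the
# CLEANDATE flag); parse_vor_x_tagen below is a hypothetical illustration only.
import datetime
import re


def parse_vor_x_tagen(text):
    """Return today - X for a German "vor X Tagen" string, else None."""
    match = re.search(r"vor\s+(\d+)\s+Tag", text)  # matches "Tag" and "Tagen"
    if match:
        return datetime.date.today() - datetime.timedelta(days=int(match.group(1)))
    return None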
def scrap_jobs(url, entry, session):
    log("[scrap_jobs] url:", url)
    if session == 0:
        with requests.Session() as session:
            page = session.get(url)
            log(page)
    else:
        page = session.get(url)
        log(page)
    soup = BeautifulSoup(page.content, "html.parser")
    # print(soup.prettify())
    results = soup.find_all("div", attrs={"data-feat": "searched_jobs"})
    # The generated CSS class names below are brittle and will likely break
    # whenever jobs.ch redeploys its frontend.
    location_class = "P-sc-hyu5hk-0 Text__p2-sc-1lu7urs-10 Span-sc-1ybanni-0 Text__span-sc-1lu7urs-12 Text-sc-1lu7urs-13 jZCxUn"
    location = item("p", location_class, 0, "location")
    ar_location = finder(results, location, LOCATION_CLEANUP=1)
    company_class = location_class  # company uses the same generated class
    company = item("p", company_class, 0, "company")
    ar_company = finder(results, company, DEFAULT=1, GETCHILDREN="strong")
    title = item("span", "jlFpCz", 0, "TITLE")
    ar_title = finder(results, title, DEFAULT=1)
    date = item("span", "Span-sc-1ybanni-0 Text__span-sc-1lu7urs-12 Text-sc-1lu7urs-13 krGudM hUhFmL", 0, "date")
    ar_date = finder(results, date, CLEANDATE=1)
    link = item("a", {"data-cy": "job-link"}, 0, "link")
    ar_link = finder(results, link, LINK=1, ATTRS=1, BASEURL="https://jobs.ch")
    tag = entry.tag  # taken from the config entry
    return arrayToClass(ar_title, ar_company, ar_location, ar_date, ar_link, tag)
def next_url_indeed_com(url, session, baseurl):
    next_link_str = ""
    if session == 0:
        with requests.Session() as session:
            page = session.get(url)
    else:
        page = session.get(url)  # was requests.get(url), which bypassed the session
    soup = BeautifulSoup(page.content, "html.parser")
    result_next = soup.find_all("nav", attrs={"role": "navigation"})
    next_ = item("a", {"data-testid": "pagination-page-next"}, 0)
    next_link = finder(result_next, next_, ATTRS=1, LINK=1)
    if next_link:
        if next_link[0] != "NOTFound":
            next_link_str = baseurl + str(next_link[0])
            log(next_link_str)
        else:
            return 0
    if next_link_str != "":
        return next_link_str
    else:
        return 0
def next_url_jobs_ch(url, session, baseurl):
    next_link_str = ""
    if session == 0:
        with requests.Session() as session:
            page = session.get(url)
    else:
        page = session.get(url)  # was requests.get(url), which bypassed the session
    soup = BeautifulSoup(page.content, "html.parser")
    result_next = soup.find_all("div", attrs={"data-cy": "paginator"})
    next_ = item("a", {"data-cy": "paginator-next"}, 0)
    next_link = finder(result_next, next_, ATTRS=1, LINK=1)
    if next_link:
        if next_link[0] != "NOTFound":
            next_link_str = baseurl + str(next_link[0])
            log(next_link_str)
        else:
            return 0
    if next_link_str != "":
        return next_link_str
    else:
        return 0
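

# --- hedged sketch, not part of the original file: how a scraper and its
# next_url_* helper above are presumably combined. scrape_all_pages is a
# hypothetical driver and assumes arrayToClass returns an iterable of jobs.
def scrape_all_pages(url, entry, session, scraper, next_url, baseurl):
    all_jobs = []
    while url:
        all_jobs.extend(scraper(url, entry, session))
        url = next_url(url, session, baseurl)  # the helpers return 0 on the last page
    return all_jobs


# e.g.: scrape_all_pages(start_url, entry, 0, scrap_jobs, next_url_jobs_ch,
#       "https://jobs.ch")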
def next_url_jobagent(base_url, session, c):  # deprecated, will be removed in the future
    if session == 0:
        with requests.Session() as session:
            page = session.get(base_url)
    else:
        page = session.get(base_url)  # was requests.get(base_url), which bypassed the session
    soup = BeautifulSoup(page.content, "html.parser")
    results = soup.find("ul", class_="pagination")
    if results is None:
        print("pagination next not found, probably end of pages")
    next_url_names = soup.find_all("a", class_="btn btn-sm btn-secondary")
    for i2 in next_url_names:
        striped_string = i2.text.strip()
        log("raw:", i2.text, "stripped:", striped_string)
        log("Printable characters?", striped_string.isprintable())
        if striped_string == "Nächste Seite":  # German for "next page"
            log(i2)
            next_url = i2.get("href")
            log("url of next site")
            return next_url  # the break that followed this return was unreachable
    return 0  # no "next page" button found
def scrap_jobagent(url, entry, session):
    log("[scrap_jobagent] url:", url)
    if session == 0:
        with requests.Session() as session:
            page = session.get(url)
            log(page)
    else:
        page = session.get(url)
        log(page)
    soup = BeautifulSoup(page.content, "html.parser")
    # print(soup.prettify())
    results = soup.find_all("li", class_="item")
    if not results:
        print("no li items found")
        log("page:", page)
    title = item("span", "jobtitle", 0, "jobagent title")
    ar_title = finder(results, title)
    location = item("span", "location", 0, "jobagent location")
    ar_location = finder(results, location, LOCATION_CLEANUP=1)
    company = item("span", "company", 0, "jobagent company")
    ar_company = finder(results, company, DEFAULT=1)
    link = item("a", "title", 0, "jobagent link")
    ar_link = finder(results, link, LINK=1)
    date = item("span", "pubdate", 0)
    ar_date = finder(results, date, SWAPDATE=1)
    tag = entry.tag  # taken from the config entry
    return arrayToClass(ar_title, ar_company, ar_location, ar_date, ar_link, tag)
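

# --- hedged usage sketch, not part of the original file: how one of these
# scrapers might be invoked directly. A config entry normally supplies `tag`;
# SimpleNamespace stands in for it here, and the search URL is made up.
if __name__ == "__main__":
    from types import SimpleNamespace

    entry = SimpleNamespace(tag="python")  # hypothetical config entry
    jobs = scrap_jobagent("https://www.jobagent.ch/search?terms=python", entry, 0)
    log("scraped jobs:", jobs)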