import ...
def get_links():
    """Collect detail-page URLs from 70 pages of the 58.com tech job list.

    Fetches list pages http://sz.58.com/tech/pn1 .. pn70, extracts the
    job links under ``div.job_name a``, and rebuilds each detail URL from
    the link's ``urlparams`` attribute.

    Returns:
        list[str]: detail-page URLs of the form
        ``http://sz.58.com/tech/<id>x.shtml``.

    Note: relies on a module-level ``headers`` dict (defined elsewhere in
    this file) for the request headers.
    """
    urls = []
    # Loop-invariant URL parts hoisted out of the loop.
    url_1 = 'http://sz.58.com/tech/'
    url_2 = 'x.shtml'
    for page in range(1, 71):
        list_view = 'http://sz.58.com/tech/pn{}'.format(page)
        wb_data = requests.get(list_view, headers=headers)
        soup = BeautifulSoup(wb_data.text, 'html.parser')
        for link in soup.select('div.job_name a'):
            # urlparams looks like "...=<id>_q..."; take the last "="
            # segment and strip the trailing "_q" marker to get the id.
            job_id = str(link.get('urlparams').split('=')[-1].strip('_q'))
            urls.append(url_1 + job_id + url_2)
    return urls
def get_info():
    """Fetch each job detail page from get_links() and print its fields.

    For every URL, requests the page, parses it, and prints a dict with
    the job title, salary, and the condition/experience items. Failures
    on one page (missing selector -> IndexError, network problem ->
    ConnectionError) skip only that page instead of aborting the run.

    Note: relies on a module-level ``headers`` dict for request headers.
    """
    urls = get_links()
    for url in urls:
        # try/except is per-URL: in the original, one failure abandoned
        # every remaining URL because the loop sat inside the try.
        try:
            wb_data = requests.get(url, headers=headers)
            soup = BeautifulSoup(wb_data.text, 'html.parser')
            time.sleep(2)  # throttle between requests to be polite
            data = {
                'job': soup.select('.pos_title')[0].text,
                'salary': soup.select('.pos_salary')[0].text,
                'condition': soup.select('.item_condition')[1].text,
                # key spelling fixed from 'exprience'
                'experience': soup.select('.item_condition')[2].text,
            }
            print(data)
        except IndexError:
            # Page layout differs / selector missing: skip this URL.
            continue
        except requests.exceptions.ConnectionError:
            # Transient network failure: skip this URL.
            continue
# Guard the script entry point so importing this module does not start
# a 70-page crawl as a side effect.
if __name__ == '__main__':
    get_info()