# poshmark/posh.py~

import requests
from bs4 import BeautifulSoup as b
import time
import re
import concurrent.futures
import numpy as np
# import matplotlib.pyplot as plt
search_query = input('Title Search: ')

def url_base_builder(search_query):
    """Build the Poshmark sold-listings search URL bases: (all sold, new-with-tags sold)."""
    genders = ['Men', 'Women']
    posh_colors = ['Red', 'Pink', 'Orange', 'Yellow', 'Green', 'Blue', 'Purple',
                   'Gold', 'Silver', 'Black', 'Gray', 'White', 'Cream', 'Brown', 'Tan']

    # If the query names a color, turn it into a color filter on the URL.
    for i in range(len(posh_colors)):
        if posh_colors[i] in search_query:
            url_color = '&color[]=' + posh_colors[i]
            color = posh_colors[i]
            break
    else:
        color = ''
        url_color = ''

    # Likewise for the department (gender) filter.
    for i in range(len(genders)):
        if genders[i] in search_query:
            url_gender = '&department=' + genders[i]
            gender = genders[i]
            break
    else:
        gender = ''
        url_gender = '&department=All'

    # Drop the color/gender/NEW keywords from the query text and encode spaces for the URL.
    sq = search_query.replace(color, '').replace(gender, '').replace('NEW', '').replace(' ', '+')

    all_sold_url_base = 'https://poshmark.com/search?query=' + sq + \
        '&availability=sold_out' + url_color + url_gender + '&max_id='
    new_sold_url_base = 'https://poshmark.com/search?query=' + sq + '&availability=sold_out' + \
        '&condition=nwt_and_ret' + url_color + url_gender + '&max_id='

    return all_sold_url_base, new_sold_url_base
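
# Illustrative only: for a query with no color or gender keyword, e.g. 'Nike Hoodie', the builder
# above yields URL bases along these lines (assuming Poshmark keeps this query-string layout):
#   all_sold_url_base == 'https://poshmark.com/search?query=Nike+Hoodie&availability=sold_out&department=All&max_id='
#   new_sold_url_base == 'https://poshmark.com/search?query=Nike+Hoodie&availability=sold_out&condition=nwt_and_ret&department=All&max_id='
# The page number is appended to '&max_id=' by the list builders below.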

def all_sold_list_builder(i):
    """Scrape page i of the all-sold results and return the listed prices."""
    bases = url_base_builder(search_query)
    all_sold_url_base = bases[0]
    all_sold_prices = []
    url = all_sold_url_base + str(i)
    html = requests.get(url).text
    soup = b(html, "lxml")
    # last_page = soup.find(string=re.compile('No Listings Found'))
    for price in soup.find_all('span', {'class': 'p--t--1 fw--bold'}):
        price = price.get_text()
        # Keep the whole-dollar digits that follow the '$' (assumes a price of $10 or more).
        price = re.findall(r'[^\$]\d+', price)[0]
        price = float(price)
        all_sold_prices.append(price)
    # all_sold_prices = list(map(float, re.findall(r'[^\$]\d+[.]\d+', html)))
    return all_sold_prices
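
# Hedged sketch, not wired into the flow above: parse_price is a hypothetical helper that would
# also keep cents and tolerate single-digit or missing-'$' prices (e.g. '$8.50'), which the
# r'[^\$]\d+' pattern used above does not handle.
def parse_price(text):
    match = re.search(r'\$?\s*(\d+(?:\.\d{1,2})?)', text)
    return float(match.group(1)) if match else None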

def new_sold_list_builder(i):
    """Scrape page i of the new-with-tags (NWT) sold results and return the listed prices."""
    bases = url_base_builder(search_query)
    new_sold_url_base = bases[1]
    new_sold_prices = []
    url = new_sold_url_base + str(i)
    html = requests.get(url).text
    soup = b(html, "lxml")
    # last_page = soup.find(string=re.compile('No Listings Found'))
    # 'No Listings Found' appears on every page that doesn't have a full 48 listings, so a
    # conditional check against it ends up returning an empty price list for those pages.
    # new_sold_prices = list(map(float, re.findall(r'[^\$]\d+[.]\d+', html)))
    for price in soup.find_all('span', {'class': 'p--t--1 fw--bold'}):
        price = price.get_text()
        price = re.findall(r'[^\$]\d+', price)[0]
        price = float(price)
        new_sold_prices.append(price)
    return new_sold_prices

def main():
    start = time.time()

    with concurrent.futures.ThreadPoolExecutor() as executor:
        for future in executor.map(all_sold_list_builder, page_list):
            all_sold_list.extend(future)

    # If the NWT prices could be pulled in the same pass as the used ones, this second round
    # of requests would be unnecessary.
    with concurrent.futures.ThreadPoolExecutor() as executor:
        for future in executor.map(new_sold_list_builder, page_list):
            new_sold_list.extend(future)

    # The all-sold results include the NWT sales, so remove one occurrence of each NWT price to
    # leave only the used sales, e.g. all-sold [10.0, 20.0, 20.0] with NWT [20.0] leaves used
    # [10.0, 20.0]. The membership guard skips NWT prices that only appear beyond the pages
    # scraped for the all-sold search, which would otherwise raise ValueError.
    for element in new_sold_list:
        if element in all_sold_list:
            all_sold_list.remove(element)
    used_sold_list = all_sold_list

    average_used_sold_price = '$' + str(round(np.mean(used_sold_list), 2))
    average_new_sold_price = '$' + str(round(np.mean(new_sold_list), 2))
    used_sold_results = str(len(used_sold_list)) + ' Used Results'
    new_sold_results = str(len(new_sold_list)) + ' NWT Results'
    total_results = str(len(used_sold_list) + len(new_sold_list)) + ' Total Results'

    end = time.time()
    print(end - start, 'seconds')
    print('Average Used Sold Price', average_used_sold_price, used_sold_results)
    print('Average New Sold Price', average_new_sold_price, new_sold_results)
    print(total_results)

if __name__ == '__main__':
    page_list = list(range(1, 5))  # scrape result pages 1-4
    all_sold_list = []
    new_sold_list = []
    main()

'''To speed up the program:
1) Only parse the full results and sift out the NWT listings from them to build the separate
   NWT list, so each page is requested once instead of twice.
2) Use a ProcessPoolExecutor so more than one worker parses pages.
3) Find a better way to detect the last page so no more requests are made than necessary:
   either take the lowest-numbered "No Listings Found" page and exclude everything after it,
   or determine from the response headers whether a page is worth downloading at all.
4) Combine 3) with a while loop that requests pages in chunks of 2-4 until the last page is found.'''
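
# Hedged sketch of ideas 2)-4) above, not a drop-in replacement: a hypothetical scrape_page
# wrapper returns one page's prices plus a flag for the 'No Listings Found' marker, and
# scrape_all_pages fans requests out over a ProcessPoolExecutor in small chunks, stopping once
# a chunk contains the last page. Assumes a fork start method (the Linux default) so workers
# inherit the module-level search_query; under spawn the query would need to be passed in
# explicitly. Neither function is called anywhere in this script.
def scrape_page(i):
    url = url_base_builder(search_query)[0] + str(i)
    soup = b(requests.get(url).text, "lxml")
    prices = [float(re.findall(r'[^\$]\d+', span.get_text())[0])  # same price parsing as above
              for span in soup.find_all('span', {'class': 'p--t--1 fw--bold'})]
    is_last = soup.find(string=re.compile('No Listings Found')) is not None
    return prices, is_last


def scrape_all_pages(chunk_size=4):
    all_prices = []
    page = 1
    with concurrent.futures.ProcessPoolExecutor() as executor:
        while True:
            chunk = range(page, page + chunk_size)
            last_page_seen = False
            for prices, is_last in executor.map(scrape_page, chunk):
                all_prices.extend(prices)
                last_page_seen = last_page_seen or is_last
            if last_page_seen:
                break
            page += chunk_size
    return all_prices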