import requests
import json
from bs4 import BeautifulSoup as b
import pandas as p

# keywords = input('keyword search: ')

# cat_list.txt holds a JSON array of eBay category IDs
with open('cat_list.txt') as jf:
    cat_list = json.load(jf)

# Operation names supported by the Finding API; index 4 (findItemsByCategory) is used below
finding_service = ['findItemsAdvanced', 'findCompletedItems', 'findItemsByKeywords',
                   'findItemsIneBayStores', 'findItemsByCategory', 'findItemsByProduct']

# result pages 1-62 for each category (see the call-budget notes at the end of the file)
pageNumber = list(range(1, 63))

# departments = ["3034","93427"]

def get_ids():
    itemid_results_list = []
    for categoryID in cat_list[0:2]:
        params = {
            "OPERATION-NAME": finding_service[4],  # findItemsByCategory
            "SECURITY-APPNAME": "scottbea-xlister-PRD-6796e0ff6-14862949",
            "SERVICE-VERSION": "1.13.0",
            "RESPONSE-DATA-FORMAT": "JSON",
            "categoryId": categoryID,
            "paginationInput.entriesPerPage": "100",
            "paginationInput.pageNumber": pageNumber[0]  # the API expects lowercase "pageNumber"
        }
        # extract item id here for piping into shopping_test.py

        response = requests.get("https://svcs.ebay.com/services/search/FindingService/v1", params=params)
        data = response.json()
        pretty_data = json.dumps(data, indent=2)  # pretty-printed copy, handy for debugging
        itemid_results_list.append(data)  # accumulate instead of returning inside the loop
    return itemid_results_list

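# A minimal sketch of the "extract item id" step noted in get_ids, assuming the
# stock Finding API JSON shape in which every level is wrapped in a one-element
# list; the key names come from a findItemsByCategory response and may need adjusting.
def extract_item_ids(data):
    items = data['findItemsByCategoryResponse'][0]['searchResult'][0]['item']
    return [item['itemId'][0] for item in items]
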
# can use pandas.json_normalize(custom dict cobbled from response.json())
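# A rough sketch of the json_normalize idea above, using the same assumed response
# shape as extract_item_ids; the columns you get depend on each listing's fields.
def to_dataframe(data):
    items = data['findItemsByCategoryResponse'][0]['searchResult'][0]['item']
    return p.json_normalize(items)  # one row per listing, nested fields flattened to dotted columns
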
# An additional problem you'll run into when gathering labeled data: shoe types and
# features that don't appear in the features, accents, styles, categories, or
# subcategories fields.

# Also limited to 5,000 calls per day; at 100 entries per call, that leaves you
# with 500k listings.

# If you want to split up each category equally with their respective maxes, use 62
# pages with 100 entries per page. At that amount you'll have the max number of calls
# you can make on the Shopping API.
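
# Back-of-the-envelope check of the call budget described above:
#   5,000 calls/day * 100 entries/call = 500,000 listings/day
#   5,000 calls/day / 62 pages/category = ~80 categories fully covered per day

# Hypothetical driver tying the sketches together (not part of the original script):
# all_ids = [item_id for resp in get_ids() for item_id in extract_item_ids(resp)]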