deleted shopping_test.py and saved it as shopping_api.py. Added classes, including those from finding_api.py.
This commit is contained in:
parent
752fa7efaa
commit
77038e92f8
67
shopping_api.py
Normal file
@@ -0,0 +1,67 @@
import requests
import json
from bs4 import BeautifulSoup as b
import pandas as pd

with open('cat_list.txt') as jf:
    cat_list = json.load(jf)

finding_service = ['findItemsAdvanced', 'findCompletedItems', 'findItemsByKeywords', 'findItemsIneBayStores', 'findItemsByCategory', 'findItemsByProduct']

class Finding_api:

    pageNumber = list(range(1, 63))

    # departments = ["3034","93427"]

    def get_ids(self):
        itemid_results_list = []
        for categoryID in cat_list[0:2]:
            params = {
                "OPERATION-NAME": finding_service[4],
                "SECURITY-APPNAME": "scottbea-xlister-PRD-6796e0ff6-14862949",
                "SERVICE-VERSION": "1.13.0",
                "RESPONSE-DATA-FORMAT": "JSON",
                "categoryId": categoryID,
                "paginationInput.entriesPerPage": "100",
                "paginationInput.pageNumber": self.pageNumber[0],  # lowercase pageNumber per eBay's docs; needs self since it's a class attribute
            }
            # extract item ids here for piping into Shopping_api (see the extract_item_ids sketch below)

            response = requests.get("https://svcs.ebay.com/services/search/FindingService/v1", params=params)
            data = response.json()
            pretty_data = json.dumps(data, indent=2)  # pretty-printed copy, handy for debugging
            itemid_results_list.append(data)  # collect every category; the old `return data` bailed out on the first pass
        return itemid_results_list

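
# A minimal sketch (not in the original commit) of pulling item IDs out of a
# Finding API response for piping into Shopping_api. The JSON path below
# assumes eBay's documented envelope for findItemsByCategory; verify it
# against a live response before relying on it.
def extract_item_ids(finding_response):
    items = finding_response['findItemsByCategoryResponse'][0]['searchResult'][0]['item']
    return [item['itemId'][0] for item in items]
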

class Shopping_api:

    def get_item(self, item_id_results):  # item_id_results was undefined; take the IDs as a parameter

        params = {
            "callname": "GetMultipleItems",
            "appid": "scottbea-xlister-PRD-6796e0ff6-14862949",
            "version": "671",
            "responseencoding": "JSON",
            "ItemID": ",".join(item_id_results[:20]),  # GetMultipleItems takes a comma-separated string, max 20 IDs
            "IncludeSelector": "ItemSpecifics",
        }

        response = requests.get("https://open.api.ebay.com/shopping", params=params)
        data = response.json()
        pretty_data = json.dumps(data, indent=2)  # pretty-printed copy, handy for debugging

        names = []
        values = []
        nvl = data['Item'][0]['ItemSpecifics']['NameValueList']

        for nvl_dict in nvl:
            names.append(nvl_dict['Name'])
            values.append(nvl_dict['Value'])

        specifics = dict(zip(names, values))  # renamed from nvl_dict to avoid shadowing the loop variable
        data.update(specifics)
        df = pd.json_normalize(data)
        df.to_csv('big_data.csv')

# Limited to 5000 calls to the Shopping API per day, and the GetMultipleItems service maxes out
# at 20 items per call, leaving you 100,000 items per day for your pandas dataframe initially.
# So you'll have to divide these up into the categories. This will leave you with about 6.25K
# results per cat. More than enough data for your dataset. Consider
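
# A rough sketch of batching under those limits, assuming get_ids() feeds
# extract_item_ids() above; chunked() is a local helper written here, not a
# library call.
def chunked(ids, size=20):
    # GetMultipleItems accepts at most 20 ItemIDs per call
    for i in range(0, len(ids), size):
        yield ids[i:i + size]

# Usage sketch: 5000 calls/day * 20 IDs/call = 100,000 items/day
# shopper = Shopping_api()
# for batch in chunked(all_item_ids):
#     shopper.get_item(batch)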