Changing defs to methods and variables to attributes. Trial and error; code seems to be broken right now.

spbeach46 2020-10-11 22:12:01 -07:00
parent 9d02381140
commit 47ad8639ff


@@ -6,7 +6,10 @@ import pandas as pd
 with open('cat_list.txt') as jf:
     cat_list = json.load(jf)
+big_data = pd.read_csv('big_data.csv')
 class FindingApi:
+    '''Some docstring to get rid of linting errors'''
     finding_service = [
         'findItemsAdvanced', 'findCompletedItems',
         'findItemsByKeywords', 'findItemsIneBayStores', 'findItemsByCategory',
@@ -16,8 +19,9 @@ class FindingApi:
     # departments = ["3034","93427"] (womens and mens)
-    def get_ids_from_cats():
-        itemid_results_list = []
+    def get_ids_from_cats(self):
+        '''Stop bothering me for docstrings.'''
+        self.itemid_results_list = []
         for category_id in cat_list:
             for i in FindingApi.pageNumber:
                 params = {
@@ -29,44 +33,47 @@ class FindingApi:
                     "paginationInput.entriesPerPage":"100",
                     "paginationInput.PageNumber":FindingApi.pageNumber[i]
                 }
-                response = requests.get("https://svcs.ebay.com/services/search/FindingService/v1", params=params)
+                response = requests.get("https://svcs.ebay.com/services/search/FindingService/v1",
+                                        params=params)
                 data = response.json()
-                for item in data['findItemsByCategoryResponse'][0]['searchResult'[0]['item']:
-                    itemid_results_list.append(item['itemId']) # above this line you will have to check whether the id exists in the pandas dataframe before you go on to create your list for the shopping
-                    #api: check df --> if does not exist, append itemId to itemid_results_list --> with curated list, make a list of 20-itemId lists.
-        item_id_results = ','.join(item_id_results_list) # you will have to split this into 20 item lists strings to feed into ItemID param. Consider making a list of str lists.
+                for item in data['findItemsByCategoryResponse'][0]['searchResult'][0]['item']:
+                    if item not in big_data.values:
+                        itemid_results_list.append(item['itemId'])
+        item_id_results = [','.join(itemid_results_list[n:n+20]) for n in list(range(0,
+            len(itemid_results_list), 20))]
+        return item_id_results
-class ShoppingApi:
-    def get_item_from_findItemsByCategory():
-        params = {
-            "callname":"GetMultipleItems",
-            "appid":"scottbea-xlister-PRD-6796e0ff6-14862949",
-            "version":"671",
-            "responseencoding":"JSON",
-            "ItemID":item_id_results, # you pass in a list? If not then maybe a comma-separated
-            "IncludeSelector":"ItemSpecifics",
-        }
-        response = requests.get("https://open.api.ebay.com/shopping?", params=params)
-        data = response.json()
-        names = []
-        values = []
-        nvl = data['Item'][0]['ItemSpecifics']['NameValueList']
-        for nvl_dict in nvl:
-            names.append(nvl_dict['Name'])
-            values.append(nvl_dict['Value'])
-        nvl_dict = dict(zip(names, values))
-        data.update(nvl_dict)
-        df = pd.json_normalize(data)
-        df.to_csv('big_data.csv')
+class ShoppingApi(FindingApi):
+    self.item_id_results = FindingApi.get_ids_from_cats()
+    def get_item_from_findItemsByCategory(self):
+        for twentyId in item_id_results:
+            params = {
+                "callname":"GetMultipleItems",
+                "appid":"scottbea-xlister-PRD-6796e0ff6-14862949",
+                "version":"671",
+                "responseencoding":"JSON",
+                "ItemID":twentyId, # you pass in a list? If not then maybe a comma-separated
+                "IncludeSelector":"ItemSpecifics",
+            }
+            response = requests.get("https://open.api.ebay.com/shopping?", params=params)
+            data = response.json()
+            names = []
+            values = []
+            nvl = data['Item'][0]['ItemSpecifics']['NameValueList']
+            for nvl_dict in nvl:
+                names.append(nvl_dict['Name'])
+                values.append(nvl_dict['Value'])
+            nvl_dict = dict(zip(names, values))
+            data.update(nvl_dict)
+            df = pd.json_normalize(data)
+            df.to_csv('big_data.csv')
 # Limited to 5000 calls to the Shopping API per day, and the GetMultipleItems service maxes out at 20 items
 # per call, leaving you 100,000 items per day for your pandas dataframe initially. So you'll have
 # to divide these up into the categories. This will leave you with about 6.25K results per cat.
 # More than enough data for your dataset. Consider
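
The per-category figure in that comment follows directly from the call limits. A quick back-of-the-envelope check in Python (the category count here is an assumption picked to reproduce the ~6.25K figure; the actual script would use len(cat_list)):

calls_per_day = 5000           # Shopping API daily call limit
items_per_call = 20            # GetMultipleItems maximum ItemIDs per call
num_categories = 16            # assumed here; len(cat_list) in the actual script

items_per_day = calls_per_day * items_per_call        # 100,000 items per day
items_per_category = items_per_day / num_categories   # 6,250, the ~6.25K per cat mentioned above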
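
The inline comments about the ItemID param boil down to this: GetMultipleItems takes at most 20 item ids per call, passed as one comma-separated string, so the flat list of collected ids has to be split into 20-id strings, which is what the new list comprehension in get_ids_from_cats does. A minimal standalone sketch of that step (the function name and sample ids are illustrative, not taken from the file):

def chunk_item_ids(itemid_list, size=20):
    '''Split a flat list of item ids into comma-separated strings of at most `size` ids each.'''
    return [','.join(itemid_list[n:n + size]) for n in range(0, len(itemid_list), size)]

# 45 ids -> 3 strings: 20 ids, 20 ids, and 5 ids
sample_ids = [str(110000000000 + i) for i in range(45)]
twenty_id_strings = chunk_item_ids(sample_ids)
assert len(twenty_id_strings) == 3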