diff --git a/ebay_api.py b/ebay_api.py
new file mode 100644
index 0000000..f6117c0
--- /dev/null
+++ b/ebay_api.py
@@ -0,0 +1,98 @@
+import json
+
+import requests
+import pandas as pd
+
+with open('cat_list.txt') as jf:
+    cat_list = json.load(jf)
+
+big_data = pd.read_csv('big_data.csv')
+
+class FindingApi:
+    '''Wrapper for eBay's Finding API services.'''
+    def __init__(self):
+        self.service = [
+            'findItemsAdvanced', 'findCompletedItems',
+            'findItemsByKeywords', 'findItemsIneBayStores', 'findItemsByCategory',
+            'findItemsByProduct'
+        ]
+        self.pageNumber = list(range(1, 63))
+
+    # departments = ["3034","93427"] (womens and mens)
+
+    def get_ids_from_cats(self):
+        '''Fetches item IDs from every category page by page and batches them
+        into comma-separated strings of twenty for the Shopping API.'''
+        itemid_results_list = []
+        for category_id in cat_list:
+            for page in self.pageNumber:
+                params = {
+                    "OPERATION-NAME": self.service[4],  # findItemsByCategory
+                    "SECURITY-APPNAME": "scottbea-xlister-PRD-6796e0ff6-14862949",
+                    "SERVICE-VERSION": "1.13.0",
+                    "RESPONSE-DATA-FORMAT": "JSON",
+                    "categoryId": category_id,
+                    "paginationInput.entriesPerPage": "100",
+                    # Use the page number directly; indexing self.pageNumber[i]
+                    # was off by one and raised IndexError on the last page.
+                    "paginationInput.pageNumber": page
+                }
+                response = requests.get("https://svcs.ebay.com/services/search/FindingService/v1",
+                                        params=params)
+                data = response.json()
+                for item in data['findItemsByCategoryResponse'][0]['searchResult'][0]['item']:
+                    # The Finding API wraps every JSON value in a list, so take element 0.
+                    item_id = item['itemId'][0]
+                    # Skip item IDs already present in big_data.csv.
+                    if item_id not in big_data.values:
+                        itemid_results_list.append(item_id)
+
+        # GetMultipleItems accepts at most twenty comma-separated ItemIDs per call.
+        item_id_results = [','.join(itemid_results_list[n:n + 20])
+                           for n in range(0, len(itemid_results_list), 20)]
+        return item_id_results
+
+class ShoppingApi(FindingApi):
+    '''Wrapper for eBay's Shopping API.'''
+    def get_item_from_findItemsByCategory(self, item_id_results):
+        '''Calls GetMultipleItems on each batch of twenty IDs, flattens each
+        item's ItemSpecifics name/value pairs into columns, and appends the
+        result to big_data.csv.'''
+        for twenty_id in item_id_results:
+            params = {
+                "callname": "GetMultipleItems",
+                "appid": "scottbea-xlister-PRD-6796e0ff6-14862949",
+                "version": "671",
+                "responseencoding": "JSON",
+                "ItemID": twenty_id,
+                "IncludeSelector": "ItemSpecifics",
+            }
+
+            response = requests.get("https://open.api.ebay.com/shopping", params=params)
+            data = response.json()
+
+            names = []
+            values = []
+            nvl = data['Item'][0]['ItemSpecifics']['NameValueList']
+
+            # Loop variable renamed from nvl_dict so it no longer shadows the
+            # dict of name/value pairs built below.
+            for entry in nvl:
+                names.append(entry['Name'])
+                values.append(entry['Value'])
+
+            nvl_dict = dict(zip(names, values))
+            data.update(nvl_dict)
+            df = pd.json_normalize(data)
+            # Append rather than overwrite on each batch; big_data.csv already
+            # exists (it is read at import), so the header is skipped.
+            df.to_csv('big_data.csv', mode='a', header=False)
+
+# Limited to 5,000 calls to the Shopping API per day, and GetMultipleItems maxes out at 20
+# items per call, leaving you 100,000 items per day for your pandas dataframe initially. So
+# you'll have to divide these up among the categories, which leaves roughly 6.25K results per
+# category. More than enough data for your dataset.
+
+# Need to make sure the dataframe gets the important fields outside of the NameValueList. Also
+# need to change the init method in FindingApi to take a variable page count and possibly a
+# variable service.
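
A minimal driver sketch for how the two classes are meant to chain together, given the
daily quota noted in the module's closing comments. The driver.py file name, the
MAX_SHOPPING_CALLS_PER_DAY constant, and the truncation strategy are illustrative
assumptions, not part of this diff:

    # driver.py -- hypothetical caller for ebay_api.py; everything here is an
    # illustrative assumption except the two imported classes.
    from ebay_api import FindingApi, ShoppingApi

    # Assumed daily quota, per the limit noted in ebay_api.py's closing comments.
    MAX_SHOPPING_CALLS_PER_DAY = 5000

    finding = FindingApi()
    shopping = ShoppingApi()

    # Each element of item_id_results is one comma-separated string of up to
    # twenty ItemIDs, i.e. exactly one GetMultipleItems call.
    item_id_results = finding.get_ids_from_cats()

    # Capping the list length caps the day's Shopping API call count.
    shopping.get_item_from_findItemsByCategory(
        item_id_results[:MAX_SHOPPING_CALLS_PER_DAY])

Since ShoppingApi subclasses FindingApi, a single ShoppingApi instance could also drive
both steps; the two instances above just keep the roles explicit.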