diff --git a/ebay_api.py b/ebay_api.py
index 9e82d96..6379fcc 100644
--- a/ebay_api.py
+++ b/ebay_api.py
@@ -11,7 +11,7 @@ class FindingApi:
             'findItemsByProduct'
             ][service]
         self.pageNumber = list(range(1, pageNumber)) # 64 pages is recommended
-        # this will give equal weights to cats given call restraints
+        # this will give equal weights to cats given call constraints
         # departments = ["3034","93427"] (womens and mens)
 
     def get_data(self):
@@ -42,12 +42,15 @@ class FindingApi:
         '''
         data = self.get_data()
         itemid_results_list = []
+        try:# TODO run pdb here to see how to extract itemId before update_df
             training = pd.read_csv('training.csv')
             for item in data['findItemsByCategoryResponse'][0]['searchResult'][0]['item']:
                 if (item not in training.values) and (item not in itemid_results_list):
                     itemid_results_list.append(item['itemId'][0]) # itemId
                     # values are in lists for some reason
+        except (pd.errors.EmptyDataError, FileNotFoundError):
             for item in data['findItemsByCategoryResponse'][0]['searchResult'][0]['item']:
-                if (item not in training.values) and (item not in itemid_results_list):
+                if item not in itemid_results_list:
                     itemid_results_list.append(item['itemId'][0])
+
         item_id_results = [','.join(itemid_results_list[n:n+20]) for n in list(range(0, len(itemid_results_list), 20))]
@@ -98,5 +101,5 @@ class CurateData:
     # TODO Also append itemId and value to the dictionary somewhere
             nvl_dict = dict(zip(names, values))
-            data.update(nvl_dict)
+            data.update(nvl_dict) # TODO this changes iterable so you get error
             df = pd.json_normalize(data)
             df.to_csv('training.csv', mode='a')