Changed to_csv mode to append so the CSV is created if not present and appended to if it is

spbeach46 2020-10-18 13:56:16 -07:00
parent 60b8f8979c
commit 26b425f31c

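For context, a minimal sketch of the pandas DataFrame.to_csv append behavior the commit message describes. Only the training.csv filename and mode='a' come from the diff below; the column names, the header guard, and index=False are illustrative assumptions, not part of this commit.

import os

import pandas as pd

# Two hypothetical batches of curated listing rows.
batch_1 = pd.DataFrame({"ItemID": ["111", "222"], "Title": ["shirt", "jeans"]})
batch_2 = pd.DataFrame({"ItemID": ["333"], "Title": ["jacket"]})

def append_rows(df, path="training.csv"):
    # mode='a' creates the file when it is missing and appends when it exists,
    # which is the behavior the commit message describes. With the default
    # header=True (as in the diff), a header row is written on every append;
    # the guard below is one illustrative way to avoid that.
    df.to_csv(path, mode="a", header=not os.path.isfile(path), index=False)

append_rows(batch_1)
append_rows(batch_2)
print(pd.read_csv("training.csv"))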

@@ -33,7 +33,7 @@ class FindingApi:
response = requests.get("https://svcs.ebay.com/services/search/FindingService/v1",
params=params)
data = response.json()
- return data # May want to save raw json as text file here or in main
+ return data
def get_ids_from_cats(self):
'''
@@ -43,15 +43,14 @@ class FindingApi:
data = self.get_data()
itemid_results_list = []
try:
- big_data = pd.read_csv('big_data.csv')
+ training = pd.read_csv('training.csv')
for item in data['findItemsByCategoryResponse'][0]['searchResult'][0]['item']:
- if item not in big_data.values:
+ if item not in training.values:
itemid_results_list.append(item['itemId'][0]) # itemId
# values are in lists for some reason
except pd.errors.EmptyDataError:
for item in data['findItemsByCategoryResponse'][0]['searchResult'][0]['item']:
- itemid_results_list.append(item['itemId'][0]) # itemId
- # values are in lists for some reason
+ itemid_results_list.append(item['itemId'][0])
item_id_results = [','.join(itemid_results_list[n:n+20]) for n in list(range(0,
len(itemid_results_list), 20))]
@@ -64,7 +63,7 @@ class ShoppingApi:
'''
def get_item_from_findItemsByCategory(self, item_id_results):
'''
- Gets raw JSON data from multiple live listings
+ Gets raw JSON data from multiple live listings given multiple itemIds
'''
for twenty_id in item_id_results:
params = {
@@ -79,7 +78,7 @@ class ShoppingApi:
response = requests.get("https://open.api.ebay.com/shopping?", params=params)
data = response.json()
return data
- # Maybe end def here and create new def for curating data
class CurateData:
'''
Contains functions for curating data for machine learning training sets
@@ -91,12 +90,12 @@ class CurateData:
for nvl_dict in nvl:
names.append(nvl_dict['Name'])
- values.append(nvl_dict['Value']) # Try to extract value from list here
+ values.append(nvl_dict['Value']) # TODO Try to extract value from list here
nvl_dict = dict(zip(names, values))
data.update(nvl_dict)
df = pd.json_normalize(data)
- df.to_csv('big_data.csv')
+ df.to_csv('training.csv', mode='a')
def main():
'''
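Read together, the two hunks suggest the intended flow: read training.csv when it exists, skip itemIds that were already collected, curate the new listings, and append them. Below is a rough sketch of that flow, not the repository's exact logic; the ItemID key, the ItemSpecifics/NameValueList keys, the CSV column layout, and the helper names are assumptions, while training.csv, mode='a', json_normalize, and the EmptyDataError fallback come from the diff.

import os

import pandas as pd

TRAINING_CSV = "training.csv"  # filename used in the diff

def load_known_ids(path=TRAINING_CSV):
    # Mirrors the try/except pd.errors.EmptyDataError branch in
    # get_ids_from_cats: a missing or empty CSV simply means no known ids yet.
    # The 'ItemID' column name is an assumption about the curated CSV layout.
    try:
        return set(pd.read_csv(path)["ItemID"].astype(str))
    except (FileNotFoundError, pd.errors.EmptyDataError, KeyError):
        return set()

def flatten_item(item):
    # Rough analogue of the CurateData name/value loop: fold the list of
    # {'Name': ..., 'Value': ...} dicts into flat keys on the listing dict.
    # 'ItemSpecifics' and 'NameValueList' are assumed response keys.
    flat = dict(item)
    for nvl_dict in item.get("ItemSpecifics", {}).get("NameValueList", []):
        flat[nvl_dict["Name"]] = nvl_dict["Value"]
    flat.pop("ItemSpecifics", None)
    return flat

def curate_and_append(items, path=TRAINING_CSV):
    # items: listing dicts as returned by the Shopping API call in the diff,
    # each assumed to carry an 'ItemID' key.
    known = load_known_ids(path)
    new_rows = [flatten_item(i) for i in items if str(i.get("ItemID")) not in known]
    if not new_rows:
        return
    df = pd.json_normalize(new_rows)
    # Same append mode as the commit; the header guard avoids re-writing the
    # header row on every append and is an illustrative addition.
    df.to_csv(path, mode="a", header=not os.path.isfile(path), index=False)

Calling curate_and_append on the list of listing dicts returned by the Shopping API request would then be the rough equivalent of the to_csv call shown in the last hunk.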