Parse JSON strings as tables using Python











up vote
0
down vote

favorite












Needed a simple CSV file that had headers and no nesting of data to consume in Tableau. The JSON file I was consuming had nested dictionaries and lists (which sometimes had in themselves nested lists/dictionaries).



The output is a pipe delimited format (because free text fields have commas in them that mess things up) with nested fields having both the original dictionary key plus any lower keys as the headers.



# -*- coding: utf-8 -*-
"""
Created on Fri Sep 14 12:22:02 2018

@author: redim_learning
"""
import sys
import time


class parse_json():
    """Flatten already-parsed JSON data (dicts/lists) into delimited text rows.

    The output feeds Tableau: one header row plus one row per record, with
    nested dictionaries expanded into ``parent_child`` columns.

    NOTE(review): the scraped original had lost its indentation and every
    backslash (newline escapes appeared as a bare ``n``); both are restored
    here.  It also called sibling methods without ``self.`` and referenced
    undefined names (``item[key]``, ``dict_name``) inside helpers — fixed.
    """

    @staticmethod
    def _clean(text):
        """Return *text* with newlines/CRs removed and '"' turned into "'".

        Free-text fields must not be able to break a row or a quoted field.
        """
        return text.replace('\n', '').replace('\r', '').replace('"', "'")

    def write_dict(self, sub_dict, dict_name, f, str_dict):
        """Append the values of nested dict *sub_dict* to *str_dict*.

        The dictionary's own name is written first, then each value in turn;
        dictionaries nested deeper are expanded recursively.  Returns the
        augmented string (the partial string if *sub_dict* is not a mapping).
        """
        str_dict = str_dict + ('"%s"' % str(dict_name))
        try:
            for second_key, second_value in sub_dict.items():
                if isinstance(second_value, dict):
                    # dictionary within dictionary: recurse and keep the result
                    str_dict = self.write_dict(second_value, second_key, f, str_dict)
                    continue
                str_value = str(second_value)
                if '\n' in str_value or '\r' in str_value or '"' in str_value:
                    str_value = self._clean(str_value)
                str_dict = str_dict + (', "%s"' % str_value)
            return str_dict
        except AttributeError:
            # sub_dict was not a mapping; report and return what we have so
            # callers concatenating the result don't hit a None.
            print('dict write out did not work\n', str_dict)
            print('sub_dict is ', sub_dict)
            return str_dict

    def write_list(self, item, key, f, list_str):
        """Append the values of the list stored at ``item[key]`` to *list_str*.

        String entries are cleaned and comma-separated; dict entries are
        expanded via :meth:`write_dict` under this list's key name.  A
        trailing ``', '`` separator, if present, is stripped.
        """
        for list_value in item[key]:
            if isinstance(list_value, str):
                list_str = list_str + self._clean(list_value) + ', '
            elif isinstance(list_value, dict):
                # pass an empty accumulator so the existing prefix is not doubled
                list_str = list_str + self.write_dict(list_value, key, f, '')
        # only strip when the last entry actually left a separator behind
        return list_str[:-2] if list_str.endswith(', ') else list_str

    def find_last_field(self, item):
        """Return the last key of record *item* ('' for an empty record).

        Needed to know when to append the line feed to the total string.
        """
        last_key = ''
        for header in item:
            last_key = header
        return last_key

    def write_headers(self, item, last_header):
        """Build the header row from one record *item*.

        Scalar fields contribute their own key; nested dicts contribute one
        ``parent_child`` column per sub-key; lists contribute one column per
        element, each named after the list's key.  (*last_header* is kept
        for interface compatibility but is not needed here.)
        """
        header_list = ''
        object_list = ''
        for h, v in item.items():
            if isinstance(v, dict):
                for sub_header in v:          # keys of the nested dict
                    object_list += '"%s_%s",' % (h, sub_header)
            elif isinstance(v, list):
                for _ in v:                   # one column per list element
                    object_list += '"%s",' % h
            else:
                header_list += '"%s",' % h
        # drop the single trailing comma, wherever the last column landed
        return (header_list + object_list)[:-1]

    def parse_json_file(self, data, f, page):
        """Convert *data* (a list of record dicts) into one delimited string.

        Writes the header row to the open file handle *f* for the first
        record of page 0, and returns the accumulated data rows
        (newline-separated).  *page* is the zero-based API page number.
        """
        full_str = ''
        last_header = ''
        for item in data:
            try:
                # progress indicator for the user (overwrites the same line)
                sys.stdout.write("\rPage %i, record %s of %i records"
                                 % (page + 1, str(item['id']), len(data)))
                sys.stdout.flush()
            except (TypeError, KeyError):
                sys.stdout.write("\rprogress is progressing        ")
                sys.stdout.flush()
            # when the page holds only one record, data itself is the record
            if isinstance(item, str):
                item = data
            dict_str = ''
            list_str = ''
            item_str = ''
            if last_header == '' and page == 0:
                # determine the last header, then emit the header row once
                last_header = self.find_last_field(item)
                f.write(self.write_headers(item, last_header) + "\n")
            for key, value in item.items():
                if isinstance(value, dict):
                    # empty accumulator: write_dict returns the full fragment
                    dict_str += self.write_dict(value, key, f, '')
                elif isinstance(value, list):
                    list_str += self.write_list(item, key, f, '')
                elif isinstance(value, tuple):
                    item_str += '"%s",' % value[1]
                elif isinstance(value, bool) or value in ('True', 'False'):
                    # bool must be tested before int (bool subclasses int)
                    item_str += '"%s",' % str(value).lower()
                elif isinstance(value, int):
                    item_str += '"%s",' % value
                elif isinstance(value, str):
                    item_str += '"%s",' % self._clean(value.lower())
                elif value is None:
                    item_str += '"",'
                else:
                    print('not added %s as is type %s' % (value, type(value)))
            full_str += item_str + dict_str + list_str + "\n"
            time.sleep(0.5)  # throttle so the upstream API is not overloaded
        return full_str









share|improve this question
















bumped to the homepage by Community 9 hours ago


This question has answers that may be good or bad; the system has marked it active so that they can be reviewed.











  • 3




    perhaps add an example input file and how this code is supposed to be called .
    – Maarten Fabré
    Sep 14 at 13:20















up vote
0
down vote

favorite












Needed a simple CSV file that had headers and no nesting of data to consume in Tableau. The JSON file I was consuming had nested dictionaries and lists (which sometimes had in themselves nested lists/dictionaries).



The output is a pipe delimited format (because free text fields have comments in that mess things up) with nested fields having both the original dictionary key plus any lower keys as the headers.



# -*- coding: utf-8 -*-
"""
Created on Fri Sep 14 12:22:02 2018

@author: redim_learning
"""
import sys
import time


class parse_json():
    """Flatten already-parsed JSON data (dicts/lists) into delimited text rows.

    The output feeds Tableau: one header row plus one row per record, with
    nested dictionaries expanded into ``parent_child`` columns.

    NOTE(review): the scraped original had lost its indentation and every
    backslash (newline escapes appeared as a bare ``n``); both are restored
    here.  It also called sibling methods without ``self.`` and referenced
    undefined names (``item[key]``, ``dict_name``) inside helpers — fixed.
    """

    @staticmethod
    def _clean(text):
        """Return *text* with newlines/CRs removed and '"' turned into "'".

        Free-text fields must not be able to break a row or a quoted field.
        """
        return text.replace('\n', '').replace('\r', '').replace('"', "'")

    def write_dict(self, sub_dict, dict_name, f, str_dict):
        """Append the values of nested dict *sub_dict* to *str_dict*.

        The dictionary's own name is written first, then each value in turn;
        dictionaries nested deeper are expanded recursively.  Returns the
        augmented string (the partial string if *sub_dict* is not a mapping).
        """
        str_dict = str_dict + ('"%s"' % str(dict_name))
        try:
            for second_key, second_value in sub_dict.items():
                if isinstance(second_value, dict):
                    # dictionary within dictionary: recurse and keep the result
                    str_dict = self.write_dict(second_value, second_key, f, str_dict)
                    continue
                str_value = str(second_value)
                if '\n' in str_value or '\r' in str_value or '"' in str_value:
                    str_value = self._clean(str_value)
                str_dict = str_dict + (', "%s"' % str_value)
            return str_dict
        except AttributeError:
            # sub_dict was not a mapping; report and return what we have so
            # callers concatenating the result don't hit a None.
            print('dict write out did not work\n', str_dict)
            print('sub_dict is ', sub_dict)
            return str_dict

    def write_list(self, item, key, f, list_str):
        """Append the values of the list stored at ``item[key]`` to *list_str*.

        String entries are cleaned and comma-separated; dict entries are
        expanded via :meth:`write_dict` under this list's key name.  A
        trailing ``', '`` separator, if present, is stripped.
        """
        for list_value in item[key]:
            if isinstance(list_value, str):
                list_str = list_str + self._clean(list_value) + ', '
            elif isinstance(list_value, dict):
                # pass an empty accumulator so the existing prefix is not doubled
                list_str = list_str + self.write_dict(list_value, key, f, '')
        # only strip when the last entry actually left a separator behind
        return list_str[:-2] if list_str.endswith(', ') else list_str

    def find_last_field(self, item):
        """Return the last key of record *item* ('' for an empty record).

        Needed to know when to append the line feed to the total string.
        """
        last_key = ''
        for header in item:
            last_key = header
        return last_key

    def write_headers(self, item, last_header):
        """Build the header row from one record *item*.

        Scalar fields contribute their own key; nested dicts contribute one
        ``parent_child`` column per sub-key; lists contribute one column per
        element, each named after the list's key.  (*last_header* is kept
        for interface compatibility but is not needed here.)
        """
        header_list = ''
        object_list = ''
        for h, v in item.items():
            if isinstance(v, dict):
                for sub_header in v:          # keys of the nested dict
                    object_list += '"%s_%s",' % (h, sub_header)
            elif isinstance(v, list):
                for _ in v:                   # one column per list element
                    object_list += '"%s",' % h
            else:
                header_list += '"%s",' % h
        # drop the single trailing comma, wherever the last column landed
        return (header_list + object_list)[:-1]

    def parse_json_file(self, data, f, page):
        """Convert *data* (a list of record dicts) into one delimited string.

        Writes the header row to the open file handle *f* for the first
        record of page 0, and returns the accumulated data rows
        (newline-separated).  *page* is the zero-based API page number.
        """
        full_str = ''
        last_header = ''
        for item in data:
            try:
                # progress indicator for the user (overwrites the same line)
                sys.stdout.write("\rPage %i, record %s of %i records"
                                 % (page + 1, str(item['id']), len(data)))
                sys.stdout.flush()
            except (TypeError, KeyError):
                sys.stdout.write("\rprogress is progressing        ")
                sys.stdout.flush()
            # when the page holds only one record, data itself is the record
            if isinstance(item, str):
                item = data
            dict_str = ''
            list_str = ''
            item_str = ''
            if last_header == '' and page == 0:
                # determine the last header, then emit the header row once
                last_header = self.find_last_field(item)
                f.write(self.write_headers(item, last_header) + "\n")
            for key, value in item.items():
                if isinstance(value, dict):
                    # empty accumulator: write_dict returns the full fragment
                    dict_str += self.write_dict(value, key, f, '')
                elif isinstance(value, list):
                    list_str += self.write_list(item, key, f, '')
                elif isinstance(value, tuple):
                    item_str += '"%s",' % value[1]
                elif isinstance(value, bool) or value in ('True', 'False'):
                    # bool must be tested before int (bool subclasses int)
                    item_str += '"%s",' % str(value).lower()
                elif isinstance(value, int):
                    item_str += '"%s",' % value
                elif isinstance(value, str):
                    item_str += '"%s",' % self._clean(value.lower())
                elif value is None:
                    item_str += '"",'
                else:
                    print('not added %s as is type %s' % (value, type(value)))
            full_str += item_str + dict_str + list_str + "\n"
            time.sleep(0.5)  # throttle so the upstream API is not overloaded
        return full_str









share|improve this question
















bumped to the homepage by Community 9 hours ago


This question has answers that may be good or bad; the system has marked it active so that they can be reviewed.











  • 3




    perhaps add an example input file and how this code is supposed to be called .
    – Maarten Fabré
    Sep 14 at 13:20













up vote
0
down vote

favorite









up vote
0
down vote

favorite











Needed a simple CSV file that had headers and no nesting of data to consume in Tableau. The JSON file I was consuming had nested dictionaries and lists (which sometimes had in themselves nested lists/dictionaries).



The output is a pipe delimited format (because free text fields have comments in that mess things up) with nested fields having both the original dictionary key plus any lower keys as the headers.



# -*- coding: utf-8 -*-
"""
Created on Fri Sep 14 12:22:02 2018

@author: redim_learning
"""
import sys
import time


class parse_json():
    """Flatten already-parsed JSON data (dicts/lists) into delimited text rows.

    The output feeds Tableau: one header row plus one row per record, with
    nested dictionaries expanded into ``parent_child`` columns.

    NOTE(review): the scraped original had lost its indentation and every
    backslash (newline escapes appeared as a bare ``n``); both are restored
    here.  It also called sibling methods without ``self.`` and referenced
    undefined names (``item[key]``, ``dict_name``) inside helpers — fixed.
    """

    @staticmethod
    def _clean(text):
        """Return *text* with newlines/CRs removed and '"' turned into "'".

        Free-text fields must not be able to break a row or a quoted field.
        """
        return text.replace('\n', '').replace('\r', '').replace('"', "'")

    def write_dict(self, sub_dict, dict_name, f, str_dict):
        """Append the values of nested dict *sub_dict* to *str_dict*.

        The dictionary's own name is written first, then each value in turn;
        dictionaries nested deeper are expanded recursively.  Returns the
        augmented string (the partial string if *sub_dict* is not a mapping).
        """
        str_dict = str_dict + ('"%s"' % str(dict_name))
        try:
            for second_key, second_value in sub_dict.items():
                if isinstance(second_value, dict):
                    # dictionary within dictionary: recurse and keep the result
                    str_dict = self.write_dict(second_value, second_key, f, str_dict)
                    continue
                str_value = str(second_value)
                if '\n' in str_value or '\r' in str_value or '"' in str_value:
                    str_value = self._clean(str_value)
                str_dict = str_dict + (', "%s"' % str_value)
            return str_dict
        except AttributeError:
            # sub_dict was not a mapping; report and return what we have so
            # callers concatenating the result don't hit a None.
            print('dict write out did not work\n', str_dict)
            print('sub_dict is ', sub_dict)
            return str_dict

    def write_list(self, item, key, f, list_str):
        """Append the values of the list stored at ``item[key]`` to *list_str*.

        String entries are cleaned and comma-separated; dict entries are
        expanded via :meth:`write_dict` under this list's key name.  A
        trailing ``', '`` separator, if present, is stripped.
        """
        for list_value in item[key]:
            if isinstance(list_value, str):
                list_str = list_str + self._clean(list_value) + ', '
            elif isinstance(list_value, dict):
                # pass an empty accumulator so the existing prefix is not doubled
                list_str = list_str + self.write_dict(list_value, key, f, '')
        # only strip when the last entry actually left a separator behind
        return list_str[:-2] if list_str.endswith(', ') else list_str

    def find_last_field(self, item):
        """Return the last key of record *item* ('' for an empty record).

        Needed to know when to append the line feed to the total string.
        """
        last_key = ''
        for header in item:
            last_key = header
        return last_key

    def write_headers(self, item, last_header):
        """Build the header row from one record *item*.

        Scalar fields contribute their own key; nested dicts contribute one
        ``parent_child`` column per sub-key; lists contribute one column per
        element, each named after the list's key.  (*last_header* is kept
        for interface compatibility but is not needed here.)
        """
        header_list = ''
        object_list = ''
        for h, v in item.items():
            if isinstance(v, dict):
                for sub_header in v:          # keys of the nested dict
                    object_list += '"%s_%s",' % (h, sub_header)
            elif isinstance(v, list):
                for _ in v:                   # one column per list element
                    object_list += '"%s",' % h
            else:
                header_list += '"%s",' % h
        # drop the single trailing comma, wherever the last column landed
        return (header_list + object_list)[:-1]

    def parse_json_file(self, data, f, page):
        """Convert *data* (a list of record dicts) into one delimited string.

        Writes the header row to the open file handle *f* for the first
        record of page 0, and returns the accumulated data rows
        (newline-separated).  *page* is the zero-based API page number.
        """
        full_str = ''
        last_header = ''
        for item in data:
            try:
                # progress indicator for the user (overwrites the same line)
                sys.stdout.write("\rPage %i, record %s of %i records"
                                 % (page + 1, str(item['id']), len(data)))
                sys.stdout.flush()
            except (TypeError, KeyError):
                sys.stdout.write("\rprogress is progressing        ")
                sys.stdout.flush()
            # when the page holds only one record, data itself is the record
            if isinstance(item, str):
                item = data
            dict_str = ''
            list_str = ''
            item_str = ''
            if last_header == '' and page == 0:
                # determine the last header, then emit the header row once
                last_header = self.find_last_field(item)
                f.write(self.write_headers(item, last_header) + "\n")
            for key, value in item.items():
                if isinstance(value, dict):
                    # empty accumulator: write_dict returns the full fragment
                    dict_str += self.write_dict(value, key, f, '')
                elif isinstance(value, list):
                    list_str += self.write_list(item, key, f, '')
                elif isinstance(value, tuple):
                    item_str += '"%s",' % value[1]
                elif isinstance(value, bool) or value in ('True', 'False'):
                    # bool must be tested before int (bool subclasses int)
                    item_str += '"%s",' % str(value).lower()
                elif isinstance(value, int):
                    item_str += '"%s",' % value
                elif isinstance(value, str):
                    item_str += '"%s",' % self._clean(value.lower())
                elif value is None:
                    item_str += '"",'
                else:
                    print('not added %s as is type %s' % (value, type(value)))
            full_str += item_str + dict_str + list_str + "\n"
            time.sleep(0.5)  # throttle so the upstream API is not overloaded
        return full_str









share|improve this question















Needed a simple CSV file that had headers and no nesting of data to consume in Tableau. The JSON file I was consuming had nested dictionaries and lists (which sometimes had in themselves nested lists/dictionaries).



The output is a pipe delimited format (because free text fields have comments in that mess things up) with nested fields having both the original dictionary key plus any lower keys as the headers.



# -*- coding: utf-8 -*-
"""
Created on Fri Sep 14 12:22:02 2018

@author: redim_learning
"""
import sys
import time


class parse_json():
    """Flatten already-parsed JSON data (dicts/lists) into delimited text rows.

    The output feeds Tableau: one header row plus one row per record, with
    nested dictionaries expanded into ``parent_child`` columns.

    NOTE(review): the scraped original had lost its indentation and every
    backslash (newline escapes appeared as a bare ``n``); both are restored
    here.  It also called sibling methods without ``self.`` and referenced
    undefined names (``item[key]``, ``dict_name``) inside helpers — fixed.
    """

    @staticmethod
    def _clean(text):
        """Return *text* with newlines/CRs removed and '"' turned into "'".

        Free-text fields must not be able to break a row or a quoted field.
        """
        return text.replace('\n', '').replace('\r', '').replace('"', "'")

    def write_dict(self, sub_dict, dict_name, f, str_dict):
        """Append the values of nested dict *sub_dict* to *str_dict*.

        The dictionary's own name is written first, then each value in turn;
        dictionaries nested deeper are expanded recursively.  Returns the
        augmented string (the partial string if *sub_dict* is not a mapping).
        """
        str_dict = str_dict + ('"%s"' % str(dict_name))
        try:
            for second_key, second_value in sub_dict.items():
                if isinstance(second_value, dict):
                    # dictionary within dictionary: recurse and keep the result
                    str_dict = self.write_dict(second_value, second_key, f, str_dict)
                    continue
                str_value = str(second_value)
                if '\n' in str_value or '\r' in str_value or '"' in str_value:
                    str_value = self._clean(str_value)
                str_dict = str_dict + (', "%s"' % str_value)
            return str_dict
        except AttributeError:
            # sub_dict was not a mapping; report and return what we have so
            # callers concatenating the result don't hit a None.
            print('dict write out did not work\n', str_dict)
            print('sub_dict is ', sub_dict)
            return str_dict

    def write_list(self, item, key, f, list_str):
        """Append the values of the list stored at ``item[key]`` to *list_str*.

        String entries are cleaned and comma-separated; dict entries are
        expanded via :meth:`write_dict` under this list's key name.  A
        trailing ``', '`` separator, if present, is stripped.
        """
        for list_value in item[key]:
            if isinstance(list_value, str):
                list_str = list_str + self._clean(list_value) + ', '
            elif isinstance(list_value, dict):
                # pass an empty accumulator so the existing prefix is not doubled
                list_str = list_str + self.write_dict(list_value, key, f, '')
        # only strip when the last entry actually left a separator behind
        return list_str[:-2] if list_str.endswith(', ') else list_str

    def find_last_field(self, item):
        """Return the last key of record *item* ('' for an empty record).

        Needed to know when to append the line feed to the total string.
        """
        last_key = ''
        for header in item:
            last_key = header
        return last_key

    def write_headers(self, item, last_header):
        """Build the header row from one record *item*.

        Scalar fields contribute their own key; nested dicts contribute one
        ``parent_child`` column per sub-key; lists contribute one column per
        element, each named after the list's key.  (*last_header* is kept
        for interface compatibility but is not needed here.)
        """
        header_list = ''
        object_list = ''
        for h, v in item.items():
            if isinstance(v, dict):
                for sub_header in v:          # keys of the nested dict
                    object_list += '"%s_%s",' % (h, sub_header)
            elif isinstance(v, list):
                for _ in v:                   # one column per list element
                    object_list += '"%s",' % h
            else:
                header_list += '"%s",' % h
        # drop the single trailing comma, wherever the last column landed
        return (header_list + object_list)[:-1]

    def parse_json_file(self, data, f, page):
        """Convert *data* (a list of record dicts) into one delimited string.

        Writes the header row to the open file handle *f* for the first
        record of page 0, and returns the accumulated data rows
        (newline-separated).  *page* is the zero-based API page number.
        """
        full_str = ''
        last_header = ''
        for item in data:
            try:
                # progress indicator for the user (overwrites the same line)
                sys.stdout.write("\rPage %i, record %s of %i records"
                                 % (page + 1, str(item['id']), len(data)))
                sys.stdout.flush()
            except (TypeError, KeyError):
                sys.stdout.write("\rprogress is progressing        ")
                sys.stdout.flush()
            # when the page holds only one record, data itself is the record
            if isinstance(item, str):
                item = data
            dict_str = ''
            list_str = ''
            item_str = ''
            if last_header == '' and page == 0:
                # determine the last header, then emit the header row once
                last_header = self.find_last_field(item)
                f.write(self.write_headers(item, last_header) + "\n")
            for key, value in item.items():
                if isinstance(value, dict):
                    # empty accumulator: write_dict returns the full fragment
                    dict_str += self.write_dict(value, key, f, '')
                elif isinstance(value, list):
                    list_str += self.write_list(item, key, f, '')
                elif isinstance(value, tuple):
                    item_str += '"%s",' % value[1]
                elif isinstance(value, bool) or value in ('True', 'False'):
                    # bool must be tested before int (bool subclasses int)
                    item_str += '"%s",' % str(value).lower()
                elif isinstance(value, int):
                    item_str += '"%s",' % value
                elif isinstance(value, str):
                    item_str += '"%s",' % self._clean(value.lower())
                elif value is None:
                    item_str += '"",'
                else:
                    print('not added %s as is type %s' % (value, type(value)))
            full_str += item_str + dict_str + list_str + "\n"
            time.sleep(0.5)  # throttle so the upstream API is not overloaded
        return full_str






python python-3.x strings parsing json






share|improve this question















share|improve this question













share|improve this question




share|improve this question








edited Sep 15 at 15:35









Jamal

30.2k11115226




30.2k11115226










asked Sep 14 at 12:53









REdim.Learning

1266




1266





bumped to the homepage by Community 9 hours ago


This question has answers that may be good or bad; the system has marked it active so that they can be reviewed.







bumped to the homepage by Community 9 hours ago


This question has answers that may be good or bad; the system has marked it active so that they can be reviewed.










  • 3




    perhaps add an example input file and how this code is supposed to be called .
    – Maarten Fabré
    Sep 14 at 13:20














  • 3




    perhaps add an example input file and how this code is supposed to be called .
    – Maarten Fabré
    Sep 14 at 13:20








3




3




perhaps add an example input file and how this code is supposed to be called .
– Maarten Fabré
Sep 14 at 13:20




perhaps add an example input file and how this code is supposed to be called .
– Maarten Fabré
Sep 14 at 13:20










1 Answer
1






active

oldest

votes

















up vote
0
down vote













You can use the csvkit package from Pypi to accomplish that. Here's one of the tools from csvkit to convert json to csv in python: https://csvkit.readthedocs.io/en/1.0.2/scripts/in2csv.html



Edit:



So, making the answer a bit more descriptive, a messy JSON file like this one:



[
{
"a1":"1",
"a2":"2",
"aa":[
"a",
"a"
],
"bb":[
{
"b1":1,
"b2":2
},
{
"b1":1,
"b2":2
}
]
},
{
"a1":"1",
"a2":"2",
"aa":[
"a",
"a"
],
"bb":[
{
"b1":1,
"b2":2
},
{
"b1":1,
"b2":2
}
]
}
]


Would become a very simple csv file like this one:



a1,a2,aa/0,aa/1,bb/0/b1,bb/0/b2,bb/1/b1,bb/1/b2
1,2,a,a,1,2,1,2
1,2,a,a,1,2,1,2


by just running this command:



pip install csvkit
in2csv -f json < file.json > file.csv





share|improve this answer























  • Welcome to Code Review! You have presented an alternative solution, but haven't reviewed the code. Please explain your reasoning (how your solution works and why it is better than the original) so that the author and other readers can learn from your thought process.
    – Dannnno
    Sep 14 at 20:50












  • Links can rot - please include the relevant content in the answer itself and provide the link as added reference material.
    – Dannnno
    Sep 14 at 20:51











Your Answer





StackExchange.ifUsing("editor", function () {
return StackExchange.using("mathjaxEditing", function () {
StackExchange.MarkdownEditor.creationCallbacks.add(function (editor, postfix) {
StackExchange.mathjaxEditing.prepareWmdForMathJax(editor, postfix, [["\$", "\$"]]);
});
});
}, "mathjax-editing");

StackExchange.ifUsing("editor", function () {
StackExchange.using("externalEditor", function () {
StackExchange.using("snippets", function () {
StackExchange.snippets.init();
});
});
}, "code-snippets");

StackExchange.ready(function() {
var channelOptions = {
tags: "".split(" "),
id: "196"
};
initTagRenderer("".split(" "), "".split(" "), channelOptions);

StackExchange.using("externalEditor", function() {
// Have to fire editor after snippets, if snippets enabled
if (StackExchange.settings.snippets.snippetsEnabled) {
StackExchange.using("snippets", function() {
createEditor();
});
}
else {
createEditor();
}
});

function createEditor() {
StackExchange.prepareEditor({
heartbeatType: 'answer',
convertImagesToLinks: false,
noModals: true,
showLowRepImageUploadWarning: true,
reputationToPostImages: null,
bindNavPrevention: true,
postfix: "",
imageUploader: {
brandingHtml: "Powered by u003ca class="icon-imgur-white" href="https://imgur.com/"u003eu003c/au003e",
contentPolicyHtml: "User contributions licensed under u003ca href="https://creativecommons.org/licenses/by-sa/3.0/"u003ecc by-sa 3.0 with attribution requiredu003c/au003e u003ca href="https://stackoverflow.com/legal/content-policy"u003e(content policy)u003c/au003e",
allowUrls: true
},
onDemand: true,
discardSelector: ".discard-answer"
,immediatelyShowMarkdownHelp:true
});


}
});














draft saved

draft discarded


















StackExchange.ready(
function () {
StackExchange.openid.initPostLogin('.new-post-login', 'https%3a%2f%2fcodereview.stackexchange.com%2fquestions%2f203735%2fparse-json-strings-as-tables-using-python%23new-answer', 'question_page');
}
);

Post as a guest















Required, but never shown

























1 Answer
1






active

oldest

votes








1 Answer
1






active

oldest

votes









active

oldest

votes






active

oldest

votes








up vote
0
down vote













You can use the csvkit package from Pypi to accomplish that. Here's one of the tools from csvkit to convert json to csv in python: https://csvkit.readthedocs.io/en/1.0.2/scripts/in2csv.html



Edit:



So making the answer a bit more discriptive, a messy json like this one:



[
{
"a1":"1",
"a2":"2",
"aa":[
"a",
"a"
],
"bb":[
{
"b1":1,
"b2":2
},
{
"b1":1,
"b2":2
}
]
},
{
"a1":"1",
"a2":"2",
"aa":[
"a",
"a"
],
"bb":[
{
"b1":1,
"b2":2
},
{
"b1":1,
"b2":2
}
]
}
]


Would become a very simple csv file like this one:



a1,a2,aa/0,aa/1,bb/0/b1,bb/0/b2,bb/1/b1,bb/1/b2
1,2,a,a,1,2,1,2
1,2,a,a,1,2,1,2


by just running this command:



pip install csvkit
in2csv -f json < file.json > file.csv





share|improve this answer























  • Welcome to Code Review! You have presented an alternative solution, but haven't reviewed the code. Please explain your reasoning (how your solution works and why it is better than the original) so that the author and other readers can learn from your thought process.
    – Dannnno
    Sep 14 at 20:50












  • Links can rot - please include the relevant content in the answer itself and provide the link as added reference material.
    – Dannnno
    Sep 14 at 20:51















up vote
0
down vote













You can use the csvkit package from Pypi to accomplish that. Here's one of the tools from csvkit to convert json to csv in python: https://csvkit.readthedocs.io/en/1.0.2/scripts/in2csv.html



Edit:



So making the answer a bit more discriptive, a messy json like this one:



[
{
"a1":"1",
"a2":"2",
"aa":[
"a",
"a"
],
"bb":[
{
"b1":1,
"b2":2
},
{
"b1":1,
"b2":2
}
]
},
{
"a1":"1",
"a2":"2",
"aa":[
"a",
"a"
],
"bb":[
{
"b1":1,
"b2":2
},
{
"b1":1,
"b2":2
}
]
}
]


Would become a very simple csv file like this one:



a1,a2,aa/0,aa/1,bb/0/b1,bb/0/b2,bb/1/b1,bb/1/b2
1,2,a,a,1,2,1,2
1,2,a,a,1,2,1,2


by just running this command:



pip install csvkit
in2csv -f json < file.json > file.csv





share|improve this answer























  • Welcome to Code Review! You have presented an alternative solution, but haven't reviewed the code. Please explain your reasoning (how your solution works and why it is better than the original) so that the author and other readers can learn from your thought process.
    – Dannnno
    Sep 14 at 20:50












  • Links can rot - please include the relevant content in the answer itself and provide the link as added reference material.
    – Dannnno
    Sep 14 at 20:51













up vote
0
down vote










up vote
0
down vote









You can use the csvkit package from Pypi to accomplish that. Here's one of the tools from csvkit to convert json to csv in python: https://csvkit.readthedocs.io/en/1.0.2/scripts/in2csv.html



Edit:



So making the answer a bit more discriptive, a messy json like this one:



[
{
"a1":"1",
"a2":"2",
"aa":[
"a",
"a"
],
"bb":[
{
"b1":1,
"b2":2
},
{
"b1":1,
"b2":2
}
]
},
{
"a1":"1",
"a2":"2",
"aa":[
"a",
"a"
],
"bb":[
{
"b1":1,
"b2":2
},
{
"b1":1,
"b2":2
}
]
}
]


Would become a very simple csv file like this one:



a1,a2,aa/0,aa/1,bb/0/b1,bb/0/b2,bb/1/b1,bb/1/b2
1,2,a,a,1,2,1,2
1,2,a,a,1,2,1,2


by just running this command:



pip install csvkit
in2csv -f json < file.json > file.csv





share|improve this answer














You can use the csvkit package from Pypi to accomplish that. Here's one of the tools from csvkit to convert json to csv in python: https://csvkit.readthedocs.io/en/1.0.2/scripts/in2csv.html



Edit:



So making the answer a bit more discriptive, a messy json like this one:



[
{
"a1":"1",
"a2":"2",
"aa":[
"a",
"a"
],
"bb":[
{
"b1":1,
"b2":2
},
{
"b1":1,
"b2":2
}
]
},
{
"a1":"1",
"a2":"2",
"aa":[
"a",
"a"
],
"bb":[
{
"b1":1,
"b2":2
},
{
"b1":1,
"b2":2
}
]
}
]


Would become a very simple csv file like this one:



a1,a2,aa/0,aa/1,bb/0/b1,bb/0/b2,bb/1/b1,bb/1/b2
1,2,a,a,1,2,1,2
1,2,a,a,1,2,1,2


by just running this command:



pip install csvkit
in2csv -f json < file.json > file.csv






share|improve this answer














share|improve this answer



share|improve this answer








edited Sep 15 at 0:00

























answered Sep 14 at 18:55









Danilo Souza Morães

1111




1111












  • Welcome to Code Review! You have presented an alternative solution, but haven't reviewed the code. Please explain your reasoning (how your solution works and why it is better than the original) so that the author and other readers can learn from your thought process.
    – Dannnno
    Sep 14 at 20:50












  • Links can rot - please include the relevant content in the answer itself and provide the link as added reference material.
    – Dannnno
    Sep 14 at 20:51


















  • Welcome to Code Review! You have presented an alternative solution, but haven't reviewed the code. Please explain your reasoning (how your solution works and why it is better than the original) so that the author and other readers can learn from your thought process.
    – Dannnno
    Sep 14 at 20:50












  • Links can rot - please include the relevant content in the answer itself and provide the link as added reference material.
    – Dannnno
    Sep 14 at 20:51
















Welcome to Code Review! You have presented an alternative solution, but haven't reviewed the code. Please explain your reasoning (how your solution works and why it is better than the original) so that the author and other readers can learn from your thought process.
– Dannnno
Sep 14 at 20:50






Welcome to Code Review! You have presented an alternative solution, but haven't reviewed the code. Please explain your reasoning (how your solution works and why it is better than the original) so that the author and other readers can learn from your thought process.
– Dannnno
Sep 14 at 20:50














Links can rot - please include the relevant content in the answer itself and provide the link as added reference material.
– Dannnno
Sep 14 at 20:51




Links can rot - please include the relevant content in the answer itself and provide the link as added reference material.
– Dannnno
Sep 14 at 20:51


















draft saved

draft discarded




















































Thanks for contributing an answer to Code Review Stack Exchange!


  • Please be sure to answer the question. Provide details and share your research!

But avoid



  • Asking for help, clarification, or responding to other answers.

  • Making statements based on opinion; back them up with references or personal experience.


Use MathJax to format equations. MathJax reference.


To learn more, see our tips on writing great answers.





Some of your past answers have not been well-received, and you're in danger of being blocked from answering.


Please pay close attention to the following guidance:


  • Please be sure to answer the question. Provide details and share your research!

But avoid



  • Asking for help, clarification, or responding to other answers.

  • Making statements based on opinion; back them up with references or personal experience.


To learn more, see our tips on writing great answers.




draft saved


draft discarded














StackExchange.ready(
function () {
StackExchange.openid.initPostLogin('.new-post-login', 'https%3a%2f%2fcodereview.stackexchange.com%2fquestions%2f203735%2fparse-json-strings-as-tables-using-python%23new-answer', 'question_page');
}
);

Post as a guest















Required, but never shown





















































Required, but never shown














Required, but never shown












Required, but never shown







Required, but never shown

































Required, but never shown














Required, but never shown












Required, but never shown







Required, but never shown







Popular posts from this blog

Quarter-circle Tiles

build a pushdown automaton that recognizes the reverse language of a given pushdown automaton?

Mont Emei