Learning the most efficient method for converting a tree structure to csv in Python

I have a tree with the following structure:

# Three-level tree: region -> country -> city -> population count.
my_hash_pop = {
    "Europe": {
        "France": {
            "Paris": 2220445,
            "Lille": 225789,
            "Lyon": 506615,
        },
        "Germany": {
            "Berlin": 3520031,
            "Munchen": 1544041,
            "Dresden": 540000,
        },
    },
    "South America": {
        "Brasil": {
            "Sao Paulo": 11895893,
            "Rio de Janeiro": 6093472,
        },
        "Argentina": {
            "Salta": 535303,
            "Buenos Aires": 3090900,
        },
    },
}

I would like to convert this structure to CSV using python:

Europe;Germany;Berlin;3520031
Europe;Germany;Munchen;1544041
Europe;Germany;Dresden;540000
Europe;France;Paris;2220445
Europe;France;Lyon;506615
Europe;France;Lille;225789
South America;Argentina;Buenos Aires;3090900
South America;Argentina;Salta;535303
South America;Brasil;Sao Paulo;11895893
South America;Brasil;Rio de Janeiro;6093472

Since my tree contains a large number of leaves in real life (not in this example, obviously), the script conversion that I use takes a lot of time. I am trying to find a more effective way to do the conversion. Here is what I tried:

First method: concatenate a line on each sheet:

### METHOD 1 ###
# Build the whole CSV payload by repeated string concatenation, then write it
# in a single call. NOTE: += on a str in a loop can degrade to O(n^2); the
# later methods avoid that.

start_1 = time.time()

data_to_write = ""

# Walk the region -> country -> city tree with items() so each level is
# looked up once instead of re-indexing my_hash_pop[region][country][city].
for region, countries in my_hash_pop.items():
    for country, cities in countries.items():
        for city, population in cities.items():
            data_to_write += region + ";" + country + ";" + city + ";" + str(population) + "\n"

filename = "my_test_1.csv"
with open(filename, 'w') as outfile:
    # the with-statement closes the file; an explicit close() is redundant
    outfile.write(data_to_write)

end_1 = time.time()
print("---> METHOD 1 : Write all took " + str(end_1 - start_1) + "s")

Second method: combine string with "breakpoints"

### METHOD 2 ###
# Same concatenation idea, but accumulate per-country and per-region buffers
# ("breakpoints") so each += operates on a shorter string.

start_2 = time.time()

data_to_write = ""

for region, countries in my_hash_pop.items():
    region_to_write = ""

    for country, cities in countries.items():
        country_to_write = ""

        for city, population in cities.items():
            country_to_write += region + ";" + country + ";" + city + ";" + str(population) + "\n"

        region_to_write += country_to_write

    data_to_write += region_to_write

filename = "my_test_2.csv"
with open(filename, 'w') as outfile:
    # the with-statement closes the file; an explicit close() is redundant
    outfile.write(data_to_write)

end_2 = time.time()
print("---> METHOD 2 : Write all took " + str(end_2 - start_2) + "s")

Third method: with a Writer object

### METHOD 3 ###
# Let the csv module join and quote each row.

import csv

start_3 = time.time()

# The delimiter must be a 1-character str: b";" raises TypeError on Python 3.
# newline='' is the documented way to open a file for the csv module in text
# mode (otherwise the "\r\n" it writes gets translated again on Windows).
with open("my_test_3.csv", 'w', newline='') as outfile:
    del_char = ";"
    w = csv.writer(outfile, delimiter=del_char)

    for region, countries in my_hash_pop.items():
        for country, cities in countries.items():
            for city, population in cities.items():
                # writerow stringifies values itself; no str() needed
                w.writerow([region, country, city, population])

end_3 = time.time()
print("---> METHOD 3 : Write all took " + str(end_3 - start_3) + "s")

Comparing runtimes on my real tree, method 1 is clearly the slowest. Methods 2 and 3 are noticeably faster, with the csv writer (method 3) performing best in my runs.

My questions are:

  • Which of these methods is the most efficient, and why?
  • Is there an even better way to do this conversion?

Note:

  • The output files of methods 1 and 2 are identical, but the file produced by method 3 has a different size. Why is that?

Thanks!

+4
4

Answer.

Instead of re-indexing the dict at every level, iterate over it with items():

# Iterate with items() so each level of the tree is looked up exactly once,
# and let csv.writer stringify the population values.
# newline='' keeps the csv module's own "\r\n" terminator from being
# translated again by the text layer (which would yield "\r\r\n" on Windows).
with open("my_test_3.csv", 'w', newline='') as outfile:
    del_char = ";"
    w = csv.writer(outfile, delimiter=del_char)

    for region, countries in my_hash_pop.items():
        for country, cities in countries.items():
            for city, value in cities.items():
                w.writerow([region, country, city, value])

The size difference between methods 2 and 3 comes from the line endings: method 2 writes "\n" into 'my_test_2.csv', while the csv writer terminates rows with "\r\n" in 'my_test_3.csv'. That makes 'my_test_3.csv' one byte per row larger than 'my_test_2.csv'.

+1
start_1 = time.time()
filename = "my_test_4.csv"
with open(filename, 'w') as outfile:
    # A list comprehension used only for its side effects builds a throwaway
    # list of write() return values; feed writelines a lazy generator instead.
    outfile.writelines(
        "%s;%s;%s;%s\n" % (region, country, city, population)
        for region, countries in my_hash_pop.items()
        for country, cities in countries.items()
        for city, population in cities.items()
    )
end_1 = time.time()
print("---> METHOD 1 : Write all took " + str(end_1 - start_1) + "s")
+1

It was suggested to use pandas as follows:

import pandas as pd

# Flatten the region -> country -> city tree into (region, country, city,
# population) rows, using items() to avoid re-indexing at every level.
df = pd.DataFrame([
    (region, country, city, population)
    for region, countries in my_hash_pop.items()
    for country, cities in countries.items()
    for city, population in cities.items()
])

# to_csv writes straight to the file path; the original
# outfile.write(df.to_csv(...))) had an unbalanced parenthesis (SyntaxError).
df.to_csv("my_test_4.csv", sep=';', header=False, index=False)

I did not compare runtime, and perhaps using pandas is not an option for you, so this is just a suggestion.

0
source

pandas is very effective when it comes to processing large data sets. Below is a way to import a dict of dicts into pandas, flattening it with json_normalize, after which you can manipulate it — e.g. write it to csv, etc.

Let me know how it performs with your data.

Source

import csv

import pandas as pd

# json_normalize flattens the nested dict into a single row whose column
# names are the dotted paths, e.g. "Europe.France.Paris".
# (pandas.io.json.json_normalize is deprecated; use pd.json_normalize.)
df = pd.json_normalize(my_hash_pop)

outfile_name = "temp.csv"  # don't shadow this with the file handle below
del_char = ";"

# Open in text mode with newline='': csv.writer emits str rows, so a binary
# ('wb+') handle would raise TypeError on Python 3.
with open(outfile_name, 'w', newline='') as outfile:
    w = csv.writer(outfile, delimiter=del_char, quoting=csv.QUOTE_MINIMAL)
    for col in df.columns:
        # "Europe.France.Paris" -> ["Europe", "France", "Paris", population].
        # Splitting the dotted path directly avoids the original's fragile
        # format-then-split(";") round-trip, which would break on any name
        # containing the delimiter.
        w.writerow(col.split('.') + [df[col][0]])
0
source

Source: https://habr.com/ru/post/1686136/


All Articles