Looking at the dbf code, I see no way to pass the dialect, so you can convert your files as follows:
import csv

# Convert a semicolon-delimited CSV into a comma-delimited one so it can be
# fed to dbf.from_csv, which hardcodes the default (comma-delimited) dialect.
# 'with' guarantees both files are closed (the original leaked both handles),
# and newline='' is required by the csv module so quoted fields containing
# newlines round-trip correctly and no blank rows appear on Windows.
with open('input.csv', newline='') as src, \
     open('output.csv', 'w', newline='') as dst:
    writer = csv.writer(dst)
    for row in csv.reader(src, delimiter=';'):
        writer.writerow(row)
Note: this will correctly handle fields that already contain a comma as part of their contents — the csv writer quotes them in the output.
Edit: if you want to patch dbf.from_csv so it accepts a delimiter parameter (avoiding the need to convert all your csv files first), this should work:
--- dbf.py.orig	2012-01-23 12:48:32.112101218 +0100
+++ dbf.py	2012-01-23 12:49:59.468534408 +0100
@@ -4502,13 +4502,14 @@
         print str(table[0])
     finally:
         table.close()
-def from_csv(csvfile, to_disk=False, filename=None, field_names=None, extra_fields=None, dbf_type='db3', memo_size=64, min_field_size=1):
+def from_csv(csvfile, to_disk=False, filename=None, field_names=None, extra_fields=None, dbf_type='db3', memo_size=64, min_field_size=1,
+        delimiter=','):
     """creates a Character table from a csv file
     to_disk will create a table with the same name
     filename will be used if provided
     field_names default to f0, f1, f2, etc, unless specified (list)
     extra_fields can be used to add additional fields -- should be normal field specifiers (list)"""
-    reader = csv.reader(open(csvfile))
+    reader = csv.reader(open(csvfile), delimiter=delimiter)
     if field_names:
         field_names = ['%s M' % fn for fn in field_names]
     else:
source share