Here is my latest code, which runs smoothly. Please let me know how I could make it smarter!
# Scrape Danish EU 2015 election results from kmdvalg.dk and write them to a
# semicolon-separated text file encoded as ISO-8859-1 (the site's encoding).
#
# NOTE(review): this is Python 2 code (urllib2, codecs.open). The rewrite stays
# on Python 2 but fixes resource handling and the output format:
#   - the output file and every HTTP response are now closed reliably,
#   - the front page is decoded as ISO-8859-1 like every other page,
#   - rows are built with ';'.join instead of `print >> f, a,";",b,...`,
#     which injected stray spaces around each field and emitted BOTH an
#     explicit '\r\n' and print's own trailing newline.
import codecs
import urllib2
from contextlib import closing

from bs4 import BeautifulSoup

BASE_URL = 'http://www.kmdvalg.dk/'
OUTPUT_FILE = 'eu2015valg.txt'
# Indexes of the front-page <a> tags that point at municipality result pages.
# TODO confirm: this 1:93 window is tied to the current page layout and will
# silently break if kmdvalg.dk reorders its links.
LINK_SLICE = slice(1, 93)
# Positions of the interesting cells inside each result page.
GENERAL_CELLS = (5, 12, 14, 16)          # plain <td> cells (names/areas)
VOTE_CELLS = (0, 1, 2, 3, 6, 8)          # <td class="StemmerNu"> cells


def _fetch_soup(url):
    """Download *url* and return its BeautifulSoup tree (pages are ISO-8859-1).

    `closing` guarantees the HTTP response is closed even if read/parse fails.
    """
    with closing(urllib2.urlopen(url)) as response:
        return BeautifulSoup(response.read().decode('iso-8859-1'))


def _municipality_urls():
    """Return the hrefs of the municipality result links on the front page."""
    front = _fetch_soup(BASE_URL)
    # Keep the same a["href"] access as before so the slice positions match
    # the original behaviour (every <a> on this page carries an href).
    return [a['href'] for a in front.find_all('a')][LINK_SLICE]


def _result_row(url):
    """Extract one semicolon-separated result line from a single result page."""
    soup = _fetch_soup(url)
    tds = soup.find_all('td')
    votes = soup.find_all('td', class_='StemmerNu')
    cells = [tds[i] for i in GENERAL_CELLS] + [votes[i] for i in VOTE_CELLS]
    # cell.string can be None for nested markup; write an empty field then.
    return ';'.join((cell.string or u'') for cell in cells)


def main():
    # `with` closes the file even when a download or parse raises mid-run.
    with codecs.open(OUTPUT_FILE, 'w', encoding='iso-8859-1') as out:
        for url in _municipality_urls():
            out.write(_result_row(url) + u'\r\n')


if __name__ == '__main__':
    main()
source share