Most of the regular expression service resources are compiled by regular expressions. You must move the regex compilation from the loop.
This should give you a significant improvement:
pattern1 = re.compile('[^0-9a-zA-Z]+')
pattern2 = re.compile('\s+')
for k in range(len(titles)):
#remove all non-alphanumeric characters
s = re.sub(pattern1,' ',titles[k])
#remove extra white space
s = re.sub(pattern2,' ', s).strip()
wordlist.txt :
import re
def noncompiled():
with open("wordlist.txt",'r') as f:
titles = f.readlines()
titles = ["".join([title,nonalpha]) for title in titles for nonalpha in "!@#$%"]
for k in range(len(titles)):
s = re.sub('[^0-9a-zA-Z]+', ' ',titles[k])
s = re.sub( '\s+', ' ', s).strip()
def compiled():
with open("wordlist.txt",'r') as f:
titles = f.readlines()
titles = ["".join([title,nonalpha]) for title in titles for nonalpha in "!@#$%"]
pattern1=re.compile('[^0-9a-zA-Z]+')
pattern2 = re.compile( '\s+')
for k in range(len(titles)):
s = pattern1.sub('',titles[k])
s = pattern2.sub('', s)
In [2]: %timeit noncompiled()
1 loops, best of 3: 292 ms per loop
In [3]: %timeit compiled()
10 loops, best of 3: 176 ms per loop
" " , , @zsquare, , , , , .
def with_excludes():
with open("wordlist.txt",'r') as f:
titles = f.readlines()
titles = ["".join([title,nonalpha]) for title in titles for nonalpha in "!@#$%"]
pattern1=re.compile('[^0-9a-zA-Z]+')
pattern2 = re.compile( '\s+')
excludes = ["shit","poo","ass","love","boo","ch"]
excludes_regex = re.compile('|'.join(excludes))
for k in range(len(titles)):
s = pattern1.sub('',titles[k])
s = pattern2.sub('', s)
s = pattern2.sub('', s)
In [2]: %timeit with_excludes()
1 loops, best of 3: 251 ms per loop
, :
def master():
with open("wordlist.txt",'r') as f:
titles = f.readlines()
titles = ["".join([title,nonalpha]) for title in titles for nonalpha in "!@#$%"]
excludes = ["shit","poo","ass","love","boo","ch"]
nonalpha='[^0-9a-zA-Z]+'
whitespace='\s+'
badwords = '|'.join(excludes)
master_regex=re.compile('|'.join([nonalpha,whitespace,badwords]))
for k in range(len(titles)):
s = master_regex.sub('',titles[k])
In [2]: %timeit master()
10 loops, best of 3: 148 ms per loop
, python:
result = [master_regex.sub('',item) for item in titles]
In [4]: %timeit list_comp()
10 loops, best of 3: 139 ms per loop
. :
def baseline():
with open("wordlist.txt",'r') as f:
titles = f.readlines()
titles = ["".join([title,nonalpha]) for title in titles for nonalpha in "!@#$%"]
In [2]: %timeit baseline()
10 loops, best of 3: 24.8 ms per loop