removing the zonefile-parser lib (it does not do all we need) and dropping the IANA zone file (too complicated to parse) to go with the IANA list of TLDs (easier to parse and then test individually)
This commit is contained in:
parent f41b765f8a
commit 62e2138d5c
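The diff below also starts importing dns.resolver without using it yet; the message above says the plan is to test each TLD from the list individually. A minimal sketch of such a per-TLD check (the test_tld helper and the NS query are assumptions, not code from this commit; assumes dnspython >= 2.0):

import dns.exception
import dns.resolver


def test_tld(tld):
    """Hypothetical helper: True if the TLD answers an NS query, False otherwise."""
    try:
        # Trailing dot makes the name absolute, e.g. "com." or "org."
        dns.resolver.resolve(f"{tld.lower()}.", "NS")
        return True
    except dns.exception.DNSException:
        # NXDOMAIN, timeouts, empty answers, ... all count as "not working"
        return False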
@@ -12,4 +12,3 @@ pycodestyle==2.11.1
 pyflakes==3.2.0
 pyproject_hooks==1.0.0
 sqlparse==0.4.4
-zonefile-parser==0.1.14
@@ -52,8 +52,6 @@ sqlparse==0.4.4
 # django
 wheel==0.42.0
 # via pip-tools
-zonefile-parser==0.1.14
-# via -r requirements.in
 
 # The following packages are considered to be unsafe in a requirements file:
 # pip
@@ -3,54 +3,31 @@ This file is dumping the IANA root zone and sorting it in the database
 Link to IANA website : https://www.internic.net/domain/root.zone
 """
 import urllib.request
-import zonefile_parser
 import json
-from tldtester.models import zonecontent
+# from tldtester.models import zonecontent, TLD
+import dns.resolver
 
 
 def downloader():
     """
-    Downloads the data. Returns None if not working, Returns data if working
+    Downloads the data. Returns None if not working, returns a list of TLD's if working
     """
-    url = urllib.request.urlopen("https://www.internic.net/domain/root.zone")
+    url = urllib.request.urlopen("https://data.iana.org/TLD/tlds-alpha-by-domain.txt")
     if url.getcode() == 200:
         raw = url.read()
-        raw = raw.decode("utf-8")
+        raw = raw.decode("utf-8").splitlines()
+        # File has a timestamp as first line. This will take it out so we only keep the TLD's
+        raw.pop(0)
     else:
         raw = None
     return raw
 
 
-def sorter(rawdata):
-    """
-    This file removes the tabs and line breaks from rawdata
-    returns as a list with dictionary in it
-    :returns: a list of dictionaries
-    """
-    encodeddata = zonefile_parser.parse(rawdata)
-    properdata = []
-    for line in encodeddata:
-        properdata.append(dict(json.loads(str(line).replace("'", '"'))))
-    return properdata
-
-
-def dbwriter(data):
-    """
-    Writes everything in the Zone database
-    """
-    for line in data:
-        DB = zonecontent()
-        DB.rtype = line["rtype"]
-        DB.name = line["name"]
-        DB.rclass = line["rclass"]
-        DB.data = line["rdata"]
-        DB.ttl = int(line["ttl"])
-        DB.save()
-
-
 def main():
     try:
-        dbwriter(sorter(downloader()))
+        data = sorter(downloader())
+        dbwriter(data)
+        print(data)
     except Exception as e:
         print(e)
 
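With these changes downloader() is expected to hand back either None (non-200 response) or a plain list of upper-case TLD strings with the timestamp header line stripped. A rough usage sketch, illustrative only and not part of the commit:

tlds = downloader()
if tlds is None:
    print("download failed")
else:
    # One upper-case TLD per entry; the header line was removed by raw.pop(0)
    print(f"fetched {len(tlds)} TLDs")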