This article collects typical usage examples of the Analyser_Merge class from the Python module Analyser_Merge. If you are wondering what Analyser_Merge does, how to use it, or what real-world usage looks like, the selected class code examples below may help.
The following shows 15 code examples of the Analyser_Merge class, sorted by popularity by default.
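Before the individual examples, here is a minimal sketch of the pattern the newer-style examples (such as 2, 5, 7 and 10) follow: the subclass declares its issue classes, then calls Analyser_Merge.__init__ with a Source (where the open-data file comes from), a Load (which columns hold the coordinates and how to read them) and a Mapping (which OSM objects to compare against and which tags to propose). The import line, the T_ translation helper and every URL, table and column name below are placeholders assumed for illustration, not taken from a real analyser.

# A minimal sketch only; the import path and all dataset details are assumptions.
from Analyser_Merge import Analyser_Merge, Source, CSV, Load, Mapping, Select, Generate

class Analyser_Merge_Example(Analyser_Merge):
    def __init__(self, config, logger = None):
        # Issue class reported when a source point has no OSM counterpart.
        # T_ is the translation wrapper used throughout the examples below.
        self.missing_official = {"item": "8000", "class": 1, "level": 3, "tag": ["merge"], "desc": T_(u"Example POI not integrated")}
        Analyser_Merge.__init__(self, config, logger,
            Source(
                url = "http://example.org/opendata",        # landing page of the dataset
                name = u"Example dataset",
                file = "example.csv.bz2",
                csv = CSV(separator = ";")),
            Load("lon", "lat", table = "example"),          # coordinate columns of the source file
            Mapping(
                select = Select(                            # OSM objects to compare with
                    types = ["nodes"],
                    tags = {"amenity": "example"}),
                osmRef = "ref:example",                     # tag used to match source rows to OSM objects
                conflationDistance = 100,                   # metres between a source point and an OSM object
                generate = Generate(                        # tags proposed for integration
                    static = {"amenity": "example"},
                    mapping = {"name": "NAME_COLUMN"})))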
Example 1: __init__
def __init__(self, config, logger = None):
self.missing_official = {"item":"8160", "class": 1, "level": 3, "tag": ["merge", "public equipment", "cycle"], "desc": T_(u"BM bicycle rental not integrated") }
self.possible_merge = {"item":"8161", "class": 3, "level": 3, "tag": ["merge", "public equipment", "cycle"], "desc": T_(u"BM bicycle rental integration suggestion") }
self.update_official = {"item":"8162", "class": 4, "level": 3, "tag": ["merge", "public equipment", "cycle"], "desc": T_(u"BM bicycle update") }
Analyser_Merge.__init__(self, config, logger,
"http://data.bordeaux-metropole.fr/data.php?themes=10",
u"Station VCUB",
SHP(Source(attribution = u"Bordeaux Métropole", millesime = "08/2016",
fileUrl = "http://data.bordeaux-metropole.fr/files.php?gid=43&format=2", zip = "TB_STVEL_P.shp", encoding = "ISO-8859-15")),
Load(("ST_X(geom)",), ("ST_Y(geom)",), srid = 2154),
Mapping(
select = Select(
types = ["nodes"],
tags = {"amenity": "bicycle_rental"}),
osmRef = "ref",
conflationDistance = 100,
generate = Generate(
static1 = {
"amenity": "bicycle_rental",
"network": "VCUB"},
static2 = {"source": self.source},
mapping1 = {
"name": "NOM",
"ref": "NUMSTAT",
"capacity": "NBSUPPOR",
"vending": lambda res: "subscription" if res["TERMBANC"] == "OUI" else None,
"description": lambda res: "VCUB+" if res["TARIF"] == "VLS PLUS" else None} )))
Example 2: __init__
def __init__(self, config, logger = None):
self.missing_official = {"item":"8040", "class": 61, "level": 3, "tag": ["merge", "public transport"], "desc": T_(u"CG71 stop not integrated") }
self.possible_merge = {"item":"8041", "class": 63, "level": 3, "tag": ["merge", "public transport"], "desc": T_(u"CG71 stop, integration suggestion") }
Analyser_Merge.__init__(self, config, logger,
Source(
url = "http://www.opendata71.fr/thematiques/transport/localisation-des-points-d-arret-de-bus",
name = u"Localisation des arrêts de bus et car - CG71",
file = "public_transport_FR_cg71.csv.bz2",
encoding = "ISO-8859-15",
csv = CSV(separator = ";")),
Load("latitude", "longitude", table = "bus_cg71",
xFunction = self.float_comma,
yFunction = self.float_comma),
Mapping(
select = Select(
types = ["nodes", "ways"],
tags = {"highway": "bus_stop"}),
osmRef = "ref:FR:CG71",
conflationDistance = 100,
generate = Generate(
static = {
"source": u"Conseil général de la Saône-et-Loire - Direction des Transports et de l'intermodalité - 03/2013",
"highway": "bus_stop",
"public_transport": "stop_position",
"bus": "yes"},
mapping = {
"ref:FR:CG71": "cod_arret",
"name": lambda res: res['nom'].split(' - ')[1].strip() if ' - ' in res['nom'] else res['nom'].strip()},
text = lambda tags, fields: {"en": u"CG71 stop of %s" % fields["nom"].strip(), "fr": u"Arrêt CG71 de %s" % fields["nom"].strip()} )))
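Example 2 converts the coordinate columns through self.float_comma, which is not part of the excerpt. Below is a standalone sketch of what such a helper presumably does, turning a French-style decimal such as "46,85" into a float; the behaviour of the real method is an assumption.

# Hypothetical stand-in for the float_comma helper referenced above.
def float_comma(value):
    """Parse a coordinate using a comma as decimal separator, e.g. "46,85" -> 46.85."""
    if not value:
        return None
    return float(value.replace(",", "."))

assert float_comma("46,85") == 46.85
assert float_comma("") is None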
Example 3: __init__
def __init__(self, config, logger = None):
self.missing_official = {"item":"8120", "class": 1, "level": 3, "tag": ["merge", "recycling"], "desc": T_(u"CUB glass recycling not integrated") }
self.possible_merge = {"item":"8121", "class": 1, "level": 3, "tag": ["merge", "recycling"], "desc": T_(u"CUB glass recycling, integration suggestion") }
Analyser_Merge.__init__(self, config, logger)
self.officialURL = "http://data.lacub.fr/data.php?themes=5"
self.officialName = "Emplacements d'apport volontaire"
self.csv_file = "merge_data/recycling_FR_cub.csv"
self.csv_format = "WITH DELIMITER AS ',' NULL AS '' CSV HEADER"
self.csv_encoding = "ISO-8859-15"
self.csv_select = {
"ident": "%"
}
self.osmTags = {
"amenity": "recycling",
}
self.osmRef = "ref:FR:CUB"
self.osmTypes = ["nodes", "ways"]
self.sourceTable = "cub_recycling_glass"
self.sourceX = "ident_x"
self.sourceY = "ident_y"
self.sourceSRID = "3945"
self.defaultTag = {
"source": "Communauté Urbaine de Bordeaux - 03/2014",
"amenity": "recycling",
"recycling:glass": "yes",
"recycling:glass_bottles": "yes",
"recycling_type": "container",
}
self.defaultTagMapping = {
"ref:FR:CUB": "ident",
}
self.conflationDistance = 100
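Example 3 uses the older attribute-based configuration (also seen in examples 6, 8, 9, 11 and 13): csv_format holds the option clause of a PostgreSQL COPY statement, csv_select filters rows after loading, and sourceX/sourceY/sourceSRID describe the geometry. As a rough illustration of what the csv_format string feeds into, assuming the loader assembles a COPY command around it (the real implementation may differ):

# Illustration only: how the csv_format clause would slot into a COPY command.
table = "cub_recycling_glass"
csv_format = "WITH DELIMITER AS ',' NULL AS '' CSV HEADER"
copy_sql = "COPY %s FROM STDIN %s" % (table, csv_format)
print(copy_sql)  # COPY cub_recycling_glass FROM STDIN WITH DELIMITER AS ',' NULL AS '' CSV HEADER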
Example 4: __init__
def __init__(self, config, classs, desc, wikiTypes, wikiCountry, wikiLang, starts, osmTags, osmTypes, conflationDistance, logger = None):
self.possible_merge = {"item":"8101", "class": classs, "level": 3, "tag": ["merge", "wikipedia"], "desc":desc }
Analyser_Merge.__init__(self, config, logger,
Source(
url = "http://toolserver.org/~kolossos/wp-world/pg-dumps/wp-world/",
name = "Wikipedia-World",
file = "wikipedia_point_fr.csv.bz2",
csv = CSV(csv = False, separator = None, null = None)),
Load(("ST_X(the_geom)",), ("ST_Y(the_geom)",), table = "wikipedia_point_fr",
create = self.create_table,
select = {"lang": wikiLang, "Country": wikiCountry},
where = (lambda res: not res["titel"].startswith("Liste ")) if starts == None else
(lambda res: res["titel"].startswith(starts)) ),
Mapping(
select = Select(
types = osmTypes,
tags = {"name": None}),
osmRef = "wikipedia",
conflationDistance = conflationDistance,
generate = Generate(
mapping = {"wikipedia": lambda fields: fields["lang"]+":"+fields["titel"]},
text = lambda tags, fields: {fields["lang"]: fields["titel"]} )))
if wikiTypes != None:
self.load.select["types"] = wikiTypes # http://en.wikipedia.org/wiki/Wikipedia:GEO#type:T
if isinstance(osmTags, dict):
self.mapping.select.tags.update(osmTags)
else:
for t in osmTags:
t.update(self.mapping.select.tags)  # fold the base selection tags into each alternative tag set
self.mapping.select.tags = osmTags
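Example 4 is a parametrised base constructor: concrete analysers are expected to subclass it and supply the Wikipedia language, country filter, title prefix and OSM selection. A hypothetical subclass could look like the sketch below; the enclosing class name Analyser_Merge_Wikipedia and every parameter value are invented for illustration.

# Hypothetical concrete analyser built on the parametrised __init__ above.
class Analyser_Merge_Wikipedia_FR_Chateau(Analyser_Merge_Wikipedia):
    def __init__(self, config, logger = None):
        Analyser_Merge_Wikipedia.__init__(self, config,
            classs = 1,
            desc = T_(u"Castle Wikipedia article not linked"),
            wikiTypes = ["landmark"],
            wikiCountry = "fr",
            wikiLang = "fr",
            starts = u"Château ",
            osmTags = {"historic": "castle"},
            osmTypes = ["nodes", "ways", "relations"],
            conflationDistance = 500,
            logger = logger)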
Example 5: __init__
def __init__(self, config, logger = None):
self.missing_official = {"item":"8200", "class": 1, "level": 3, "tag": ["merge", "highway"], "desc": T_(u"Gas station not integrated") }
Analyser_Merge.__init__(self, config, logger,
Source(
url = "http://www.prix-carburants.economie.gouv.fr/rubrique/opendata/",
name = u"Prix des carburants en France",
file = "fuel_FR.csv.bz2"),
Load("lon", "lat", table = "fuel_FR"),
Mapping(
select = Select(
types = ["nodes", "ways"],
tags = {"amenity": "fuel"}),
conflationDistance = 300,
generate = Generate(
static = {
"amenity": "fuel",
"source": "Ministère de l'Economie, de l'Industrie et du Numérique - 15/09/2014"},
mapping = {
"fuel:e85": lambda res: "yes" if res["E85"] == "x" else None,
"fuel:lpg": lambda res: "yes" if res["GPLc"] == "x" else None,
"fuel:lpg": lambda res: "yes" if res["GPL"] == "x" else None,
"fuel:e10": lambda res: "yes" if res["E10"] == "x" else None,
"fuel:octane_95": lambda res: "yes" if res["SP95"] == "x" else None,
"fuel:octane_98": lambda res: "yes" if res["SP98"] == "x" else None,
"fuel:diesel": lambda res: "yes" if res["Gazole"] == "x" else None,
"vending_machine": lambda res: "fuel" if res["Automate CB"] == "x" else None,
"opening_hours": lambda res: "24/7" if res["debut"] != "" and res["debut"] == res["fin"] and res["saufjour"] == "" else None,
"toilets": lambda res: "yes" if res["Toilettes publiques"] == "x" else None,
"compressed_air": lambda res: "yes" if res["Station de gonflage"] == "x" else None,
"shop": lambda res: "convenience" if res["Boutique alimentaire"] == "x" else None,
"hgv:lanes": lambda res: "yes" if res["Piste poids lourds"] == "x" else None,
"vending": lambda res: "fuel" if res["Automate CB"] == "x" else None},
text = lambda tags, fields: {"en": u"%s, %s" % (fields["adresse"], fields["ville"])} )))
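Example 5 repeats the same "yes"-if-the-column-holds-an-"x" pattern for every fuel column. The standalone sketch below shows how that repetition could be factored out; it is a refactoring suggestion, not what the original analyser does.

# Build the "yes"/None lambdas from column names instead of repeating them.
def flag(column):
    return lambda res: "yes" if res[column] == "x" else None

fuel_mapping = {
    "fuel:e85": flag("E85"),
    "fuel:e10": flag("E10"),
    "fuel:octane_95": flag("SP95"),
    "fuel:octane_98": flag("SP98"),
    "fuel:diesel": flag("Gazole"),
}

row = {"E85": "x", "E10": "", "SP95": "x", "SP98": "x", "Gazole": "x"}
print({tag: fn(row) for tag, fn in fuel_mapping.items() if fn(row)})
# {'fuel:e85': 'yes', 'fuel:octane_95': 'yes', 'fuel:octane_98': 'yes', 'fuel:diesel': 'yes'}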
Example 6: __init__
def __init__(self, config, classs, logger = None):
self.missing_official = {"item":"8030", "class": classs+1, "level": 3, "tag": ["merge"], "desc": T_(u"School not integrated") }
self.missing_osm = {"item":"7070", "class": classs+2, "level": 3, "tag": ["merge"], "desc": T_(u"School without ref:UAI or invalid") }
self.possible_merge = {"item":"8031", "class": classs+3, "level": 3, "tag": ["merge"], "desc": T_(u"School, integration suggestion") }
Analyser_Merge.__init__(self, config, logger)
self.officialURL = "http://www.data.gouv.fr/donnees/view/G%C3%A9olocalisation-des-%C3%A9tablissements-d%27enseignement-du-premier-degr%C3%A9-et-du-second-degr%C3%A9-du-minist%C3%A8re-d-30378093"
self.officialName = "établissements d'enseignement du premier degré et du second degré"
self.csv_file = "merge_data/MENJVA_etab_geoloc.csv"
self.csv_format = "WITH DELIMITER AS ';' NULL AS 'null' CSV HEADER"
self.csv_encoding = "ISO-8859-15"
self.csv_filter = lambda t: t.replace("; ", ";null").replace(";.", ";null").replace("Ecole", u"École").replace("Saint ", "Saint-").replace("Sainte ", "Sainte-").replace(u"élementaire", u"élémentaire")
self.osmTags = {
"amenity": ["school", "kindergarten"],
}
self.osmRef = "ref:UAI"
self.osmTypes = ["nodes", "ways", "relations"]
self.sourceTable = "School_FR"
self.sourceX = "X"
self.sourceY = "Y"
self.defaultTag = {
"amenity": "school",
"source": "data.gouv.fr:Ministère de l'Éducation nationale, de la Jeunesse et de la Vie associative - 05/2012"
}
self.defaultTagMapping = {
"ref:UAI": "numero_uai",
"school:FR": self.School_FR,
"name": "appellation_officielle_uai",
"operator:type": lambda res: "private" if "PRIVE" in res["denomination_principale_uai"] else None,
}
self.conflationDistance = 50
self.text = lambda tags, fields: {"en":fields["appellation_officielle_uai"] if fields["appellation_officielle_uai"] else ""}
Example 7: __init__
def __init__(self, config, logger = None):
self.missing_official = {"item":"8160", "class": 1, "level": 3, "tag": ["merge", "public equipment"], "desc": T_(u"Paris Autolib' car rental not integrated") }
self.missing_osm = {"item":"7140", "class": 2, "level": 3, "tag": ["merge", "public equipment"], "desc": T_(u"Paris Autolib' car rental without ref:FR:Paris:DSP") }
self.possible_merge = {"item":"8161", "class": 3, "level": 3, "tag": ["merge", "public equipment"], "desc": T_(u"Paris Autolib' car rental integration suggestion") }
Analyser_Merge.__init__(self, config, logger,
Source(
url = "http://opendata.paris.fr/explore/dataset/stations_et_espaces_autolib_de_la_metropole_parisienne",
name = u"Stations et espaces AutoLib de la métropole parisienne",
file = "car_rental_FR_paris.csv.bz2",
csv = CSV(separator = ";")),
Load("field13", "field13", table = "car_rental_FR_paris",
xFunction = lambda x: x.split(',')[1],
yFunction = lambda y: y.split(',')[0]),
Mapping(
select = Select(
types = ["ways", "nodes"],
tags = {"amenity": "car_rental", "network": "Autolib'"}),
osmRef = "ref:FR:Paris:DSP",
conflationDistance = 200,
generate = Generate(
static = {
"source": u"Mairie de Paris - 05/2013",
"amenity": "car_rental",
"network": "Autolib'",
"operator": "Autolib'",
},
mapping = {
"name": "nom_de_la_station",
"ref:FR:Paris:DSP": "identifiant_dsp",
"capacity": "places_autolib"} )))
Example 8: __init__
def __init__(self, config, logger = None):
self.missing_official = {"item":"8010", "class": 11, "level": 3, "tag": ["merge", "tourism"], "desc": T_(u"Gironde museum not integrated") }
self.possible_merge = {"item":"8011", "class": 13, "level": 3, "tag": ["merge", "tourism"], "desc": T_(u"Gironde museum, integration suggestion") }
Analyser_Merge.__init__(self, config, logger)
self.officialURL = "http://www.datalocale.fr/drupal7/dataset/liste-musees-cdt33"
self.officialName = "Liste des musées et centres d'interprétation de Gironde"
self.csv_file = "merge_data/tourism_FR_gironde_museum.csv"
self.csv_format = "WITH DELIMITER AS ',' NULL AS '' CSV HEADER"
self.csv_select = {
"type": u"Musée"
}
self.osmTags = {
"tourism": "museum"
}
self.osmTypes = ["nodes", "ways"]
self.sourceTable = "gironde_museum"
self.sourceX = "longitude"
self.sourceY = "latitude"
self.sourceSRID = "4326"
self.defaultTag = {
"source": "Observatoire du comité départemental du Tourisme de la Gironde - 09/2013",
"tourism": "museum"
}
self.defaultTagMapping = {
"name": "raison_sociale",
}
self.conflationDistance = 300
self.text = lambda tags, fields: {
"en": u"%s, %s %s %s" % (fields["raison_sociale"], fields["adresse"], fields["adresse_suite"], fields["commune"]),
}
Example 9: __init__
def __init__(self, config, logger = None):
self.missing_official = {"item":"8010", "class": 1, "level": 3, "tag": ["merge", "building"], "desc": T_(u"Historical monument not integrated") }
self.missing_osm = {"item":"7080", "class": 2, "level": 3, "tag": ["merge"], "desc": T_(u"Historical monument without ref:mhs or invalid") }
self.possible_merge = {"item":"8011", "class": 3, "level": 3, "tag": ["merge"], "desc": T_(u"Historical monument, integration suggestion") }
Analyser_Merge.__init__(self, config, logger)
self.officialURL = "http://www.data.gouv.fr/donnees/view/Liste-des-Immeubles-prot%C3%A9g%C3%A9s-au-titre-des-Monuments-Historiques-30382152"
self.officialName = "Liste des Immeubles protégés au titre des Monuments Historiques"
self.csv_file = "merge_data/merimee.csv"
self.osmTags = {
"heritage": ["1", "2", "3"],
"heritage:operator": None,
}
self.osmRef = "ref:mhs"
self.osmTypes = ["nodes", "ways", "relations"]
self.sourceTable = "merimee"
self.sourceX = "lon"
self.sourceY = "lat"
self.sourceSRID = "4326"
self.defaultTag = {
"heritage:operator": "mhs",
"source": "data.gouv.fr:Ministère de la Culture - 08/2011"
}
self.defaultTagMapping = {
"ref:mhs": "ref",
"name": "tico",
"mhs:inscription_date": lambda res: u"%s" % res["ppro"][-4:],
"heritage": lambda res: 2 if "classement par arrêté" in res["ppro"] else 3 if "inscription par arrêté" in res["ppro"] else None,
"wikipedia": self.wikipedia,
}
self.conflationDistance = 1000
self.text = lambda tags, fields: {"en": u"Historical monument: %s" % ", ".join(filter(lambda x: x!= None and x != "", [fields["ppro"], fields["adrs"], fields["loca"]]))}
self.WikipediaSearch = re.compile(r"\[\[.*\]\]")
self.WikipediaSub = re.compile(r"[^[]*\[\[([^|]*).*\]\][^]]*")
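Example 9 compiles two regular expressions, but the self.wikipedia method that uses them is not part of the excerpt. The standalone sketch below shows how they could turn a Mérimée-style wikilink into a wikipedia=fr:* value; the real method's behaviour is an assumption.

# Hypothetical use of the two patterns above; the real self.wikipedia may differ.
import re

WikipediaSearch = re.compile(r"\[\[.*\]\]")
WikipediaSub = re.compile(r"[^[]*\[\[([^|]*).*\]\][^]]*")

def wikipedia(field):
    if field and WikipediaSearch.search(field):
        return "fr:" + WikipediaSub.sub(r"\1", field)
    return None

print(wikipedia(u"voir [[Château de Foo|le château]]"))  # fr:Château de Foo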
Example 10: __init__
def __init__(self, config, classs, officialName, srid, logger = None):
self.missing_official = {"item":"8030", "class": classs+1, "level": 3, "tag": ["merge"], "desc": T_(u"School not integrated") }
self.missing_osm = {"item":"7070", "class": classs+2, "level": 3, "tag": ["merge"], "desc": T_(u"School without ref:UAI or invalid") }
self.possible_merge = {"item":"8031", "class": classs+3, "level": 3, "tag": ["merge"], "desc": T_(u"School, integration suggestion") }
Analyser_Merge.__init__(self, config, logger,
Source(
url = "http://www.data.gouv.fr/donnees/view/G%C3%A9olocalisation-des-%C3%A9tablissements-d%27enseignement-du-premier-degr%C3%A9-et-du-second-degr%C3%A9-du-minist%C3%A8re-d-30378093",
name = u"établissements d'enseignement du premier degré et du second degré - " + officialName,
file = "school_FR.csv.bz2",
encoding = "ISO-8859-15",
csv = CSV(separator = ";", null = "null")),
Load("X", "Y", srid = srid, table = "School_FR",
filter = lambda t: t.replace("; ", ";null").replace(";.", ";null").replace("Ecole", u"École").replace("Saint ", "Saint-").replace("Sainte ", "Sainte-").replace(u"élementaire", u"élémentaire"),
where = lambda res: res["_x"] and res["_y"] and self.is_in(float(res["_x"]), float(res["_y"]))),
Mapping(
select = Select(
types = ["nodes", "ways", "relations"],
tags = {"amenity": ["school", "kindergarten"]}),
osmRef = "ref:UAI",
conflationDistance = 50,
generate = Generate(
static = {
"amenity": "school",
"source": u"data.gouv.fr:Ministère de l'Éducation nationale, de la Jeunesse et de la Vie associative - 05/2012"},
mapping = {
"ref:UAI": "numero_uai",
"school:FR": self.School_FR,
"name": "appellation_officielle_uai",
"operator:type": lambda res: "private" if res["denomination_principale_uai"] and "PRIVE" in res["denomination_principale_uai"] else None},
text = lambda tags, fields: {"en":fields["appellation_officielle_uai"] if fields["appellation_officielle_uai"] else ""} )))
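The where clause in example 10 relies on self.is_in, which is not shown; presumably it keeps only the rows whose coordinates fall inside the area covered by the analyser. Below is a standalone stand-in with a hard-coded bounding box; the box, and the assumption that the coordinates are lon/lat, are purely illustrative.

# Hypothetical stand-in for self.is_in: keep points inside a bounding box.
BBOX = (-5.2, 41.3, 9.6, 51.1)  # illustrative lon/lat bounds only

def is_in(x, y, bbox = BBOX):
    min_x, min_y, max_x, max_y = bbox
    return min_x <= x <= max_x and min_y <= y <= max_y

row = {"_x": "2.35", "_y": "48.85"}
print(bool(row["_x"] and row["_y"] and is_in(float(row["_x"]), float(row["_y"]))))  # True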
Example 11: __init__
def __init__(self, config, logger = None):
self.missing_official = {"item":"8030", "class": 100, "level": 3, "tag": ["merge", "railway"], "desc": T_(u"College not integrated") }
Analyser_Merge.__init__(self, config, logger)
self.officialURL = "http://www.data.gouv.fr/DataSet/30382046"
self.officialName = "Etablissements d'enseignement supérieur"
self.csv_file = "merge_data/Etablissements d'enseignement supérieur.csv"
self.csv_format = "WITH DELIMITER AS ',' NULL AS '' CSV HEADER"
decsep = re.compile("([0-9]),([0-9])")
self.csv_filter = lambda t: decsep.sub("\\1.\\2", t)
self.osmTags = {
"amenity": ["college", "university"],
}
self.osmTypes = ["nodes", "ways", "relations"]
self.sourceTable = "college_fr"
self.sourceX = "lon"
self.sourceY = "lat"
self.sourceSRID = "4326"
self.defaultTag = {
"amenity": "college",
"source": u"data.gouv.fr:Office national d'information sur les enseignements et les professions - 11/2011"
}
self.defaultTagMapping = {
"name": "nom",
"short_name": "sigle",
"operator:type": lambda res: "private" if res["statut"] in [u"CFA privé", u"Privé hors contrat", u"Privé reconnu", u"Privé sous contrat"] else None,
}
self.conflationDistance = 50
self.text = lambda tags, fields: {"en": " - ".join(filter(lambda i: i != "None", [fields["sigle"], fields["nom"]]))}
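Examples 11 and 13 pre-process the CSV with the decsep regular expression, rewriting decimal commas inside numbers into dots before the file is loaded. A standalone demonstration of the substitution:

# The decsep substitution used by examples 11 and 13, applied to sample values.
import re

decsep = re.compile("([0-9]),([0-9])")
print(decsep.sub("\\1.\\2", "46,3052"))  # 46.3052
print(decsep.sub("\\1.\\2", "4,8321"))   # 4.8321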
Example 12: __init__
def __init__(self, config, logger = None):
self.missing_official = {"item":"8140", "class": 11, "level": 3, "tag": ["merge", "tourism"], "desc": T_(u"Gironde camp site not integrated") }
Analyser_Merge.__init__(self, config, logger,
"http://catalogue.datalocale.fr/dataset/liste-campings-aquitaine",
u"Liste des campings en Aquitaine",
JSON(Source(attribution = u"Réseau SIRTAQUI - Comité Régional de Tourisme d'Aquitaine - www.sirtaqui-aquitaine.com", millesime = "06/2016",
fileUrl = "http://wcf.tourinsoft.com/Syndication/aquitaine/13d7f8ab-bd69-4815-b02c-d8134663b849/Objects?$format=json"),
extractor = lambda json: json['d']),
Load("LON", "LAT",
xFunction = self.degree,
yFunction = self.degree),
Mapping(
select = Select(
types = ["nodes", "ways"],
tags = {"tourism": "camp_site"}),
conflationDistance = 300,
generate = Generate(
static1 = {"tourism": "camp_site"},
static2 = {"source": self.source},
mapping1 = {
"name": "NOMOFFRE",
"stars": lambda fields: fields["RECHERCHECLAS"][0] if fields["RECHERCHECLAS"] and fields["RECHERCHECLAS"][0].isdigit() else None,
"ref:FR:CRTA": "SyndicObjectID",
"website": lambda fields: None if not fields["URL"] else fields["URL"] if fields["URL"].startswith('http') else 'http://' + fields["URL"]},
text = lambda tags, fields: {"en": ', '.join(filter(lambda x: x != "None", [fields["NOMOFFRE"], fields["AD1"], fields["AD1SUITE"], fields["AD2"], fields["AD3"], fields["CP"], fields["COMMUNE"]]))} )))
Example 13: __init__
def __init__(self, config, logger = None):
self.missing_official = {"item":"8120", "class": 11, "level": 3, "tag": ["merge", "recycling"], "desc": T_(u"CAPP glass recycling not integrated") }
Analyser_Merge.__init__(self, config, logger)
self.officialURL = "http://opendata.agglo-pau.fr/index.php/fiche?idQ=8"
self.officialName = "Point d'apport volontaire du verre : Bornes à verres sur la CAPP"
self.csv_file = "merge_data/recycling_FR_capp_glass.csv"
self.csv_format = "WITH DELIMITER AS ',' NULL AS '' CSV HEADER"
self.csv_encoding = "ISO-8859-15"
decsep = re.compile("([0-9]),([0-9])")
self.csv_filter = lambda t: decsep.sub("\\1.\\2", t)
self.csv_select = {
"usage_": "En service"
}
self.osmTags = {
"amenity": "recycling",
}
self.osmTypes = ["nodes", "ways"]
self.sourceTable = "capp_recycling_glass"
self.sourceX = "x"
self.sourceY = "y"
self.sourceSRID = "4326"
self.defaultTag = {
"source": "Communauté d'Agglomération Pau-Pyrénées - 01/2013",
"amenity": "recycling",
"recycling:glass": "yes",
"recycling:glass_bottles": "yes",
"recycling_type": "container",
}
self.conflationDistance = 100
Example 14: __init__
def __init__(self, config, logger = None):
self.missing_official = {"item":"8010", "class": 11, "level": 3, "tag": ["merge", "tourism"], "desc": T_(u"Gironde museum not integrated") }
self.possible_merge = {"item":"8011", "class": 13, "level": 3, "tag": ["merge", "tourism"], "desc": T_(u"Gironde museum, integration suggestion") }
Analyser_Merge.__init__(self, config, logger,
"http://catalogue.datalocale.fr/dataset/liste-musees-aquitaine",
u"Liste des musées et centres d'interprétation de Gironde",
JSON(Source(attribution = u"Réseau SIRTAQUI - Comité Régional de Tourisme d'Aquitaine - www.sirtaqui-aquitaine.com", millesime = "06/2016",
fileUrl = "http://wcf.tourinsoft.com/Syndication/aquitaine/094df128-7ac5-43e5-a7e9-a5d752317674/Objects?$format=json"),
extractor = lambda json: json['d']),
Load("LON", "LAT",
xFunction = self.degree,
yFunction = self.degree),
Mapping(
select = Select(
types = ["nodes", "ways"],
tags = {"tourism": "museum"}),
conflationDistance = 300,
generate = Generate(
static1 = {"tourism": "museum"},
static2 = {"source": self.source},
mapping1 = {
"name": "NOMOFFRE",
"ref:FR:CRTA": "SyndicObjectID",
"website": lambda fields: None if not fields["URL"] else fields["URL"] if fields["URL"].startswith('http') else 'http://' + fields["URL"]},
text = lambda tags, fields: {"en": ', '.join(filter(lambda x: x != "None", [fields["NOMOFFRE"], fields["AD1"], fields["AD1SUITE"], fields["AD2"], fields["AD3"], fields["CP"], fields["COMMUNE"]]))} )))
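Examples 12 and 14 share the same inline lambda for the website tag: an empty value becomes None and a bare domain gets an http:// prefix. Written as a standalone helper (a refactoring sketch, not the original code):

def normalize_website(url):
    """Return None for empty values, keep http(s) URLs, prefix anything else with http://."""
    if not url:
        return None
    return url if url.startswith('http') else 'http://' + url

assert normalize_website("") is None
assert normalize_website("www.example.com") == "http://www.example.com"
assert normalize_website("https://example.com") == "https://example.com"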
Example 15: __init__
def __init__(self, config, logger=None):
self.update_official = {
"item": "8101",
"class": 100,
"level": 3,
"tag": ["merge", "wikipedia"],
"desc": T_(u"Update Wikipedia tag"),
}
Analyser_Merge.__init__(
self,
config,
logger,
Source(url="http://wikipedia.fr", name="wikipedia insee", file="wikipedia_insee_FR.csv.bz2"),
Load(
table="wikipedia_insee_FR",
create="""
insee VARCHAR(254) PRIMARY KEY,
title VARCHAR(254)""",
),
Mapping(
select=Select(
types=["relations"], tags={"type": "boundary", "boundary": "administrative", "admin_level": "8"}
),
osmRef="ref:INSEE",
generate=Generate(mapping={"ref:INSEE": "insee", "wikipedia": lambda res: "fr:" + res["title"]}),
),
)
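To make the Generate(...) blocks above concrete, here is a simplified, standalone re-implementation of how a mapping of column names and lambdas can be applied to one source row to produce OSM tags. The real Analyser_Merge machinery is richer (static tags, per-feature text, conflation by distance or by osmRef); this sketch only illustrates the mapping idea, using the ref:INSEE/wikipedia mapping of example 15 on a made-up row.

# Simplified stand-in for the Generate mapping step, for illustration only.
def apply_mapping(static, mapping, row):
    tags = dict(static)
    for osm_key, source in mapping.items():
        value = source(row) if callable(source) else row.get(source)
        if value not in (None, ""):
            tags[osm_key] = value
    return tags

row = {"insee": "75056", "title": "Paris"}
print(apply_mapping(
    static = {},
    mapping = {"ref:INSEE": "insee", "wikipedia": lambda res: "fr:" + res["title"]},
    row = row))
# {'ref:INSEE': '75056', 'wikipedia': 'fr:Paris'}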