Lab Solutions: Difference between revisions

From info216
No edit summary
No edit summary
Line 370: Line 370:
</syntaxhighlight>
</syntaxhighlight>


<!--
 
More examples can be found in the example section on the official query service here: https://query.wikidata.org/.
More examples can be found in the example section on the official query service here: https://query.wikidata.org/.


Line 438: Line 438:
     print(result["comment"]["value"])
     print(result["comment"]["value"])
</syntaxhighlight>
</syntaxhighlight>
==Lifting CSV to RDF==
<syntaxhighlight>
from rdflib import Graph, Literal, Namespace, URIRef
from rdflib.namespace import RDF, FOAF, RDFS, OWL
import pandas as pd

g = Graph()
ex = Namespace("http://example.org/")
g.bind("ex", ex)

# Load the CSV data as a pandas DataFrame.
csv_data = pd.read_csv("task1.csv")

# Replace spaces (" ") in the data with "_" so that the generated URIs are valid.
csv_data = csv_data.replace(to_replace=" ", value="_", regex=True)

# Mark all missing/empty data as "unknown"; triples containing it are removed below.
csv_data = csv_data.fillna("unknown")

# Loop through the CSV rows and create RDF triples.
for index, row in csv_data.iterrows():
    # The names of the people act as subjects.
    subject = URIRef(ex + row["Name"])
    # Create triples: e.g. "Cade_Tracey - age - 27"
    g.add((subject, URIRef(ex + "age"), Literal(row["Age"])))
    g.add((subject, URIRef(ex + "married"), URIRef(ex + row["Spouse"])))
    g.add((subject, URIRef(ex + "country"), URIRef(ex + row["Country"])))
    # Additional RDF/RDFS/OWL information can be added too, e.g. a type:
    g.add((subject, RDF.type, FOAF.Person))

# Remove the triples that were marked as unknown earlier.
g.remove((None, None, URIRef("http://example.org/unknown")))

# Clean printing of the graph.
# Bug fix: rdflib < 6 returns bytes from serialize() while rdflib >= 6 returns str,
# so calling .decode() unconditionally crashes on newer rdflib.
output = g.serialize(format="turtle")
print(output.decode() if isinstance(output, bytes) else output)
</syntaxhighlight>
===CSV file for above example===
<syntaxhighlight>
"Name","Age","Spouse","Country"
"Cade Tracey","26","Mary Jackson","US"
"Bob Johnson","21","","Canada"
"Mary Jackson","25","","France"
"Phil Philips","32","Catherine Smith","Japan"
</syntaxhighlight>
<!--
==Lifting XML to RDF==
<syntaxhighlight>
from rdflib import Graph, Literal, Namespace, URIRef
from rdflib.namespace import RDF, XSD, RDFS
import xml.etree.ElementTree as ET

g = Graph()
ex = Namespace("http://example.org/TV/")
prov = Namespace("http://www.w3.org/ns/prov#")
g.bind("ex", ex)
g.bind("prov", prov)

tree = ET.parse("tv_shows.xml")
root = tree.getroot()

for tv_show in root.findall('tv_show'):
    show_id = tv_show.attrib["id"]
    title = tv_show.find("title").text
    show = URIRef(ex + show_id)

    g.add((show, ex.title, Literal(title, datatype=XSD.string)))
    g.add((show, RDF.type, ex.TV_Show))

    for actor in tv_show.findall("actor"):
        first_name = actor.find("firstname").text
        last_name = actor.find("lastname").text
        full_name = first_name + "_" + last_name
        actor_uri = URIRef(ex + full_name)

        g.add((show, ex.stars, actor_uri))
        # Bug fix: the original wrote URIRef(title), which produces an invalid
        # relative URI (the raw title may contain spaces). Point starsIn at the
        # show resource instead, mirroring ex.stars above.
        g.add((actor_uri, ex.starsIn, show))
        g.add((actor_uri, RDF.type, ex.Actor))

# Bug fix: rdflib < 6 returns bytes from serialize(), rdflib >= 6 returns str,
# so calling .decode() unconditionally crashes on newer rdflib.
output = g.serialize(format="turtle")
print(output.decode() if isinstance(output, bytes) else output)
</syntaxhighlight>


==RDFS==
==RDFS==
Line 591: Line 675:




==Lifting CSV to RDF==
<syntaxhighlight>
from rdflib import Graph, Literal, Namespace, URIRef
from rdflib.namespace import RDF, FOAF, RDFS, OWL
import pandas as pd

g = Graph()
ex = Namespace("http://example.org/")
g.bind("ex", ex)

# Load the CSV data as a pandas DataFrame.
csv_data = pd.read_csv("task1.csv")

# Replace spaces (" ") in the data with "_" so that the generated URIs are valid.
csv_data = csv_data.replace(to_replace=" ", value="_", regex=True)

# Mark all missing/empty data as "unknown"; triples containing it are removed below.
csv_data = csv_data.fillna("unknown")

# Loop through the CSV rows and create RDF triples.
for index, row in csv_data.iterrows():
    # The names of the people act as subjects.
    subject = URIRef(ex + row["Name"])
    # Create triples: e.g. "Cade_Tracey - age - 27"
    g.add((subject, URIRef(ex + "age"), Literal(row["Age"])))
    g.add((subject, URIRef(ex + "married"), URIRef(ex + row["Spouse"])))
    g.add((subject, URIRef(ex + "country"), URIRef(ex + row["Country"])))
    # Additional RDF/RDFS/OWL information can be added too, e.g. a type:
    g.add((subject, RDF.type, FOAF.Person))

# Remove the triples that were marked as unknown earlier.
g.remove((None, None, URIRef("http://example.org/unknown")))

# Clean printing of the graph.
# Bug fix: rdflib < 6 returns bytes from serialize() while rdflib >= 6 returns str,
# so calling .decode() unconditionally crashes on newer rdflib.
output = g.serialize(format="turtle")
print(output.decode() if isinstance(output, bytes) else output)
</syntaxhighlight>
===CSV file for above example===
<syntaxhighlight>
"Name","Age","Spouse","Country"
"Cade Tracey","26","Mary Jackson","US"
"Bob Johnson","21","","Canada"
"Mary Jackson","25","","France"
"Phil Philips","32","Catherine Smith","Japan"
</syntaxhighlight>
==Lifting XML to RDF==
<syntaxhighlight>
from rdflib import Graph, Literal, Namespace, URIRef
from rdflib.namespace import RDF, XSD, RDFS
import xml.etree.ElementTree as ET

g = Graph()
ex = Namespace("http://example.org/TV/")
prov = Namespace("http://www.w3.org/ns/prov#")
g.bind("ex", ex)
g.bind("prov", prov)

tree = ET.parse("tv_shows.xml")
root = tree.getroot()

for tv_show in root.findall('tv_show'):
    show_id = tv_show.attrib["id"]
    title = tv_show.find("title").text
    show = URIRef(ex + show_id)

    g.add((show, ex.title, Literal(title, datatype=XSD.string)))
    g.add((show, RDF.type, ex.TV_Show))

    for actor in tv_show.findall("actor"):
        first_name = actor.find("firstname").text
        last_name = actor.find("lastname").text
        full_name = first_name + "_" + last_name
        actor_uri = URIRef(ex + full_name)

        g.add((show, ex.stars, actor_uri))
        # Bug fix: the original wrote URIRef(title), which produces an invalid
        # relative URI (the raw title may contain spaces). Point starsIn at the
        # show resource instead, mirroring ex.stars above.
        g.add((actor_uri, ex.starsIn, show))
        g.add((actor_uri, RDF.type, ex.Actor))

# Bug fix: rdflib < 6 returns bytes from serialize(), rdflib >= 6 returns str,
# so calling .decode() unconditionally crashes on newer rdflib.
output = g.serialize(format="turtle")
print(output.decode() if isinstance(output, bytes) else output)
</syntaxhighlight>





Revision as of 07:11, 8 March 2022

This page will be updated with Python examples related to the lectures and labs. We will add more examples after each lab has ended. The first examples will use Python's RDFlib. We will introduce other relevant libraries later.


Getting started

Printing the triples of the Graph in a readable way

# The turtle format has the purpose of being more readable for humans. 
# NOTE(review): in rdflib < 6 serialize() returns bytes (this prints b'...');
# in rdflib >= 6 it returns str — confirm which version the lab uses.
print(g.serialize(format="turtle"))

Coding Tasks Lab 1

from rdflib import Graph, Namespace, URIRef, BNode, Literal
from rdflib.namespace import RDF, FOAF, XSD

g = Graph()
ex = Namespace("http://example.org/")

# Facts about Cade.
g.add((ex.Cade, RDF.type, FOAF.Person))
g.add((ex.Cade, ex.married, ex.Mary))
g.add((ex.Cade, ex.age, Literal("27", datatype=XSD.integer)))
g.add((ex.Cade, ex.characteristic, ex.Kind))

# Facts about Mary.
g.add((ex.Mary, RDF.type, FOAF.Person))
g.add((ex.Mary, RDF.type, ex.Student))
g.add((ex.Mary, ex.age, Literal("26", datatype=XSD.integer)))
g.add((ex.Mary, ex.characteristic, ex.Kind))
for interest in (ex.Hiking, ex.Chocolate, ex.Biology):
    g.add((ex.Mary, ex.interest, interest))

# Facts about France and Paris.
g.add((ex.France, ex.capital, ex.Paris))
g.add((ex.Paris, RDF.type, ex.City))
g.add((ex.Paris, ex.locatedIn, ex.France))

Basic RDF programming

Different ways to create an address

from rdflib import Graph, Namespace, URIRef, BNode, Literal
from rdflib.namespace import RDF, FOAF, XSD

g = Graph()
ex = Namespace("http://example.org/")


# How to represent the address of Cade Tracey. From probably the worst solution to the best.

# Solution 1 -
# Make the entire address into one Literal. However, generally we want to separate each
# part of an address into its own triple. This is useful for instance if we want to find
# only the streets where people live.

g.add((ex.Cade_Tracey, ex.livesIn, Literal("1516_Henry_Street, Berkeley, California 94709, USA")))


# Solution 2 -
# Separate the different pieces of information into their own triples.
# NOTE(review): from here on the subject is spelled ex.Cade_tracey (lower-case t), which is
# a *different* resource than ex.Cade_Tracey above — kept as in the original examples.

g.add((ex.Cade_tracey, ex.street, Literal("1516_Henry_Street")))
g.add((ex.Cade_tracey, ex.city, Literal("Berkeley")))
g.add((ex.Cade_tracey, ex.state, Literal("California")))
g.add((ex.Cade_tracey, ex.zipcode, Literal("94709")))
g.add((ex.Cade_tracey, ex.country, Literal("USA")))


# Solution 3 - Some parts of the address can make more sense to be resources than Literals.
# Larger concepts like a city or state are typically represented as resources rather than
# Literals, but this is not necessarily a requirement if you don't intend to say more about them.

g.add((ex.Cade_tracey, ex.street, Literal("1516_Henry_Street")))
g.add((ex.Cade_tracey, ex.city, ex.Berkeley))
g.add((ex.Cade_tracey, ex.state, ex.California))
g.add((ex.Cade_tracey, ex.zipcode, Literal("94709")))
g.add((ex.Cade_tracey, ex.country, ex.USA))


# Solution 4 -
# Group the information into an Address. We can represent the address concept with its own
# URI OR with a Blank Node. One advantage of this is that we can easily remove the entire
# address, instead of removing each individual part of the address.
# Solution 4 or 5 is how I would recommend making addresses. Here, ex.CadeAddress could
# also be called something like ex.address1 and so on, to give each address a unique ID.

# Address URI - CadeAddress

g.add((ex.Cade_Tracey, ex.address, ex.CadeAddress))
g.add((ex.CadeAddress, RDF.type, ex.Address))
g.add((ex.CadeAddress, ex.street, Literal("1516 Henry Street")))
g.add((ex.CadeAddress, ex.city, ex.Berkeley))
g.add((ex.CadeAddress, ex.state, ex.California))
g.add((ex.CadeAddress, ex.postalCode, Literal("94709")))
g.add((ex.CadeAddress, ex.country, ex.USA))

# OR

# Blank node for the address.
address = BNode()
g.add((ex.Cade_Tracey, ex.address, address))
g.add((address, RDF.type, ex.Address))
g.add((address, ex.street, Literal("1516 Henry Street", datatype=XSD.string)))
g.add((address, ex.city, ex.Berkeley))
g.add((address, ex.state, ex.California))
g.add((address, ex.postalCode, Literal("94709", datatype=XSD.string)))
g.add((address, ex.country, ex.USA))


# Solution 5 - using existing vocabularies for the address
# (in this case https://schema.org/PostalAddress from schema.org).
# Also using an existing ontology for places like California
# (e.g. http://dbpedia.org/resource/California from dbpedia.org).

schema = Namespace("https://schema.org/")
# Bug fix: the namespace was misspelled "dpbedia"; DBpedia resource URIs live
# under http://dbpedia.org/resource/ (matching the comment above).
dbp = Namespace("http://dbpedia.org/resource/")

g.add((ex.Cade_Tracey, schema.address, ex.CadeAddress))
g.add((ex.CadeAddress, RDF.type, schema.PostalAddress))
g.add((ex.CadeAddress, schema.streetAddress, Literal("1516 Henry Street")))
# Bug fix: schema.org has no "addresCity" (or "addressCity") property; the
# city/town of a PostalAddress goes in schema:addressLocality.
g.add((ex.CadeAddress, schema.addressLocality, dbp.Berkeley))
g.add((ex.CadeAddress, schema.addressRegion, dbp.California))
g.add((ex.CadeAddress, schema.postalCode, Literal("94709")))
g.add((ex.CadeAddress, schema.addressCountry, dbp.United_States))

Typed Literals

from rdflib import Graph, Literal, Namespace
# Bug fix: FOAF was missing from this import; it is used for FOAF.name below
# and the original snippet raised NameError.
from rdflib.namespace import XSD, FOAF

g = Graph()
ex = Namespace("http://example.org/")

# Literals can carry an explicit XSD datatype.
g.add((ex.Cade, ex.age, Literal(27, datatype=XSD.integer)))
g.add((ex.Cade, ex.gpa, Literal(3.3, datatype=XSD.float)))
g.add((ex.Cade, FOAF.name, Literal("Cade Tracey", datatype=XSD.string)))
g.add((ex.Cade, ex.birthday, Literal("2006-01-01", datatype=XSD.date)))


Writing and reading graphs/files

   # Write the graph (g, defined earlier) to a file. Possible formats: turtle, n3, xml, nt.
g.serialize(destination="triples.txt", format="turtle")

   # Parse a local file; the parsed triples are added to the graph.
parsed_graph = g.parse(location="triples.txt", format="turtle")

   # Parse a remote resource such as DBpedia directly over HTTP.
dbpedia_graph = g.parse("http://dbpedia.org/resource/Pluto")

Graph Binding

# Graph binding is useful for at least two reasons:
# (1) We no longer need to specify prefixes in SPARQL queries if they are already bound to the graph.
# (2) When serializing the graph, the serialization will show the expected prefixes
# instead of default namespace names ns1, ns2 etc.

g = Graph()

ex = Namespace("http://example.org/")
dbp = Namespace("http://dbpedia.org/resource/")
schema = Namespace("https://schema.org/")

g.bind("ex", ex)
g.bind("dbp", dbp)
g.bind("schema", schema)

Collection Example

from rdflib import Graph, Namespace, BNode
from rdflib.collection import Collection

# Bug fix: the original snippet used BNode without importing it, and used
# g/ex without defining them, so it was not runnable on its own.
g = Graph()
ex = Namespace("http://example.org/")

# Sometimes we want to add many objects or subjects for the same predicate at once.
# In these cases we can use Collection() to save some time.
# In this case I want to add all countries that Emma has visited at once.

b = BNode()
g.add((ex.Emma, ex.visit, b))
Collection(g, b,
    [ex.Portugal, ex.Italy, ex.France, ex.Germany, ex.Denmark, ex.Sweden])

# OR, with a named resource as the list head instead of a blank node:

g.add((ex.Emma, ex.visit, ex.EmmaVisits))
Collection(g, ex.EmmaVisits,
    [ex.Portugal, ex.Italy, ex.France, ex.Germany, ex.Denmark, ex.Sweden])

SPARQL

Also see the SPARQL Examples page!

Querying a local ("in memory") graph

Example contents of the file family.ttl:

@prefix rex: <http://example.org/royal#> .
@prefix fam: <http://example.org/family#> .

rex:IngridAlexandra fam:hasParent rex:HaakonMagnus .
rex:SverreMagnus fam:hasParent rex:HaakonMagnus .
rex:HaakonMagnus fam:hasParent rex:Harald .
rex:MarthaLouise fam:hasParent rex:Harald .
rex:HaakonMagnus fam:hasSister rex:MarthaLouise .
import rdflib

# Load the family data and ask for every (child, sister-of-parent) pair.
graph = rdflib.Graph()
graph.parse("family.ttl", format='ttl')

aunt_query = """
PREFIX fam: <http://example.org/family#>
    SELECT ?child ?sister WHERE {
        ?child fam:hasParent ?parent .	
        ?parent fam:hasSister ?sister .
    }"""

# A parent's sister is the child's aunt.
for child, sister in graph.query(aunt_query):
    print("%s has aunt %s" % (child, sister))

With a prepared query, you can write the query once, and then bind some of the variables each time you use it:

import rdflib
# Bug fix: after a bare `import rdflib`, the attribute chain
# rdflib.plugins.sparql.prepareQuery is not reliably available (the submodule
# may not have been imported); import prepareQuery explicitly instead.
from rdflib.plugins.sparql import prepareQuery

g = rdflib.Graph()
g.parse("family.ttl", format='ttl')

# Prepare the query once; the fam: prefix is supplied via initNs.
q = prepareQuery(
    """SELECT ?child ?sister WHERE {
              ?child fam:hasParent ?parent .
              ?parent fam:hasSister ?sister .
    }""",
    initNs={"fam": "http://example.org/family#"})

sm = rdflib.URIRef("http://example.org/royal#SverreMagnus")

# Bind ?child to Sverre Magnus at query time and print his aunts.
for row in g.query(q, initBindings={'child': sm}):
    print(row)

Select all contents of lists (rdflib.Collection)

# rdflib.Collection has a different internal structure, so it requires a slightly more advanced query. Here I am selecting all places that Emma has visited.

PREFIX ex:   <http://example.org/>
PREFIX rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>

SELECT ?visit
WHERE {
  ex:Emma ex:visit/rdf:rest*/rdf:first ?visit
}


Using parameters/variables in rdflib queries

from rdflib import Graph, Namespace, URIRef
from rdflib.plugins.sparql import prepareQuery

g = Graph()
ex = Namespace("http://example.org/")
g.bind("ex", ex)

# Who lives where.
residents = [
    (ex.Cade, ex.France),
    (ex.Anne, ex.Norway),
    (ex.Sofie, ex.Sweden),
    (ex.Per, ex.Norway),
    (ex.John, ex.USA),
]
for person, place in residents:
    g.add((person, ex.livesIn, place))


def find_people_from_country(country):
    """Print every person in the graph living in `country` (a local name such as "Norway")."""
    country = URIRef(ex + country)
    query = prepareQuery(
         """
         PREFIX ex: <http://example.org/>
         SELECT ?person WHERE { 
         ?person ex:livesIn ?country.
         }
         """)
    # Bind ?country at execution time instead of string-building the query.
    for row in g.query(query, initBindings={'country': country}):
        print(row)


find_people_from_country("Norway")

SELECTING data from Blazegraph via Python

from SPARQLWrapper import SPARQLWrapper, JSON

# This creates a server connection to the same URL that serves the graphical
# interface for Blazegraph. You also need to add "sparql" to the end of the URL
# like below.

sparql = SPARQLWrapper("http://localhost:9999/blazegraph/sparql")

# SELECT the distinct predicates used in the database.
# NOTE(review): the original comment said "SELECT all triples", but the query
# projects only ?p with DISTINCT, so it lists each predicate once.

sparql.setQuery("""
    SELECT DISTINCT ?p WHERE {
    ?s ?p ?o.
    }
""")
sparql.setReturnFormat(JSON)
results = sparql.query().convert()

# Each binding is a dict of variable name -> {"type": ..., "value": ...}.
for result in results["results"]["bindings"]:
    print(result["p"]["value"])

# SELECT all interests of Cade.

sparql.setQuery("""
    PREFIX ex: <http://example.org/>
    SELECT DISTINCT ?interest WHERE {
    ex:Cade ex:interest ?interest.
    }
""")
sparql.setReturnFormat(JSON)
results = sparql.query().convert()

for result in results["results"]["bindings"]:
    print(result["interest"]["value"])

Updating data from Blazegraph via Python

from SPARQLWrapper import SPARQLWrapper, POST, DIGEST

# Blazegraph can host several datasets ("namespaces"); "kb" is the default one.
namespace = "kb"
sparql = SPARQLWrapper("http://localhost:9999/blazegraph/namespace/"+ namespace + "/sparql")

# SPARQL UPDATE requests (INSERT/DELETE) must be sent with HTTP POST, not the default GET.
sparql.setMethod(POST)
sparql.setQuery("""
    PREFIX ex: <http://example.org/>
    INSERT DATA{
    ex:Cade ex:interest ex:Mathematics.
    }
""")

# Execute the update; the raw response body is the server's mutation report.
results = sparql.query()
print(results.response.read())

Retrieving data from Wikidata with SparqlWrapper

from SPARQLWrapper import SPARQLWrapper, JSON

# Improvement: Wikimedia's User-Agent policy asks clients to send a descriptive
# agent string; requests with a generic/default agent may be throttled or blocked.
sparql = SPARQLWrapper(
    "https://query.wikidata.org/sparql",
    agent="info216-lab-examples/1.0 (University of Bergen INFO216 course)")

# In the query I want to select all the vitamins in Wikidata:
# wdt:P279 is "subclass of" and wd:Q34956 is "vitamin"; the label SERVICE
# fills in ?nutrientLabel in the user's language, falling back to English.
sparql.setQuery("""
    SELECT ?nutrient ?nutrientLabel WHERE
{
  ?nutrient wdt:P279 wd:Q34956.
  SERVICE wikibase:label { bd:serviceParam wikibase:language "[AUTO_LANGUAGE],en". }
}
""")

sparql.setReturnFormat(JSON)
results = sparql.query().convert()

for result in results["results"]["bindings"]:
    print(result["nutrient"]["value"], "   ", result["nutrientLabel"]["value"])


More examples can be found in the example section on the official query service here: https://query.wikidata.org/.

Download from BlazeGraph

"""
Dumps a database to a local RDF file.
You need to install the SPARQLWrapper package first...
"""

import datetime
from SPARQLWrapper import SPARQLWrapper, RDFXML

# your namespace, the default is 'kb'
ns = 'kb'

# the SPARQL endpoint
endpoint = 'http://info216.i2s.uib.no/bigdata/namespace/' + ns + '/sparql'

# - the endpoint just moved, the old one was:
# endpoint = 'http://i2s.uib.no:8888/bigdata/namespace/' + ns + '/sparql'

# create wrapper
wrapper = SPARQLWrapper(endpoint)

# prepare the SPARQL update
wrapper.setQuery('CONSTRUCT { ?s ?p ?o } WHERE { ?s ?p ?o }')
wrapper.setReturnFormat(RDFXML)

# execute the SPARQL update and convert the result to an rdflib.Graph 
graph = wrapper.query().convert()

# the destination file, with code to make it timestamped
destfile = 'rdf_dumps/slr-kg4news-' + datetime.datetime.now().strftime('%Y%m%d-%H%M') + '.rdf'

# serialize the result to file
graph.serialize(destination=destfile, format='ttl')

# report and quit
print('Wrote %u triples to file %s .' %
      (len(res), destfile))

Query Dbpedia with SparqlWrapper

from SPARQLWrapper import SPARQLWrapper, JSON

# Ask DBpedia's public endpoint for the English rdfs:comment of Barack Obama.
endpoint = SPARQLWrapper("http://dbpedia.org/sparql")

endpoint.setQuery("""
    PREFIX dbr: <http://dbpedia.org/resource/>
    PREFIX dbo: <http://dbpedia.org/ontology/>
    PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>
    SELECT ?comment
    WHERE {
    dbr:Barack_Obama rdfs:comment ?comment.
    FILTER (langMatches(lang(?comment),"en"))
    }
""")
endpoint.setReturnFormat(JSON)

response = endpoint.query().convert()

# Print the literal value of each returned ?comment binding.
for binding in response["results"]["bindings"]:
    print(binding["comment"]["value"])

Lifting CSV to RDF

from rdflib import Graph, Literal, Namespace, URIRef
from rdflib.namespace import RDF, FOAF, RDFS, OWL
import pandas as pd

g = Graph()
ex = Namespace("http://example.org/")
g.bind("ex", ex)

# Load the CSV data as a pandas DataFrame.
csv_data = pd.read_csv("task1.csv")

# Replace spaces (" ") in the data with "_" so that the generated URIs are valid.
csv_data = csv_data.replace(to_replace=" ", value="_", regex=True)

# Mark all missing/empty data as "unknown"; triples containing it are removed below.
csv_data = csv_data.fillna("unknown")

# Loop through the CSV rows and create RDF triples.
for index, row in csv_data.iterrows():
    # The names of the people act as subjects.
    subject = URIRef(ex + row["Name"])
    # Create triples: e.g. "Cade_Tracey - age - 27"
    g.add((subject, URIRef(ex + "age"), Literal(row["Age"])))
    g.add((subject, URIRef(ex + "married"), URIRef(ex + row["Spouse"])))
    g.add((subject, URIRef(ex + "country"), URIRef(ex + row["Country"])))
    # Additional RDF/RDFS/OWL information can be added too, e.g. a type:
    g.add((subject, RDF.type, FOAF.Person))

# Remove the triples that were marked as unknown earlier.
g.remove((None, None, URIRef("http://example.org/unknown")))

# Clean printing of the graph.
# Bug fix: rdflib < 6 returns bytes from serialize() while rdflib >= 6 returns str,
# so calling .decode() unconditionally crashes on newer rdflib.
output = g.serialize(format="turtle")
print(output.decode() if isinstance(output, bytes) else output)

CSV file for above example

"Name","Age","Spouse","Country"
"Cade Tracey","26","Mary Jackson","US"
"Bob Johnson","21","","Canada"
"Mary Jackson","25","","France"
"Phil Philips","32","Catherine Smith","Japan"