This page will be updated with Python examples related to the labs as the course progresses.


=Examples from the lectures=


==Lecture 1: Introduction to KGs==

Turtle example:
<syntaxhighlight>
@prefix ex: <http://example.org/> .
ex:Roger_Stone
    ex:name "Roger Stone" ;
    ex:occupation ex:lobbyist ;
    ex:significant_person ex:Donald_Trump .
ex:Donald_Trump
    ex:name "Donald Trump" .
</syntaxhighlight>
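To work with this Turtle snippet programmatically, it can be parsed into an RDFlib graph. This is a minimal sketch, not part of the original lab code; the file name ''lecture1.ttl'' is only an assumption:
<syntaxhighlight>
from rdflib import Graph

g = Graph()
# parse the Turtle example above (assuming it was saved as lecture1.ttl)
g.parse("lecture1.ttl", format="turtle")

# print every triple to see what the graph contains
for s, p, o in g:
    print(s, p, o)
</syntaxhighlight>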


==Lecture 2: RDF==

Blank nodes for anonymity, or when we have not decided on a URI:
<syntaxhighlight lang="Python">
from rdflib import Graph, Namespace, Literal, BNode, RDF, RDFS, DC, FOAF, XSD

EX = Namespace('http://example.org/')

g = Graph()
g.bind('ex', EX)  # this is why the line '@prefix ex: <http://example.org/> .'
                  # and the 'ex:' prefix are used when we print out Turtle later

robertMueller = BNode()
g.add((robertMueller, RDF.type, EX.Human))
g.add((robertMueller, FOAF.name, Literal('Robert Mueller', lang='en')))
g.add((robertMueller, EX.position_held, Literal('Director of the Federal Bureau of Investigation', lang='en')))

print(g.serialize(format='turtle'))
</syntaxhighlight>


Blank nodes used to group related properties:
<syntaxhighlight>
from rdflib import Graph, Namespace, Literal, BNode, RDF, RDFS, DC, FOAF, XSD

EX = Namespace('http://example.org/')

g = Graph()
g.bind('ex', EX)

print(g.serialize(format='turtle'))
</syntaxhighlight>
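The block above only sets up the graph. As a sketch of what the grouping itself can look like (the ex:address property and the address details are illustrative assumptions, borrowed from the Lab 2 example further down, not part of the original snippet):
<syntaxhighlight>
from rdflib import Graph, Namespace, Literal, BNode

EX = Namespace('http://example.org/')

g = Graph()
g.bind('ex', EX)

# one blank node groups all the address details of a resource (illustrative values)
address = BNode()
g.add((EX.Donald_Trump, EX.address, address))
g.add((address, EX.city, Literal('Palm Beach')))
g.add((address, EX.state, Literal('Florida')))

print(g.serialize(format='turtle'))
</syntaxhighlight>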


Literals:
<syntaxhighlight>
from rdflib import Graph, Namespace, Literal, BNode, RDF, RDFS, DC, FOAF, XSD

EX = Namespace('http://example.org/')

g = Graph()
g.bind('ex', EX)

g.add((EX.Robert_Mueller, RDF.type, EX.Human))
g.add((EX.Robert_Mueller, FOAF.name, Literal('Robert Mueller', lang='en')))
g.add((EX.Robert_Mueller, FOAF.name, Literal('رابرت مولر', lang='fa')))
g.add((EX.Robert_Mueller, DC.description, Literal('sixth director of the FBI', datatype=XSD.string)))
g.add((EX.Robert_Mueller, EX.start_time, Literal(2001, datatype=XSD.integer)))

print(g.serialize(format='turtle'))
</syntaxhighlight>
Alternative container (open):
<syntaxhighlight>
from rdflib import Graph, Namespace, Literal, BNode, RDF, RDFS, DC, FOAF, XSD

EX = Namespace('http://example.org/')
SCHEMA = Namespace('https://schema.org/')

g = Graph()
g.bind('ex', EX)

muellerReportArchives = BNode()
g.add((muellerReportArchives, RDF.type, RDF.Alt))

archive1 = 'https://archive.org/details/MuellerReportVolume1Searchable/' \
                    'Mueller%20Report%20Volume%201%20Searchable/'
archive2 = 'https://edition.cnn.com/2019/04/18/politics/full-mueller-report-pdf/index.html'
archive3 = 'https://www.politico.com/story/2019/04/18/mueller-report-pdf-download-text-file-1280891'

g.add((muellerReportArchives, RDFS.member, Literal(archive1, datatype=XSD.anyURI)))
g.add((muellerReportArchives, RDFS.member, Literal(archive2, datatype=XSD.anyURI)))
g.add((muellerReportArchives, RDFS.member, Literal(archive3, datatype=XSD.anyURI)))

g.add((EX.Mueller_Report, RDF.type, FOAF.Document))
g.add((EX.Mueller_Report, DC.contributor, EX.Robert_Mueller))
g.add((EX.Mueller_Report, SCHEMA.archivedAt, muellerReportArchives))

print(g.serialize(format='turtle'))
</syntaxhighlight>


Sequence container (open):
<syntaxhighlight>
from rdflib import Graph, Namespace, Literal, BNode, RDF, RDFS, DC, FOAF, XSD

EX = Namespace('http://example.org/')
SCHEMA = Namespace('https://schema.org/')

g = Graph()
g.bind('ex', EX)

donaldTrumpSpouses = BNode()
g.add((donaldTrumpSpouses, RDF.type, RDF.Seq))
g.add((donaldTrumpSpouses, RDF._1, EX.IvanaTrump))
g.add((donaldTrumpSpouses, RDF._2, EX.MarlaMaples))
g.add((donaldTrumpSpouses, RDF._3, EX.MelaniaTrump))

g.add((EX.Donald_Trump, SCHEMA.spouse, donaldTrumpSpouses))

print(g.serialize(format='turtle'))
</syntaxhighlight>


Collection (closed list):
<syntaxhighlight>
from rdflib import Graph, Namespace, Literal, BNode, RDF, RDFS, DC, FOAF, XSD
from rdflib.collection import Collection

EX = Namespace('http://example.org/')
SCHEMA = Namespace('https://schema.org/')

g = Graph()
g.bind('ex', EX)

donaldTrumpSpouses = BNode()
Collection(g, donaldTrumpSpouses, [
    EX.IvanaTrump, EX.MarlaMaples, EX.MelaniaTrump
])
g.add((EX.Donald_Trump, SCHEMA.spouse, donaldTrumpSpouses))

print(g.serialize(format='turtle'))
g.serialize(destination='s02_Donald_Trump_spouses_list.ttl', format='turtle')
</syntaxhighlight>
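To read the members of such a collection back out of the graph, a SPARQL property path over rdf:rest*/rdf:first can be used. A minimal sketch, assuming the graph g from the example above:
<syntaxhighlight>
qres = g.query("""
    PREFIX ex:     <http://example.org/>
    PREFIX rdf:    <http://www.w3.org/1999/02/22-rdf-syntax-ns#>
    PREFIX schema: <https://schema.org/>

    SELECT ?spouse WHERE {
        ex:Donald_Trump schema:spouse/rdf:rest*/rdf:first ?spouse .
    }""")

for row in qres:
    print(row.spouse)
</syntaxhighlight>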
=Example lab solutions=


==Getting started (Lab 1)==


<syntaxhighlight>
from rdflib import Graph, Namespace

g = Graph()

ex = Namespace('http://example.org/')

g.bind("ex", ex)

#The Mueller Investigation was led by Robert Mueller.
g.add((ex.Mueller_Investigation, ex.leadBy, ex.Robert_Mueller))

#It involved Paul Manafort, Rick Gates, George Papadopoulos, Michael Flynn, Michael Cohen, and Roger Stone.
g.add((ex.Mueller_Investigation, ex.involved, ex.Paul_Manafort))
g.add((ex.Mueller_Investigation, ex.involved, ex.Rick_Gates))
g.add((ex.Mueller_Investigation, ex.involved, ex.George_Papadopoulos))
g.add((ex.Mueller_Investigation, ex.involved, ex.Michael_Flynn))
g.add((ex.Mueller_Investigation, ex.involved, ex.Michael_Cohen))
g.add((ex.Mueller_Investigation, ex.involved, ex.Roger_Stone))

# --- Paul Manafort ---
#Paul Manafort was a business partner of Rick Gates.
g.add((ex.Paul_Manafort, ex.businessManager, ex.Rick_Gates))
# He was campaign chairman for Trump.
g.add((ex.Paul_Manafort, ex.campaignChairman, ex.Donald_Trump))

# He was charged with money laundering, tax evasion, and foreign lobbying.
g.add((ex.Paul_Manafort, ex.chargedWith, ex.MoneyLaundering))
g.add((ex.Paul_Manafort, ex.chargedWith, ex.TaxEvasion))
g.add((ex.Paul_Manafort, ex.chargedWith, ex.ForeignLobbying))

# He was convicted for bank and tax fraud.
g.add((ex.Paul_Manafort, ex.convictedFor, ex.BankFraud))
g.add((ex.Paul_Manafort, ex.convictedFor, ex.TaxFraud))

# He pleaded guilty to conspiracy.
g.add((ex.Paul_Manafort, ex.pleadGuiltyTo, ex.Conspiracy))
# He was sentenced to prison.
g.add((ex.Paul_Manafort, ex.sentencedTo, ex.Prison))
# He negotiated a plea agreement.
g.add((ex.Paul_Manafort, ex.negoiated, ex.PleaBargain))

# --- Rick Gates ---
#Rick Gates was charged with money laundering, tax evasion and foreign lobbying.
g.add((ex.Rick_Gates, ex.chargedWith, ex.MoneyLaundering))
g.add((ex.Rick_Gates, ex.chargedWith, ex.TaxEvasion))
g.add((ex.Rick_Gates, ex.chargedWith, ex.ForeignLobbying))

#He pleaded guilty to conspiracy and lying to the FBI.
g.add((ex.Rick_Gates, ex.pleadGuiltyTo, ex.Conspiracy))
g.add((ex.Rick_Gates, ex.pleadGuiltyTo, ex.LyingToFBI))

#Use the serialize method to write out the model in different formats on screen
print(g.serialize(format="ttl"))
# g.serialize("lab1.ttl", format="ttl") #or to file

#Loop through the triples in the model to print out all triples that have pleading guilty as predicate
for subject, object in g[ : ex.pleadGuiltyTo : ]:
    print(subject, ex.pleadGuiltyTo, object)

# Michael Cohen, Michael Flynn and the lying is part of lab 2 and therefore the answer is not provided this week

#Write a method (function) that submits your model for rendering and saves the returned image to file.
import requests
import shutil

def graphToImage(graph):
    data = {"rdf":graph, "from":"ttl", "to":"png"}
    link = "http://www.ldf.fi/service/rdf-grapher"
    response = requests.get(link, params = data, stream=True)
    # print(response.content)
    print(response.raw)
    with open("lab1.png", "wb") as fil:
        shutil.copyfileobj(response.raw, fil)

graph = g.serialize(format="ttl")
graphToImage(graph)
</syntaxhighlight>


==RDF programming with RDFlib (Lab 2)==


<syntaxhighlight>
from rdflib import Graph, URIRef, Namespace, Literal, XSD, BNode
from rdflib.collection import Collection

g = Graph()
g.parse("lab1.ttl", format="ttl") #Retrieves the triples from lab 1

ex = Namespace('http://example.org/')

# --- Michael Cohen ---
#Michael Cohen was Donald Trump's attorney.
g.add((ex.Michael_Cohen, ex.attorneyTo, ex.Donald_Trump))
#He pleaded guilty to lying to Congress.
g.add((ex.Michael_Cohen, ex.pleadGuiltyTo, ex.LyingToCongress))

# --- Michael Flynn ---
#Michael Flynn was adviser to Trump.
g.add((ex.Michael_Flynn, ex.adviserTo, ex.Donald_Trump))
#He pleaded guilty to lying to the FBI.
g.add((ex.Michael_Flynn, ex.pleadGuiltyTo, ex.LyingToFBI))
# He negotiated a plea agreement.
g.add((ex.Michael_Flynn, ex.negoiated, ex.PleaBargain))

#How can you modify your knowledge graph to account for the different lying?
#Remove these to not have duplicates
g.remove((ex.Michael_Flynn, ex.pleadGuiltyTo, ex.LyingToFBI))
g.remove((ex.Michael_Flynn, ex.negoiated, ex.PleaBargain))
g.remove((ex.Rick_Gates, ex.pleadGuiltyTo, ex.LyingToFBI))
g.remove((ex.Rick_Gates, ex.pleadGuiltyTo, ex.Conspiracy))
g.remove((ex.Rick_Gates, ex.chargedWith, ex.ForeignLobbying))
g.remove((ex.Rick_Gates, ex.chargedWith, ex.MoneyLaundering))
g.remove((ex.Rick_Gates, ex.chargedWith, ex.TaxEvasion))
g.remove((ex.Michael_Cohen, ex.pleadGuiltyTo, ex.LyingToCongress))

# --- Michael Flynn ---
FlynnLying = BNode()
g.add((FlynnLying, ex.crime, ex.LyingToFBI))
g.add((FlynnLying, ex.pleadGulityOn, Literal("2017-12-1", datatype=XSD.date)))
g.add((FlynnLying, ex.liedAbout, Literal("His communications with a former Russian ambassador during the presidential transition", datatype=XSD.string)))
g.add((FlynnLying, ex.pleaBargain, Literal("true", datatype=XSD.boolean)))
g.add((ex.Michael_Flynn, ex.pleadGuiltyTo, FlynnLying))

# --- Rick Gates ---
GatesLying = BNode()
Crimes = BNode()
Charged = BNode()
Collection(g, Crimes, [ex.LyingToFBI, ex.Conspiracy])
Collection(g, Charged, [ex.ForeignLobbying, ex.MoneyLaundering, ex.TaxEvasion])
g.add((GatesLying, ex.crime, Crimes))
g.add((GatesLying, ex.chargedWith, Charged))
g.add((GatesLying, ex.pleadGulityOn, Literal("2018-02-23", datatype=XSD.date)))
g.add((GatesLying, ex.pleaBargain, Literal("true", datatype=XSD.boolean)))
g.add((ex.Rick_Gates, ex.pleadGuiltyTo, GatesLying))

# --- Michael Cohen ---
CohenLying = BNode()
g.add((CohenLying, ex.crime, ex.LyingToCongress))
g.add((CohenLying, ex.liedAbout, ex.TrumpRealEstateDeal))
g.add((CohenLying, ex.prosecutorsAlleged, Literal("In an August 2017 letter Cohen sent to congressional committees investigating Russian election interference, he falsely stated that the project ended in January 2016", datatype=XSD.string)))
g.add((CohenLying, ex.mullerInvestigationAlleged, Literal("Cohen falsely stated that he had never agreed to travel to Russia for the real estate deal and that he did not recall any contact with the Russian government about the project", datatype=XSD.string)))
g.add((CohenLying, ex.pleadGulityOn, Literal("2018-11-29", datatype=XSD.date)))
g.add((CohenLying, ex.pleaBargain, Literal("true", datatype=XSD.boolean)))
g.add((ex.Michael_Cohen, ex.pleadGuiltyTo, CohenLying))

print(g.serialize(format="ttl"))

#Save (serialize) your graph to a Turtle file.
# g.serialize("lab2.ttl", format="ttl")

#Add a few triples to the Turtle file with more information about Donald Trump.
'''
ex:Donald_Trump ex:address [ ex:city ex:Palm_Beach ;
            ex:country ex:United_States ;
            ex:postalCode 33480 ;
            ex:residence ex:Mar_a_Lago ;
            ex:state ex:Florida ;
            ex:streetName "1100 S Ocean Blvd"^^xsd:string ] ;
    ex:previousAddress [ ex:city ex:Washington_DC ;
            ex:country ex:United_States ;
            ex:phoneNumber "1 202 456 1414"^^xsd:integer ;
            ex:postalCode "20500"^^xsd:integer ;
            ex:residence ex:The_White_House ;
            ex:streetName "1600 Pennsylvania Ave."^^xsd:string ];
    ex:marriedTo ex:Melania_Trump;
    ex:fatherTo (ex:Ivanka_Trump ex:Donald_Trump_Jr ex:Tiffany_Trump ex:Eric_Trump ex:Barron_Trump).
'''

#Read (parse) the Turtle file back into a Python program, and check that the new triples are there
def serialize_Graph():
    newGraph = Graph()
    newGraph.parse("lab2.ttl")
    print(newGraph.serialize())

# serialize_Graph() #Don't need this to run until after adding the triples above to the ttl file

#Write a method (function) that starts with Donald Trump and prints out the graph depth-first to show how the other graph nodes are connected to him
visited_nodes = set()

def create_Tree(model, nodes):
    #Traverse the model breadth-first to create the tree.
    global visited_nodes
    tree = Graph()
    children = set()
    visited_nodes |= set(nodes)
    for s, p, o in model:
        if s in nodes and o not in visited_nodes:
            tree.add((s, p, o))
            visited_nodes.add(o)
            children.add(o)
        if o in nodes and s not in visited_nodes:
            invp = URIRef(f'{p}_inv') #_inv represents inverse of
            tree.add((o, invp, s))
            visited_nodes.add(s)
            children.add(s)
    if len(children) > 0:
        children_tree = create_Tree(model, children)
        for triple in children_tree:
            tree.add(triple)
    return tree

def print_Tree(tree, root, indent=0):
    #Print the tree depth-first.
    print(str(root))
    for s, p, o in tree:
        if s==root:
            print('    '*indent + '  ' + str(p), end=' ')
            print_Tree(tree, o, indent+1)

tree = create_Tree(g, [ex.Donald_Trump])
print_Tree(tree, ex.Donald_Trump)
</syntaxhighlight>


==SPARQL Programming (Lab 4)==
'''NOTE: These tasks were performed on the old dataset; with the new dataset, some of these answers would be different.'''
<syntaxhighlight>
from rdflib import Graph, Namespace, RDF, FOAF
from SPARQLWrapper import SPARQLWrapper, JSON, POST, GET, TURTLE

g = Graph()
g.parse("Russia_investigation_kg.ttl")

# ----- RDFLIB -----
ex = Namespace('http://example.org#')

NS = {
    '': ex,
    'rdf': RDF,
    'foaf': FOAF,
}

# Print out a list of all the predicates used in your graph.
task1 = g.query("""
SELECT DISTINCT ?p WHERE{
    ?s ?p ?o .
}
""", initNs=NS)

print(list(task1))

# Print out a sorted list of all the presidents represented in your graph.
task2 = g.query("""
SELECT DISTINCT ?president WHERE{
    ?s :president ?president .
}
ORDER BY ?president
""", initNs=NS)

print(list(task2))

# Create a dictionary (Python dict) with all the represented presidents as keys. For each key, the value is a list of names of people indicted under that president.
task3_dic = {}

task3 = g.query("""
SELECT ?president ?person WHERE{
    ?s :president ?president;
      :name ?person;
      :outcome :indictment.
}
""", initNs=NS)

for president, person in task3:
    if president not in task3_dic:
        task3_dic[president] = [person]
    else:
        task3_dic[president].append(person)

print(task3_dic)

# Use an ASK query to investigate whether Donald Trump has pardoned more than 5 people.

# This task is a lot trickier than it needs to be. As far as I'm aware RDFLib has no HAVING support, so a query like this:
task4 = g.query("""
ASK {
  SELECT (COUNT(?s) as ?count) WHERE{
    ?s :pardoned :true;
    :president :Bill_Clinton  .
    }
    HAVING (?count > 5)
}
""", initNs=NS)

print(task4.askAnswer)

# which works fine in Blazegraph and is a valid SPARQL query, will always return false in RDFLib, because it uses HAVING. Instead you have to use a nested SELECT query like below, where you use FILTER instead of HAVING. Donald Trump has no pardons, so I have instead chosen Bill Clinton (who has 13 pardons) to check if the query works.

task4 = g.query("""
    ASK{
        SELECT ?count WHERE{{
          SELECT (COUNT(?s) as ?count) WHERE{
            ?s :pardoned :true;
                  :president :Bill_Clinton  .
                }}
        FILTER (?count > 5)
        }
    }
""", initNs=NS)

print(task4.askAnswer)

# Use a DESCRIBE query to create a new graph with information about Donald Trump. Print out the graph in Turtle format.

# By all accounts, it seems DESCRIBE queries are yet to be implemented in RDFLib, but they are attempting to implement it: https://github.com/RDFLib/rdflib/pull/2221 (Issue and proposed solution raised) & https://github.com/RDFLib/rdflib/commit/2325b4a81724c1ccee3a131067db4fbf9b4e2629 (Solution committed to RDFLib). This solution does not work yet. However, this proposed solution should work once DESCRIBE is implemented in RDFLib:

# task5 = g.query("""
# DESCRIBE :Donald_Trump
# """, initNs=NS)

# print(task5.serialize())

# ----- SPARQLWrapper -----

namespace = "kb" #Default namespace
sparql = SPARQLWrapper("http://localhost:9999/blazegraph/namespace/"+ namespace + "/sparql") #Replace localhost:9999 with your URL

# The current dates are URIs, we would want to change them to Literals with datatype "date" for task 1 & 2
update_str = """
    PREFIX ns1: <http://example.org#>

    DELETE {
        ?s ns1:cp_date ?cp;
            ns1:investigation_end ?end;
            ns1:investigation_start ?start.
    }
    INSERT{
        ?s ns1:cp_date ?cpDate;
            ns1:investigation_end ?endDate;
            ns1:investigation_start ?startDate.
    }
    WHERE{
        ?s ns1:cp_date ?cp . #Date conviction was received
        BIND (replace(str(?cp), str(ns1:), "")  AS ?cpRemoved)
        BIND (STRDT(STR(?cpRemoved), xsd:date) AS ?cpDate)

        ?s ns1:investigation_end ?end . #Investigation End
        BIND (replace(str(?end), str(ns1:), "")  AS ?endRemoved)
        BIND (STRDT(STR(?endRemoved), xsd:date) AS ?endDate)

        ?s ns1:investigation_start ?start . #Investigation Start
        BIND (replace(str(?start), str(ns1:), "")  AS ?startRemoved)
        BIND (STRDT(STR(?startRemoved), xsd:date) AS ?startDate)
}"""

sparql.setQuery(update_str)
sparql.setMethod(POST)
sparql.query()

# Ask whether there was an ongoing indictment on the date 1990-01-01.
sparql.setQuery("""
    PREFIX ns1: <http://example.org#>
    ASK {
        SELECT ?end ?start
        WHERE{
            ?s ns1:investigation_end ?end;
              ns1:investigation_start ?start;
              ns1:outcome ns1:indictment.
            FILTER(?start <= "1990-01-01"^^xsd:date && ?end >= "1990-01-01"^^xsd:date)
    }
    }
""")
sparql.setReturnFormat(JSON)
results = sparql.query().convert()
print(f"Are there any investigation on the 1990-01-01: {results['boolean']}")

# List ongoing indictments on that date 1990-01-01.
sparql.setQuery("""
    PREFIX ns1: <http://example.org#>
    SELECT ?s
    WHERE{
        ?s ns1:investigation_end ?end;
          ns1:investigation_start ?start;
          ns1:outcome ns1:indictment.
        FILTER(?start <= "1990-01-01"^^xsd:date && ?end >= "1990-01-01"^^xsd:date)
    }
""")
results = sparql.query().convert()

print("The ongoing investigations on the 1990-01-01 are:")
for result in results["results"]["bindings"]:
    print(result["s"]["value"])

# Describe investigation number 100 (muellerkg:investigation_100).
sparql.setQuery("""
    PREFIX ns1: <http://example.org#>
    DESCRIBE ns1:investigation_100
""")

sparql.setReturnFormat(TURTLE)
results = sparql.query().convert()

print(results.serialize())

# Print out a list of all the types used in your graph.
sparql.setQuery("""
    PREFIX ns1: <http://example.org#>
    PREFIX rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>

    SELECT DISTINCT ?types
    WHERE{
        ?s rdf:type ?types .
    }
""")

sparql.setReturnFormat(JSON)
results = sparql.query().convert()

rdf_Types = []

for result in results["results"]["bindings"]:
    rdf_Types.append(result["types"]["value"])

print(rdf_Types)

# Update the graph so that every resource that is an object in a muellerkg:investigation triple has the rdf:type muellerkg:Investigation.
update_str = """
    PREFIX ns1: <http://example.org#>
    PREFIX rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>

    INSERT{
        ?invest rdf:type ns1:Investigation .
    }
    WHERE{
        ?s ns1:investigation ?invest .
}"""

sparql.setQuery(update_str)
sparql.setMethod(POST)
sparql.query()

#To Test
sparql.setQuery("""
    prefix rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>
    PREFIX ns1: <http://example.org#>

    ASK{
        ns1:watergate rdf:type ns1:Investigation.
    }
""")

sparql.setReturnFormat(JSON)
results = sparql.query().convert()
print(results['boolean'])

# Update the graph so that every resource that is an object in a muellerkg:person triple has the rdf:type muellerkg:IndictedPerson.
update_str = """
    PREFIX ns1: <http://example.org#>
    PREFIX rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>

    INSERT{
        ?person rdf:type ns1:IndictedPerson .
    }
    WHERE{
        ?s ns1:person ?person .
}"""

sparql.setQuery(update_str)
sparql.setMethod(POST)
sparql.query()

#To test, run the query in the above task, replacing the ask query with e.g. ns1:Deborah_Gore_Dean rdf:type ns1:IndictedPerson

# Update the graph so all the investigation nodes (such as muellerkg:watergate) become the subject in a dc:title triple with the corresponding string (watergate) as the literal.
update_str = """
    PREFIX ns1: <http://example.org#>
    PREFIX rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>
    PREFIX dc: <http://purl.org/dc/elements/1.1/>

    INSERT{
        ?invest dc:title ?investString.
    }
    WHERE{
        ?s ns1:investigation ?invest .
        BIND (replace(str(?invest), str(ns1:), "")  AS ?investString)
}"""

sparql.setQuery(update_str)
sparql.setMethod(POST)
sparql.query()

#Same test as above, replace it with e.g. ns1:watergate dc:title "watergate"

# Print out a sorted list of all the indicted persons represented in your graph.
sparql.setQuery("""
    PREFIX ns1: <http://example.org#>
    PREFIX foaf: <http://xmlns.com/foaf/0.1/>

    SELECT ?name
    WHERE{
    ?s  ns1:person ?name;
        ns1:outcome ns1:indictment.
    }
    ORDER BY ?name
""")

sparql.setReturnFormat(JSON)
results = sparql.query().convert()

names = []

for result in results["results"]["bindings"]:
    names.append(result["name"]["value"])

print(names)

# Print out the minimum, average and maximum indictment days for all the indictments in the graph.
sparql.setQuery("""
    prefix xsd: <http://www.w3.org/2001/XMLSchema#>
    PREFIX ns1: <http://example.org#>

    SELECT (AVG(?daysRemoved) as ?avg) (MAX(?daysRemoved) as ?max) (MIN(?daysRemoved) as ?min)  WHERE{
        ?s  ns1:indictment_days ?days;
            ns1:outcome ns1:indictment.

    BIND (replace(str(?days), str(ns1:), "")  AS ?daysR)
    BIND (STRDT(STR(?daysR), xsd:float) AS ?daysRemoved)
}
""")

sparql.setReturnFormat(JSON)
results = sparql.query().convert()

for result in results["results"]["bindings"]:
    print(f'The longest an investigation lasted was: {result["max"]["value"]}')
    print(f'The shortest an investigation lasted was: {result["min"]["value"]}')
    print(f'The average investigation lasted: {result["avg"]["value"]}')

# Print out the minimum, average and maximum indictment days for all the indictments in the graph per investigation.
sparql.setQuery("""
    prefix xsd: <http://www.w3.org/2001/XMLSchema#>
    PREFIX ns1: <http://example.org#>

    SELECT ?investigation (AVG(?daysRemoved) as ?avg) (MAX(?daysRemoved) as ?max) (MIN(?daysRemoved) as ?min)  WHERE{
    ?s  ns1:indictment_days ?days;
        ns1:outcome ns1:indictment;
        ns1:investigation ?investigation.

    BIND (replace(str(?days), str(ns1:), "")  AS ?daysR)
    BIND (STRDT(STR(?daysR), xsd:float) AS ?daysRemoved)
    }
    GROUP BY ?investigation
""")

sparql.setReturnFormat(JSON)
results = sparql.query().convert()

for result in results["results"]["bindings"]:
    print(f'{result["investigation"]["value"]} - min: {result["min"]["value"]}, max: {result["max"]["value"]}, avg: {result["avg"]["value"]}')
</syntaxhighlight>


==CSV To RDF (Lab 5)==
<syntaxhighlight>
#Imports
import re
from pandas import *
from numpy import nan
from rdflib import Graph, Namespace, URIRef, Literal, RDF, XSD, FOAF
from spotlight import SpotlightException, annotate

SERVER = "https://api.dbpedia-spotlight.org/en/annotate"
# Test around with the confidence, and see how many names change depending on the confidence. However, be aware that anything lower than this (0.83) will replace James W. McCord and other names that include James with LeBron James
CONFIDENCE = 0.83

def annotate_entity(entity, filters={'types': 'DBpedia:Person'}):
    annotations = []
    try:
        annotations = annotate(address=SERVER, text=entity, confidence=CONFIDENCE, filters=filters)
    except SpotlightException as e:
        print(e)
    return annotations

g = Graph()
ex = Namespace("http://example.org/")
g.bind("ex", ex)

#Pandas' read_csv function to load russia-investigation.csv
df = read_csv("russia-investigation.csv")
#Replaces all instances of nan with None type using numpy's nan
df = df.replace(nan, None)

#Function that prepares the values to be added to the graph as a URI or Literal
def prepareValue(row):
    if row == None: #none type
        value = Literal(row)
    elif isinstance(row, str) and re.match(r'\d{4}-\d{2}-\d{2}', row): #date
        value = Literal(row, datatype=XSD.date)
    elif isinstance(row, bool): #boolean value (true / false)
        value = Literal(row, datatype=XSD.boolean)
    elif isinstance(row, int): #integer
        value = Literal(row, datatype=XSD.integer)
    elif isinstance(row, str): #string
        value = URIRef(ex + row.replace('"', '').replace(" ", "_").replace(",","").replace("-", "_"))
    elif isinstance(row, float): #float
        value = Literal(row, datatype=XSD.float)

    return value

#Convert the non-semantic CSV dataset into semantic RDF
def csv_to_rdf(df):
    for index, row in df.iterrows():
        id = URIRef(ex + "Investigation_" + str(index))
        investigation = prepareValue(row["investigation"])
        investigation_start = prepareValue(row["investigation-start"])
        investigation_end = prepareValue(row["investigation-end"])
        investigation_days = prepareValue(row["investigation-days"])
        indictment_days = prepareValue(row["indictment-days "])
        cp_date = prepareValue(row["cp-date"])
        cp_days = prepareValue(row["cp-days"])
        overturned = prepareValue(row["overturned"])
        pardoned = prepareValue(row["pardoned"])
        american = prepareValue(row["american"])
        outcome = prepareValue(row["type"])
        name_ex = prepareValue(row["name"])
        president_ex = prepareValue(row["president"])

        #Spotlight Search
        name = annotate_entity(str(row['name']))
        # Removing the period as some presidents won't be found with it
        president = annotate_entity(str(row['president']).replace(".", ""))

        #Adds the triples to the graph
        g.add((id, RDF.type, ex.Investigation))
        g.add((id, ex.investigation, investigation))
        g.add((id, ex.investigation_start, investigation_start))
        g.add((id, ex.investigation_end, investigation_end))
        g.add((id, ex.investigation_days, investigation_days))
        g.add((id, ex.indictment_days, indictment_days))
        g.add((id, ex.cp_date, cp_date))
        g.add((id, ex.cp_days, cp_days))
        g.add((id, ex.overturned, overturned))
        g.add((id, ex.pardoned, pardoned))
        g.add((id, ex.american, american))
        g.add((id, ex.outcome, outcome))

        #Spotlight search
        #Name
        try:
            g.add((id, ex.person, URIRef(name[0]["URI"])))
        except:
            g.add((id, ex.person, name_ex))

        #President
        try:
            g.add((id, ex.president, URIRef(president[0]["URI"])))
        except:
            g.add((id, ex.president, president_ex))

csv_to_rdf(df)
print(g.serialize())
</syntaxhighlight>


==SHACL (Lab 6)==
<syntaxhighlight>
<syntaxhighlight>
ASK {
  ex:Socrates ^ex:hasHusband ex:Xanthippe .
}
</syntaxhighlight>


<syntaxhighlight>
from pyshacl import validate
INSERT DATA {
from rdflib import Graph
    ex:hasWife rdfs:subPropertyOf ex:hasSpouse .
    ex:hasSpouse rdf:type owl:SymmetricProperty .
}
</syntaxhighlight>


<syntaxhighlight>
data_graph = Graph()
ASK {
# parses the Turtle examples from the lab
  ex:Socrates ex:hasSpouse ex:Xanthippe .
data_graph.parse("data_graph.ttl")
}
</syntaxhighlight>


<syntaxhighlight>
# Remember to test you need to change the rules so they conflict with the data graph (or vice versa). For example, change "exactly one name" to have exactly two, and see the output
ASK {
shape_graph = """
  ex:Socrates ^ex:hasSpouse ex:Xanthippe .
@prefix ex: <http://example.org/> .
}
@prefix foaf: <http://xmlns.com/foaf/0.1/> .
</syntaxhighlight>
@prefix sh: <http://www.w3.org/ns/shacl#> .
@prefix xsd: <http://www.w3.org/2001/XMLSchema#> .
@prefix rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#> .


ex:LabTasks_Shape
    a sh:NodeShape ;
    sh:targetClass ex:PersonUnderInvestigation ;
    sh:property [
        sh:path foaf:name ;
        sh:minCount 1 ; #Every person under investigation has exactly one name.
        sh:maxCount 1 ; #Every person under investigation has exactly one name.
        sh:datatype rdf:langString ; #All person names must be language-tagged
    ] ;
    sh:property [
        sh:path ex:chargedWith ;
        sh:nodeKind sh:IRI ; #The object of a charged with property must be a URI.
        sh:class ex:Offense ; #The object of a charged with property must be an offense.
    ] .


# --- If you have more time tasks ---
ex:MoreTime_Shape rdf:type sh:NodeShape;
    sh:targetClass ex:Indictment;
   
    # The only allowed values for ex:american are true, false or unknown.
    sh:property [
        sh:path ex:american;
        sh:pattern "(true|false|unknown)" ;
    ] ;
   
    # The value of a property that counts days must be an integer.
    sh:property [
        sh:path ex:indictment_days;
        sh:datatype xsd:integer;
    ] ; 
    sh:property [
        sh:path ex:investigation_days;
        sh:datatype xsd:integer;
    ] ;
   
    # The value of a property that indicates a start date must be xsd:date.
    sh:property [
        sh:path ex:investigation_start;
        sh:datatype xsd:date;
    ] ;


    # The value of a property that indicates an end date must be xsd:date or unknown (tip: you can use sh:or (...) ).
    sh:property [
        sh:path ex:investigation_end;
        sh:or (
        [ sh:datatype xsd:date ]
        [ sh:hasValue "unknown" ]
    )] ;
   
    # Every indictment must have exactly one FOAF name for the investigated person.
    sh:property [
        sh:path foaf:name;
        sh:minCount 1;
        sh:maxCount 1;
    ] ;
   
    # Every indictment must have exactly one investigated person property, and that person must have the type ex:PersonUnderInvestigation.
    sh:property [
        sh:path ex:investigatedPerson ;
        sh:minCount 1 ;
        sh:maxCount 1 ;
        sh:class ex:PersonUnderInvestigation ;
        sh:nodeKind sh:IRI ;
    ] ;


    # No URI-s can contain hyphens ('-').
    sh:property [
        sh:path ex:outcome ;
        sh:nodeKind sh:IRI ;
        sh:pattern "^[^-]*$" ;
    ] ;


    # Presidents must be identified with URIs.
    sh:property [
        sh:path ex:president ;
        sh:class ex:President ;
        sh:nodeKind sh:IRI ;
    ] .
"""


===XML Data for above example===
shacl_graph = Graph()
<syntaxhighlight>
# parses the contents of a shape_graph made in the tasks
<data>
shacl_graph.parse(data=shape_graph)
    <tv_show id="1050">
        <title>The_Sopranos</title>
        <actor>
            <firstname>James</firstname>
            <lastname>Gandolfini</lastname>
        </actor>
    </tv_show>
    <tv_show id="1066">
        <title>Seinfeld</title>
        <actor>
            <firstname>Jerry</firstname>
            <lastname>Seinfeld</lastname>
        </actor>
        <actor>
            <firstname>Julia</firstname>
            <lastname>Louis-dreyfus</lastname>
        </actor>
        <actor>
            <firstname>Jason</firstname>
            <lastname>Alexander</lastname>
        </actor>
    </tv_show>
</data>
</syntaxhighlight>


==Lifting HTML to RDF==
# uses pySHACL's validate method to apply the shape_graph constraints to the data_graph
<syntaxhighlight>
results = validate(
from bs4 import BeautifulSoup as bs, NavigableString
    data_graph,
from rdflib import Graph, URIRef, Namespace
    shacl_graph=shacl_graph,
from rdflib.namespace import RDF
    inference='both'
)


g = Graph()
# prints out the validation result
ex = Namespace("http://example.org/")
boolean_value, results_graph, results_text = results
g.bind("ex", ex)


html = open("tv_shows.html").read()
# print(boolean_value)
html = bs(html, features="html.parser")
print(results_graph.serialize(format='ttl'))
# print(results_text)


shows = html.find_all('li', attrs={'class': 'show'})
#Write a SPARQL query to print out each distinct sh:resultMessage in the results_graph
for show in shows:
distinct_messages = """
    title = show.find("h3").text
PREFIX sh: <http://www.w3.org/ns/shacl#>
    actors = show.find('ul', attrs={'class': 'actor_list'})
    for actor in actors:
        if isinstance(actor, NavigableString):
            continue
        else:
            actor = actor.text.replace(" ", "_")
            g.add((URIRef(ex + title), ex.stars, URIRef(ex + actor)))
            g.add((URIRef(ex + actor), RDF.type, ex.Actor))


     g.add((URIRef(ex + title), RDF.type, ex.TV_Show))
SELECT DISTINCT ?message WHERE {
     [] sh:result ?errorBlankNode.
    ?errorBlankNode sh:resultMessage ?message.  


    # Alternativ and cleaner solution, look at https://www.w3.org/TR/sparql11-query/#pp-language (9.1 Property Path Syntax)
    # [] sh:result / sh:resultMessage ?message .
}
"""
messages = results_graph.query(distinct_messages)
for row in messages:
    print(row.message)


print(g.serialize(format="turtle").decode())
#each sh:resultMessage in the results_graph once, along with the number of times that message has been repeated in the results
</syntaxhighlight>
count_messages = """
 
PREFIX sh: <http://www.w3.org/ns/shacl#>  
===HTML code for the example above===
<syntaxhighlight>
<!DOCTYPE html>
<html>
<head>
    <meta charset="utf-8">
    <title></title>
</head>
<body>
    <div class="tv_shows">
        <ul>
            <li class="show">
                <h3>The_Sopranos</h3>
                <div class="irrelevant_data"></div>
                <ul class="actor_list">
                    <li>James Gandolfini</li>
                </ul>
            </li>
            <li class="show">
                <h3>Seinfeld</h3>
                <div class="irrelevant_data"></div>
                <ul class="actor_list">
                    <li >Jerry Seinfeld</li>
                    <li>Jason Alexander</li>
                    <li>Julia Louis-Dreyfus</li>
                </ul>
            </li>
        </ul>
    </div>
</body>
</html>
</syntaxhighlight>
 
==Web APIs with JSON==
<syntaxhighlight>
import requests
import json
import pprint
 
# Retrieve JSON data from API service URL. Then load it with the json library as a json object.
url = "http://api.geonames.org/postalCodeLookupJSON?postalcode=46020&country=ES&username=demo"
data = requests.get(url).content.decode("utf-8")
data = json.loads(data)
pprint.pprint(data)
</syntaxhighlight>
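The JSON object can then be lifted into RDF in the same spirit as the HTML example above. A minimal sketch; the GeoNames field names used here ("postalcodes", "placeName", "lat", "lng") are assumptions, so check them against the pprint output first:

<syntaxhighlight>
from rdflib import Graph, Namespace, URIRef, Literal, RDF, XSD

EX = Namespace("http://example.org/")
g = Graph()
g.bind("ex", EX)

# 'data' is the dict loaded from the API response above; the field names are assumptions
for place in data.get("postalcodes", []):
    place_uri = URIRef(EX + place["placeName"].replace(" ", "_"))
    g.add((place_uri, RDF.type, EX.Place))
    g.add((place_uri, EX.latitude, Literal(place["lat"], datatype=XSD.decimal)))
    g.add((place_uri, EX.longitude, Literal(place["lng"], datatype=XSD.decimal)))

print(g.serialize(format="turtle"))
</syntaxhighlight>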
 
 
==JSON-LD==
 
<syntaxhighlight>
import rdflib
 
g = rdflib.Graph()


example = """
{
  "@context": {
    "name": "http://xmlns.com/foaf/0.1/name",
    "homepage": {
      "@id": "http://xmlns.com/foaf/0.1/homepage",
      "@type": "@id"
     }
  },
  "@id": "http://me.markus-lanthaler.com/",
  "name": "Markus Lanthaler",
  "homepage": "http://www.markus-lanthaler.com/"
}
"""


# json-ld parsing automatically deals with @contexts
g.parse(data=example, format='json-ld')
 
# serialisation does expansion by default
# (in rdflib 6+, serialize() returns a str, so no .decode() is needed)
for line in g.serialize(format='json-ld').splitlines():
    print(line)


# by supplying a context object, serialisation can do compaction
context = {
    "foaf": "http://xmlns.com/foaf/0.1/"
}
for line in g.serialize(format='json-ld', context=context).splitlines():
    print(line)
</syntaxhighlight>




<div class="credits" style="text-align: right; direction: ltr; margin-left: 1em;">''INFO216, UiB, 2017-2020. All code examples are [https://creativecommons.org/choose/zero/ CC0].'' </div>
==OWL - Complex Classes and Restrictions==
<syntaxhighlight>
import owlrl
from rdflib import Graph, Literal, Namespace, BNode
from rdflib.namespace import RDF, OWL, RDFS
from rdflib.collection import Collection


g = Graph()
ex = Namespace("http://example.org/")
 
g.bind("ex", ex)
g.bind("owl", OWL)


# a Season is either Autumn, Winter, Spring, Summer
seasons = BNode()
Collection(g, seasons, [ex.Winter, ex.Autumn, ex.Spring, ex.Summer])
g.add((ex.Season, OWL.oneOf, seasons))


# A Parent is a Father or Mother
b = BNode()
Collection(g, b, [ex.Father, ex.Mother])
g.add((ex.Parent, OWL.unionOf, b))


# A Woman is a person who has the "female" gender
br = BNode()
g.add((br, RDF.type, OWL.Restriction))
g.add((br, OWL.onProperty, ex.gender))
g.add((br, OWL.hasValue, ex.Female))
bi = BNode()
Collection(g, bi, [ex.Person, br])
g.add((ex.Woman, OWL.intersectionOf, bi))


# A vegetarian is a Person who only eats vegetarian food
br = BNode()
g.add((br, RDF.type, OWL.Restriction))
g.add((br, OWL.onProperty, ex.eats))
g.add((br, OWL.allValuesFrom, ex.VeganFood))
bi = BNode()
Collection(g, bi, [ex.Person, br])
g.add((ex.Vegetarian, OWL.intersectionOf, bi))


# A vegetarian is a Person who can not eat meat.
br = BNode()
g.add((br, RDF.type, OWL.Restriction))
g.add((br, OWL.onProperty, ex.eats))
g.add((br, OWL.qualifiedCardinality, Literal(0)))  # note: the OWL term is owl:qualifiedCardinality
g.add((br, OWL.onClass, ex.Meat))
bi = BNode()
Collection(g, bi, [ex.Person, br])
g.add((ex.Vegetarian, OWL.intersectionOf, bi))


# A Worried Parent is a parent who has at least one sick child
br = BNode()
g.add((br, RDF.type, OWL.Restriction))
g.add((br, OWL.onProperty, ex.hasChild))
g.add((br, OWL.minQualifiedCardinality, Literal(1)))  # note: the OWL term is owl:minQualifiedCardinality
g.add((br, OWL.onClass, ex.Sick))
bi = BNode()
Collection(g, bi, [ex.Parent, br])
g.add((ex.WorriedParent, OWL.intersectionOf, bi))


# using the restriction above, If we now write...:
g.add((ex.Bob, RDF.type, ex.Parent))
g.add((ex.Bob, ex.hasChild, ex.John))
g.add((ex.John, RDF.type, ex.Sick))
# ...we can infer with OWL reasoning that Bob is a worried parent, even though we didn't state it
# ourselves, because Bob fulfils the restriction and the Parent requirement.
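
# A sketch of how one might materialise inferences with owlrl and then check the result.
# Note: owlrl implements the OWL 2 RL profile, which does not cover every construct used
# above (for example, inferring membership from a minimum qualified cardinality), so this
# particular inference may still need a full OWL-DL reasoner such as HermiT -- see the
# Protege section below.
owlrl.DeductiveClosure(owlrl.OWLRL_Semantics).expand(g)
print(g.query('ASK { ex:Bob a ex:WorriedParent }', initNs={'ex': ex}).askAnswer)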


</syntaxhighlight>
==Protege-OWL reasoning with HermiT==
[[:File:DL-reasoning-RoyalFamily-final.owl.txt | Example file]] from Lecture 13 about OWL-DL, rules and reasoning.


==More examples from Lecture 2 (RDF): literals, containers, collections==

Literals:

<syntaxhighlight>
from rdflib import Graph, Namespace, Literal, BNode, RDF, RDFS, DC, FOAF, XSD

EX = Namespace('http://example.org/')

g = Graph()
g.bind('ex', EX)

g.add((EX.Robert_Mueller, RDF.type, EX.Human))
g.add((EX.Robert_Mueller, FOAF.name, Literal('Robert Mueller', lang='en')))
g.add((EX.Robert_Mueller, FOAF.name, Literal('رابرت مولر', lang='fa')))
g.add((EX.Robert_Mueller, DC.description, Literal('sixth director of the FBI', datatype=XSD.string)))
g.add((EX.Robert_Mueller, EX.start_time, Literal(2001, datatype=XSD.integer)))

print(g.serialize(format='turtle'))
</syntaxhighlight>

Alternative container (open):

<syntaxhighlight>
from rdflib import Graph, Namespace, Literal, BNode, RDF, RDFS, DC, FOAF, XSD

EX = Namespace('http://example.org/')
SCHEMA = Namespace('http://schema.org/')  # assumption: SCHEMA is used below but was not defined in the original

g = Graph()
g.bind('ex', EX)

muellerReportArchives = BNode()
g.add((muellerReportArchives, RDF.type, RDF.Alt))

archive1 = 'https://archive.org/details/MuellerReportVolume1Searchable/' \
                    'Mueller%20Report%20Volume%201%20Searchable/'
archive2 = 'https://edition.cnn.com/2019/04/18/politics/full-mueller-report-pdf/index.html'
archive3 = 'https://www.politico.com/story/2019/04/18/mueller-report-pdf-download-text-file-1280891'

g.add((muellerReportArchives, RDFS.member, Literal(archive1, datatype=XSD.anyURI)))
g.add((muellerReportArchives, RDFS.member, Literal(archive2, datatype=XSD.anyURI)))
g.add((muellerReportArchives, RDFS.member, Literal(archive3, datatype=XSD.anyURI)))

g.add((EX.Mueller_Report, RDF.type, FOAF.Document))
g.add((EX.Mueller_Report, DC.contributor, EX.Robert_Mueller))
g.add((EX.Mueller_Report, SCHEMA.archivedAt, muellerReportArchives))

print(g.serialize(format='turtle'))
</syntaxhighlight>

Sequence container (open):

<syntaxhighlight>
from rdflib import Graph, Namespace, Literal, BNode, RDF, RDFS, DC, FOAF, XSD

EX = Namespace('http://example.org/')
SCHEMA = Namespace('http://schema.org/')  # assumption: SCHEMA is used below but was not defined in the original

g = Graph()
g.bind('ex', EX)

donaldTrumpSpouses = BNode()
g.add((donaldTrumpSpouses, RDF.type, RDF.Seq))
g.add((donaldTrumpSpouses, RDF._1, EX.IvanaTrump))
g.add((donaldTrumpSpouses, RDF._2, EX.MarlaMaples))
g.add((donaldTrumpSpouses, RDF._3, EX.MelaniaTrump))

g.add((EX.Donald_Trump, SCHEMA.spouse, donaldTrumpSpouses))

print(g.serialize(format='turtle'))
</syntaxhighlight>

Collection (closed list):

<syntaxhighlight>
from rdflib import Graph, Namespace, Literal, BNode, RDF, RDFS, DC, FOAF, XSD
from rdflib.collection import Collection

EX = Namespace('http://example.org/')
SCHEMA = Namespace('http://schema.org/')  # assumption: SCHEMA is used below but was not defined in the original

g = Graph()
g.bind('ex', EX)

donaldTrumpSpouses = BNode()
Collection(g, donaldTrumpSpouses, [
    EX.IvanaTrump, EX.MarlaMaples, EX.MelaniaTrump
])
g.add((EX.Donald_Trump, SCHEMA.spouse, donaldTrumpSpouses))

print(g.serialize(format='turtle'))
g.serialize(destination='s02_Donald_Trump_spouses_list.ttl', format='turtle')
</syntaxhighlight>

=Example lab solutions=

==Getting started (Lab 1)==
<syntaxhighlight>
from rdflib import Graph, Namespace

g = Graph()

ex = Namespace('http://example.org/')

g.bind("ex", ex)

#The Mueller Investigation was led by Robert Mueller.
g.add((ex.Mueller_Investigation, ex.leadBy, ex.Robert_Mueller))

#It involved Paul Manafort, Rick Gates, George Papadopoulos, Michael Flynn, and Roger Stone.
g.add((ex.Mueller_Investigation, ex.involved, ex.Paul_Manafort))
g.add((ex.Mueller_Investigation, ex.involved, ex.Rick_Gates))
g.add((ex.Mueller_Investigation, ex.involved, ex.George_Papadopoulos))
g.add((ex.Mueller_Investigation, ex.involved, ex.Michael_Flynn))
g.add((ex.Mueller_Investigation, ex.involved, ex.Michael_Cohen))
g.add((ex.Mueller_Investigation, ex.involved, ex.Roger_Stone))

# --- Paul Manafort ---
#Paul Manafort was business partner of Rick Gates.
g.add((ex.Paul_Manafort, ex.businessPartner, ex.Rick_Gates))
# He was campaign chairman for Trump
g.add((ex.Paul_Manafort, ex.campaignChairman, ex.Donald_Trump))

# He was charged with money laundering, tax evasion, and foreign lobbying.
g.add((ex.Paul_Manafort, ex.chargedWith, ex.MoneyLaundering))
g.add((ex.Paul_Manafort, ex.chargedWith, ex.TaxEvasion))
g.add((ex.Paul_Manafort, ex.chargedWith, ex.ForeignLobbying))

# He was convicted for bank and tax fraud.
g.add((ex.Paul_Manafort, ex.convictedFor, ex.BankFraud))
g.add((ex.Paul_Manafort, ex.convictedFor, ex.TaxFraud))

# He pleaded guilty to conspiracy.
g.add((ex.Paul_Manafort, ex.pleadGuiltyTo, ex.Conspiracy))
# He was sentenced to prison.
g.add((ex.Paul_Manafort, ex.sentencedTo, ex.Prison))
# He negotiated a plea agreement.
g.add((ex.Paul_Manafort, ex.negotiated, ex.PleaBargain))

# --- Rick Gates ---
#Rick Gates was charged with money laundering, tax evasion and foreign lobbying.
g.add((ex.Rick_Gates, ex.chargedWith, ex.MoneyLaundering))
g.add((ex.Rick_Gates, ex.chargedWith, ex.TaxEvasion))
g.add((ex.Rick_Gates, ex.chargedWith, ex.ForeignLobbying))

#He pleaded guilty to conspiracy and lying to FBI.
g.add((ex.Rick_Gates, ex.pleadGuiltyTo, ex.Conspiracy))
g.add((ex.Rick_Gates, ex.pleadGuiltyTo, ex.LyingToFBI))

#Use the serialize method to write out the model in different formats on screen
print(g.serialize(format="ttl"))
# g.serialize("lab1.ttl", format="ttl") #or to file

#Loop through the triples in the model to print out all triples that have pleading guilty as predicate
for subject, object in g[ : ex.pleadGuiltyTo : ]:
    print(subject, ex.pleadGuiltyTo, object)

# Michael Cohen, Michael Flynn and the lying is part of lab 2 and therefore the answer is not provided this week 

#Write a method (function) that submits your model for rendering and saves the returned image to file.
import requests
import shutil

def graphToImage(graph):
    data = {"rdf":graph, "from":"ttl", "to":"png"}
    link = "http://www.ldf.fi/service/rdf-grapher"
    response = requests.get(link, params = data, stream=True)
    # print(response.content)
    print(response.raw)
    with open("lab1.png", "wb") as fil:
        shutil.copyfileobj(response.raw, fil)

graph = g.serialize(format="ttl")
graphToImage(graph)
</syntaxhighlight>

==RDF programming with RDFlib (Lab 2)==
<syntaxhighlight>
from rdflib import Graph, URIRef, Namespace, Literal, XSD, BNode
from rdflib.collection import Collection

g = Graph()
g.parse("lab1.ttl", format="ttl") #Retrieves the triples from lab 1

ex = Namespace('http://example.org/')

# --- Michael Cohen ---
#Michael Cohen was Donald Trump's attorney.
g.add((ex.Michael_Cohen, ex.attorneyTo, ex.Donald_Trump))
#He pleaded guilty to lying to Congress.
g.add((ex.Michael_Cohen, ex.pleadGuiltyTo, ex.LyingToCongress))

# --- Michael Flynn ---
#Michael Flynn was adviser to Trump.
g.add((ex.Michael_Flynn, ex.adviserTo, ex.Donald_Trump))
#He pleaded guilty to lying to the FBI.
g.add((ex.Michael_Flynn, ex.pleadGuiltyTo, ex.LyingToFBI))
# He negotiated a plea agreement.
g.add((ex.Michael_Flynn, ex.negotiated, ex.PleaBargain))

#How can you modify your knowledge graph to account for the different lying?
#Remove these to not have duplicates
g.remove((ex.Michael_Flynn, ex.pleadGuiltyTo, ex.LyingToFBI)) 
g.remove((ex.Michael_Flynn, ex.negotiated, ex.PleaBargain))
g.remove((ex.Rick_Gates, ex.pleadGuiltyTo, ex.LyingToFBI))
g.remove((ex.Rick_Gates, ex.pleadGuiltyTo, ex.Conspiracy))
g.remove((ex.Rick_Gates, ex.chargedWith, ex.ForeignLobbying))
g.remove((ex.Rick_Gates, ex.chargedWith, ex.MoneyLaundering))
g.remove((ex.Rick_Gates, ex.chargedWith, ex.TaxEvasion))
g.remove((ex.Michael_Cohen, ex.pleadGuiltyTo, ex.LyingToCongress))

# --- Michael Flynn ---
FlynnLying = BNode() 
g.add((FlynnLying, ex.crime, ex.LyingToFBI))
g.add((FlynnLying, ex.pleadGuiltyOn, Literal("2017-12-01", datatype=XSD.date)))
g.add((FlynnLying, ex.liedAbout, Literal("His communications with a former Russian ambassador during the presidential transition", datatype=XSD.string)))
g.add((FlynnLying, ex.pleaBargain, Literal("true", datatype=XSD.boolean)))
g.add((ex.Michael_Flynn, ex.pleadGuiltyTo, FlynnLying))

# --- Rick Gates ---
GatesLying = BNode()
Crimes = BNode()
Charged = BNode()
Collection(g, Crimes, [ex.LyingToFBI, ex.Conspiracy])
Collection(g, Charged, [ex.ForeignLobbying, ex.MoneyLaundering, ex.TaxEvasion])
g.add((GatesLying, ex.crime, Crimes))
g.add((GatesLying, ex.chargedWith, Charged))
g.add((GatesLying, ex.pleadGuiltyOn, Literal("2018-02-23", datatype=XSD.date)))
g.add((GatesLying, ex.pleaBargain, Literal("true", datatype=XSD.boolean)))
g.add((ex.Rick_Gates, ex.pleadGuiltyTo, GatesLying))

# --- Michael Cohen ---
CohenLying = BNode()
g.add((CohenLying, ex.crime, ex.LyingToCongress))
g.add((CohenLying, ex.liedAbout, ex.TrumpRealEstateDeal))
g.add((CohenLying, ex.prosecutorsAlleged, Literal("In an August 2017 letter Cohen sent to congressional committees investigating Russian election interference, he falsely stated that the project ended in January 2016", datatype=XSD.string)))
g.add((CohenLying, ex.mullerInvestigationAlleged, Literal("Cohen falsely stated that he had never agreed to travel to Russia for the real estate deal and that he did not recall any contact with the Russian government about the project", datatype=XSD.string)))
g.add((CohenLying, ex.pleadGuiltyOn, Literal("2018-11-29", datatype=XSD.date)))
g.add((CohenLying, ex.pleaBargain, Literal("true", datatype=XSD.boolean)))
g.add((ex.Michael_Cohen, ex.pleadGuiltyTo, CohenLying))

print(g.serialize(format="ttl"))

#Save (serialize) your graph to a Turtle file.
# g.serialize("lab2.ttl", format="ttl")

#Add a few triples to the Turtle file with more information about Donald Trump.
'''
ex:Donald_Trump ex:address [ ex:city ex:Palm_Beach ;
            ex:country ex:United_States ;
            ex:postalCode 33480 ;
            ex:residence ex:Mar_a_Lago ;
            ex:state ex:Florida ;
            ex:streetName "1100 S Ocean Blvd"^^xsd:string ] ;
    ex:previousAddress [ ex:city ex:Washington_DC ;
            ex:country ex:United_States ;
            ex:phoneNumber "1 202 456 1414"^^xsd:string ;
            ex:postalCode "20500"^^xsd:integer ;
            ex:residence ex:The_White_House ;
            ex:streetName "1600 Pennsylvania Ave."^^xsd:string ];
    ex:marriedTo ex:Melania_Trump;
    ex:fatherTo (ex:Ivanka_Trump ex:Donald_Trump_Jr ex:Tiffany_Trump ex:Eric_Trump ex:Barron_Trump).
'''

#Read (parse) the Turtle file back into a Python program, and check that the new triples are there
def serialize_Graph():
    newGraph = Graph()
    newGraph.parse("lab2.ttl")
    print(newGraph.serialize())

# serialize_Graph() #Don't need this to run until after adding the triples above to the ttl file

#Write a method (function) that starts with Donald Trump prints out a graph depth-first to show how the other graph nodes are connected to him
visited_nodes = set()

def create_Tree(model, nodes):
    #Traverse the model breadth-first to create the tree.
    global visited_nodes
    tree = Graph()
    children = set()
    visited_nodes |= set(nodes)
    for s, p, o in model:
        if s in nodes and o not in visited_nodes:
            tree.add((s, p, o))
            visited_nodes.add(o)
            children.add(o)
        if o in nodes and s not in visited_nodes:
            invp = URIRef(f'{p}_inv') #_inv represents inverse of
            tree.add((o, invp, s))
            visited_nodes.add(s)
            children.add(s)
    if len(children) > 0:
        children_tree = create_Tree(model, children)
        for triple in children_tree:
            tree.add(triple)
    return tree

def print_Tree(tree, root, indent=0):
    #Print the tree depth-first.
    print(str(root))
    for s, p, o in tree:
        if s==root:
            print('    '*indent + '  ' + str(p), end=' ')
            print_Tree(tree, o, indent+1)
    
tree = create_Tree(g, [ex.Donald_Trump])
print_Tree(tree, ex.Donald_Trump)
</syntaxhighlight>

==SPARQL Programming (Lab 4)==

NOTE: These tasks were performed on the old dataset; with the new dataset, some of these answers would be different.

<syntaxhighlight>
from rdflib import Graph, Namespace, RDF, FOAF
from SPARQLWrapper import SPARQLWrapper, JSON, POST, GET, TURTLE

g = Graph()
g.parse("Russia_investigation_kg.ttl")

# ----- RDFLIB -----
ex = Namespace('http://example.org#')

NS = {
    '': ex,
    'rdf': RDF,
    'foaf': FOAF,
}

# Print out a list of all the predicates used in your graph.
task1 = g.query("""
SELECT DISTINCT ?p WHERE{
    ?s ?p ?o .
}
""", initNs=NS)

print(list(task1))

# Print out a sorted list of all the presidents represented in your graph.
task2 = g.query("""
SELECT DISTINCT ?president WHERE{
    ?s :president ?president .
}
ORDER BY ?president
""", initNs=NS)

print(list(task2))

# Create dictionary (Python dict) with all the represented presidents as keys. For each key, the value is a list of names of people indicted under that president.
task3_dic = {}

task3 = g.query("""
SELECT ?president ?person WHERE{
    ?s :president ?president;
       :name ?person;
       :outcome :indictment.
}
""", initNs=NS)

for president, person in task3:
    if president not in task3_dic:
        task3_dic[president] = [person]
    else:
        task3_dic[president].append(person)

print(task3_dic)

# Use an ASK query to investigate whether Donald Trump has pardoned more than 5 people.

# This task is a lot trickier than it needs to be. As far as I'm aware RDFLib has no HAVING support, so a query like this:
task4 = g.query("""
ASK {
  	SELECT (COUNT(?s) as ?count) WHERE{
    	?s :pardoned :true;
   	   :president :Bill_Clinton  .
    }
    HAVING (?count > 5)
}
""", initNs=NS)

print(task4.askAnswer)

# The query above is valid SPARQL and works fine in Blazegraph, but it will always return false in RDFLib because RDFLib does not support HAVING. Instead you have to use a nested SELECT query like below, with FILTER instead of HAVING. Donald Trump has no pardons, so I have instead chosen Bill Clinton (who has 13 pardons) to check that the query works.

task4 = g.query("""
    ASK{
        SELECT ?count WHERE{{
  	        SELECT (COUNT(?s) as ?count) WHERE{
    	        ?s :pardoned :true;
                   :president :Bill_Clinton  .
                }}
        FILTER (?count > 5) 
        }
    }
""", initNs=NS)

print(task4.askAnswer)

# Use a DESCRIBE query to create a new graph with information about Donald Trump. Print out the graph in Turtle format.

# DESCRIBE queries do not seem to be implemented in RDFLib yet, but work is in progress: https://github.com/RDFLib/rdflib/pull/2221 (issue and proposed solution) & https://github.com/RDFLib/rdflib/commit/2325b4a81724c1ccee3a131067db4fbf9b4e2629 (solution committed to RDFLib). The query below therefore does not work yet, but it should once DESCRIBE support lands; a CONSTRUCT-based workaround is sketched after it.

# task5 = g.query(""" 
# DESCRIBE :Donald_Trump
# """, initNs=NS)

# print(task5.serialize())
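
# Until DESCRIBE is supported, one workaround (an approximation, not a real DESCRIBE) is a
# CONSTRUCT query that collects the triples where the resource occurs as subject or object:
describe_workaround = """
CONSTRUCT {
    :Donald_Trump ?p ?o .
    ?s ?p2 :Donald_Trump .
}
WHERE {
    { :Donald_Trump ?p ?o . } UNION { ?s ?p2 :Donald_Trump . }
}
"""
task5_graph = g.query(describe_workaround, initNs=NS).graph
print(task5_graph.serialize(format="turtle"))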

# ----- SPARQLWrapper -----

namespace = "kb" #Default namespace
sparql = SPARQLWrapper("http://localhost:9999/blazegraph/namespace/"+ namespace + "/sparql") #Replace localhost:9999 with your URL

# The current dates are URIs, we would want to change them to Literals with datatype "date" for task 1 & 2
update_str = """
    PREFIX ns1: <http://example.org#>

    DELETE {
        ?s ns1:cp_date ?cp;
            ns1:investigation_end ?end;
            ns1:investigation_start ?start.
    }
    INSERT{
        ?s ns1:cp_date ?cpDate;
            ns1:investigation_end ?endDate;
            ns1:investigation_start ?startDate.
    }
    WHERE{
        ?s ns1:cp_date ?cp . #Date the conviction was received
        BIND (replace(str(?cp), str(ns1:), "")  AS ?cpRemoved)
        BIND (STRDT(STR(?cpRemoved), xsd:date) AS ?cpDate)
        
        ?s ns1:investigation_end ?end . #Investigation End
        BIND (replace(str(?end), str(ns1:), "")  AS ?endRemoved)
        BIND (STRDT(STR(?endRemoved), xsd:date) AS ?endDate)
        
        ?s ns1:investigation_start ?start . #Investigation Start
        BIND (replace(str(?start), str(ns1:), "")  AS ?startRemoved)
        BIND (STRDT(STR(?startRemoved), xsd:date) AS ?startDate)
}"""

sparql.setQuery(update_str)
sparql.setMethod(POST)
sparql.query()
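
# Optional check (not part of the lab tasks): after the update, the dates should come back as
# literals with an xsd:date datatype rather than as URIs.
sparql.setQuery("""
    PREFIX ns1: <http://example.org#>
    SELECT ?s ?start WHERE { ?s ns1:investigation_start ?start . } LIMIT 5
""")
sparql.setReturnFormat(JSON)
for binding in sparql.query().convert()["results"]["bindings"]:
    print(binding["s"]["value"], binding["start"]["value"], binding["start"].get("datatype"))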

# Ask whether there was an ongoing indictment on the date 1990-01-01.
sparql.setQuery("""
    PREFIX ns1: <http://example.org#>
    ASK {
        SELECT ?end ?start
        WHERE{
            ?s ns1:investigation_end ?end;
               ns1:investigation_start ?start;
               ns1:outcome ns1:indictment.
            FILTER(?start <= "1990-01-01"^^xsd:date && ?end >= "1990-01-01"^^xsd:date) 
	    }
    }
""")
sparql.setReturnFormat(JSON)
results = sparql.query().convert()
print(f"Was there any ongoing indictment on 1990-01-01: {results['boolean']}")

# List ongoing indictments on that date 1990-01-01.
sparql.setQuery("""
    PREFIX ns1: <http://example.org#>
    SELECT ?s
    WHERE{
        ?s ns1:investigation_end ?end;
           ns1:investigation_start ?start;
           ns1:outcome ns1:indictment.
        FILTER(?start <= "1990-01-01"^^xsd:date && ?end >= "1990-01-01"^^xsd:date) 
    }
""")

sparql.setReturnFormat(JSON)
results = sparql.query().convert()

print("The ongoing indictments on 1990-01-01 were:")
for result in results["results"]["bindings"]:
    print(result["s"]["value"])

# Describe investigation number 100 (muellerkg:investigation_100).
sparql.setQuery("""
    PREFIX ns1: <http://example.org#>
    DESCRIBE ns1:investigation_100
""")

sparql.setReturnFormat(TURTLE)
results = sparql.query().convert()

print(results.serialize())

# Print out a list of all the types used in your graph.
sparql.setQuery("""
    PREFIX ns1: <http://example.org#>
    PREFIX rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>

    SELECT DISTINCT ?types
    WHERE{
        ?s rdf:type ?types . 
    }
""")

sparql.setReturnFormat(JSON)
results = sparql.query().convert()

rdf_Types = []

for result in results["results"]["bindings"]:
    rdf_Types.append(result["types"]["value"])

print(rdf_Types)

# Update the graph so that every resource that is an object in a muellerkg:investigation triple has the rdf:type muellerkg:Investigation.
update_str = """
    PREFIX ns1: <http://example.org#>
    PREFIX rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>

    INSERT{
        ?invest rdf:type ns1:Investigation .
    }
    WHERE{
        ?s ns1:investigation ?invest .
}"""

sparql.setQuery(update_str)
sparql.setMethod(POST)
sparql.query()

#To Test
sparql.setQuery("""
    prefix rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>
    PREFIX ns1: <http://example.org#>

    ASK{
        ns1:watergate rdf:type ns1:Investigation.
    }
""")

sparql.setReturnFormat(JSON)
results = sparql.query().convert()
print(results['boolean'])

# Update the graph so that every resource that is an object in a muellerkg:person triple has the rdf:type muellerkg:IndictedPerson.
update_str = """
    PREFIX ns1: <http://example.org#>
    PREFIX rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>

    INSERT{
        ?person rdf:type ns1:IndictedPerson .
    }
    WHERE{
        ?s ns1:person ?person .
}"""

sparql.setQuery(update_str)
sparql.setMethod(POST)
sparql.query()

#To test, run the same kind of ASK query as in the task above, for example with ns1:Deborah_Gore_Dean rdf:type ns1:IndictedPerson:
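# (Deborah Gore Dean is just the example name from the comment above; any person in the dataset works.)
sparql.setQuery("""
    PREFIX rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>
    PREFIX ns1: <http://example.org#>

    ASK { ns1:Deborah_Gore_Dean rdf:type ns1:IndictedPerson . }
""")
sparql.setReturnFormat(JSON)
print(sparql.query().convert()['boolean'])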

# Update the graph so all the investigation nodes (such as muellerkg:watergate) become the subject in a dc:title triple with the corresponding string (watergate) as the literal.
update_str = """
    PREFIX ns1: <http://example.org#>
    PREFIX rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>
    PREFIX dc: <http://purl.org/dc/elements/1.1/>

    INSERT{
        ?invest dc:title ?investString.
    }
    WHERE{
        ?s ns1:investigation ?invest .
        BIND (replace(str(?invest), str(ns1:), "")  AS ?investString)
}"""

sparql.setQuery(update_str)
sparql.setMethod(POST)
sparql.query()

#Same test as above, replace it with e.g. ns1:watergate dc:title "watergate"

# Print out a sorted list of all the indicted persons represented in your graph.
sparql.setQuery("""
    PREFIX ns1: <http://example.org#>
    PREFIX foaf: <http://xmlns.com/foaf/0.1/>

    SELECT ?name
    WHERE{
    ?s  ns1:person ?name;
        ns1:outcome ns1:indictment.
    }
    ORDER BY ?name
""")

sparql.setReturnFormat(JSON)
results = sparql.query().convert()

names = []

for result in results["results"]["bindings"]:
    names.append(result["name"]["value"])

print(names)

# Print out the minimum, average and maximum indictment days for all the indictments in the graph.
sparql.setQuery("""
    prefix xsd: <http://www.w3.org/2001/XMLSchema#>
    PREFIX ns1: <http://example.org#>

    SELECT (AVG(?daysRemoved) as ?avg) (MAX(?daysRemoved) as ?max) (MIN(?daysRemoved) as ?min)  WHERE{
        ?s  ns1:indictment_days ?days;
            ns1:outcome ns1:indictment.
    
    BIND (replace(str(?days), str(ns1:), "")  AS ?daysR)
    BIND (STRDT(STR(?daysR), xsd:float) AS ?daysRemoved)
}
""")

sparql.setReturnFormat(JSON)
results = sparql.query().convert()

for result in results["results"]["bindings"]:
    print(f'The longest an investigation lasted was: {result["max"]["value"]}')
    print(f'The shortest an investigation lasted was: {result["min"]["value"]}')
    print(f'The average investigation lasted: {result["avg"]["value"]}')

# Print out the minimum, average and maximum indictment days for all the indictments in the graph per investigation.
sparql.setQuery("""
    prefix xsd: <http://www.w3.org/2001/XMLSchema#>
    PREFIX ns1: <http://example.org#>

    SELECT ?investigation (AVG(?daysRemoved) as ?avg) (MAX(?daysRemoved) as ?max) (MIN(?daysRemoved) as ?min)  WHERE{
    ?s  ns1:indictment_days ?days;
        ns1:outcome ns1:indictment;
        ns1:investigation ?investigation.
    
    BIND (replace(str(?days), str(ns1:), "")  AS ?daysR)
    BIND (STRDT(STR(?daysR), xsd:float) AS ?daysRemoved)
    }
    GROUP BY ?investigation
""")

sparql.setReturnFormat(JSON)
results = sparql.query().convert()

for result in results["results"]["bindings"]:
    print(f'{result["investigation"]["value"]} - min: {result["min"]["value"]}, max: {result["max"]["value"]}, avg: {result["avg"]["value"]}')
</syntaxhighlight>

==CSV To RDF (Lab 5)==
<syntaxhighlight>
#Imports
import re
from pandas import read_csv
from numpy import nan
from rdflib import Graph, Namespace, URIRef, Literal, RDF, XSD, FOAF
from spotlight import SpotlightException, annotate

SERVER = "https://api.dbpedia-spotlight.org/en/annotate"
# Experiment with the confidence value and see how many names change. Be aware that values much lower than 0.83 will replace James W. McCord and other names that include "James" with LeBron James (see the small comparison sketch after annotate_entity below).
CONFIDENCE = 0.83 

def annotate_entity(entity, filters={'types': 'DBpedia:Person'}):
	annotations = []
	try:
		annotations = annotate(address=SERVER, text=entity, confidence=CONFIDENCE, filters=filters)
	except SpotlightException as e:
		print(e)
	return annotations
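
# Optional experiment (not part of the lab solution): compare how the entity linking changes
# with a lower confidence value; 0.5 is just an example threshold.
for conf in (0.5, CONFIDENCE):
    try:
        hits = annotate(address=SERVER, text="James W. McCord", confidence=conf,
                        filters={'types': 'DBpedia:Person'})
        print(conf, [hit["URI"] for hit in hits])
    except SpotlightException as e:
        print(conf, e)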

g = Graph()
ex = Namespace("http://example.org/")
g.bind("ex", ex)

#Pandas' read_csv function to load russia-investigation.csv
df = read_csv("russia-investigation.csv")
#Replaces all instances of nan to None type with numpy's nan
df = df.replace(nan, None)

#Function that prepares the values to be added to the graph as a URI or Literal
def prepareValue(row):
	if row == None: #none type
		value = Literal(row)
	elif isinstance(row, str) and re.match(r'\d{4}-\d{2}-\d{2}', row): #date
		value = Literal(row, datatype=XSD.date)
	elif isinstance(row, bool): #boolean value (true / false)
		value = Literal(row, datatype=XSD.boolean)
	elif isinstance(row, int): #integer
		value = Literal(row, datatype=XSD.integer)
	elif isinstance(row, str): #string
		value = URIRef(ex + row.replace('"', '').replace(" ", "_").replace(",","").replace("-", "_"))
	elif isinstance(row, float): #float
		value = Literal(row, datatype=XSD.float)

	return value

#Convert the non-semantic CSV dataset into a semantic RDF 
def csv_to_rdf(df):
	for index, row in df.iterrows():
		id = URIRef(ex + "Investigation_" + str(index))
		investigation = prepareValue(row["investigation"])
		investigation_start = prepareValue(row["investigation-start"])
		investigation_end = prepareValue(row["investigation-end"])
		investigation_days = prepareValue(row["investigation-days"])
		indictment_days = prepareValue(row["indictment-days "])
		cp_date = prepareValue(row["cp-date"])
		cp_days = prepareValue(row["cp-days"])
		overturned = prepareValue(row["overturned"])
		pardoned = prepareValue(row["pardoned"])
		american = prepareValue(row["american"])
		outcome = prepareValue(row["type"])
		name_ex = prepareValue(row["name"])
		president_ex = prepareValue(row["president"])

		#Spotlight Search
		name = annotate_entity(str(row['name']))
                # Removing the period as some presidents won't be found with it
		president = annotate_entity(str(row['president']).replace(".", ""))
		
		#Adds the tripples to the graph
		g.add((id, RDF.type, ex.Investigation))
		g.add((id, ex.investigation, investigation))
		g.add((id, ex.investigation_start, investigation_start))
		g.add((id, ex.investigation_end, investigation_end))
		g.add((id, ex.investigation_days, investigation_days))
		g.add((id, ex.indictment_days, indictment_days))
		g.add((id, ex.cp_date, cp_date))
		g.add((id, ex.cp_days, cp_days))
		g.add((id, ex.overturned, overturned))
		g.add((id, ex.pardoned, pardoned))
		g.add((id, ex.american, american))
		g.add((id, ex.outcome, outcome))

		#Spotlight search
		#Name
		try:
			g.add((id, ex.person, URIRef(name[0]["URI"])))
		except:
			g.add((id, ex.person, name_ex))

		#President
		try:
			g.add((id, ex.president, URIRef(president[0]["URI"])))
		except:
			g.add((id, ex.president, president_ex))

csv_to_rdf(df)
print(g.serialize())
</syntaxhighlight>

==SHACL (Lab 6)==
<syntaxhighlight>
from pyshacl import validate
from rdflib import Graph

data_graph = Graph()
# parses the Turtle examples from the lab
data_graph.parse("data_graph.ttl")

# Remember: to test, you need to change the rules so that they conflict with the data graph (or vice versa). For example, change "exactly one name" to exactly two and look at the output. A small test sketch follows after this example.
shape_graph = """
@prefix ex: <http://example.org/> .
@prefix foaf: <http://xmlns.com/foaf/0.1/> .
@prefix sh: <http://www.w3.org/ns/shacl#> .
@prefix xsd: <http://www.w3.org/2001/XMLSchema#> .
@prefix rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#> .

ex:LabTasks_Shape
    a sh:NodeShape ;
    sh:targetClass ex:PersonUnderInvestigation ;
    sh:property [
        sh:path foaf:name ;
        sh:minCount 1 ; #Every person under investigation has exactly one name. 
        sh:maxCount 1 ; #Every person under investigation has exactly one name.
        sh:datatype rdf:langString ; #All person names must be language-tagged
    ] ;
    sh:property [
        sh:path ex:chargedWith ;
        sh:nodeKind sh:IRI ; #The object of a charged with property must be a URI.
        sh:class ex:Offense ; #The object of a charged with property must be an offense.
    ] .

# --- If you have more time tasks ---
ex:MoreTime_Shape rdf:type sh:NodeShape;
    sh:targetClass ex:Indictment;
    
    # The only allowed values for ex:american are true, false or unknown.
    sh:property [
        sh:path ex:american;
        sh:pattern "(true|false|unknown)" ;
    ] ;
    
    # The value of a property that counts days must be an integer.
    sh:property [
        sh:path ex:indictment_days;
        sh:datatype xsd:integer;
    ] ;   
    sh:property [
        sh:path ex:investigation_days;
        sh:datatype xsd:integer;
    ] ;
    
    # The value of a property that indicates a start date must be xsd:date.
    sh:property [
        sh:path ex:investigation_start;
        sh:datatype xsd:date;
    ] ;

    # The value of a property that indicates an end date must be xsd:date or unknown (tip: you can use sh:or (...) ).
    sh:property [
        sh:path ex:investigation_end;
        sh:or (
         [ sh:datatype xsd:date ]
         [ sh:hasValue "unknown" ]
    )] ;
    
    # Every indictment must have exactly one FOAF name for the investigated person.
    sh:property [
        sh:path foaf:name;
        sh:minCount 1;
        sh:maxCount 1;
    ] ;
    
    # Every indictment must have exactly one investigated person property, and that person must have the type ex:PersonUnderInvestigation.
    sh:property [
        sh:path ex:investigatedPerson ;
        sh:minCount 1 ;
        sh:maxCount 1 ;
        sh:class ex:PersonUnderInvestigation ;
        sh:nodeKind sh:IRI ;
    ] ;

    # No URI-s can contain hyphens ('-').
    sh:property [
        sh:path ex:outcome ;
        sh:nodeKind sh:IRI ;
        sh:pattern "^[^-]*$" ;
    ] ;

    # Presidents must be identified with URIs.
    sh:property [
        sh:path ex:president ;
        sh:class ex:President ;
        sh:nodeKind sh:IRI ;
    ] .
"""

shacl_graph = Graph()
# parses the contents of a shape_graph made in the tasks
shacl_graph.parse(data=shape_graph)

# uses pySHACL's validate method to apply the shape_graph constraints to the data_graph
results = validate(
    data_graph,
    shacl_graph=shacl_graph,
    inference='both'
)

# prints out the validation result
boolean_value, results_graph, results_text = results

# print(boolean_value)
print(results_graph.serialize(format='ttl'))
# print(results_text)

#Write a SPARQL query to print out each distinct sh:resultMessage in the results_graph
distinct_messages = """
PREFIX sh: <http://www.w3.org/ns/shacl#> 

SELECT DISTINCT ?message WHERE {
    [] sh:result ?errorBlankNode.
    ?errorBlankNode sh:resultMessage ?message.    

    # Alternative and cleaner solution, see https://www.w3.org/TR/sparql11-query/#pp-language (9.1 Property Path Syntax)
    # [] sh:result / sh:resultMessage ?message .
}
"""
messages = results_graph.query(distinct_messages)
for row in messages:
    print(row.message)

#each sh:resultMessage in the results_graph once, along with the number of times that message has been repeated in the results
count_messages = """
PREFIX sh: <http://www.w3.org/ns/shacl#> 

SELECT ?message (COUNT(?node) AS ?num_messages) WHERE {
    [] sh:result ?errorBlankNode .
    ?errorBlankNode sh:resultMessage ?message ;
                    sh:focusNode ?node .
}
GROUP BY ?message
ORDER BY DESC(?num_messages) ?message
"""

messages = results_graph.query(count_messages)
for row in messages:
    print(f"COUNT: {row.num_messages} | MESSAGE: {row.message}")
</syntaxhighlight>
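To see what a violation looks like (as mentioned in the comment at the top of the example), one can validate a small, deliberately broken data graph against the same shapes. A minimal sketch, continuing directly from the code above (it reuses shacl_graph); ex:Jane and her triples are made up just for this test:

<syntaxhighlight>
bad_data = Graph()
bad_data.parse(data="""
@prefix ex: <http://example.org/> .

# no foaf:name (violates the min/max count of 1) and a literal offense
# (violates sh:nodeKind sh:IRI and sh:class ex:Offense)
ex:Jane a ex:PersonUnderInvestigation ;
    ex:chargedWith "money laundering" .
""", format="turtle")

conforms, report_graph, report_text = validate(bad_data, shacl_graph=shacl_graph, inference='both')
print(conforms)      # expected: False
print(report_text)   # human-readable summary of the violations
</syntaxhighlight>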

==RDFS (Lab 7)==
<syntaxhighlight>
import owlrl
from rdflib import Graph, RDF, Namespace, FOAF, RDFS

g = Graph()
ex = Namespace('http://example.org/')

g.bind("ex", ex)
g.bind("foaf", FOAF)

NS = {
    'ex': ex,
    'rdf': RDF,
    'rdfs': RDFS,
    'foaf': FOAF,
}

#Write a small function that computes the RDFS closure on your graph.
def flush():
    owlrl.DeductiveClosure(owlrl.RDFS_Semantics).expand(g)

#Rick Gates was charged with money laundering and tax evasion.
g.add((ex.Rick_Gates, ex.chargedWith, ex.MoneyLaundering))
g.add((ex.Rick_Gates, ex.chargedWith, ex.TaxEvasion))

#When one thing that is charged with another thing,
g.add((ex.chargedWith, RDFS.domain, ex.PersonUnderInvestigation))  #the first thing is a person under investigation and
g.add((ex.chargedWith, RDFS.range, ex.Offense))  #the second thing is an offense.

#Write a SPARQL query that checks the RDF type(s) of Rick Gates and money laundering in your RDF graph.
print('Is Rick Gates a ex:PersonUnderInvestigation:', g.query('ASK {ex:Rick_Gates rdf:type ex:PersonUnderInvestigation}', initNs=NS).askAnswer)
print('Is Money Laundering a ex:Offense:', g.query('ASK {ex:MoneyLaundering rdf:type ex:Offense}', initNs=NS).askAnswer)
flush()
print('Is Rick Gates a ex:PersonUnderInvestigation:', g.query('ASK {ex:Rick_Gates rdf:type ex:PersonUnderInvestigation}', initNs=NS).askAnswer)
print('Is Money Laundering a ex:Offense:', g.query('ASK {ex:MoneyLaundering rdf:type ex:Offense}', initNs=NS).askAnswer)

#A person under investigation is a FOAF person
g.add((ex.PersonUnderInvestigation, RDFS.subClassOf, FOAF.Person))
print('Is Rick Gates a foaf:Person:', g.query('ASK {ex:Rick_Gates rdf:type foaf:Person}', initNs=NS).askAnswer)
flush()
print('Is Rick Gates a foaf:Person:', g.query('ASK {ex:Rick_Gates rdf:type foaf:Person}', initNs=NS).askAnswer)

#Paul Manafort was convicted for tax evasion.
g.add((ex.Paul_Manafort, ex.convictedFor, ex.TaxEvasion))
#the first thing is also charged with the second thing
g.add((ex.convictedFor, RDFS.subPropertyOf, ex.chargedWith)) 
print('Is Paul Manafort charged with Tax Evasion:', g.query('ASK {ex:Paul_Manafort ex:chargedWith ex:TaxEvasion}', initNs=NS).askAnswer)
flush()
print('Is Paul Manafort charged with Tax Evasion:', g.query('ASK {ex:Paul_Manafort ex:chargedWith ex:TaxEvasion}', initNs=NS).askAnswer)

print(g.serialize())
</syntaxhighlight>