Dear all,
I'm trying to run tdbquery on my triple store, which uses a union
default graph (assembler attached), but it seems that even the
simplest query doesn't work:
SELECT ?subject ?predicate ?object
WHERE {
?subject ?predicate ?object
}
LIMIT 25
Looking at the results, it seems that tdbquery is running a
quadpattern in <urn:x-arq:DefaultGraphNode>, which makes the query
return no result:
$ tdbquery --explain --time
--loc=/usr/local/fuseki/base/databases/newcore/ --query
/tmp/dumbselect.arq
09:45:47 INFO exec :: QUERY
SELECT ?subject ?predicate ?object
WHERE
{ ?subject ?predicate ?object }
LIMIT 25
09:45:47 INFO exec :: ALGEBRA
(slice _ 25
(project (?subject ?predicate ?object)
(quadpattern (quad <urn:x-arq:DefaultGraphNode> ?subject
?predicate ?object))))
09:45:47 INFO exec :: TDB
(slice _ 25
(project (?subject ?predicate ?object)
(quadpattern (quad <urn:x-arq:DefaultGraphNode> ?subject
?predicate ?object))))
09:45:47 INFO exec :: Execute :: ?subject ?predicate ?object
--------------------------------
| subject | predicate | object |
================================
--------------------------------
Time: 0.146 sec
Ideally I would like to be able to test my usual queries in tdbquery
(without adding GRAPH clauses). I can't find a way to specify the
union default graph (there is no --graph argument like in tdbstats).
Is there a way to make it work?
Best,
--
Elie
@prefix fuseki: <http://jena.apache.org/fuseki#> .
@prefix rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#> .
@prefix rdfs: <http://www.w3.org/2000/01/rdf-schema#> .
@prefix tdb: <http://jena.hpl.hp.com/2008/tdb#> .
@prefix tdb2: <http://jena.apache.org/2016/tdb#> .
@prefix ja: <http://jena.hpl.hp.com/2005/11/Assembler#> .
@prefix : <http://base/#> .
@prefix text: <http://jena.apache.org/text#> .
@prefix skos: <http://www.w3.org/2004/02/skos/core#> .
@prefix adm: <http://purl.bdrc.io/ontology/admin/> .
@prefix bdo: <http://purl.bdrc.io/ontology/core/> .
@prefix bdr: <http://purl.bdrc.io/resource/> .
# [] ja:loadClass "org.seaborne.tdb2.TDB2" .
# tdb2:DatasetTDB2 rdfs:subClassOf ja:RDFDataset .
# tdb2:GraphTDB2 rdfs:subClassOf ja:Model .
# Load the BDRC custom date datatype class so literals typed with it
# parse correctly when the server starts.
[] ja:loadClass "io.bdrc.libraries.BdrcDateType" .
# Fuseki server instance exposing the single service listed below.
[] rdf:type fuseki:Server ;
fuseki:services (
:newcorerw
) .
# Read-write SPARQL service for the "newcore" dataset.
:newcorerw rdf:type fuseki:Service ;
fuseki:name "newcorerw" ; # name of the dataset in the url
fuseki:serviceQuery "query" ; # SPARQL query service
fuseki:serviceUpdate "update" ; # SPARQL update service
fuseki:serviceUpload "upload" ; # Non-SPARQL upload service
fuseki:serviceReadWriteGraphStore "data" ; # SPARQL Graph store protocol (read and write)
fuseki:dataset :newcore_text_dataset ; # text-indexed dataset wrapper, defined below
.
# jena-text wrapper: pairs the underlying TDB dataset with the Lucene
# index so text:query patterns are answered from the index.
:newcore_text_dataset rdf:type text:TextDataset ;
text:dataset :dataset_newcore ;
text:index :newcore_lucene_index ;
.
# using TDB
:dataset_newcore rdf:type tdb:DatasetTDB ;
tdb:location "/usr/local/fuseki/base/databases/newcore" ;
# Present the union of all named graphs as the default graph for
# queries made through this assembler (i.e. via Fuseki).
# NOTE(review): command-line tools such as tdbquery open the location
# directly and never read this file, so this setting does not apply
# there; pass
#   --set tdb:unionDefaultGraph=true
# on the tdbquery command line to get the same behaviour.
tdb:unionDefaultGraph true ;
.
# using TDB2
# :dataset_newcore rdf:type tdb2:DatasetTDB2 ;
# tdb2:location "/usr/local/fuseki/base/databases/newcore" ;
# tdb2:unionDefaultGraph true ;
# .
# Text index description
:newcore_lucene_index a text:TextIndexLucene ;
text:directory <file:/usr/local/fuseki/base/lucene-newcore> ;
text:storeValues true ;
text:multilingualSupport true ;
text:ignoreIndexErrors true ;
text:entityMap :newcore_entmap ;
text:propLists (
[ text:propListProp bdo:labels ;
text:props ( skos:prefLabel
skos:altLabel
rdfs:label ) ;
]
[ text:propListProp bdo:skosLabels ;
text:props ( skos:prefLabel
skos:altLabel ) ;
]
[ text:propListProp bdo:workStmts ;
text:props ( bdo:workColophon
bdo:workAuthorshipStatement
bdo:workEditionStatement ) ;
]
) ;
text:defineAnalyzers (
[ text:defineAnalyzer :romanSylAnalyzer ;
text:analyzer [
a text:GenericAnalyzer ;
text:class "io.bdrc.lucene.sa.SanskritAnalyzer" ;
text:params (
[ text:paramName "mode" ;
text:paramValue "syl" ]
[ text:paramName "inputEncoding" ;
text:paramValue "roman" ]
[ text:paramName "mergePrepositions" ;
text:paramValue true ]
[ text:paramName "filterGeminates" ;
text:paramValue true ]
[ text:paramName "normalizeAnusvara" ;
text:paramValue true ]
)
] ;
]
[ text:defineAnalyzer :devaSylAnalyzer ;
text:analyzer [
a text:GenericAnalyzer ;
text:class "io.bdrc.lucene.sa.SanskritAnalyzer" ;
text:params (
[ text:paramName "mode" ;
text:paramValue "syl" ]
[ text:paramName "inputEncoding" ;
text:paramValue "deva" ]
[ text:paramName "mergePrepositions" ;
text:paramValue true ]
[ text:paramName "filterGeminates" ;
text:paramValue true ]
[ text:paramName "normalizeAnusvara" ;
text:paramValue true ]
)
] ;
]
[ text:defineAnalyzer :slpSylAnalyzer ;
text:analyzer [
a text:GenericAnalyzer ;
text:class "io.bdrc.lucene.sa.SanskritAnalyzer" ;
text:params (
[ text:paramName "mode" ;
text:paramValue "syl" ]
[ text:paramName "inputEncoding" ;
text:paramValue "SLP" ]
[ text:paramName "mergePrepositions" ;
text:paramValue true ]
[ text:paramName "filterGeminates" ;
text:paramValue true ]
[ text:paramName "normalizeAnusvara" ;
text:paramValue true ]
)
] ;
]
[ text:defineAnalyzer :romanLenientIndexAnalyzer ;
text:analyzer [
a text:GenericAnalyzer ;
text:class "io.bdrc.lucene.sa.SanskritAnalyzer" ;
text:params (
[ text:paramName "mode" ;
text:paramValue "syl" ]
[ text:paramName "inputEncoding" ;
text:paramValue "roman" ]
[ text:paramName "mergePrepositions" ;
text:paramValue false ]
[ text:paramName "filterGeminates" ;
text:paramValue true ]
[ text:paramName "lenient" ;
text:paramValue "index" ]
)
] ;
]
[ text:defineAnalyzer :devaLenientIndexAnalyzer ;
text:analyzer [
a text:GenericAnalyzer ;
text:class "io.bdrc.lucene.sa.SanskritAnalyzer" ;
text:params (
[ text:paramName "mode" ;
text:paramValue "syl" ]
[ text:paramName "inputEncoding" ;
text:paramValue "deva" ]
[ text:paramName "mergePrepositions" ;
text:paramValue false ]
[ text:paramName "filterGeminates" ;
text:paramValue true ]
[ text:paramName "lenient" ;
text:paramValue "index" ]
)
] ;
]
[ text:defineAnalyzer :slpLenientIndexAnalyzer ;
text:analyzer [
a text:GenericAnalyzer ;
text:class "io.bdrc.lucene.sa.SanskritAnalyzer" ;
text:params (
[ text:paramName "mode" ;
text:paramValue "syl" ]
[ text:paramName "inputEncoding" ;
text:paramValue "SLP" ]
[ text:paramName "mergePrepositions" ;
text:paramValue false ]
[ text:paramName "filterGeminates" ;
text:paramValue true ]
[ text:paramName "lenient" ;
text:paramValue "index" ]
)
] ;
]
[ text:defineAnalyzer :romanLenientQueryAnalyzer ;
text:analyzer [
a text:GenericAnalyzer ;
text:class "io.bdrc.lucene.sa.SanskritAnalyzer" ;
text:params (
[ text:paramName "mode" ;
text:paramValue "syl" ]
[ text:paramName "inputEncoding" ;
text:paramValue "roman" ]
[ text:paramName "mergePrepositions" ;
text:paramValue false ]
[ text:paramName "filterGeminates" ;
text:paramValue false ]
[ text:paramName "lenient" ;
text:paramValue "query" ]
)
] ;
]
[ text:defineAnalyzer :hanzAnalyzer ;
text:analyzer [
a text:GenericAnalyzer ;
text:class "io.bdrc.lucene.zh.ChineseAnalyzer" ;
text:params (
[ text:paramName "profile" ;
text:paramValue "TC2SC" ]
[ text:paramName "stopwords" ;
text:paramValue false ]
[ text:paramName "filterChars" ;
text:paramValue 0 ]
)
] ;
]
[ text:defineAnalyzer :han2pinyin ;
text:analyzer [
a text:GenericAnalyzer ;
text:class "io.bdrc.lucene.zh.ChineseAnalyzer" ;
text:params (
[ text:paramName "profile" ;
text:paramValue "TC2PYstrict" ]
[ text:paramName "stopwords" ;
text:paramValue false ]
[ text:paramName "filterChars" ;
text:paramValue 0 ]
)
] ;
]
[ text:defineAnalyzer :pinyin ;
text:analyzer [
a text:GenericAnalyzer ;
text:class "io.bdrc.lucene.zh.ChineseAnalyzer" ;
text:params (
[ text:paramName "profile" ;
text:paramValue "PYstrict" ]
)
] ;
]
[ text:addLang "bo" ;
text:searchFor ( "bo" "bo-x-ewts" "bo-alalc97" ) ;
text:analyzer [
a text:GenericAnalyzer ;
text:class "io.bdrc.lucene.bo.TibetanAnalyzer" ;
text:params (
[ text:paramName "segmentInWords" ;
text:paramValue false ]
[ text:paramName "lemmatize" ;
text:paramValue true ]
[ text:paramName "filterChars" ;
text:paramValue false ]
[ text:paramName "inputMode" ;
text:paramValue "unicode" ]
[ text:paramName "stopFilename" ;
text:paramValue "" ]
)
] ;
]
[ text:addLang "bo-x-ewts" ;
text:searchFor ( "bo" "bo-x-ewts" "bo-alalc97" ) ;
text:analyzer [
a text:GenericAnalyzer ;
text:class "io.bdrc.lucene.bo.TibetanAnalyzer" ;
text:params (
[ text:paramName "segmentInWords" ;
text:paramValue false ]
[ text:paramName "lemmatize" ;
text:paramValue true ]
[ text:paramName "filterChars" ;
text:paramValue false ]
[ text:paramName "inputMode" ;
text:paramValue "ewts" ]
[ text:paramName "stopFilename" ;
text:paramValue "" ]
)
] ;
]
[ text:addLang "bo-alalc97" ;
text:searchFor ( "bo" "bo-x-ewts" "bo-alalc97" ) ;
text:analyzer [
a text:GenericAnalyzer ;
text:class "io.bdrc.lucene.bo.TibetanAnalyzer" ;
text:params (
[ text:paramName "segmentInWords" ;
text:paramValue false ]
[ text:paramName "lemmatize" ;
text:paramValue true ]
[ text:paramName "filterChars" ;
text:paramValue false ]
[ text:paramName "inputMode" ;
text:paramValue "alalc" ]
[ text:paramName "stopFilename" ;
text:paramValue "" ]
)
] ;
]
[ text:addLang "zh-hans" ;
text:searchFor ( "zh-hans" "zh-hant" "zh-hani" ) ;
text:auxIndex ( "zh-aux-han2pinyin" ) ;
text:analyzer [
a text:DefinedAnalyzer ;
text:useAnalyzer :hanzAnalyzer ] ;
]
[ text:addLang "zh-hant" ;
text:searchFor ( "zh-hans" "zh-hant" "zh-hani" ) ;
text:auxIndex ( "zh-aux-han2pinyin" ) ;
text:analyzer [
a text:DefinedAnalyzer ;
text:useAnalyzer :hanzAnalyzer
] ;
]
[ text:addLang "zh-hani" ;
text:searchFor ( "zh-hans" "zh-hant" "zh-hani" ) ;
text:auxIndex ( "zh-aux-han2pinyin" ) ;
text:analyzer [
a text:DefinedAnalyzer ;
text:useAnalyzer :hanzAnalyzer
] ;
]
[ text:addLang "zh-latn-pinyin" ;
text:searchFor ( "zh-latn-pinyin" "zh-aux-han2pinyin" ) ;
text:analyzer [
a text:DefinedAnalyzer ;
text:useAnalyzer :pinyin
] ;
]
[ text:addLang "zh-aux-han2pinyin" ;
text:searchFor ( "zh-latn-pinyin" "zh-aux-han2pinyin" ) ;
text:analyzer [
a text:DefinedAnalyzer ;
text:useAnalyzer :pinyin
] ;
text:indexAnalyzer :han2pinyin ;
]
[ text:addLang "sa-x-ndia" ;
text:searchFor ( "sa-x-ndia" "sa-aux-deva2Ndia" "sa-aux-roman2Ndia" "sa-aux-slp2Ndia" ) ;
text:analyzer [
a text:DefinedAnalyzer ;
text:useAnalyzer :romanLenientQueryAnalyzer
] ;
]
[ text:addLang "sa-aux-deva2Ndia" ;
text:searchFor ( "sa-x-ndia" "sa-aux-roman2Ndia" "sa-aux-slp2Ndia" ) ;
text:analyzer [
a text:DefinedAnalyzer ;
text:useAnalyzer :romanLenientQueryAnalyzer
] ;
text:indexAnalyzer :devaLenientIndexAnalyzer ;
]
[ text:addLang "sa-aux-roman2Ndia" ;
text:searchFor ( "sa-x-ndia" "sa-aux-deva2Ndia" "sa-aux-slp2Ndia" ) ;
text:analyzer [
a text:DefinedAnalyzer ;
text:useAnalyzer :romanLenientQueryAnalyzer
] ;
text:indexAnalyzer :romanLenientIndexAnalyzer ;
]
[ text:addLang "sa-aux-slp2Ndia" ;
text:searchFor ( "sa-x-ndia" "sa-aux-deva2Ndia" "sa-aux-roman2Ndia" ) ;
text:analyzer [
a text:DefinedAnalyzer ;
text:useAnalyzer :romanLenientQueryAnalyzer
] ;
text:indexAnalyzer :slpLenientIndexAnalyzer ;
]
[ text:addLang "sa-deva" ;
text:searchFor ( "sa-deva" "sa-x-iast" "sa-x-slp1" "sa-x-iso" "sa-alalc97" ) ;
text:auxIndex ( "sa-aux-deva2Ndia" ) ;
text:analyzer [
a text:DefinedAnalyzer ;
text:useAnalyzer :devaSylAnalyzer ] ;
]
[ text:addLang "sa-x-iso" ;
text:searchFor ( "sa-x-iso" "sa-x-iast" "sa-x-slp1" "sa-deva" "sa-alalc97" ) ;
text:auxIndex ( "sa-aux-roman2Ndia" ) ;
text:analyzer [
a text:DefinedAnalyzer ;
text:useAnalyzer :romanSylAnalyzer ] ;
]
[ text:addLang "sa-x-slp1" ;
text:searchFor ( "sa-x-slp1" "sa-x-iast" "sa-x-iso" "sa-deva" "sa-alalc97" ) ;
text:auxIndex ( "sa-aux-slp2Ndia" ) ;
text:analyzer [
a text:DefinedAnalyzer ;
text:useAnalyzer :slpSylAnalyzer ] ;
]
[ text:addLang "sa-x-iast" ;
text:searchFor ( "sa-x-iast" "sa-x-slp1" "sa-x-iso" "sa-deva" "sa-alalc97" ) ;
text:auxIndex ( "sa-aux-roman2Ndia" ) ;
text:analyzer [
a text:DefinedAnalyzer ;
text:useAnalyzer :romanSylAnalyzer ] ;
]
# ALA-LC romanized Sanskrit: search across all Sanskrit transliteration
# tags. Fixed "sa-iast" -> "sa-x-iast": every other Sanskrit entry and
# the indexed tag itself use "sa-x-iast", so the mistyped tag matched
# nothing and IAST results were silently missing from sa-alalc97 queries.
[ text:addLang "sa-alalc97" ;
text:searchFor ( "sa-alalc97" "sa-x-slp1" "sa-x-iso" "sa-deva" "sa-x-iast" ) ;
text:auxIndex ( "sa-aux-roman2Ndia" ) ;
text:analyzer [
a text:DefinedAnalyzer ;
text:useAnalyzer :romanSylAnalyzer ] ;
]
[ text:addLang "km-x-kmfemc" ;
text:searchFor ( "pi-x-iast" "pi-x-kmfemc" "km-x-kmfemc" ) ;
text:analyzer [
a text:DefinedAnalyzer ;
text:useAnalyzer :romanLenientQueryAnalyzer
] ;
]
[ text:addLang "pi-x-iast" ;
text:searchFor ( "pi-x-iast" "pi-x-kmfemc" "km-x-kmfemc" ) ;
text:analyzer [
a text:DefinedAnalyzer ;
text:useAnalyzer :romanLenientQueryAnalyzer
] ;
]
[ text:addLang "pi-x-kmfemc" ;
text:searchFor ( "pi-x-iast" "pi-x-kmfemc" "km-x-kmfemc" ) ;
text:analyzer [
a text:DefinedAnalyzer ;
text:useAnalyzer :romanLenientQueryAnalyzer
] ;
]
) ;
.
# Index mappings
# Maps RDF properties to Lucene fields. Each indexed document carries
# the entity URI, uid, label text, its language tag, and the named
# graph it came from.
:newcore_entmap a text:EntityMap ;
text:entityField "uri" ;
text:uidField "uid" ;
text:defaultField "label" ; # field searched when no field is given
text:langField "lang" ;
text:graphField "graph" ; ## enable graph-specific indexing
text:map (
[ text:field "label" ;
text:predicate skos:prefLabel ]
[ text:field "altLabel" ;
text:predicate skos:altLabel ; ]
[ text:field "rdfsLabel" ;
text:predicate rdfs:label ; ]
[ text:field "chunkContents" ;
text:predicate bdo:chunkContents ; ]
[ text:field "eTextTitle" ;
text:predicate bdo:eTextTitle ; ]
[ text:field "logMessage" ;
text:predicate adm:logMessage ; ]
[ text:field "noteText" ;
text:predicate bdo:noteText ; ]
[ text:field "workAuthorshipStatement" ;
text:predicate bdo:workAuthorshipStatement ; ]
[ text:field "workBiblioNote" ;
text:predicate bdo:workBiblioNote ; ]
[ text:field "workCatalogInfo" ;
text:predicate bdo:workCatalogInfo ; ]
[ text:field "workColophon" ;
text:predicate bdo:workColophon ; ]
[ text:field "workEditionStatement" ;
text:predicate bdo:workEditionStatement ; ]
[ text:field "workPublisherLocation" ;
text:predicate bdo:workPublisherLocation ; ]
[ text:field "workPublisherName" ;
text:predicate bdo:workPublisherName ; ]
[ text:field "workSeriesName" ;
text:predicate bdo:workSeriesName ; ]
) ;
.