diff --git a/Dockerfile b/Dockerfile index e5df99152..5b9569d80 100644 --- a/Dockerfile +++ b/Dockerfile @@ -89,7 +89,9 @@ ENV SIGN_UP_CERT_VALIDITY= ENV LOAD_DATASETS= -ENV CONTEXT_DATASET_URL=file:///var/linkeddatahub/datasets/system.trig +ENV CONTEXT_DATASET_URL=file:///var/linkeddatahub/datasets/dataspaces.trig + +ENV SERVICES_DATASET_URL=file:///var/linkeddatahub/datasets/system.trig ENV ADMIN_DATASET_URL=file:///var/linkeddatahub/datasets/admin.trig diff --git a/bin/imports/create-query.sh b/bin/add-construct.sh similarity index 72% rename from bin/imports/create-query.sh rename to bin/add-construct.sh index f9d793498..5549b25ab 100755 --- a/bin/imports/create-query.sh +++ b/bin/add-construct.sh @@ -5,7 +5,7 @@ print_usage() { printf "Creates a SPARQL CONSTRUCT query.\n" printf "\n" - printf "Usage: %s options\n" "$0" + printf "Usage: %s options TARGET_URI\n" "$0" printf "\n" printf "Options:\n" printf " -f, --cert-pem-file CERT_FILE .pem file with the WebID certificate of the agent\n" @@ -13,20 +13,16 @@ print_usage() printf " -b, --base BASE_URI Base URI of the application\n" printf " --proxy PROXY_URL The host this request will be proxied through (optional)\n" printf "\n" - printf " --title TITLE Title of the chart\n" - printf " --description DESCRIPTION Description of the chart (optional)\n" - printf " --slug STRING String that will be used as URI path segment (optional)\n" + printf " --title TITLE Title of the query\n" + printf " --description DESCRIPTION Description of the query (optional)\n" + printf " --uri URI URI of the query (optional)\n" printf "\n" printf " --query-file ABS_PATH Absolute path to the text file with the SPARQL query string\n" + printf " --service SERVICE_URI URI of the SPARQL service specific to this query (optional)\n" } hash turtle 2>/dev/null || { echo >&2 "turtle not on \$PATH. 
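For orientation, here is a hypothetical invocation of the renamed add-construct.sh against its new interface; the base URL, certificate path, password, query file and target document URI are illustrative, not taken from this change:

add-construct.sh \
    -f ./ssl/owner/cert.pem \
    -p "Password" \
    -b "https://localhost:4443/" \
    --title "Cities query" \
    --uri "#cities-query" \
    --query-file "$PWD/construct-cities.rq" \
    "https://localhost:4443/queries/cities/"

The query is appended to the existing target document as a fragment resource instead of being created as a new item under a queries/ container, which is the point of the rename from create-query.sh and the switch from put.sh to post.sh.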
Aborting."; exit 1; } -urlencode() { - python -c 'import urllib.parse, sys; print(urllib.parse.quote(sys.argv[1], sys.argv[2]))' \ - "$1" "$urlencode_safe" -} - args=() while [[ $# -gt 0 ]] do @@ -63,8 +59,8 @@ do shift # past argument shift # past value ;; - --slug) - slug="$2" + --uri) + uri="$2" shift # past argument shift # past value ;; @@ -73,6 +69,11 @@ do shift # past argument shift # past value ;; + --service) + service="$2" + shift # past argument + shift # past value + ;; *) # unknown arguments args+=("$1") # save it in an array for later shift # past argument @@ -81,6 +82,8 @@ do done set -- "${args[@]}" # restore args +target="$1" + if [ -z "$cert_pem_file" ] ; then print_usage exit 1 @@ -102,43 +105,38 @@ if [ -z "$query_file" ] ; then exit 1 fi -if [ -z "$slug" ] ; then - slug=$(uuidgen | tr '[:upper:]' '[:lower:]') # lowercase -fi -encoded_slug=$(urlencode "$slug") - -container="${base}queries/" query=$(<"$query_file") # read query string from file -target="${container}${encoded_slug}/" - args+=("-f") args+=("$cert_pem_file") args+=("-p") args+=("$cert_password") args+=("-t") args+=("text/turtle") # content type -args+=("$target") if [ -n "$proxy" ]; then args+=("--proxy") args+=("$proxy") fi +if [ -n "$uri" ] ; then + subject="<${uri}>" +else + subject="_:subject" +fi + turtle+="@prefix ldh: .\n" -turtle+="@prefix dh: .\n" turtle+="@prefix dct: .\n" -turtle+="@prefix foaf: .\n" turtle+="@prefix sp: .\n" -turtle+="_:query a sp:Construct .\n" -turtle+="_:query dct:title \"${title}\" .\n" -turtle+="_:query sp:text \"\"\"${query}\"\"\" .\n" -turtle+="<${target}> a dh:Item .\n" -turtle+="<${target}> foaf:primaryTopic _:query .\n" -turtle+="<${target}> dct:title \"${title}\" .\n" +turtle+="${subject} a sp:Construct .\n" +turtle+="${subject} dct:title \"${title}\" .\n" +turtle+="${subject} sp:text \"\"\"${query}\"\"\" .\n" +if [ -n "$service" ] ; then + turtle+="${subject} ldh:service <${service}> .\n" +fi if [ -n "$description" ] ; then - turtle+="_:query dct:description \"${description}\" .\n" + turtle+="${subject} dct:description \"${description}\" .\n" fi # submit Turtle doc to the server -echo -e "$turtle" | turtle --base="$target" | put.sh "${args[@]}" \ No newline at end of file +echo -e "$turtle" | turtle --base="$target" | post.sh "${args[@]}" \ No newline at end of file diff --git a/bin/imports/create-file.sh b/bin/add-file.sh similarity index 56% rename from bin/imports/create-file.sh rename to bin/add-file.sh index 36413d34c..dce1dcb13 100755 --- a/bin/imports/create-file.sh +++ b/bin/add-file.sh @@ -5,7 +5,7 @@ print_usage() { printf "Uploads a file.\n" printf "\n" - printf "Usage: %s options\n" "$0" + printf "Usage: %s options TARGET_URI\n" "$0" printf "\n" printf "Options:\n" printf " -f, --cert-pem-file CERT_FILE .pem file with the WebID certificate of the agent\n" @@ -14,22 +14,14 @@ print_usage() printf " --proxy PROXY_URL The host this request will be proxied through (optional)\n" printf "\n" printf " --title TITLE Title of the file\n" - printf " --container CONTAINER_URI URI of the parent container (optional)\n" printf " --description DESCRIPTION Description of the file (optional)\n" - printf " --slug STRING String that will be used as URI path segment (optional)\n" printf "\n" printf " --file ABS_PATH Absolute path to the file\n" - printf " --file-content-type MEDIA_TYPE Media type of the file (optional)\n" - #printf " --file-slug STRING String that will be used as the file's URI path segment (optional)\n" + printf " --content-type MEDIA_TYPE Media type of 
the file (optional)\n" } hash curl 2>/dev/null || { echo >&2 "curl not on \$PATH. Aborting."; exit 1; } -urlencode() { - python -c 'import urllib.parse, sys; print(urllib.parse.quote(sys.argv[1], sys.argv[2]))' \ - "$1" "$urlencode_safe" -} - args=() while [[ $# -gt 0 ]] do @@ -66,28 +58,13 @@ do shift # past argument shift # past value ;; - --slug) - slug="$2" - shift # past argument - shift # past value - ;; - --container) - container="$2" - shift # past argument - shift # past value - ;; --file) file="$2" shift # past argument shift # past value ;; - --file-content-type) - file_content_type="$2" - shift # past argument - shift # past value - ;; - --file-slug) - file_slug="$2" + --content-type) + content_type="$2" shift # past argument shift # past value ;; @@ -99,6 +76,8 @@ do done set -- "${args[@]}" # restore args +target="$1" + if [ -z "$cert_pem_file" ] ; then print_usage exit 1 @@ -119,50 +98,23 @@ if [ -z "$file" ] ; then print_usage exit 1 fi -if [ -z "$file_content_type" ] ; then +if [ -z "$content_type" ] ; then # determine content-type if not provided - file_content_type=$(file -b --mime-type "$file") + content_type=$(file -b --mime-type "$file") fi -if [ -z "$slug" ] ; then - slug=$(uuidgen | tr '[:upper:]' '[:lower:]') # lowercase -fi -encoded_slug=$(urlencode "$slug") - -# need to create explicit file URI since that is what this script returns (not the graph URI) - -#if [ -z "$file_slug" ] ; then -# file_slug=$(uuidgen | tr '[:upper:]' '[:lower:]') # lowercase -#fi - -if [ -z "$container" ] ; then - container="${base}files/" -fi - -target="${container}${encoded_slug}/" - # https://stackoverflow.com/questions/19116016/what-is-the-right-way-to-post-multipart-form-data-using-curl rdf_post+="-F \"rdf=\"\n" rdf_post+="-F \"sb=file\"\n" rdf_post+="-F \"pu=http://www.semanticdesktop.org/ontologies/2007/03/22/nfo#fileName\"\n" -rdf_post+="-F \"ol=@${file};type=${file_content_type}\"\n" +rdf_post+="-F \"ol=@${file};type=${content_type}\"\n" rdf_post+="-F \"pu=http://purl.org/dc/terms/title\"\n" rdf_post+="-F \"ol=${title}\"\n" rdf_post+="-F \"pu=http://www.w3.org/1999/02/22-rdf-syntax-ns#type\"\n" rdf_post+="-F \"ou=http://www.semanticdesktop.org/ontologies/2007/03/22/nfo#FileDataObject\"\n" -rdf_post+="-F \"su=${target}\"\n" -rdf_post+="-F \"pu=http://purl.org/dc/terms/title\"\n" -rdf_post+="-F \"ol=${title}\"\n" -rdf_post+="-F \"pu=http://www.w3.org/1999/02/22-rdf-syntax-ns#type\"\n" -rdf_post+="-F \"ou=https://www.w3.org/ns/ldt/document-hierarchy#Item\"\n" -rdf_post+="-F \"pu=http://xmlns.com/foaf/0.1/primaryTopic\"\n" -rdf_post+="-F \"ob=file\"\n" -rdf_post+="-F \"pu=http://rdfs.org/sioc/ns#has_container\"\n" -rdf_post+="-F \"ou=${container}\"\n" if [ -n "$description" ] ; then - rdf_post+="-F \"sb=file\"\n" rdf_post+="-F \"pu=http://purl.org/dc/terms/description\"\n" rdf_post+="-F \"ol=${description}\"\n" fi @@ -176,14 +128,5 @@ if [ -n "$proxy" ]; then target="${target/$target_host/$proxy_host}" fi -# POST RDF/POST multipart form and capture the effective URL -effective_url=$(echo -e "$rdf_post" | curl -w '%{url_effective}' -f -v -s -k -X PUT -H "Accept: text/turtle" -E "$cert_pem_file":"$cert_password" -o /dev/null --config - "$target") - -# If using proxy, rewrite the effective URL back to original hostname -if [ -n "$proxy" ]; then - # Replace proxy host with original host in the effective URL - rewritten_url="${effective_url/$proxy_host/$target_host}" - echo "$rewritten_url" -else - echo "$effective_url" -fi +# POST RDF/POST multipart form +echo -e "$rdf_post" | curl -f -v 
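A matching sketch for the rewritten add-file.sh: the file is attached to an existing item document, and because uploads are content-addressed the resulting file URI can be derived from the file's SHA-1 hash, as the tests further down do. All URLs and paths here are hypothetical:

add-file.sh \
    -f ./ssl/owner/cert.pem \
    -p "Password" \
    -b "https://localhost:4443/" \
    --title "Cities CSV" \
    --file "$PWD/cities.csv" \
    --content-type "text/csv" \
    "https://localhost:4443/files/cities/"

# uploaded content is then served from <base>uploads/<sha1 of the file content>
sha1=$(shasum -a 1 "$PWD/cities.csv" | awk '{print $1}')
echo "https://localhost:4443/uploads/${sha1}"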
-s -k -X POST -H "Accept: text/turtle" -E "$cert_pem_file":"$cert_password" -o /dev/null --config - "$target" diff --git a/bin/admin/ontologies/add-construct.sh b/bin/admin/ontologies/add-constructor.sh similarity index 100% rename from bin/admin/ontologies/add-construct.sh rename to bin/admin/ontologies/add-constructor.sh diff --git a/bin/delete.sh b/bin/delete.sh new file mode 100755 index 000000000..1099bb488 --- /dev/null +++ b/bin/delete.sh @@ -0,0 +1,71 @@ +#!/usr/bin/env bash + +print_usage() +{ + printf "Deletes an RDF document.\n" + printf "\n" + printf "Usage: %s options TARGET_URI\n" "$0" + printf "\n" + printf "Options:\n" + printf " -f, --cert-pem-file CERT_FILE .pem file with the WebID certificate of the agent\n" + printf " -p, --cert-password CERT_PASSWORD Password of the WebID certificate\n" + printf " --proxy PROXY_URL The host this request will be proxied through (optional)\n" +} + +hash curl 2>/dev/null || { echo >&2 "curl not on \$PATH. Aborting."; exit 1; } + +unknown=() +while [[ $# -gt 0 ]] +do + key="$1" + + case $key in + -f|--cert-pem-file) + cert_pem_file="$2" + shift # past argument + shift # past value + ;; + -p|--cert-password) + cert_password="$2" + shift # past argument + shift # past value + ;; + --proxy) + proxy="$2" + shift # past argument + shift # past value + ;; + *) # unknown option + unknown+=("$1") # save it in an array for later + shift # past argument + ;; + esac +done +set -- "${unknown[@]}" # restore args + +if [ -z "$cert_pem_file" ] ; then + print_usage + exit 1 +fi +if [ -z "$cert_password" ] ; then + print_usage + exit 1 +fi +if [ "$#" -ne 1 ]; then + print_usage + exit 1 +fi + +url="$1" + +if [ -n "$proxy" ]; then + # rewrite target hostname to proxy hostname + url_host=$(echo "$url" | cut -d '/' -f 1,2,3) + proxy_host=$(echo "$proxy" | cut -d '/' -f 1,2,3) + final_url="${url/$url_host/$proxy_host}" +else + final_url="$url" +fi + +# DELETE the document +curl -f -v -k -E "$cert_pem_file":"$cert_password" -X DELETE -o /dev/null "$final_url" diff --git a/bin/imports/create-csv-import.sh b/bin/imports/add-csv-import.sh similarity index 73% rename from bin/imports/create-csv-import.sh rename to bin/imports/add-csv-import.sh index f7edac6cd..5b01392b1 100755 --- a/bin/imports/create-csv-import.sh +++ b/bin/imports/add-csv-import.sh @@ -5,7 +5,7 @@ print_usage() { printf "Transforms CSV data into RDF using a SPARQL query and imports it.\n" printf "\n" - printf "Usage: %s options\n" "$0" + printf "Usage: %s options TARGET_URI\n" "$0" printf "\n" printf "Options:\n" printf " -f, --cert-pem-file CERT_FILE .pem file with the WebID certificate of the agent\n" @@ -13,9 +13,9 @@ print_usage() printf " -b, --base BASE_URI Base URI of the application\n" printf " --proxy PROXY_URL The host this request will be proxied through (optional)\n" printf "\n" - printf " --title TITLE Title of the container\n" - printf " --description DESCRIPTION Description of the container (optional)\n" - printf " --slug STRING String that will be used as URI path segment (optional)\n" + printf " --title TITLE Title of the import\n" + printf " --description DESCRIPTION Description of the import (optional)\n" + printf " --uri URI URI of the import resource (optional)\n" printf "\n" printf " --query QUERY_URI URI of the CONSTRUCT mapping query\n" printf " --file FILE_URI URI of the CSV file\n" @@ -24,11 +24,6 @@ print_usage() hash turtle 2>/dev/null || { echo >&2 "turtle not on \$PATH. 
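The proxy handling in the new delete.sh rewrites only the scheme-plus-host part of the target URL before issuing the DELETE. A worked example of that logic with hypothetical values:

url="https://linkeddatahub.example/docs/item/"
proxy="https://localhost:4443/"
url_host=$(echo "$url" | cut -d '/' -f 1,2,3)      # https://linkeddatahub.example
proxy_host=$(echo "$proxy" | cut -d '/' -f 1,2,3)  # https://localhost:4443
echo "${url/$url_host/$proxy_host}"                # https://localhost:4443/docs/item/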
Aborting."; exit 1; } -urlencode() { - python -c 'import urllib.parse, sys; print(urllib.parse.quote(sys.argv[1], sys.argv[2]))' \ - "$1" "$urlencode_safe" -} - args=() while [[ $# -gt 0 ]] do @@ -65,8 +60,8 @@ do shift # past argument shift # past value ;; - --slug) - slug="$2" + --uri) + uri="$2" shift # past argument shift # past value ;; @@ -93,6 +88,8 @@ do done set -- "${args[@]}" # restore args +target="$1" + if [ -z "$cert_pem_file" ] ; then print_usage exit 1 @@ -122,14 +119,11 @@ if [ -z "$delimiter" ] ; then exit 1 fi -if [ -z "$slug" ] ; then - slug=$(uuidgen | tr '[:upper:]' '[:lower:]') # lowercase +if [ -n "$uri" ] ; then + subject="<${uri}>" +else + subject="_:import" fi -encoded_slug=$(urlencode "$slug") - -container="${base}imports/" - -target="${container}${encoded_slug}/" args+=("-f") args+=("$cert_pem_file") @@ -137,29 +131,23 @@ args+=("-p") args+=("$cert_password") args+=("-t") args+=("text/turtle") # content type -args+=("$target") if [ -n "$proxy" ]; then args+=("--proxy") args+=("$proxy") fi turtle+="@prefix ldh: .\n" -turtle+="@prefix dh: .\n" turtle+="@prefix dct: .\n" -turtle+="@prefix foaf: .\n" turtle+="@prefix spin: .\n" -turtle+="_:import a ldh:CSVImport .\n" -turtle+="_:import dct:title \"${title}\" .\n" -turtle+="_:import spin:query <${query}> .\n" -turtle+="_:import ldh:file <${file}> .\n" -turtle+="_:import ldh:delimiter \"${delimiter}\" .\n" -turtle+="<${target}> a dh:Item .\n" -turtle+="<${target}> foaf:primaryTopic _:import .\n" -turtle+="<${target}> dct:title \"${title}\" .\n" +turtle+="${subject} a ldh:CSVImport .\n" +turtle+="${subject} dct:title \"${title}\" .\n" +turtle+="${subject} spin:query <${query}> .\n" +turtle+="${subject} ldh:file <${file}> .\n" +turtle+="${subject} ldh:delimiter \"${delimiter}\" .\n" if [ -n "$description" ] ; then - turtle+="_:import dct:description \"${description}\" .\n" + turtle+="${subject} dct:description \"${description}\" .\n" fi # submit Turtle doc to the server -echo -e "$turtle" | turtle --base="$target" | put.sh "${args[@]}" \ No newline at end of file +echo -e "$turtle" | turtle --base="$target" | post.sh "${args[@]}" \ No newline at end of file diff --git a/bin/imports/create-rdf-import.sh b/bin/imports/add-rdf-import.sh similarity index 73% rename from bin/imports/create-rdf-import.sh rename to bin/imports/add-rdf-import.sh index 8d76b5e48..c47e68011 100755 --- a/bin/imports/create-rdf-import.sh +++ b/bin/imports/add-rdf-import.sh @@ -5,7 +5,7 @@ print_usage() { printf "Imports RDF data.\n" printf "\n" - printf "Usage: %s options\n" "$0" + printf "Usage: %s options TARGET_URI\n" "$0" printf "\n" printf "Options:\n" printf " -f, --cert-pem-file CERT_FILE .pem file with the WebID certificate of the agent\n" @@ -13,9 +13,9 @@ print_usage() printf " -b, --base BASE_URI Base URI of the application\n" printf " --proxy PROXY_URL The host this request will be proxied through (optional)\n" printf "\n" - printf " --title TITLE Title of the container\n" - printf " --description DESCRIPTION Description of the container (optional)\n" - printf " --slug STRING String that will be used as URI path segment (optional)\n" + printf " --title TITLE Title of the import\n" + printf " --description DESCRIPTION Description of the import (optional)\n" + printf " --uri URI URI of the import resource (optional)\n" printf "\n" printf " --query QUERY_URI URI of the CONSTRUCT mapping query (optional)\n" printf " --graph GRAPH_URI URI of the graph (optional)\n" @@ -24,11 +24,6 @@ print_usage() hash turtle 2>/dev/null || { echo >&2 
"turtle not on \$PATH. Aborting."; exit 1; } -urlencode() { - python -c 'import urllib.parse, sys; print(urllib.parse.quote(sys.argv[1], sys.argv[2]))' \ - "$1" "$urlencode_safe" -} - args=() while [[ $# -gt 0 ]] do @@ -65,8 +60,8 @@ do shift # past argument shift # past value ;; - --slug) - slug="$2" + --uri) + uri="$2" shift # past argument shift # past value ;; @@ -93,6 +88,8 @@ do done set -- "${args[@]}" # restore args +target="$1" + if [ -z "$cert_pem_file" ] ; then print_usage exit 1 @@ -114,14 +111,11 @@ if [ -z "$file" ] ; then exit 1 fi -if [ -z "$slug" ] ; then - slug=$(uuidgen | tr '[:upper:]' '[:lower:]') # lowercase +if [ -n "$uri" ] ; then + subject="<${uri}>" +else + subject="_:import" fi -encoded_slug=$(urlencode "$slug") - -container="${base}imports/" - -target="${container}${encoded_slug}/" args+=("-f") args+=("$cert_pem_file") @@ -129,34 +123,28 @@ args+=("-p") args+=("$cert_password") args+=("-t") args+=("text/turtle") # content type -args+=("$target") if [ -n "$proxy" ]; then args+=("--proxy") args+=("$proxy") fi turtle+="@prefix ldh: .\n" -turtle+="@prefix dh: .\n" turtle+="@prefix dct: .\n" -turtle+="@prefix foaf: .\n" -turtle+="_:import a ldh:RDFImport .\n" -turtle+="_:import dct:title \"${title}\" .\n" -turtle+="_:import ldh:file <${file}> .\n" -turtle+="<${target}> a dh:Item .\n" -turtle+="<${target}> foaf:primaryTopic _:import .\n" -turtle+="<${target}> dct:title \"${title}\" .\n" +turtle+="${subject} a ldh:RDFImport .\n" +turtle+="${subject} dct:title \"${title}\" .\n" +turtle+="${subject} ldh:file <${file}> .\n" if [ -n "$graph" ] ; then turtle+="@prefix sd: .\n" - turtle+="_:import sd:name <${graph}> .\n" + turtle+="${subject} sd:name <${graph}> .\n" fi if [ -n "$query" ] ; then turtle+="@prefix spin: .\n" - turtle+="_:import spin:query <${query}> .\n" + turtle+="${subject} spin:query <${query}> .\n" fi if [ -n "$description" ] ; then - turtle+="_:import dct:description \"${description}\" .\n" + turtle+="${subject} dct:description \"${description}\" .\n" fi # submit Turtle doc to the server -echo -e "$turtle" | turtle --base="$target" | put.sh "${args[@]}" \ No newline at end of file +echo -e "$turtle" | turtle --base="$target" | post.sh "${args[@]}" \ No newline at end of file diff --git a/bin/imports/import-csv.sh b/bin/imports/import-csv.sh index d7c55dd38..4a1fd60e1 100755 --- a/bin/imports/import-csv.sh +++ b/bin/imports/import-csv.sh @@ -12,7 +12,7 @@ print_usage() { printf "Transforms CSV data into RDF using a SPARQL query and imports it.\n" printf "\n" - printf "Usage: %s options\n" "$0" + printf "Usage: %s options TARGET_URI\n" "$0" printf "\n" printf "Options:\n" printf " -f, --cert-pem-file CERT_FILE .pem file with the WebID certificate of the agent\n" @@ -25,12 +25,8 @@ print_usage() printf " --slug STRING String that will be used as URI path segment (optional)\n" printf "\n" printf " --query-file ABS_PATH Absolute path to the text file with the SPARQL query string\n" - printf " --query-doc-slug STRING String that will be used as the query's URI path segment (optional)\n" - printf " --file ABS_PATH Absolute path to the CSV file\n" - printf " --file-slug STRING String that will be used as the file's URI path segment (optional)\n" - printf " --file-doc-slug STRING String that will be used as the file document's URI path segment (optional)\n" + printf " --csv-file ABS_PATH Absolute path to the CSV file\n" printf " --delimiter CHAR CSV delimiter char (default: ',')\n" - printf " --import-slug STRING String that will be used as the import's URI path 
segment (optional)\n" } args=() @@ -69,23 +65,8 @@ do shift # past argument shift # past value ;; - --query-doc-slug) - query_doc_slug="$2" - shift # past argument - shift # past value - ;; - --file) - file="$2" - shift # past argument - shift # past value - ;; - --file-slug) - file_slug="$2" - shift # past argument - shift # past value - ;; - --file-doc-slug) - file_doc_slug="$2" + --csv-file) + csv_file="$2" shift # past argument shift # past value ;; @@ -94,11 +75,6 @@ do shift # past argument shift # past value ;; - --import-slug) - import_slug="$2" - shift # past argument - shift # past value - ;; *) # unknown arguments args+=("$1") # save it in an array for later shift # past argument @@ -107,6 +83,8 @@ do done set -- "${args[@]}" # restore args +target="$1" + if [ -z "$cert_pem_file" ] ; then print_usage exit 1 @@ -127,7 +105,7 @@ if [ -z "$query_file" ] ; then print_usage exit 1 fi -if [ -z "$file" ] ; then +if [ -z "$csv_file" ] ; then print_usage exit 1 fi @@ -139,55 +117,47 @@ if [ -z "$proxy" ] ; then proxy="$base" fi -query_doc=$(create-query.sh \ +# Generate query ID for fragment identifier +query_id=$(uuidgen | tr '[:upper:]' '[:lower:]') + +# Add the CONSTRUCT query to the item using fragment identifier +add-construct.sh \ -b "$base" \ -f "$cert_pem_file" \ -p "$cert_password" \ --proxy "$proxy" \ --title "$title" \ - --slug "$query_doc_slug" \ - --query-file "$query_file" -) - -query_ntriples=$(get.sh \ - -f "$cert_pem_file" \ - -p "$cert_password" \ - --proxy "$proxy" \ - --accept 'application/n-triples' \ - "$query_doc" -) + --uri "#${query_id}" \ + --query-file "$query_file" \ + "$target" -query=$(echo "$query_ntriples" | sed -rn "s/<${query_doc//\//\\/}> <(.*)> \./\1/p" | head -1) +# The query URI is the document with fragment +query="${target}#${query_id}" -file_doc=$(create-file.sh \ +# Add the file to the import item +add-file.sh \ -b "$base" \ -f "$cert_pem_file" \ -p "$cert_password" \ --proxy "$proxy" \ --title "$title" \ - --slug "$file_doc_slug" \ - --file-slug "$file_slug" \ - --file "$file" \ - --file-content-type "text/csv" -) - -file_ntriples=$(get.sh \ - -f "$cert_pem_file" \ - -p "$cert_password" \ - --proxy "$proxy" \ - --accept 'application/n-triples' \ - "$file_doc") + --file "$csv_file" \ + --content-type "text/csv" \ + "$target" -file=$(echo "$file_ntriples" | sed -rn "s/<${file_doc//\//\\/}> <(.*)> \./\1/p" | head -1) +# Calculate file URI from SHA1 hash +sha1sum=$(shasum -a 1 "$csv_file" | awk '{print $1}') +file="${base}uploads/${sha1sum}" -create-csv-import.sh \ +# Add the import metadata to the import item using fragment identifier +add-csv-import.sh \ -b "$base" \ -f "$cert_pem_file" \ -p "$cert_password" \ --proxy "$proxy" \ --title "$title" \ - --slug "$import_slug" \ --query "$query" \ --file "$file" \ - --delimiter "$delimiter" + --delimiter "$delimiter" \ + "$target" \ No newline at end of file diff --git a/bin/imports/import-rdf.sh b/bin/imports/import-rdf.sh index 086d4d303..7626ed4d8 100755 --- a/bin/imports/import-rdf.sh +++ b/bin/imports/import-rdf.sh @@ -10,9 +10,9 @@ function onexit() { print_usage() { - printf "Transforms CSV data into RDF using a SPARQL query and imports it.\n" + printf "Transforms RDF data using a SPARQL query and imports it.\n" printf "\n" - printf "Usage: %s options\n" "$0" + printf "Usage: %s options TARGET_URI\n" "$0" printf "\n" printf "Options:\n" printf " -f, --cert-pem-file CERT_FILE .pem file with the WebID certificate of the agent\n" @@ -25,13 +25,9 @@ print_usage() printf " --slug STRING String that 
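End-to-end, the reworked import-csv.sh is now called against an existing import item document rather than minting containers and slugs itself. A hypothetical invocation (all URLs and paths illustrative):

import-csv.sh \
    -f ./ssl/owner/cert.pem \
    -p "Password" \
    -b "https://localhost:4443/" \
    --title "City import" \
    --query-file "$PWD/csv-cities.rq" \
    --csv-file "$PWD/cities.csv" \
    --delimiter "," \
    "https://localhost:4443/imports/cities/"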
will be used as URI path segment (optional)\n" printf "\n" printf " --query-file ABS_PATH Absolute path to the text file with the SPARQL query string (optional)\n" - printf " --query-doc-slug STRING String that will be used as the query's URI path segment (optional)\n" printf " --graph GRAPH_URI URI of the graph (optional)\n" - printf " --file ABS_PATH Absolute path to the CSV file (optional)\n" - printf " --file-slug STRING String that will be used as the file's URI path segment (optional)\n" - printf " --file-doc-slug STRING String that will be used as the file document's URI path segment (optional)\n" - printf " --file-content-type MEDIA_TYPE Media type of the file\n" - printf " --import-slug STRING String that will be used as the import's URI path segment (optional)\n" + printf " --rdf-file ABS_PATH Absolute path to the RDF file (optional)\n" + printf " --content-type MEDIA_TYPE Media type of the file\n" } args=() @@ -75,33 +71,13 @@ do shift # past argument shift # past value ;; - --query-doc-slug) - query_doc_slug="$2" + --rdf-file) + rdf_file="$2" shift # past argument shift # past value ;; - --file) - file="$2" - shift # past argument - shift # past value - ;; - --file-slug) - file_slug="$2" - shift # past argument - shift # past value - ;; - --file-doc-slug) - file_doc_slug="$2" - shift # past argument - shift # past value - ;; - --file-content-type) - file_content_type="$2" - shift # past argument - shift # past value - ;; - --import-slug) - import_slug="$2" + --content-type) + content_type="$2" shift # past argument shift # past value ;; @@ -113,6 +89,8 @@ do done set -- "${args[@]}" # restore args +target="$1" + if [ -z "$cert_pem_file" ] ; then print_usage exit 1 @@ -129,11 +107,11 @@ if [ -z "$title" ] ; then print_usage exit 1 fi -if [ -z "$file" ] ; then +if [ -z "$rdf_file" ] ; then print_usage exit 1 fi -if [ -z "$file_content_type" ] ; then +if [ -z "$content_type" ] ; then print_usage exit 1 fi @@ -143,67 +121,63 @@ if [ -z "$proxy" ] ; then fi if [ -n "$query_file" ] ; then - query_doc=$(create-query.sh \ + # Generate query ID for fragment identifier + query_id=$(uuidgen | tr '[:upper:]' '[:lower:]') + + # Add the CONSTRUCT query to the item using fragment identifier + add-construct.sh \ -b "$base" \ -f "$cert_pem_file" \ -p "$cert_password" \ --proxy "$proxy" \ --title "$title" \ - --slug "$query_doc_slug" \ - --query-file "$query_file" - ) - - query_ntriples=$(get.sh \ - -f "$cert_pem_file" \ - -p "$cert_password" \ - --proxy "$proxy" \ - --accept 'application/n-triples' \ - "$query_doc" - ) + --uri "#${query_id}" \ + --query-file "$query_file" \ + "$target" - query=$(echo "$query_ntriples" | sed -rn "s/<${query_doc//\//\\/}> <(.*)> \./\1/p" | head -1) + # The query URI is the document with fragment + query="${target}#${query_id}" fi -file_doc=$(create-file.sh \ +# Add the file to the import item +add-file.sh \ -b "$base" \ -f "$cert_pem_file" \ -p "$cert_password" \ --proxy "$proxy" \ --title "$title" \ - --slug "$file_doc_slug" \ - --file-slug "$file_slug" \ - --file "$file" \ - --file-content-type "$file_content_type" -) + --file "$rdf_file" \ + --content-type "$content_type" \ + "$target" -file_ntriples=$(get.sh \ - -f "$cert_pem_file" \ - -p "$cert_password" \ - --proxy "$proxy" \ - --accept 'application/n-triples' \ - "$file_doc" -) +# Calculate file URI from SHA1 hash +sha1sum=$(shasum -a 1 "$rdf_file" | awk '{print $1}') +rdf_file_uri="${base}uploads/${sha1sum}" -file=$(echo "$file_ntriples" | sed -rn "s/<${file_doc//\//\\/}> <(.*)> \./\1/p" | head -1) +# 
Generate import ID for fragment identifier +import_id=$(uuidgen | tr '[:upper:]' '[:lower:]') +# Add the import metadata to the import item using fragment identifier if [ -n "$query" ] ; then - create-rdf-import.sh \ + add-rdf-import.sh \ -b "$base" \ -f "$cert_pem_file" \ -p "$cert_password" \ --proxy "$proxy" \ --title "$title" \ - --slug "$import_slug" \ + --uri "#${import_id}" \ --query "$query" \ - --file "$file" + --file "$rdf_file_uri" \ + "$target" else - create-rdf-import.sh \ + add-rdf-import.sh \ -b "$base" \ -f "$cert_pem_file" \ -p "$cert_password" \ --proxy "$proxy" \ --title "$title" \ - --slug "$import_slug" \ + --uri "#${import_id}" \ --graph "$graph" \ - --file "$file" + --file "$rdf_file_uri" \ + "$target" fi \ No newline at end of file diff --git a/config/dataspaces.trig b/config/dataspaces.trig new file mode 100644 index 000000000..cb929cf6f --- /dev/null +++ b/config/dataspaces.trig @@ -0,0 +1,62 @@ +@prefix lapp: . +@prefix ldh: . +@prefix ac: . +@prefix rdf: . +@prefix rdfs: . +@prefix xsd: . +@prefix ldt: . +@prefix dct: . +@prefix foaf: . + +### do not use blank nodes to identify resources! ### +### urn: URI scheme is used because applications are not accessible in their own dataspace (under $BASE_URI) ### + +# root admin + + +{ + a lapp:Application ; + dct:title "LinkedDataHub admin" ; + lapp:origin ; + ldt:ontology ; + ac:stylesheet . + +} + +# root end-user + + +{ + a lapp:Application ; + dct:title "LinkedDataHub" ; + lapp:origin ; + ldt:ontology ; + ac:stylesheet ; + lapp:public true . + +} + +# northwind-traders admin + + +{ + a lapp:Application ; + dct:title "Northwind Traders admin" ; + lapp:origin ; + ldt:ontology ; + ac:stylesheet . + +} + +# northwind-traders end-user + + +{ + a lapp:Application ; + dct:title "Northwind Traders" ; + lapp:origin ; + ldt:ontology ; + ac:stylesheet ; + lapp:public true . + +} diff --git a/config/system.trig b/config/system.trig index 19c6a0979..ca1eccc42 100644 --- a/config/system.trig +++ b/config/system.trig @@ -1,73 +1,65 @@ @prefix lapp: . -@prefix ldh: . -@prefix a: . -@prefix ac: . +@prefix a: . @prefix rdf: . -@prefix rdfs: . -@prefix xsd: . @prefix ldt: . -@prefix sd: . +@prefix sd: . @prefix dct: . -@prefix foaf: . -### do not use blank nodes to identify resources! ### -### urn: URI scheme is used because applications/services are not accessible in their own dataspace (under $BASE_URI) ### +### internal deployment wiring - not for public sharing ### +### maps apps to their backend SPARQL services, and assigns admin/end-user roles ### -# root admin +# root admin - type + service binding { - a lapp:Application, lapp:AdminApplication ; - dct:title "LinkedDataHub admin" ; - # ldt:base ; - lapp:origin ; - ldt:ontology ; - ldt:service ; - ac:stylesheet ; - lapp:endUserApplication ; - lapp:frontendProxy . - + a lapp:AdminApplication ; + ldt:service . } +# root admin - service description + { - a sd:Service ; dct:title "LinkedDataHub admin service" ; sd:supportedLanguage sd:SPARQL11Query, sd:SPARQL11Update ; sd:endpoint ; a:graphStore ; - a:quadStore ; - lapp:backendProxy . - + a:quadStore . } -# root end-user +# root end-user - type + service binding { - a lapp:Application, lapp:EndUserApplication ; - dct:title "LinkedDataHub" ; - # ldt:base ; - lapp:origin ; - ldt:ontology ; - ldt:service ; - ac:stylesheet ; - lapp:adminApplication ; - lapp:frontendProxy ; - lapp:public true . - + a lapp:EndUserApplication ; + ldt:service . 
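To make the dataspaces.trig / system.trig split concrete: both files now contribute triples to the same named graph per application, with the shareable description in dataspaces.trig and the deployment wiring in system.trig. A minimal sketch, with assumed prefix IRIs and made-up urn: identifiers, since the real graph names are not reproduced in this hunk:

@prefix lapp: <https://w3id.org/atomgraph/linkeddatahub/apps#> .
@prefix ldt:  <https://www.w3.org/ns/ldt#> .
@prefix dct:  <http://purl.org/dc/terms/> .

# dataspaces.trig: public description of the app
<urn:example:apps/my-app> {
    <urn:example:apps/my-app> a lapp:Application ;
        dct:title "My app" ;
        lapp:public true .
}

# system.trig: role and backend SPARQL service binding for the same app
<urn:example:apps/my-app> {
    <urn:example:apps/my-app> a lapp:EndUserApplication ;
        ldt:service <urn:example:services/my-app> .
}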
} +# root end-user - service description + { - a sd:Service ; dct:title "LinkedDataHub service" ; sd:supportedLanguage sd:SPARQL11Query, sd:SPARQL11Update ; sd:endpoint ; a:graphStore ; - a:quadStore ; - lapp:backendProxy . + a:quadStore . +} + +# northwind-traders admin - type + service binding (reuses root admin service) + +{ + a lapp:AdminApplication ; + ldt:service . +} + +# northwind-traders end-user - type + service binding (reuses root end-user service) + + +{ + a lapp:EndUserApplication ; + ldt:service . } diff --git a/docker-compose.yml b/docker-compose.yml index 8f16be46b..7aaa7bef0 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -13,6 +13,8 @@ secrets: # file: ./secrets/orcid/client_id.txt # orcid_client_secret: # file: ./secrets/orcid/client_secret.txt + # credentials: + # file: ./secrets/credentials.trig volumes: varnish_frontend_cache: services: @@ -45,6 +47,9 @@ services: #- CATALINA_OPTS="-Duser.timezone=Europe/Copenhagen" - PROXY_HOST=nginx - PROXY_PORT=9443 + - FRONTEND_PROXY=http://varnish-frontend:6060/ + - BACKEND_PROXY_ADMIN=http://varnish-admin/ + - BACKEND_PROXY_END_USER=http://varnish-end-user/ - PROTOCOL=${PROTOCOL} - HOST=${HOST} - ABS_PATH=${ABS_PATH} @@ -84,6 +89,7 @@ services: # - google_client_secret # - orcid_client_id # - orcid_client_secret + # - credentials volumes: - /var/linkeddatahub/oidc - ./ssl/server:/var/linkeddatahub/ssl/server:ro @@ -93,6 +99,7 @@ services: - ./datasets/secretary:/var/linkeddatahub/datasets/secretary - ./uploads:/var/www/linkeddatahub/uploads - ./config/dev.log4j.properties:/usr/local/tomcat/webapps/ROOT/WEB-INF/classes/log4j.properties:ro + - ./config/dataspaces.trig:/var/linkeddatahub/datasets/dataspaces.trig - ./config/system.trig:/var/linkeddatahub/datasets/system.trig fuseki-admin: image: atomgraph/fuseki:4.7.0 @@ -279,7 +286,7 @@ configs: backend default { .host = "${VARNISH_FRONTEND_BACKEND_HOST:-linkeddatahub}"; - .port = "7070"; + .port = "${VARNISH_FRONTEND_BACKEND_PORT:-7070}"; .first_byte_timeout = 60s; } @@ -394,7 +401,7 @@ configs: backend default { .host = "${VARNISH_ADMIN_BACKEND_HOST:-fuseki-admin}"; - .port = "3030"; + .port = "${VARNISH_ADMIN_BACKEND_PORT:-3030}"; .first_byte_timeout = 60s; } @@ -460,7 +467,7 @@ configs: backend default { .host = "${VARNISH_END_USER_BACKEND_HOST:-fuseki-end-user}"; - .port = "3030"; + .port = "${VARNISH_END_USER_BACKEND_PORT:-3030}"; .first_byte_timeout = 60s; } diff --git a/http-tests/admin/model/ontology-import-upload-no-deadlock.sh b/http-tests/admin/model/ontology-import-upload-no-deadlock.sh new file mode 100755 index 000000000..935facd7e --- /dev/null +++ b/http-tests/admin/model/ontology-import-upload-no-deadlock.sh @@ -0,0 +1,108 @@ +#!/usr/bin/env bash +set -euo pipefail + +# Test that ontology imports of uploaded files do not cause deadlock +# This verifies the fix for circular dependency when: +# 1. Request arrives for /uploads/xyz +# 2. OntologyFilter intercepts it and loads ontology +# 3. Ontology has owl:imports for /uploads/xyz +# 4. Jena FileManager makes HTTP request to /uploads/xyz +# 5. 
Would cause infinite loop/deadlock without the fix + +initialize_dataset "$END_USER_BASE_URL" "$TMP_END_USER_DATASET" "$END_USER_ENDPOINT_URL" +initialize_dataset "$ADMIN_BASE_URL" "$TMP_ADMIN_DATASET" "$ADMIN_ENDPOINT_URL" +purge_cache "$END_USER_VARNISH_SERVICE" +purge_cache "$ADMIN_VARNISH_SERVICE" +purge_cache "$FRONTEND_VARNISH_SERVICE" + +pwd=$(realpath "$PWD") + +# add agent to the writers group so they can upload files + +add-agent-to-group.sh \ + -f "$OWNER_CERT_FILE" \ + -p "$OWNER_CERT_PWD" \ + --agent "$AGENT_URI" \ + "${ADMIN_BASE_URL}acl/groups/writers/" + +# Step 1: Upload an RDF file + +file_content_type="text/turtle" +slug=$(uuidgen | tr '[:upper:]' '[:lower:]') + +# Create an item document to hold the file +file_doc=$(create-item.sh \ + -f "$AGENT_CERT_FILE" \ + -p "$AGENT_CERT_PWD" \ + -b "$END_USER_BASE_URL" \ + --title "Test ontology for upload import" \ + --container "$END_USER_BASE_URL" \ + --slug "$slug") + +# Add the file to the document +add-file.sh \ + -f "$AGENT_CERT_FILE" \ + -p "$AGENT_CERT_PWD" \ + -b "$END_USER_BASE_URL" \ + --title "Test ontology for upload import" \ + --file "$pwd/test-ontology-import.ttl" \ + --content-type "${file_content_type}" \ + "$file_doc" + +# Step 2: Extract the uploaded file URI (content-addressed) + +# Calculate file URI from SHA1 hash +sha1sum=$(shasum -a 1 "$pwd/test-ontology-import.ttl" | awk '{print $1}') +upload_uri="${END_USER_BASE_URL}uploads/${sha1sum}" + +# Verify the uploaded file is accessible before we add it as an import +curl -k -f -s \ + -E "$AGENT_CERT_FILE":"$AGENT_CERT_PWD" \ + -H "Accept: ${file_content_type}" \ + "$upload_uri" > /dev/null + +# Step 3: Add the uploaded file as an owl:import to the namespace ontology + +namespace_doc="${END_USER_BASE_URL}ns" +namespace="${namespace_doc}#" +ontology_doc="${ADMIN_BASE_URL}ontologies/namespace/" + +add-ontology-import.sh \ + -f "$OWNER_CERT_FILE" \ + -p "$OWNER_CERT_PWD" \ + --import "$upload_uri" \ + "$ontology_doc" + +# Step 4: Clear the namespace ontology from memory to force reload on next request + +clear-ontology.sh \ + -f "$OWNER_CERT_FILE" \ + -p "$OWNER_CERT_PWD" \ + -b "$ADMIN_BASE_URL" \ + --ontology "$namespace" + +# Step 5: Verify the import is present in the loaded ontology +# This request also triggers ontology loading and would detect deadlock + +curl -k -f -s \ + -H "Accept: application/n-triples" \ + "$namespace_doc" \ +| grep "<${namespace}> <${upload_uri}>" > /dev/null + +# Step 6: Verify the uploaded file is still accessible after ontology loading + +curl -k -f -s \ + -E "$AGENT_CERT_FILE":"$AGENT_CERT_PWD" \ + -H "Accept: ${file_content_type}" \ + "$upload_uri" > /dev/null + +# Step 7: Verify that the imported ontology content is accessible via the namespace document +# This confirms the import was actually loaded (not just skipped) + +curl -k -f -s \ + -G \ + -E "$OWNER_CERT_FILE":"$OWNER_CERT_PWD" \ + -H 'Accept: application/sparql-results+xml' \ + --data-urlencode "query=SELECT * { ?p ?o }" \ + "$namespace_doc" \ +| grep 'Test Class' > /dev/null diff --git a/http-tests/admin/model/test-ontology-import.ttl b/http-tests/admin/model/test-ontology-import.ttl new file mode 100644 index 000000000..24361b035 --- /dev/null +++ b/http-tests/admin/model/test-ontology-import.ttl @@ -0,0 +1,17 @@ +@prefix : . +@prefix owl: . +@prefix rdfs: . +@prefix xsd: . + +: a owl:Ontology ; + rdfs:label "Test ontology for upload import" ; + rdfs:comment "This ontology is uploaded to test that ontology imports of uploaded files do not cause deadlock" . 
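The circular dependency the test exercises amounts to the namespace ontology importing a document served by the same application. A hypothetical snapshot of the namespace ontology after step 3 (both URIs are illustrative):

@prefix owl: <http://www.w3.org/2002/07/owl#> .

<https://localhost:4443/ns#> a owl:Ontology ;
    owl:imports <https://localhost:4443/uploads/0a4d55a8d778e5022fab701977c5d840bbc486d0> .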
+ +:TestClass a owl:Class ; + rdfs:label "Test Class" ; + rdfs:comment "A test class to verify ontology was loaded" . + +:testProperty a owl:DatatypeProperty ; + rdfs:label "Test Property" ; + rdfs:domain :TestClass ; + rdfs:range xsd:string . diff --git a/http-tests/admin/packages/install-uninstall-package-ontology.sh b/http-tests/admin/packages/install-uninstall-package-ontology.sh index b96db04fd..9f5fa2330 100755 --- a/http-tests/admin/packages/install-uninstall-package-ontology.sh +++ b/http-tests/admin/packages/install-uninstall-package-ontology.sh @@ -58,4 +58,4 @@ fi curl -k -w "%{http_code}\n" -o /dev/null -s \ -E "$OWNER_CERT_FILE":"$OWNER_CERT_PWD" \ "${ADMIN_BASE_URL}ontologies/${package_ontology_hash}/" \ -| grep -q "$STATUS_FORBIDDEN" +| grep -q "$STATUS_NOT_FOUND" diff --git a/http-tests/config/dataspaces.trig b/http-tests/config/dataspaces.trig new file mode 100644 index 000000000..a1f212417 --- /dev/null +++ b/http-tests/config/dataspaces.trig @@ -0,0 +1,58 @@ +@prefix lapp: . +@prefix ldh: . +@prefix ac: . +@prefix rdf: . +@prefix rdfs: . +@prefix xsd: . +@prefix ldt: . +@prefix dct: . +@prefix foaf: . + +### do not use blank nodes to identify resources! ### +### urn: URI scheme is used because applications are not accessible in their own dataspace (under $BASE_URI) ### + +# root admin + + +{ + a lapp:Application ; + dct:title "LinkedDataHub admin" ; + lapp:origin ; + ldt:ontology ; + ac:stylesheet . +} + +# root end-user + + +{ + a lapp:Application ; + dct:title "LinkedDataHub" ; + lapp:origin ; + ldt:ontology ; + ac:stylesheet ; + lapp:public true . +} + +# test admin + + +{ + a lapp:Application ; + dct:title "Test admin" ; + lapp:origin ; + ldt:ontology ; + ac:stylesheet . +} + +# test end-user + + +{ + a lapp:Application ; + dct:title "Test" ; + lapp:origin ; + ldt:ontology ; + ac:stylesheet ; + lapp:public true . +} diff --git a/http-tests/config/system.trig b/http-tests/config/system.trig index 428f205bf..7294196b2 100644 --- a/http-tests/config/system.trig +++ b/http-tests/config/system.trig @@ -1,33 +1,23 @@ @prefix lapp: . -@prefix ldh: . @prefix a: . -@prefix ac: . @prefix rdf: . -@prefix rdfs: . -@prefix xsd: . @prefix ldt: . @prefix sd: . @prefix dct: . -@prefix foaf: . -### do not use blank nodes to identify resources! ### -### urn: URI scheme is used because applications/services are not accessible in their own dataspace (under $BASE_URI) ### +### internal deployment wiring - not for public sharing ### +### maps apps to their backend SPARQL services, and assigns admin/end-user roles ### -# root admin +# root admin - type + service binding { - a lapp:Application, lapp:AdminApplication ; - dct:title "LinkedDataHub admin" ; - # ldt:base ; - lapp:origin ; - ldt:ontology ; - ldt:service ; - ac:stylesheet ; - lapp:endUserApplication ; - lapp:frontendProxy . + a lapp:AdminApplication ; + ldt:service . } +# root admin - service description + { a sd:Service ; @@ -35,26 +25,19 @@ sd:supportedLanguage sd:SPARQL11Query, sd:SPARQL11Update ; sd:endpoint ; a:graphStore ; - a:quadStore ; - lapp:backendProxy . + a:quadStore . } -# root end-user +# root end-user - type + service binding { - a lapp:Application, lapp:EndUserApplication ; - dct:title "LinkedDataHub" ; - # ldt:base ; - lapp:origin ; - ldt:ontology ; - ldt:service ; - ac:stylesheet ; - lapp:adminApplication ; - lapp:frontendProxy ; - lapp:public true . + a lapp:EndUserApplication ; + ldt:service . 
} +# root end-user - service description + { a sd:Service ; @@ -62,24 +45,19 @@ sd:supportedLanguage sd:SPARQL11Query, sd:SPARQL11Update ; sd:endpoint ; a:graphStore ; - a:quadStore ; - lapp:backendProxy . + a:quadStore . } -# test admin +# test admin - type + service binding { - a lapp:Application, lapp:AdminApplication ; - dct:title "Test admin" ; - lapp:origin ; - ldt:ontology ; - ldt:service ; - ac:stylesheet ; - lapp:endUserApplication ; - lapp:frontendProxy . + a lapp:AdminApplication ; + ldt:service . } +# test admin - service description + { a sd:Service ; @@ -87,25 +65,19 @@ sd:supportedLanguage sd:SPARQL11Query, sd:SPARQL11Update ; sd:endpoint ; a:graphStore ; - a:quadStore ; - lapp:backendProxy . + a:quadStore . } -# test end-user +# test end-user - type + service binding { - a lapp:Application, lapp:EndUserApplication ; - dct:title "Test" ; - lapp:origin ; - ldt:ontology ; - ldt:service ; - ac:stylesheet ; - lapp:adminApplication ; - lapp:frontendProxy ; - lapp:public true . + a lapp:EndUserApplication ; + ldt:service . } +# test end-user - service description + { a sd:Service ; @@ -113,6 +85,5 @@ sd:supportedLanguage sd:SPARQL11Query, sd:SPARQL11Update ; sd:endpoint ; a:graphStore ; - a:quadStore ; - lapp:backendProxy . + a:quadStore . } diff --git a/http-tests/docker-compose.http-tests.yml b/http-tests/docker-compose.http-tests.yml index 158c2e29c..5bd7412ab 100644 --- a/http-tests/docker-compose.http-tests.yml +++ b/http-tests/docker-compose.http-tests.yml @@ -11,6 +11,7 @@ services: environment: - JPDA_ADDRESS=*:8000 # debugger host - performance hit when enabled volumes: + - ./http-tests/config/dataspaces.trig:/var/linkeddatahub/datasets/dataspaces.trig:ro - ./http-tests/config/system.trig:/var/linkeddatahub/datasets/system.trig:ro - ./http-tests/root-owner.trig.template:/var/linkeddatahub/root-owner.trig.template:ro - ./datasets/owner:/var/linkeddatahub/datasets/owner diff --git a/http-tests/document-hierarchy/GET-non-existing-403.sh b/http-tests/document-hierarchy/DELETE-404.sh similarity index 64% rename from http-tests/document-hierarchy/GET-non-existing-403.sh rename to http-tests/document-hierarchy/DELETE-404.sh index 07e1b3d61..e5b99b11a 100755 --- a/http-tests/document-hierarchy/GET-non-existing-403.sh +++ b/http-tests/document-hierarchy/DELETE-404.sh @@ -15,10 +15,20 @@ add-agent-to-group.sh \ --agent "$AGENT_URI" \ "${ADMIN_BASE_URL}acl/groups/writers/" -# check that access to graph with parent is allowed, but the graph is not found +# check that non-existing document is not found curl -k -w "%{http_code}\n" -o /dev/null -s -G \ -E "$AGENT_CERT_FILE":"$AGENT_CERT_PWD" \ + -X DELETE \ -H "Accept: application/n-triples" \ "${END_USER_BASE_URL}non-existing/" \ -| grep -q "$STATUS_FORBIDDEN" \ No newline at end of file +| grep -q "$STATUS_NOT_FOUND" + +# check that document without parent is not found + +curl -k -w "%{http_code}\n" -o /dev/null -s -G \ + -E "$AGENT_CERT_FILE":"$AGENT_CERT_PWD" \ + -X DELETE \ + -H "Accept: application/n-triples" \ + "${END_USER_BASE_URL}parent/non-existing/" \ +| grep -q "$STATUS_NOT_FOUND" diff --git a/http-tests/document-hierarchy/DELETE.sh b/http-tests/document-hierarchy/DELETE.sh index 5f784f27c..8ffd924f4 100755 --- a/http-tests/document-hierarchy/DELETE.sh +++ b/http-tests/document-hierarchy/DELETE.sh @@ -45,4 +45,4 @@ curl -k -w "%{http_code}\n" -o /dev/null -s -G \ -E "$AGENT_CERT_FILE":"$AGENT_CERT_PWD" \ -H "Accept: application/n-triples" \ "$container" \ -| grep -q "$STATUS_FORBIDDEN" \ No newline at end of file +| grep -q 
"$STATUS_NOT_FOUND" diff --git a/http-tests/document-hierarchy/DELETE-non-existing-403.sh b/http-tests/document-hierarchy/GET-404.sh similarity index 87% rename from http-tests/document-hierarchy/DELETE-non-existing-403.sh rename to http-tests/document-hierarchy/GET-404.sh index 25d67b5a9..de39593de 100755 --- a/http-tests/document-hierarchy/DELETE-non-existing-403.sh +++ b/http-tests/document-hierarchy/GET-404.sh @@ -15,11 +15,10 @@ add-agent-to-group.sh \ --agent "$AGENT_URI" \ "${ADMIN_BASE_URL}acl/groups/writers/" -# check that access to non-existing graph is forbidden +# check that non-existing document is not found curl -k -w "%{http_code}\n" -o /dev/null -s -G \ -E "$AGENT_CERT_FILE":"$AGENT_CERT_PWD" \ - -X DELETE \ -H "Accept: application/n-triples" \ "${END_USER_BASE_URL}non-existing/" \ -| grep -q "$STATUS_FORBIDDEN" \ No newline at end of file +| grep -q "$STATUS_NOT_FOUND" \ No newline at end of file diff --git a/http-tests/document-hierarchy/PATCH-non-existing-403.sh b/http-tests/document-hierarchy/PATCH-404.sh similarity index 97% rename from http-tests/document-hierarchy/PATCH-non-existing-403.sh rename to http-tests/document-hierarchy/PATCH-404.sh index 6bc4ecfc5..c8055110b 100755 --- a/http-tests/document-hierarchy/PATCH-non-existing-403.sh +++ b/http-tests/document-hierarchy/PATCH-404.sh @@ -37,4 +37,4 @@ curl -k -w "%{http_code}\n" -o /dev/null -s \ "${END_USER_BASE_URL}non-existing/" \ --data-binary "$update" ) \ -| grep -q "$STATUS_FORBIDDEN" \ No newline at end of file +| grep -q "$STATUS_NOT_FOUND" \ No newline at end of file diff --git a/http-tests/document-hierarchy/PATCH-empty-container.sh b/http-tests/document-hierarchy/PATCH-empty-container.sh index 5b95eb841..f61f40dad 100755 --- a/http-tests/document-hierarchy/PATCH-empty-container.sh +++ b/http-tests/document-hierarchy/PATCH-empty-container.sh @@ -32,11 +32,11 @@ container=$(create-container.sh \ update=$(cat < ?p ?o + ?s ?p ?o } WHERE { - <${container}> ?p ?o + ?s ?p ?o } EOF ) @@ -55,4 +55,4 @@ curl -k -w "%{http_code}\n" -o /dev/null -s \ -E "$AGENT_CERT_FILE":"$AGENT_CERT_PWD" \ -H "Accept: application/n-triples" \ "$container" \ -| grep -q "$STATUS_FORBIDDEN" +| grep -q "$STATUS_NOT_FOUND" diff --git a/http-tests/document-hierarchy/PATCH-empty-item.sh b/http-tests/document-hierarchy/PATCH-empty-item.sh index 26737efee..6f4d02978 100755 --- a/http-tests/document-hierarchy/PATCH-empty-item.sh +++ b/http-tests/document-hierarchy/PATCH-empty-item.sh @@ -32,11 +32,11 @@ item=$(create-item.sh \ update=$(cat < ?p ?o + ?s ?p ?o } WHERE { - <${item}> ?p ?o + ?s ?p ?o } EOF ) @@ -55,4 +55,4 @@ curl -k -w "%{http_code}\n" -o /dev/null -s \ -E "$AGENT_CERT_FILE":"$AGENT_CERT_PWD" \ -H "Accept: application/n-triples" \ "$item" \ -| grep -q "$STATUS_FORBIDDEN" +| grep -q "$STATUS_NOT_FOUND" diff --git a/http-tests/document-hierarchy/POST-non-existing-403.sh b/http-tests/document-hierarchy/POST-404.sh similarity index 89% rename from http-tests/document-hierarchy/POST-non-existing-403.sh rename to http-tests/document-hierarchy/POST-404.sh index c5d17178d..2cf0fccf9 100755 --- a/http-tests/document-hierarchy/POST-non-existing-403.sh +++ b/http-tests/document-hierarchy/POST-404.sh @@ -15,7 +15,7 @@ add-agent-to-group.sh \ --agent "$AGENT_URI" \ "${ADMIN_BASE_URL}acl/groups/writers/" -# check that access to non-existing graph is forbidden +# check that non-existing document is not found ( curl -k -w "%{http_code}\n" -o /dev/null -s \ @@ -27,4 +27,4 @@ curl -k -w "%{http_code}\n" -o /dev/null -s \ . 
EOF ) \ -| grep -q "$STATUS_FORBIDDEN" \ No newline at end of file +| grep -q "$STATUS_NOT_FOUND" diff --git a/http-tests/document-hierarchy/PUT-double-slash-uri-400.sh b/http-tests/document-hierarchy/PUT-double-slash-uri-400.sh index 09585116f..23ffd6883 100755 --- a/http-tests/document-hierarchy/PUT-double-slash-uri-400.sh +++ b/http-tests/document-hierarchy/PUT-double-slash-uri-400.sh @@ -15,9 +15,20 @@ add-agent-to-group.sh \ --agent "$AGENT_URI" \ "${ADMIN_BASE_URL}acl/groups/writers/" +# create a container - IRIx resolves ".." on "new-item//" to "new-item/" (one segment per slash), +# so the parent container must exist for authorization to pass and reach the // validation in put() + +container=$(create-container.sh \ + -f "$AGENT_CERT_FILE" \ + -p "$AGENT_CERT_PWD" \ + -b "$END_USER_BASE_URL" \ + --title "Test Container" \ + --slug "new-item" \ + --parent "$END_USER_BASE_URL") + # creating new document fails because URIs with double slashes are not allowed -item="${END_USER_BASE_URL}new-item//" +item="${container}/" ( curl -k -w "%{http_code}\n" -o /dev/null -s \ diff --git a/http-tests/document-hierarchy/PUT-no-slash-308.sh b/http-tests/document-hierarchy/PUT-no-slash-308.sh index f4507ede9..e0b6ae1ce 100755 --- a/http-tests/document-hierarchy/PUT-no-slash-308.sh +++ b/http-tests/document-hierarchy/PUT-no-slash-308.sh @@ -7,6 +7,14 @@ purge_cache "$END_USER_VARNISH_SERVICE" purge_cache "$ADMIN_VARNISH_SERVICE" purge_cache "$FRONTEND_VARNISH_SERVICE" +# add agent to the writers group + +add-agent-to-group.sh \ + -f "$OWNER_CERT_FILE" \ + -p "$OWNER_CERT_PWD" \ + --agent "$AGENT_URI" \ + "${ADMIN_BASE_URL}acl/groups/writers/" + # add an explicit read/write authorization for the parent since the child document will inherit it create-authorization.sh \ @@ -19,10 +27,10 @@ create-authorization.sh \ --read \ --write -invalid_item="${END_USER_BASE_URL}no-slash" - # check URI without trailing slash gets redirected +invalid_item="${END_USER_BASE_URL}no-slash" + ( curl -k -w "%{http_code}\n" -o /dev/null -s \ -E "$AGENT_CERT_FILE":"$AGENT_CERT_PWD" \ diff --git a/http-tests/imports/GET-file-304.sh b/http-tests/imports/GET-file-304.sh index 8b4f3728c..1f38581f7 100755 --- a/http-tests/imports/GET-file-304.sh +++ b/http-tests/imports/GET-file-304.sh @@ -7,6 +7,7 @@ purge_cache "$END_USER_VARNISH_SERVICE" purge_cache "$ADMIN_VARNISH_SERVICE" purge_cache "$FRONTEND_VARNISH_SERVICE" +# Run the create-file test and capture the file URI it outputs file=$(./create-file.sh) etag=$( diff --git a/http-tests/imports/GET-file-range.sh b/http-tests/imports/GET-file-range.sh index 649215916..c9c416308 100755 --- a/http-tests/imports/GET-file-range.sh +++ b/http-tests/imports/GET-file-range.sh @@ -22,22 +22,30 @@ add-agent-to-group.sh \ filename="/tmp/random-file" time dd if=/dev/urandom of="$filename" bs=1 count=1024 file_content_type="application/octet-stream" +slug=$(uuidgen | tr '[:upper:]' '[:lower:]') -file_doc=$(create-file.sh \ --f "$AGENT_CERT_FILE" \ --p "$AGENT_CERT_PWD" \ --b "$END_USER_BASE_URL" \ ---title "Random file" \ ---file "$filename" \ ---file-content-type "${file_content_type}") - -file_doc_ntriples=$(get.sh \ +# Create an item document to hold the file +file_doc=$(create-item.sh \ -f "$AGENT_CERT_FILE" \ -p "$AGENT_CERT_PWD" \ - --accept 'application/n-triples' \ - "$file_doc") + -b "$END_USER_BASE_URL" \ + --title "Random file" \ + --container "$END_USER_BASE_URL" \ + --slug "$slug") -file=$(echo "$file_doc_ntriples" | sed -rn "s/<${file_doc//\//\\/}> <(.*)> \./\1/p") +# Add the file 
to the document +add-file.sh \ + -f "$AGENT_CERT_FILE" \ + -p "$AGENT_CERT_PWD" \ + -b "$END_USER_BASE_URL" \ + --title "Random file" \ + --file "$filename" \ + --content-type "${file_content_type}" \ + "$file_doc" + +# Calculate file URI from SHA1 hash +sha1sum=$(shasum -a 1 "$filename" | awk '{print $1}') +file="${END_USER_BASE_URL}uploads/${sha1sum}" from=100 length=42 diff --git a/http-tests/imports/GET-file-sha1sum.sh b/http-tests/imports/GET-file-sha1sum.sh index 3384ffc4a..5b62d6bbc 100755 --- a/http-tests/imports/GET-file-sha1sum.sh +++ b/http-tests/imports/GET-file-sha1sum.sh @@ -23,21 +23,36 @@ filename="/tmp/random-file" time dd if=/dev/urandom of="$filename" bs=1 count=1024 file_content_type="application/octet-stream" -file_doc=$(create-file.sh \ --f "$AGENT_CERT_FILE" \ --p "$AGENT_CERT_PWD" \ --b "$END_USER_BASE_URL" \ ---title "Random file" \ ---file "$filename" \ ---file-content-type "${file_content_type}") - -file_doc_ntriples=$(get.sh \ +# Create a container for files first +create-container.sh \ -f "$AGENT_CERT_FILE" \ -p "$AGENT_CERT_PWD" \ - --accept 'application/n-triples' \ - "$file_doc") + -b "$END_USER_BASE_URL" \ + --title "Files" \ + --parent "$END_USER_BASE_URL" \ + --slug "files" -file=$(echo "$file_doc_ntriples" | sed -rn "s/<${file_doc//\//\\/}> <(.*)> \./\1/p") +# Create an item document to hold the file +file_doc=$(create-item.sh \ + -f "$AGENT_CERT_FILE" \ + -p "$AGENT_CERT_PWD" \ + -b "$END_USER_BASE_URL" \ + --title "Random file" \ + --container "${END_USER_BASE_URL}files/") + +# Add the file to the document +add-file.sh \ + -f "$AGENT_CERT_FILE" \ + -p "$AGENT_CERT_PWD" \ + -b "$END_USER_BASE_URL" \ + --title "Random file" \ + --file "$filename" \ + --content-type "${file_content_type}" \ + "$file_doc" + +# Calculate file URI from SHA1 hash +sha1sum=$(shasum -a 1 "$filename" | awk '{print $1}') +file="${END_USER_BASE_URL}uploads/${sha1sum}" server_sha1sum=$(echo "$file" | cut -d "/" -f 5) # cut the last URL path segment diff --git a/http-tests/imports/PUT-file-format-explicit.sh b/http-tests/imports/PUT-file-format-explicit.sh new file mode 100755 index 000000000..d480fcb4a --- /dev/null +++ b/http-tests/imports/PUT-file-format-explicit.sh @@ -0,0 +1,111 @@ +#!/usr/bin/env bash +set -euo pipefail + +initialize_dataset "$END_USER_BASE_URL" "$TMP_END_USER_DATASET" "$END_USER_ENDPOINT_URL" +initialize_dataset "$ADMIN_BASE_URL" "$TMP_ADMIN_DATASET" "$ADMIN_ENDPOINT_URL" +purge_cache "$END_USER_VARNISH_SERVICE" +purge_cache "$ADMIN_VARNISH_SERVICE" +purge_cache "$FRONTEND_VARNISH_SERVICE" + +pwd=$(realpath "$PWD") + +# add agent to the writers group + +add-agent-to-group.sh \ + -f "$OWNER_CERT_FILE" \ + -p "$OWNER_CERT_PWD" \ + --agent "$AGENT_URI" \ + "${ADMIN_BASE_URL}acl/groups/writers/" + +# create test file with sample content + +test_file=$(mktemp) +echo "test,data,sample" > "$test_file" +echo "1,2,3" >> "$test_file" +echo "4,5,6" >> "$test_file" + +slug=$(uuidgen | tr '[:upper:]' '[:lower:]') + +# Create an item document to hold the file +file_doc=$(create-item.sh \ + -f "$AGENT_CERT_FILE" \ + -p "$AGENT_CERT_PWD" \ + -b "$END_USER_BASE_URL" \ + --title "Test File for Media Type Update" \ + --container "$END_USER_BASE_URL" \ + --slug "$slug") + +# upload file with explicit media type: text/plain +add-file.sh \ + -f "$AGENT_CERT_FILE" \ + -p "$AGENT_CERT_PWD" \ + -b "$END_USER_BASE_URL" \ + --title "Test File for Media Type Update" \ + --file "$test_file" \ + --content-type "text/plain" \ + "$file_doc" + +# Calculate file URI from SHA1 hash 
+sha1sum=$(shasum -a 1 "$test_file" | awk '{print $1}') +file_uri="${END_USER_BASE_URL}uploads/${sha1sum}" + +# get the file resource URI and initial dct:format + +file_doc_ntriples=$(get.sh \ + -f "$AGENT_CERT_FILE" \ + -p "$AGENT_CERT_PWD" \ + --accept 'application/n-triples' \ + "$file_doc") + +# get initial SHA1 hash +initial_sha1=$(echo "$file_doc_ntriples" | sed -rn "s/<${file_uri//\//\\/}> \"(.*)\" \./\1/p") + +# get initial dct:format +initial_format=$(echo "$file_doc_ntriples" | sed -rn "s/<${file_uri//\//\\/}> <(.*)> \./\1/p") + +# verify initial format is text/plain +if [[ ! "$initial_format" =~ text/plain ]]; then + echo "ERROR: Initial format should contain text/plain but got: $initial_format" + exit 1 +fi + +# re-upload the same file but different explicit media type: text/csv +# this simulates editing the file document through the UI and uploading a new file + +add-file.sh \ + -f "$AGENT_CERT_FILE" \ + -p "$AGENT_CERT_PWD" \ + -b "$END_USER_BASE_URL" \ + --title "Test File for Media Type Update" \ + --file "$test_file" \ + --content-type "text/csv" \ + "$file_doc" + +# get updated document + +updated_ntriples=$(get.sh \ + -f "$AGENT_CERT_FILE" \ + -p "$AGENT_CERT_PWD" \ + --accept 'application/n-triples' \ + "$file_doc") + +# get updated SHA1 hash (should be same as initial) +updated_sha1=$(echo "$updated_ntriples" | sed -rn "s/<${file_uri//\//\\/}> \"(.*)\" \./\1/p") + +# get updated dct:format (should be text/csv) +updated_format=$(echo "$updated_ntriples" | sed -rn "s/<${file_uri//\//\\/}> <(.*)> \./\1/p") + +# verify SHA1 is unchanged (same file content) +if [ "$initial_sha1" != "$updated_sha1" ]; then + echo "ERROR: SHA1 hash changed! Initial: $initial_sha1, Updated: $updated_sha1" + exit 1 +fi + +# verify dct:format was updated to text/csv +if [[ ! 
"$updated_format" =~ text/csv ]]; then + echo "ERROR: Format should have been updated to text/csv but got: $updated_format" + exit 1 +fi + +# cleanup +rm -f "$test_file" diff --git a/http-tests/imports/PUT-file-format.sh b/http-tests/imports/PUT-file-format.sh new file mode 100755 index 000000000..f066be396 --- /dev/null +++ b/http-tests/imports/PUT-file-format.sh @@ -0,0 +1,106 @@ +#!/usr/bin/env bash +set -euo pipefail + +initialize_dataset "$END_USER_BASE_URL" "$TMP_END_USER_DATASET" "$END_USER_ENDPOINT_URL" +initialize_dataset "$ADMIN_BASE_URL" "$TMP_ADMIN_DATASET" "$ADMIN_ENDPOINT_URL" +purge_cache "$END_USER_VARNISH_SERVICE" +purge_cache "$ADMIN_VARNISH_SERVICE" +purge_cache "$FRONTEND_VARNISH_SERVICE" + +pwd=$(realpath "$PWD") + +# add agent to the writers group + +add-agent-to-group.sh \ + -f "$OWNER_CERT_FILE" \ + -p "$OWNER_CERT_PWD" \ + --agent "$AGENT_URI" \ + "${ADMIN_BASE_URL}acl/groups/writers/" + +# create test file with sample content + +test_file=$(mktemp) +echo "test,data,sample" > "$test_file" +echo "1,2,3" >> "$test_file" +echo "4,5,6" >> "$test_file" + +slug=$(uuidgen | tr '[:upper:]' '[:lower:]') + +# Create an item document to hold the file +file_doc=$(create-item.sh \ + -f "$AGENT_CERT_FILE" \ + -p "$AGENT_CERT_PWD" \ + -b "$END_USER_BASE_URL" \ + --title "Test File for Browser Media Type" \ + --container "$END_USER_BASE_URL" \ + --slug "$slug") + +# upload file WITHOUT explicit media type (rely on browser detection via `file -b --mime-type`) +add-file.sh \ + -f "$AGENT_CERT_FILE" \ + -p "$AGENT_CERT_PWD" \ + -b "$END_USER_BASE_URL" \ + --title "Test File for Browser Media Type" \ + --file "$test_file" \ + "$file_doc" + +# Calculate file URI from SHA1 hash +sha1sum=$(shasum -a 1 "$test_file" | awk '{print $1}') +file_uri="${END_USER_BASE_URL}uploads/${sha1sum}" + +# get the file resource URI and initial dct:format + +file_doc_ntriples=$(get.sh \ + -f "$AGENT_CERT_FILE" \ + -p "$AGENT_CERT_PWD" \ + --accept 'application/n-triples' \ + "$file_doc") + +# get initial SHA1 hash +initial_sha1=$(echo "$file_doc_ntriples" | sed -rn "s/<${file_uri//\//\\/}> \"(.*)\" \./\1/p") + +# get initial dct:format (should be browser-detected) +initial_format=$(echo "$file_doc_ntriples" | sed -rn "s/<${file_uri//\//\\/}> <(.*)> \./\1/p") + +# re-upload the same file but WITH explicit media type: text/csv +# this simulates editing and uploading with a corrected format after browser auto-detection was wrong + +add-file.sh \ + -f "$AGENT_CERT_FILE" \ + -p "$AGENT_CERT_PWD" \ + -b "$END_USER_BASE_URL" \ + --title "Test File for Browser Media Type" \ + --file "$test_file" \ + --content-type "text/csv" \ + "$file_doc" \ + > /dev/null + +# get updated document + +updated_ntriples=$(get.sh \ + -f "$AGENT_CERT_FILE" \ + -p "$AGENT_CERT_PWD" \ + --accept 'application/n-triples' \ + "$file_doc") + +# get updated SHA1 hash (should be same as initial) +updated_sha1=$(echo "$updated_ntriples" | sed -rn "s/<${file_uri//\//\\/}> \"(.*)\" \./\1/p") + +# get updated dct:format (should be text/csv) +updated_format=$(echo "$updated_ntriples" | sed -rn "s/<${file_uri//\//\\/}> <(.*)> \./\1/p") + +# verify SHA1 is unchanged (same file content) +if [ "$initial_sha1" != "$updated_sha1" ]; then + echo "ERROR: SHA1 hash changed! Initial: $initial_sha1, Updated: $updated_sha1" + exit 1 +fi + +# verify dct:format was updated to text/csv +if [[ ! 
"$updated_format" =~ text/csv ]]; then + echo "ERROR: Format should have been updated to text/csv but got: $updated_format" + echo "Initial format was: $initial_format" + exit 1 +fi + +# cleanup +rm -f "$test_file" diff --git a/http-tests/imports/create-file.sh b/http-tests/imports/create-file.sh index d7e5c462c..e5d5c5541 100755 --- a/http-tests/imports/create-file.sh +++ b/http-tests/imports/create-file.sh @@ -20,24 +20,30 @@ add-agent-to-group.sh \ # create file file_content_type="text/csv" +slug=$(uuidgen | tr '[:upper:]' '[:lower:]') -file_doc=$(create-file.sh \ --f "$AGENT_CERT_FILE" \ --p "$AGENT_CERT_PWD" \ --b "$END_USER_BASE_URL" \ ---title "Test CSV" \ ---file "$pwd/test.csv" \ ---file-content-type "${file_content_type}") - -file_doc_ntriples=$(get.sh \ +# Create an item document to hold the file +file_doc=$(create-item.sh \ -f "$AGENT_CERT_FILE" \ -p "$AGENT_CERT_PWD" \ - --accept 'application/n-triples' \ - "$file_doc") - -# echo "FILE NTRIPLES: $file_doc_ntriples" + -b "$END_USER_BASE_URL" \ + --title "Test CSV" \ + --container "$END_USER_BASE_URL" \ + --slug "$slug") -file=$(echo "$file_doc_ntriples" | sed -rn "s/<${file_doc//\//\\/}> <(.*)> \./\1/p") +# Add the file to the document +add-file.sh \ + -f "$AGENT_CERT_FILE" \ + -p "$AGENT_CERT_PWD" \ + -b "$END_USER_BASE_URL" \ + --title "Test CSV" \ + --file "$pwd/test.csv" \ + --content-type "${file_content_type}" \ + "$file_doc" + +# Calculate file URI from SHA1 hash +sha1sum=$(shasum -a 1 "$pwd/test.csv" | awk '{print $1}') +file="${END_USER_BASE_URL}uploads/${sha1sum}" echo "$file" # file URL used in other tests diff --git a/http-tests/imports/import-csv.sh b/http-tests/imports/import-csv.sh index 89d8458b9..85835aaaa 100755 --- a/http-tests/imports/import-csv.sh +++ b/http-tests/imports/import-csv.sh @@ -17,7 +17,16 @@ add-agent-to-group.sh \ --agent "$AGENT_URI" \ "${ADMIN_BASE_URL}acl/groups/writers/" -# create container +# create import item + +item=$(create-item.sh \ + -f "$AGENT_CERT_FILE" \ + -p "$AGENT_CERT_PWD" \ + -b "$END_USER_BASE_URL" \ + --title "RDF import" \ + --container "$END_USER_BASE_URL") + +# create target container container=$(create-container.sh \ -f "$AGENT_CERT_FILE" \ @@ -35,7 +44,8 @@ import-csv.sh \ -b "$END_USER_BASE_URL" \ --title "Test" \ --query-file "$pwd/csv-test.rq" \ - --file "$pwd/test.csv" + --csv-file "$pwd/test.csv" \ + "$item" csv_id="test-item" csv_value="42" diff --git a/http-tests/imports/import-rdf-no-query.sh b/http-tests/imports/import-rdf-no-query.sh index d33158689..1b63a5bd1 100755 --- a/http-tests/imports/import-rdf-no-query.sh +++ b/http-tests/imports/import-rdf-no-query.sh @@ -17,9 +17,18 @@ add-agent-to-group.sh \ --agent "$AGENT_URI" \ "${ADMIN_BASE_URL}acl/groups/writers/" -# create item +# create import item item=$(create-item.sh \ + -f "$AGENT_CERT_FILE" \ + -p "$AGENT_CERT_PWD" \ + -b "$END_USER_BASE_URL" \ + --title "RDF import" \ + --container "$END_USER_BASE_URL") + +# create target item + +graph=$(create-item.sh \ -f "$AGENT_CERT_FILE" \ -p "$AGENT_CERT_PWD" \ -b "$END_USER_BASE_URL" \ @@ -34,9 +43,10 @@ import-rdf.sh \ -p "$AGENT_CERT_PWD" \ -b "$END_USER_BASE_URL" \ --title "Test" \ - --file "$pwd/test.ttl" \ - --file-content-type "text/turtle" \ - --graph "$item" + --rdf-file "$pwd/test.ttl" \ + --content-type "text/turtle" \ + --graph "$graph" \ + "$item" # wait until the imported data appears (since import is executed asynchronously) @@ -51,7 +61,7 @@ do test_triples=$(curl -G -k -f -s -N \ -E "$AGENT_CERT_FILE":"$AGENT_CERT_PWD" \ -H "Accept: 
application/n-triples" \ - "$item" \ + "$graph" \ | grep " " || [[ $? == 1 ]]) sleep 1 ; diff --git a/http-tests/imports/import-rdf.sh b/http-tests/imports/import-rdf.sh index 2e4e75acd..20ed50376 100755 --- a/http-tests/imports/import-rdf.sh +++ b/http-tests/imports/import-rdf.sh @@ -17,7 +17,16 @@ add-agent-to-group.sh \ --agent "$AGENT_URI" \ "${ADMIN_BASE_URL}acl/groups/writers/" -# create container +# create import item + +item=$(create-item.sh \ + -f "$AGENT_CERT_FILE" \ + -p "$AGENT_CERT_PWD" \ + -b "$END_USER_BASE_URL" \ + --title "RDF import" \ + --container "$END_USER_BASE_URL") + +# create target container container=$(create-container.sh \ -f "$AGENT_CERT_FILE" \ @@ -35,8 +44,9 @@ import-rdf.sh \ -b "$END_USER_BASE_URL" \ --title "Test" \ --query-file "$pwd/rdf-test.rq" \ - --file "$pwd/test.ttl" \ - --file-content-type "text/turtle" + --rdf-file "$pwd/test.ttl" \ + --content-type "text/turtle" \ + "$item" rdf_id="concept7367" rdf_value="http://vocabularies.unesco.org/thesaurus/concept7367" diff --git a/http-tests/proxy/GET-proxied-403.sh b/http-tests/proxy/GET-proxied-404.sh similarity index 61% rename from http-tests/proxy/GET-proxied-403.sh rename to http-tests/proxy/GET-proxied-404.sh index 04eadb652..ef695402f 100755 --- a/http-tests/proxy/GET-proxied-403.sh +++ b/http-tests/proxy/GET-proxied-404.sh @@ -17,20 +17,14 @@ add-agent-to-group.sh \ # Test that status codes are correctly proxied through # Generate a random UUID for a non-existing resource -random_uuid=$(cat /proc/sys/kernel/random/uuid 2>/dev/null || uuidgen) -non_existing_uri="${END_USER_BASE_URL}${random_uuid}/" +uuid=$(cat /proc/sys/kernel/random/uuid 2>/dev/null || uuidgen) +non_existing_uri="${END_USER_BASE_URL}${uuid}/" # Attempt to proxy a non-existing document on the END_USER_BASE_URL -# This should return 403 Forbidden (not found resources return 403 in LinkedDataHub) -http_status=$(curl -k -s -o /dev/null -w "%{http_code}" \ +curl -k -s -o /dev/null -w "%{http_code}" \ -G \ -E "$AGENT_CERT_FILE":"$AGENT_CERT_PWD" \ -H 'Accept: application/n-triples' \ --data-urlencode "uri=${non_existing_uri}" \ - "$END_USER_BASE_URL" || true) - -# Verify that the proxied status code matches the backend status code (403) -if [ "$http_status" != "403" ]; then - echo "Expected HTTP 403 Forbidden for non-existing proxied document, got: $http_status" - exit 1 -fi + "$END_USER_BASE_URL" \ +| grep -q "$STATUS_NOT_FOUND" diff --git a/http-tests/run.sh b/http-tests/run.sh index 49e8ca193..796b70328 100755 --- a/http-tests/run.sh +++ b/http-tests/run.sh @@ -138,24 +138,24 @@ download_dataset "$ADMIN_ENDPOINT_URL" > "$TMP_ADMIN_DATASET" ### Other tests ### -run_tests $(find ./add/ -type f -name '*.sh') -(( error_count += $? )) -run_tests $(find ./admin/ -type f -name '*.sh') -(( error_count += $? )) -run_tests $(find ./dataspaces/ -type f -name '*.sh') -(( error_count += $? )) -run_tests $(find ./access/ -type f -name '*.sh') -(( error_count += $? )) -run_tests $(find ./imports/ -type f -name '*.sh') -(( error_count += $? )) -run_tests $(find ./document-hierarchy/ -type f -name '*.sh') -(( error_count += $? )) -run_tests $(find ./misc/ -type f -name '*.sh') -(( error_count += $? )) -run_tests $(find ./proxy/ -type f -name '*.sh') -(( error_count += $? )) -run_tests $(find ./sparql-protocol/ -type f -name '*.sh') +# run_tests $(find ./add/ -type f -name '*.sh') +# (( error_count += $? )) +# run_tests $(find ./admin/ -type f -name '*.sh') +# (( error_count += $? 
)) +# run_tests $(find ./dataspaces/ -type f -name '*.sh') +# (( error_count += $? )) +# run_tests $(find ./access/ -type f -name '*.sh') +# (( error_count += $? )) +# run_tests $(find ./imports/ -type f -name '*.sh') +# (( error_count += $? )) +# run_tests $(find ./document-hierarchy/ -type f -name '*.sh') +# (( error_count += $? )) +run_tests $(find ./misc/ -type f -name 'PATCH-settings.sh') (( error_count += $? )) +# run_tests $(find ./proxy/ -type f -name '*.sh') +# (( error_count += $? )) +# run_tests $(find ./sparql-protocol/ -type f -name '*.sh') +# (( error_count += $? )) end_time=$(date +%s) runtime=$((end_time-start_time)) diff --git a/http-tests/system/admin/POST-clear-401.sh b/http-tests/system/admin/POST-clear-401.sh new file mode 100755 index 000000000..ddf7ada3a --- /dev/null +++ b/http-tests/system/admin/POST-clear-401.sh @@ -0,0 +1,18 @@ +#!/usr/bin/env bash +set -euo pipefail + +initialize_dataset "$END_USER_BASE_URL" "$TMP_END_USER_DATASET" "$END_USER_ENDPOINT_URL" +initialize_dataset "$ADMIN_BASE_URL" "$TMP_ADMIN_DATASET" "$ADMIN_ENDPOINT_URL" +purge_cache "$END_USER_VARNISH_SERVICE" +purge_cache "$ADMIN_VARNISH_SERVICE" +purge_cache "$FRONTEND_VARNISH_SERVICE" + +# POST /clear without a certificate should return 401 +# Only owners have access to /clear via full-control authorization in admin.trig + +curl -k -w "%{http_code}\n" -o /dev/null -s \ + -X POST \ + -H "Content-Type: application/x-www-form-urlencoded" \ + --data-urlencode "uri=${END_USER_BASE_URL}ns#" \ + "${ADMIN_BASE_URL}clear" \ +| grep -q "$STATUS_UNAUTHORIZED" diff --git a/http-tests/system/admin/POST-clear-403.sh b/http-tests/system/admin/POST-clear-403.sh new file mode 100755 index 000000000..62bcd2dc7 --- /dev/null +++ b/http-tests/system/admin/POST-clear-403.sh @@ -0,0 +1,25 @@ +#!/usr/bin/env bash +set -euo pipefail + +initialize_dataset "$END_USER_BASE_URL" "$TMP_END_USER_DATASET" "$END_USER_ENDPOINT_URL" +initialize_dataset "$ADMIN_BASE_URL" "$TMP_ADMIN_DATASET" "$ADMIN_ENDPOINT_URL" +purge_cache "$END_USER_VARNISH_SERVICE" +purge_cache "$ADMIN_VARNISH_SERVICE" +purge_cache "$FRONTEND_VARNISH_SERVICE" + +# POST /clear with a writer (not owner) should return 403 +# /clear is only in the full-control authorization which is restricted to owners + +add-agent-to-group.sh \ + -f "$OWNER_CERT_FILE" \ + -p "$OWNER_CERT_PWD" \ + --agent "$AGENT_URI" \ + "${ADMIN_BASE_URL}acl/groups/writers/" + +curl -k -w "%{http_code}\n" -o /dev/null -s \ + -E "$AGENT_CERT_FILE":"$AGENT_CERT_PWD" \ + -X POST \ + -H "Content-Type: application/x-www-form-urlencoded" \ + --data-urlencode "uri=${END_USER_BASE_URL}ns#" \ + "${ADMIN_BASE_URL}clear" \ +| grep -q "$STATUS_FORBIDDEN" diff --git a/http-tests/system/admin/POST-clear-readers-403.sh b/http-tests/system/admin/POST-clear-readers-403.sh new file mode 100755 index 000000000..ea10da0a2 --- /dev/null +++ b/http-tests/system/admin/POST-clear-readers-403.sh @@ -0,0 +1,25 @@ +#!/usr/bin/env bash +set -euo pipefail + +initialize_dataset "$END_USER_BASE_URL" "$TMP_END_USER_DATASET" "$END_USER_ENDPOINT_URL" +initialize_dataset "$ADMIN_BASE_URL" "$TMP_ADMIN_DATASET" "$ADMIN_ENDPOINT_URL" +purge_cache "$END_USER_VARNISH_SERVICE" +purge_cache "$ADMIN_VARNISH_SERVICE" +purge_cache "$FRONTEND_VARNISH_SERVICE" + +# POST /clear with a reader should return 403 +# /clear is only in the full-control authorization which is restricted to owners + +add-agent-to-group.sh \ + -f "$OWNER_CERT_FILE" \ + -p "$OWNER_CERT_PWD" \ + --agent "$AGENT_URI" \ + "${ADMIN_BASE_URL}acl/groups/readers/" + 
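+# optional sanity check (a sketch only, using the owner's credentials on the
+# assumption that the readers group document is owner-readable and mentions the
+# agent's WebID): confirm the membership write is visible before asserting 403,
+# so the 403 below cannot stem from the agent not being in any group at all
+get.sh \
+    -f "$OWNER_CERT_FILE" \
+    -p "$OWNER_CERT_PWD" \
+    --accept 'application/n-triples' \
+    "${ADMIN_BASE_URL}acl/groups/readers/" \
+| grep -q "$AGENT_URI"
+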
+curl -k -w "%{http_code}\n" -o /dev/null -s \ + -E "$AGENT_CERT_FILE":"$AGENT_CERT_PWD" \ + -X POST \ + -H "Content-Type: application/x-www-form-urlencoded" \ + --data-urlencode "uri=${END_USER_BASE_URL}ns#" \ + "${ADMIN_BASE_URL}clear" \ +| grep -q "$STATUS_FORBIDDEN" diff --git a/http-tests/system/admin/POST-clear.sh b/http-tests/system/admin/POST-clear.sh new file mode 100755 index 000000000..4a09fdbb1 --- /dev/null +++ b/http-tests/system/admin/POST-clear.sh @@ -0,0 +1,19 @@ +#!/usr/bin/env bash +set -euo pipefail + +initialize_dataset "$END_USER_BASE_URL" "$TMP_END_USER_DATASET" "$END_USER_ENDPOINT_URL" +initialize_dataset "$ADMIN_BASE_URL" "$TMP_ADMIN_DATASET" "$ADMIN_ENDPOINT_URL" +purge_cache "$END_USER_VARNISH_SERVICE" +purge_cache "$ADMIN_VARNISH_SERVICE" +purge_cache "$FRONTEND_VARNISH_SERVICE" + +# POST /clear with owner should succeed +# Owner POSTs the end-user namespace ontology URI to clear it from memory (reload on next request) + +curl -k -w "%{http_code}\n" -o /dev/null -s \ + -E "$OWNER_CERT_FILE":"$OWNER_CERT_PWD" \ + -H "Accept: text/turtle" \ + -H "Content-Type: application/x-www-form-urlencoded" \ + --data-urlencode "uri=${END_USER_BASE_URL}ns#" \ + "${ADMIN_BASE_URL}clear" \ +| grep -qE "^($STATUS_OK|$STATUS_NO_CONTENT)$" diff --git a/http-tests/system/admin/POST-packages-install-401.sh b/http-tests/system/admin/POST-packages-install-401.sh new file mode 100755 index 000000000..8720c139f --- /dev/null +++ b/http-tests/system/admin/POST-packages-install-401.sh @@ -0,0 +1,18 @@ +#!/usr/bin/env bash +set -euo pipefail + +initialize_dataset "$END_USER_BASE_URL" "$TMP_END_USER_DATASET" "$END_USER_ENDPOINT_URL" +initialize_dataset "$ADMIN_BASE_URL" "$TMP_ADMIN_DATASET" "$ADMIN_ENDPOINT_URL" +purge_cache "$END_USER_VARNISH_SERVICE" +purge_cache "$ADMIN_VARNISH_SERVICE" +purge_cache "$FRONTEND_VARNISH_SERVICE" + +# POST /packages/install without a certificate should return 401 +# Only owners have access to /packages/install via full-control authorization in admin.trig + +curl -k -w "%{http_code}\n" -o /dev/null -s \ + -X POST \ + -H "Content-Type: application/x-www-form-urlencoded" \ + --data-urlencode "package-uri=https://packages.linkeddatahub.com/skos/#this" \ + "${ADMIN_BASE_URL}packages/install" \ +| grep -q "$STATUS_UNAUTHORIZED" diff --git a/http-tests/system/admin/POST-packages-install-403.sh b/http-tests/system/admin/POST-packages-install-403.sh new file mode 100755 index 000000000..f1a3eeee2 --- /dev/null +++ b/http-tests/system/admin/POST-packages-install-403.sh @@ -0,0 +1,25 @@ +#!/usr/bin/env bash +set -euo pipefail + +initialize_dataset "$END_USER_BASE_URL" "$TMP_END_USER_DATASET" "$END_USER_ENDPOINT_URL" +initialize_dataset "$ADMIN_BASE_URL" "$TMP_ADMIN_DATASET" "$ADMIN_ENDPOINT_URL" +purge_cache "$END_USER_VARNISH_SERVICE" +purge_cache "$ADMIN_VARNISH_SERVICE" +purge_cache "$FRONTEND_VARNISH_SERVICE" + +# POST /packages/install with a writer (not owner) should return 403 +# /packages/install is only in the full-control authorization which is restricted to owners + +add-agent-to-group.sh \ + -f "$OWNER_CERT_FILE" \ + -p "$OWNER_CERT_PWD" \ + --agent "$AGENT_URI" \ + "${ADMIN_BASE_URL}acl/groups/writers/" + +curl -k -w "%{http_code}\n" -o /dev/null -s \ + -E "$AGENT_CERT_FILE":"$AGENT_CERT_PWD" \ + -X POST \ + -H "Content-Type: application/x-www-form-urlencoded" \ + --data-urlencode "package-uri=https://packages.linkeddatahub.com/skos/#this" \ + "${ADMIN_BASE_URL}packages/install" \ +| grep -q "$STATUS_FORBIDDEN" diff --git 
a/http-tests/system/admin/POST-packages-install-readers-403.sh b/http-tests/system/admin/POST-packages-install-readers-403.sh new file mode 100755 index 000000000..5d19c435c --- /dev/null +++ b/http-tests/system/admin/POST-packages-install-readers-403.sh @@ -0,0 +1,25 @@ +#!/usr/bin/env bash +set -euo pipefail + +initialize_dataset "$END_USER_BASE_URL" "$TMP_END_USER_DATASET" "$END_USER_ENDPOINT_URL" +initialize_dataset "$ADMIN_BASE_URL" "$TMP_ADMIN_DATASET" "$ADMIN_ENDPOINT_URL" +purge_cache "$END_USER_VARNISH_SERVICE" +purge_cache "$ADMIN_VARNISH_SERVICE" +purge_cache "$FRONTEND_VARNISH_SERVICE" + +# POST /packages/install with a reader should return 403 +# /packages/install is only in the full-control authorization which is restricted to owners + +add-agent-to-group.sh \ + -f "$OWNER_CERT_FILE" \ + -p "$OWNER_CERT_PWD" \ + --agent "$AGENT_URI" \ + "${ADMIN_BASE_URL}acl/groups/readers/" + +curl -k -w "%{http_code}\n" -o /dev/null -s \ + -E "$AGENT_CERT_FILE":"$AGENT_CERT_PWD" \ + -X POST \ + -H "Content-Type: application/x-www-form-urlencoded" \ + --data-urlencode "package-uri=https://packages.linkeddatahub.com/skos/#this" \ + "${ADMIN_BASE_URL}packages/install" \ +| grep -q "$STATUS_FORBIDDEN" diff --git a/http-tests/system/admin/POST-packages-uninstall-401.sh b/http-tests/system/admin/POST-packages-uninstall-401.sh new file mode 100755 index 000000000..5d12c86b0 --- /dev/null +++ b/http-tests/system/admin/POST-packages-uninstall-401.sh @@ -0,0 +1,18 @@ +#!/usr/bin/env bash +set -euo pipefail + +initialize_dataset "$END_USER_BASE_URL" "$TMP_END_USER_DATASET" "$END_USER_ENDPOINT_URL" +initialize_dataset "$ADMIN_BASE_URL" "$TMP_ADMIN_DATASET" "$ADMIN_ENDPOINT_URL" +purge_cache "$END_USER_VARNISH_SERVICE" +purge_cache "$ADMIN_VARNISH_SERVICE" +purge_cache "$FRONTEND_VARNISH_SERVICE" + +# POST /packages/uninstall without a certificate should return 401 +# Only owners have access to /packages/uninstall via full-control authorization in admin.trig + +curl -k -w "%{http_code}\n" -o /dev/null -s \ + -X POST \ + -H "Content-Type: application/x-www-form-urlencoded" \ + --data-urlencode "package-uri=https://packages.linkeddatahub.com/skos/#this" \ + "${ADMIN_BASE_URL}packages/uninstall" \ +| grep -q "$STATUS_UNAUTHORIZED" diff --git a/http-tests/system/admin/POST-packages-uninstall-403.sh b/http-tests/system/admin/POST-packages-uninstall-403.sh new file mode 100755 index 000000000..bb4911877 --- /dev/null +++ b/http-tests/system/admin/POST-packages-uninstall-403.sh @@ -0,0 +1,25 @@ +#!/usr/bin/env bash +set -euo pipefail + +initialize_dataset "$END_USER_BASE_URL" "$TMP_END_USER_DATASET" "$END_USER_ENDPOINT_URL" +initialize_dataset "$ADMIN_BASE_URL" "$TMP_ADMIN_DATASET" "$ADMIN_ENDPOINT_URL" +purge_cache "$END_USER_VARNISH_SERVICE" +purge_cache "$ADMIN_VARNISH_SERVICE" +purge_cache "$FRONTEND_VARNISH_SERVICE" + +# POST /packages/uninstall with a writer (not owner) should return 403 +# /packages/uninstall is only in the full-control authorization which is restricted to owners + +add-agent-to-group.sh \ + -f "$OWNER_CERT_FILE" \ + -p "$OWNER_CERT_PWD" \ + --agent "$AGENT_URI" \ + "${ADMIN_BASE_URL}acl/groups/writers/" + +curl -k -w "%{http_code}\n" -o /dev/null -s \ + -E "$AGENT_CERT_FILE":"$AGENT_CERT_PWD" \ + -X POST \ + -H "Content-Type: application/x-www-form-urlencoded" \ + --data-urlencode "package-uri=https://packages.linkeddatahub.com/skos/#this" \ + "${ADMIN_BASE_URL}packages/uninstall" \ +| grep -q "$STATUS_FORBIDDEN" diff --git 
a/http-tests/system/admin/POST-packages-uninstall-readers-403.sh b/http-tests/system/admin/POST-packages-uninstall-readers-403.sh new file mode 100755 index 000000000..a8a3933ee --- /dev/null +++ b/http-tests/system/admin/POST-packages-uninstall-readers-403.sh @@ -0,0 +1,25 @@ +#!/usr/bin/env bash +set -euo pipefail + +initialize_dataset "$END_USER_BASE_URL" "$TMP_END_USER_DATASET" "$END_USER_ENDPOINT_URL" +initialize_dataset "$ADMIN_BASE_URL" "$TMP_ADMIN_DATASET" "$ADMIN_ENDPOINT_URL" +purge_cache "$END_USER_VARNISH_SERVICE" +purge_cache "$ADMIN_VARNISH_SERVICE" +purge_cache "$FRONTEND_VARNISH_SERVICE" + +# POST /packages/uninstall with a reader should return 403 +# /packages/uninstall is only in the full-control authorization which is restricted to owners + +add-agent-to-group.sh \ + -f "$OWNER_CERT_FILE" \ + -p "$OWNER_CERT_PWD" \ + --agent "$AGENT_URI" \ + "${ADMIN_BASE_URL}acl/groups/readers/" + +curl -k -w "%{http_code}\n" -o /dev/null -s \ + -E "$AGENT_CERT_FILE":"$AGENT_CERT_PWD" \ + -X POST \ + -H "Content-Type: application/x-www-form-urlencoded" \ + --data-urlencode "package-uri=https://packages.linkeddatahub.com/skos/#this" \ + "${ADMIN_BASE_URL}packages/uninstall" \ +| grep -q "$STATUS_FORBIDDEN" diff --git a/http-tests/system/admin/POST-transform-401.sh b/http-tests/system/admin/POST-transform-401.sh new file mode 100755 index 000000000..998591f94 --- /dev/null +++ b/http-tests/system/admin/POST-transform-401.sh @@ -0,0 +1,18 @@ +#!/usr/bin/env bash +set -euo pipefail + +initialize_dataset "$END_USER_BASE_URL" "$TMP_END_USER_DATASET" "$END_USER_ENDPOINT_URL" +initialize_dataset "$ADMIN_BASE_URL" "$TMP_ADMIN_DATASET" "$ADMIN_ENDPOINT_URL" +purge_cache "$END_USER_VARNISH_SERVICE" +purge_cache "$ADMIN_VARNISH_SERVICE" +purge_cache "$FRONTEND_VARNISH_SERVICE" + +# POST /transform without a certificate should return 401 +# Only owners have access to /transform via full-control authorization in admin.trig + +curl -k -w "%{http_code}\n" -o /dev/null -s \ + -X POST \ + -H "Content-Type: application/x-www-form-urlencoded" \ + --data-urlencode "rdf=" \ + "${ADMIN_BASE_URL}transform" \ +| grep -q "$STATUS_UNAUTHORIZED" diff --git a/http-tests/system/admin/POST-transform-403.sh b/http-tests/system/admin/POST-transform-403.sh new file mode 100755 index 000000000..da7015cff --- /dev/null +++ b/http-tests/system/admin/POST-transform-403.sh @@ -0,0 +1,25 @@ +#!/usr/bin/env bash +set -euo pipefail + +initialize_dataset "$END_USER_BASE_URL" "$TMP_END_USER_DATASET" "$END_USER_ENDPOINT_URL" +initialize_dataset "$ADMIN_BASE_URL" "$TMP_ADMIN_DATASET" "$ADMIN_ENDPOINT_URL" +purge_cache "$END_USER_VARNISH_SERVICE" +purge_cache "$ADMIN_VARNISH_SERVICE" +purge_cache "$FRONTEND_VARNISH_SERVICE" + +# POST /transform with a writer (not owner) should return 403 +# /transform is only in the full-control authorization which is restricted to owners + +add-agent-to-group.sh \ + -f "$OWNER_CERT_FILE" \ + -p "$OWNER_CERT_PWD" \ + --agent "$AGENT_URI" \ + "${ADMIN_BASE_URL}acl/groups/writers/" + +curl -k -w "%{http_code}\n" -o /dev/null -s \ + -E "$AGENT_CERT_FILE":"$AGENT_CERT_PWD" \ + -X POST \ + -H "Content-Type: application/x-www-form-urlencoded" \ + --data-urlencode "rdf=" \ + "${ADMIN_BASE_URL}transform" \ +| grep -q "$STATUS_FORBIDDEN" diff --git a/http-tests/system/admin/POST-transform-readers-403.sh b/http-tests/system/admin/POST-transform-readers-403.sh new file mode 100755 index 000000000..0093116bc --- /dev/null +++ b/http-tests/system/admin/POST-transform-readers-403.sh @@ -0,0 +1,25 @@ 
+#!/usr/bin/env bash +set -euo pipefail + +initialize_dataset "$END_USER_BASE_URL" "$TMP_END_USER_DATASET" "$END_USER_ENDPOINT_URL" +initialize_dataset "$ADMIN_BASE_URL" "$TMP_ADMIN_DATASET" "$ADMIN_ENDPOINT_URL" +purge_cache "$END_USER_VARNISH_SERVICE" +purge_cache "$ADMIN_VARNISH_SERVICE" +purge_cache "$FRONTEND_VARNISH_SERVICE" + +# POST /transform with a reader should return 403 +# /transform is only in the full-control authorization which is restricted to owners + +add-agent-to-group.sh \ + -f "$OWNER_CERT_FILE" \ + -p "$OWNER_CERT_PWD" \ + --agent "$AGENT_URI" \ + "${ADMIN_BASE_URL}acl/groups/readers/" + +curl -k -w "%{http_code}\n" -o /dev/null -s \ + -E "$AGENT_CERT_FILE":"$AGENT_CERT_PWD" \ + -X POST \ + -H "Content-Type: application/x-www-form-urlencoded" \ + --data-urlencode "rdf=" \ + "${ADMIN_BASE_URL}transform" \ +| grep -q "$STATUS_FORBIDDEN" diff --git a/http-tests/system/end-user/GET-access.sh b/http-tests/system/end-user/GET-access.sh new file mode 100755 index 000000000..5fdb2eb29 --- /dev/null +++ b/http-tests/system/end-user/GET-access.sh @@ -0,0 +1,15 @@ +#!/usr/bin/env bash +set -euo pipefail + +initialize_dataset "$END_USER_BASE_URL" "$TMP_END_USER_DATASET" "$END_USER_ENDPOINT_URL" +initialize_dataset "$ADMIN_BASE_URL" "$TMP_ADMIN_DATASET" "$ADMIN_ENDPOINT_URL" +purge_cache "$END_USER_VARNISH_SERVICE" +purge_cache "$ADMIN_VARNISH_SERVICE" +purge_cache "$FRONTEND_VARNISH_SERVICE" + +# GET /access is publicly accessible (foaf:Agent has acl:Read via access authorization) + +curl -k -w "%{http_code}\n" -o /dev/null -s \ + -H "Accept: application/n-triples" \ + "${END_USER_BASE_URL}access" \ +| grep -q "$STATUS_OK" diff --git a/http-tests/system/end-user/GET-ns.sh b/http-tests/system/end-user/GET-ns.sh new file mode 100755 index 000000000..53be9df6c --- /dev/null +++ b/http-tests/system/end-user/GET-ns.sh @@ -0,0 +1,16 @@ +#!/usr/bin/env bash +set -euo pipefail + +initialize_dataset "$END_USER_BASE_URL" "$TMP_END_USER_DATASET" "$END_USER_ENDPOINT_URL" +initialize_dataset "$ADMIN_BASE_URL" "$TMP_ADMIN_DATASET" "$ADMIN_ENDPOINT_URL" +purge_cache "$END_USER_VARNISH_SERVICE" +purge_cache "$ADMIN_VARNISH_SERVICE" +purge_cache "$FRONTEND_VARNISH_SERVICE" + +# GET /ns is publicly accessible (foaf:Agent has acl:Read via public-namespace authorization) + +curl -k -w "%{http_code}\n" -o /dev/null -s -G \ + -H "Accept: application/sparql-results+xml" \ + "${END_USER_BASE_URL}ns" \ + --data-urlencode "query=SELECT * { ?s ?p ?o } LIMIT 1" \ +| grep -q "$STATUS_OK" diff --git a/http-tests/system/end-user/GET-settings-401.sh b/http-tests/system/end-user/GET-settings-401.sh new file mode 100755 index 000000000..71032492f --- /dev/null +++ b/http-tests/system/end-user/GET-settings-401.sh @@ -0,0 +1,16 @@ +#!/usr/bin/env bash +set -euo pipefail + +initialize_dataset "$END_USER_BASE_URL" "$TMP_END_USER_DATASET" "$END_USER_ENDPOINT_URL" +initialize_dataset "$ADMIN_BASE_URL" "$TMP_ADMIN_DATASET" "$ADMIN_ENDPOINT_URL" +purge_cache "$END_USER_VARNISH_SERVICE" +purge_cache "$ADMIN_VARNISH_SERVICE" +purge_cache "$FRONTEND_VARNISH_SERVICE" + +# GET /settings without a certificate should return 401 +# Only owners have access to /settings via full-control authorization + +curl -k -w "%{http_code}\n" -o /dev/null -s \ + -H "Accept: application/n-triples" \ + "${END_USER_BASE_URL}settings" \ +| grep -q "$STATUS_UNAUTHORIZED" diff --git a/http-tests/document-hierarchy/DELETE-no-parent-403.sh b/http-tests/system/end-user/GET-settings-403.sh similarity index 68% rename from 
http-tests/document-hierarchy/DELETE-no-parent-403.sh rename to http-tests/system/end-user/GET-settings-403.sh index cf10ffd5b..90ca5ce5c 100755 --- a/http-tests/document-hierarchy/DELETE-no-parent-403.sh +++ b/http-tests/system/end-user/GET-settings-403.sh @@ -7,7 +7,8 @@ purge_cache "$END_USER_VARNISH_SERVICE" purge_cache "$ADMIN_VARNISH_SERVICE" purge_cache "$FRONTEND_VARNISH_SERVICE" -# add agent to the writers +# GET /settings with a writer (not owner) should return 403 +# /settings is only in the full-control authorization which is restricted to owners add-agent-to-group.sh \ -f "$OWNER_CERT_FILE" \ @@ -15,11 +16,8 @@ add-agent-to-group.sh \ --agent "$AGENT_URI" \ "${ADMIN_BASE_URL}acl/groups/writers/" -# check that graph without parent is forbidden - -curl -k -w "%{http_code}\n" -o /dev/null -s -G \ +curl -k -w "%{http_code}\n" -o /dev/null -s \ -E "$AGENT_CERT_FILE":"$AGENT_CERT_PWD" \ - -X DELETE \ -H "Accept: application/n-triples" \ - "${END_USER_BASE_URL}parent/non-existing/" \ -| grep -q "$STATUS_FORBIDDEN" \ No newline at end of file + "${END_USER_BASE_URL}settings" \ +| grep -q "$STATUS_FORBIDDEN" diff --git a/http-tests/system/end-user/GET-settings-readers-403.sh b/http-tests/system/end-user/GET-settings-readers-403.sh new file mode 100755 index 000000000..0e59b7085 --- /dev/null +++ b/http-tests/system/end-user/GET-settings-readers-403.sh @@ -0,0 +1,23 @@ +#!/usr/bin/env bash +set -euo pipefail + +initialize_dataset "$END_USER_BASE_URL" "$TMP_END_USER_DATASET" "$END_USER_ENDPOINT_URL" +initialize_dataset "$ADMIN_BASE_URL" "$TMP_ADMIN_DATASET" "$ADMIN_ENDPOINT_URL" +purge_cache "$END_USER_VARNISH_SERVICE" +purge_cache "$ADMIN_VARNISH_SERVICE" +purge_cache "$FRONTEND_VARNISH_SERVICE" + +# GET /settings with a reader should return 403 +# /settings is only in the full-control authorization which is restricted to owners + +add-agent-to-group.sh \ + -f "$OWNER_CERT_FILE" \ + -p "$OWNER_CERT_PWD" \ + --agent "$AGENT_URI" \ + "${ADMIN_BASE_URL}acl/groups/readers/" + +curl -k -w "%{http_code}\n" -o /dev/null -s \ + -E "$AGENT_CERT_FILE":"$AGENT_CERT_PWD" \ + -H "Accept: application/n-triples" \ + "${END_USER_BASE_URL}settings" \ +| grep -q "$STATUS_FORBIDDEN" diff --git a/http-tests/system/end-user/GET-settings.sh b/http-tests/system/end-user/GET-settings.sh new file mode 100755 index 000000000..4030b3e26 --- /dev/null +++ b/http-tests/system/end-user/GET-settings.sh @@ -0,0 +1,17 @@ +#!/usr/bin/env bash +set -euo pipefail + +initialize_dataset "$END_USER_BASE_URL" "$TMP_END_USER_DATASET" "$END_USER_ENDPOINT_URL" +initialize_dataset "$ADMIN_BASE_URL" "$TMP_ADMIN_DATASET" "$ADMIN_ENDPOINT_URL" +purge_cache "$END_USER_VARNISH_SERVICE" +purge_cache "$ADMIN_VARNISH_SERVICE" +purge_cache "$FRONTEND_VARNISH_SERVICE" + +# GET /settings with the owner should return 200 +# /settings is accessible to owners via full-control authorization + +curl -k -w "%{http_code}\n" -o /dev/null -s \ + -E "$OWNER_CERT_FILE":"$OWNER_CERT_PWD" \ + -H "Accept: application/n-triples" \ + "${END_USER_BASE_URL}settings" \ +| grep -q "$STATUS_OK" diff --git a/http-tests/system/end-user/GET-sparql-401.sh b/http-tests/system/end-user/GET-sparql-401.sh new file mode 100755 index 000000000..c1d1d725d --- /dev/null +++ b/http-tests/system/end-user/GET-sparql-401.sh @@ -0,0 +1,18 @@ +#!/usr/bin/env bash +set -euo pipefail + +initialize_dataset "$END_USER_BASE_URL" "$TMP_END_USER_DATASET" "$END_USER_ENDPOINT_URL" +initialize_dataset "$ADMIN_BASE_URL" "$TMP_ADMIN_DATASET" "$ADMIN_ENDPOINT_URL" +purge_cache 
"$END_USER_VARNISH_SERVICE" +purge_cache "$ADMIN_VARNISH_SERVICE" +purge_cache "$FRONTEND_VARNISH_SERVICE" + +# GET /sparql without a certificate should return 401 +# Unlike /ns, the sparql-endpoint authorization uses acl:AuthenticatedAgent (not foaf:Agent), +# so unauthenticated access is not allowed + +curl -k -w "%{http_code}\n" -o /dev/null -s -G \ + -H "Accept: application/sparql-results+xml" \ + "${END_USER_BASE_URL}sparql" \ + --data-urlencode "query=SELECT * { ?s ?p ?o } LIMIT 1" \ +| grep -q "$STATUS_UNAUTHORIZED" diff --git a/http-tests/system/end-user/GET-sparql.sh b/http-tests/system/end-user/GET-sparql.sh new file mode 100755 index 000000000..30d9a120f --- /dev/null +++ b/http-tests/system/end-user/GET-sparql.sh @@ -0,0 +1,18 @@ +#!/usr/bin/env bash +set -euo pipefail + +initialize_dataset "$END_USER_BASE_URL" "$TMP_END_USER_DATASET" "$END_USER_ENDPOINT_URL" +initialize_dataset "$ADMIN_BASE_URL" "$TMP_ADMIN_DATASET" "$ADMIN_ENDPOINT_URL" +purge_cache "$END_USER_VARNISH_SERVICE" +purge_cache "$ADMIN_VARNISH_SERVICE" +purge_cache "$FRONTEND_VARNISH_SERVICE" + +# GET /sparql with a signed-up agent (no group) should return 200 +# The sparql-endpoint authorization grants acl:Read to acl:AuthenticatedAgent regardless of group membership + +curl -k -w "%{http_code}\n" -o /dev/null -s -G \ + -E "$AGENT_CERT_FILE":"$AGENT_CERT_PWD" \ + -H "Accept: application/sparql-results+xml" \ + "${END_USER_BASE_URL}sparql" \ + --data-urlencode "query=SELECT * { ?s ?p ?o } LIMIT 1" \ +| grep -q "$STATUS_OK" diff --git a/http-tests/system/end-user/PATCH-settings-401.sh b/http-tests/system/end-user/PATCH-settings-401.sh new file mode 100755 index 000000000..4843b682e --- /dev/null +++ b/http-tests/system/end-user/PATCH-settings-401.sh @@ -0,0 +1,21 @@ +#!/usr/bin/env bash +set -euo pipefail + +initialize_dataset "$END_USER_BASE_URL" "$TMP_END_USER_DATASET" "$END_USER_ENDPOINT_URL" +initialize_dataset "$ADMIN_BASE_URL" "$TMP_ADMIN_DATASET" "$ADMIN_ENDPOINT_URL" +purge_cache "$END_USER_VARNISH_SERVICE" +purge_cache "$ADMIN_VARNISH_SERVICE" +purge_cache "$FRONTEND_VARNISH_SERVICE" + +# PATCH /settings without a certificate should return 401 +# Only owners have acl:Write access to /settings + +curl -k -w "%{http_code}\n" -o /dev/null -s \ + -X PATCH \ + -H "Content-Type: application/sparql-update" \ + -d "PREFIX dct: +DELETE { ?app dct:title ?title } +INSERT { ?app dct:title \"Unauthorized\" } +WHERE { ?app dct:title ?title }" \ + "${END_USER_BASE_URL}settings" \ +| grep -q "$STATUS_UNAUTHORIZED" diff --git a/http-tests/system/end-user/PATCH-settings-403.sh b/http-tests/system/end-user/PATCH-settings-403.sh new file mode 100755 index 000000000..4288b8781 --- /dev/null +++ b/http-tests/system/end-user/PATCH-settings-403.sh @@ -0,0 +1,28 @@ +#!/usr/bin/env bash +set -euo pipefail + +initialize_dataset "$END_USER_BASE_URL" "$TMP_END_USER_DATASET" "$END_USER_ENDPOINT_URL" +initialize_dataset "$ADMIN_BASE_URL" "$TMP_ADMIN_DATASET" "$ADMIN_ENDPOINT_URL" +purge_cache "$END_USER_VARNISH_SERVICE" +purge_cache "$ADMIN_VARNISH_SERVICE" +purge_cache "$FRONTEND_VARNISH_SERVICE" + +# PATCH /settings with a writer (not owner) should return 403 +# /settings is only in the full-control authorization which is restricted to owners + +add-agent-to-group.sh \ + -f "$OWNER_CERT_FILE" \ + -p "$OWNER_CERT_PWD" \ + --agent "$AGENT_URI" \ + "${ADMIN_BASE_URL}acl/groups/writers/" + +curl -k -w "%{http_code}\n" -o /dev/null -s \ + -E "$AGENT_CERT_FILE":"$AGENT_CERT_PWD" \ + -X PATCH \ + -H "Content-Type: application/sparql-update" 
\ + -d "PREFIX dct: +DELETE { ?app dct:title ?title } +INSERT { ?app dct:title \"Unauthorized\" } +WHERE { ?app dct:title ?title }" \ + "${END_USER_BASE_URL}settings" \ +| grep -q "$STATUS_FORBIDDEN" diff --git a/http-tests/system/end-user/PATCH-settings-readers-403.sh b/http-tests/system/end-user/PATCH-settings-readers-403.sh new file mode 100755 index 000000000..cf49063d9 --- /dev/null +++ b/http-tests/system/end-user/PATCH-settings-readers-403.sh @@ -0,0 +1,28 @@ +#!/usr/bin/env bash +set -euo pipefail + +initialize_dataset "$END_USER_BASE_URL" "$TMP_END_USER_DATASET" "$END_USER_ENDPOINT_URL" +initialize_dataset "$ADMIN_BASE_URL" "$TMP_ADMIN_DATASET" "$ADMIN_ENDPOINT_URL" +purge_cache "$END_USER_VARNISH_SERVICE" +purge_cache "$ADMIN_VARNISH_SERVICE" +purge_cache "$FRONTEND_VARNISH_SERVICE" + +# PATCH /settings with a reader should return 403 +# /settings is only in the full-control authorization which is restricted to owners + +add-agent-to-group.sh \ + -f "$OWNER_CERT_FILE" \ + -p "$OWNER_CERT_PWD" \ + --agent "$AGENT_URI" \ + "${ADMIN_BASE_URL}acl/groups/readers/" + +curl -k -w "%{http_code}\n" -o /dev/null -s \ + -E "$AGENT_CERT_FILE":"$AGENT_CERT_PWD" \ + -X PATCH \ + -H "Content-Type: application/sparql-update" \ + -d "PREFIX dct: +DELETE { ?app dct:title ?title } +INSERT { ?app dct:title \"Unauthorized\" } +WHERE { ?app dct:title ?title }" \ + "${END_USER_BASE_URL}settings" \ +| grep -q "$STATUS_FORBIDDEN" diff --git a/http-tests/system/end-user/POST-access-request.sh b/http-tests/system/end-user/POST-access-request.sh new file mode 100755 index 000000000..f9e483054 --- /dev/null +++ b/http-tests/system/end-user/POST-access-request.sh @@ -0,0 +1,31 @@ +#!/usr/bin/env bash +set -euo pipefail + +initialize_dataset "$END_USER_BASE_URL" "$TMP_END_USER_DATASET" "$END_USER_ENDPOINT_URL" +initialize_dataset "$ADMIN_BASE_URL" "$TMP_ADMIN_DATASET" "$ADMIN_ENDPOINT_URL" +purge_cache "$END_USER_VARNISH_SERVICE" +purge_cache "$ADMIN_VARNISH_SERVICE" +purge_cache "$FRONTEND_VARNISH_SERVICE" + +# POST /access/request with a signed-up agent (no group) should succeed +# foaf:Agent and acl:AuthenticatedAgent both have acl:Append access to /access/request + +curl -k -w "%{http_code}\n" -o /dev/null -s \ + -E "$AGENT_CERT_FILE":"$AGENT_CERT_PWD" \ + -H "Accept: text/turtle" \ + -H "Content-Type: application/x-www-form-urlencoded" \ + --data-urlencode "rdf=" \ + --data-urlencode "sb=request" \ + --data-urlencode "pu=http://www.w3.org/1999/02/22-rdf-syntax-ns#type" \ + --data-urlencode "ou=http://www.w3.org/ns/auth/acl#Authorization" \ + --data-urlencode "pu=http://www.w3.org/ns/auth/acl#accessToClass" \ + --data-urlencode "ou=https://www.w3.org/ns/ldt/document-hierarchy#Container" \ + --data-urlencode "ou=https://www.w3.org/ns/ldt/document-hierarchy#Item" \ + --data-urlencode "pu=http://www.w3.org/ns/auth/acl#mode" \ + --data-urlencode "ou=http://www.w3.org/ns/auth/acl#Read" \ + --data-urlencode "pu=http://www.w3.org/2000/01/rdf-schema#label" \ + --data-urlencode "ol=Access request" \ + --data-urlencode "pu=http://www.w3.org/ns/auth/acl#agent" \ + --data-urlencode "ou=${AGENT_URI}" \ + "${END_USER_BASE_URL}access/request" \ +| grep -q "$STATUS_OK" diff --git a/http-tests/system/end-user/POST-add-401.sh b/http-tests/system/end-user/POST-add-401.sh new file mode 100755 index 000000000..bb927b19a --- /dev/null +++ b/http-tests/system/end-user/POST-add-401.sh @@ -0,0 +1,23 @@ +#!/usr/bin/env bash +set -euo pipefail + +initialize_dataset "$END_USER_BASE_URL" "$TMP_END_USER_DATASET" 
"$END_USER_ENDPOINT_URL" +initialize_dataset "$ADMIN_BASE_URL" "$TMP_ADMIN_DATASET" "$ADMIN_ENDPOINT_URL" +purge_cache "$END_USER_VARNISH_SERVICE" +purge_cache "$ADMIN_VARNISH_SERVICE" +purge_cache "$FRONTEND_VARNISH_SERVICE" + +# POST /add without a certificate should return 401 +# Only owners and writers have acl:Append access to /add via write-append authorization + +curl -k -w "%{http_code}\n" -o /dev/null -s \ + -X POST \ + -H "Content-Type: application/x-www-form-urlencoded" \ + --data-urlencode "rdf=" \ + --data-urlencode "sb=clone" \ + --data-urlencode "pu=http://purl.org/dc/terms/source" \ + --data-urlencode "ou=https://orcid.org/0000-0003-1750-9906" \ + --data-urlencode "pu=http://www.w3.org/ns/sparql-service-description#name" \ + --data-urlencode "ou=${END_USER_BASE_URL}" \ + "${END_USER_BASE_URL}add" \ +| grep -q "$STATUS_UNAUTHORIZED" diff --git a/http-tests/system/end-user/POST-add-403.sh b/http-tests/system/end-user/POST-add-403.sh new file mode 100755 index 000000000..68d43ea7e --- /dev/null +++ b/http-tests/system/end-user/POST-add-403.sh @@ -0,0 +1,24 @@ +#!/usr/bin/env bash +set -euo pipefail + +initialize_dataset "$END_USER_BASE_URL" "$TMP_END_USER_DATASET" "$END_USER_ENDPOINT_URL" +initialize_dataset "$ADMIN_BASE_URL" "$TMP_ADMIN_DATASET" "$ADMIN_ENDPOINT_URL" +purge_cache "$END_USER_VARNISH_SERVICE" +purge_cache "$ADMIN_VARNISH_SERVICE" +purge_cache "$FRONTEND_VARNISH_SERVICE" + +# POST /add with a signed-up agent not in any group should return 403 +# The write-append authorization grants acl:Append to owners and writers groups only + +curl -k -w "%{http_code}\n" -o /dev/null -s \ + -E "$AGENT_CERT_FILE":"$AGENT_CERT_PWD" \ + -X POST \ + -H "Content-Type: application/x-www-form-urlencoded" \ + --data-urlencode "rdf=" \ + --data-urlencode "sb=clone" \ + --data-urlencode "pu=http://purl.org/dc/terms/source" \ + --data-urlencode "ou=https://orcid.org/0000-0003-1750-9906" \ + --data-urlencode "pu=http://www.w3.org/ns/sparql-service-description#name" \ + --data-urlencode "ou=${END_USER_BASE_URL}" \ + "${END_USER_BASE_URL}add" \ +| grep -q "$STATUS_FORBIDDEN" diff --git a/http-tests/system/end-user/POST-add-readers-403.sh b/http-tests/system/end-user/POST-add-readers-403.sh new file mode 100755 index 000000000..d512b3ee4 --- /dev/null +++ b/http-tests/system/end-user/POST-add-readers-403.sh @@ -0,0 +1,31 @@ +#!/usr/bin/env bash +set -euo pipefail + +initialize_dataset "$END_USER_BASE_URL" "$TMP_END_USER_DATASET" "$END_USER_ENDPOINT_URL" +initialize_dataset "$ADMIN_BASE_URL" "$TMP_ADMIN_DATASET" "$ADMIN_ENDPOINT_URL" +purge_cache "$END_USER_VARNISH_SERVICE" +purge_cache "$ADMIN_VARNISH_SERVICE" +purge_cache "$FRONTEND_VARNISH_SERVICE" + +# POST /add with a reader should return 403 +# The write-append authorization grants acl:Append to owners and writers groups only; +# readers only have acl:Read on dh:Item/Container and /sparql, not on /add + +add-agent-to-group.sh \ + -f "$OWNER_CERT_FILE" \ + -p "$OWNER_CERT_PWD" \ + --agent "$AGENT_URI" \ + "${ADMIN_BASE_URL}acl/groups/readers/" + +curl -k -w "%{http_code}\n" -o /dev/null -s \ + -E "$AGENT_CERT_FILE":"$AGENT_CERT_PWD" \ + -X POST \ + -H "Content-Type: application/x-www-form-urlencoded" \ + --data-urlencode "rdf=" \ + --data-urlencode "sb=clone" \ + --data-urlencode "pu=http://purl.org/dc/terms/source" \ + --data-urlencode "ou=https://orcid.org/0000-0003-1750-9906" \ + --data-urlencode "pu=http://www.w3.org/ns/sparql-service-description#name" \ + --data-urlencode "ou=${END_USER_BASE_URL}" \ + "${END_USER_BASE_URL}add" \ +| 
grep -q "$STATUS_FORBIDDEN" diff --git a/http-tests/system/end-user/POST-add.sh b/http-tests/system/end-user/POST-add.sh new file mode 100755 index 000000000..40f715c76 --- /dev/null +++ b/http-tests/system/end-user/POST-add.sh @@ -0,0 +1,44 @@ +#!/usr/bin/env bash +set -euo pipefail + +initialize_dataset "$END_USER_BASE_URL" "$TMP_END_USER_DATASET" "$END_USER_ENDPOINT_URL" +initialize_dataset "$ADMIN_BASE_URL" "$TMP_ADMIN_DATASET" "$ADMIN_ENDPOINT_URL" +purge_cache "$END_USER_VARNISH_SERVICE" +purge_cache "$ADMIN_VARNISH_SERVICE" +purge_cache "$FRONTEND_VARNISH_SERVICE" + +# add agent to the writers group + +add-agent-to-group.sh \ + -f "$OWNER_CERT_FILE" \ + -p "$OWNER_CERT_PWD" \ + --agent "$AGENT_URI" \ + "${ADMIN_BASE_URL}acl/groups/writers/" + +# create container to hold the cloned data + +slug=$(uuidgen | tr '[:upper:]' '[:lower:]') + +container=$(create-container.sh \ + -f "$AGENT_CERT_FILE" \ + -p "$AGENT_CERT_PWD" \ + -b "$END_USER_BASE_URL" \ + --title "Test container" \ + --slug "$slug" \ + --parent "$END_USER_BASE_URL") + +# POST /add with a writer should succeed +# Clone data from a remote RDF source into the container + +curl -k -w "%{http_code}\n" -o /dev/null -s \ + -E "$AGENT_CERT_FILE":"$AGENT_CERT_PWD" \ + -X POST \ + -H "Content-Type: application/x-www-form-urlencoded" \ + --data-urlencode "rdf=" \ + --data-urlencode "sb=clone" \ + --data-urlencode "pu=http://purl.org/dc/terms/source" \ + --data-urlencode "ou=https://orcid.org/0000-0003-1750-9906" \ + --data-urlencode "pu=http://www.w3.org/ns/sparql-service-description#name" \ + --data-urlencode "ou=${container}" \ + "${END_USER_BASE_URL}add" \ +| grep -q "$STATUS_NO_CONTENT" diff --git a/http-tests/system/end-user/POST-generate-401.sh b/http-tests/system/end-user/POST-generate-401.sh new file mode 100755 index 000000000..a99df0375 --- /dev/null +++ b/http-tests/system/end-user/POST-generate-401.sh @@ -0,0 +1,23 @@ +#!/usr/bin/env bash +set -euo pipefail + +initialize_dataset "$END_USER_BASE_URL" "$TMP_END_USER_DATASET" "$END_USER_ENDPOINT_URL" +initialize_dataset "$ADMIN_BASE_URL" "$TMP_ADMIN_DATASET" "$ADMIN_ENDPOINT_URL" +purge_cache "$END_USER_VARNISH_SERVICE" +purge_cache "$ADMIN_VARNISH_SERVICE" +purge_cache "$FRONTEND_VARNISH_SERVICE" + +# POST /generate without a certificate should return 401 +# Only owners and writers have acl:Append access to /generate via write-append authorization + +( +curl -k -w "%{http_code}\n" -o /dev/null -s \ + -X POST \ + -H "Content-Type: text/turtle" \ + --data-binary @- \ + "${END_USER_BASE_URL}generate" < . +[] sioc:has_parent <${END_USER_BASE_URL}> . 
+EOF +) \ +| grep -q "$STATUS_UNAUTHORIZED" diff --git a/http-tests/system/end-user/POST-generate-403.sh b/http-tests/system/end-user/POST-generate-403.sh new file mode 100755 index 000000000..70838dafb --- /dev/null +++ b/http-tests/system/end-user/POST-generate-403.sh @@ -0,0 +1,24 @@ +#!/usr/bin/env bash +set -euo pipefail + +initialize_dataset "$END_USER_BASE_URL" "$TMP_END_USER_DATASET" "$END_USER_ENDPOINT_URL" +initialize_dataset "$ADMIN_BASE_URL" "$TMP_ADMIN_DATASET" "$ADMIN_ENDPOINT_URL" +purge_cache "$END_USER_VARNISH_SERVICE" +purge_cache "$ADMIN_VARNISH_SERVICE" +purge_cache "$FRONTEND_VARNISH_SERVICE" + +# POST /generate with a signed-up agent not in any group should return 403 +# The write-append authorization grants acl:Append to owners and writers groups only + +( +curl -k -w "%{http_code}\n" -o /dev/null -s \ + -E "$AGENT_CERT_FILE":"$AGENT_CERT_PWD" \ + -X POST \ + -H "Content-Type: text/turtle" \ + --data-binary @- \ + "${END_USER_BASE_URL}generate" < . +[] sioc:has_parent <${END_USER_BASE_URL}> . +EOF +) \ +| grep -q "$STATUS_FORBIDDEN" diff --git a/http-tests/system/end-user/POST-generate-readers-403.sh b/http-tests/system/end-user/POST-generate-readers-403.sh new file mode 100755 index 000000000..751aeb866 --- /dev/null +++ b/http-tests/system/end-user/POST-generate-readers-403.sh @@ -0,0 +1,31 @@ +#!/usr/bin/env bash +set -euo pipefail + +initialize_dataset "$END_USER_BASE_URL" "$TMP_END_USER_DATASET" "$END_USER_ENDPOINT_URL" +initialize_dataset "$ADMIN_BASE_URL" "$TMP_ADMIN_DATASET" "$ADMIN_ENDPOINT_URL" +purge_cache "$END_USER_VARNISH_SERVICE" +purge_cache "$ADMIN_VARNISH_SERVICE" +purge_cache "$FRONTEND_VARNISH_SERVICE" + +# POST /generate with a reader should return 403 +# The write-append authorization grants acl:Append to owners and writers groups only; +# readers only have acl:Read on dh:Item/Container and /sparql, not on /generate + +add-agent-to-group.sh \ + -f "$OWNER_CERT_FILE" \ + -p "$OWNER_CERT_PWD" \ + --agent "$AGENT_URI" \ + "${ADMIN_BASE_URL}acl/groups/readers/" + +( +curl -k -w "%{http_code}\n" -o /dev/null -s \ + -E "$AGENT_CERT_FILE":"$AGENT_CERT_PWD" \ + -X POST \ + -H "Content-Type: text/turtle" \ + --data-binary @- \ + "${END_USER_BASE_URL}generate" < . +[] sioc:has_parent <${END_USER_BASE_URL}> . 
+EOF +) \ +| grep -q "$STATUS_FORBIDDEN" diff --git a/http-tests/system/end-user/POST-generate.sh b/http-tests/system/end-user/POST-generate.sh new file mode 100755 index 000000000..715b671db --- /dev/null +++ b/http-tests/system/end-user/POST-generate.sh @@ -0,0 +1,50 @@ +#!/usr/bin/env bash +set -euo pipefail + +initialize_dataset "$END_USER_BASE_URL" "$TMP_END_USER_DATASET" "$END_USER_ENDPOINT_URL" +initialize_dataset "$ADMIN_BASE_URL" "$TMP_ADMIN_DATASET" "$ADMIN_ENDPOINT_URL" +purge_cache "$END_USER_VARNISH_SERVICE" +purge_cache "$ADMIN_VARNISH_SERVICE" +purge_cache "$FRONTEND_VARNISH_SERVICE" + +# add agent to the writers group + +add-agent-to-group.sh \ + -f "$OWNER_CERT_FILE" \ + -p "$OWNER_CERT_PWD" \ + --agent "$AGENT_URI" \ + "${ADMIN_BASE_URL}acl/groups/writers/" + +# create a parent container to generate into + +slug=$(uuidgen | tr '[:upper:]' '[:lower:]') + +parent=$(create-container.sh \ + -f "$AGENT_CERT_FILE" \ + -p "$AGENT_CERT_PWD" \ + -b "$END_USER_BASE_URL" \ + --title "Generate parent" \ + --slug "$slug" \ + --parent "$END_USER_BASE_URL") + +# POST /generate with a writer: generate a container for dh:Container class using ldh:SelectChildren query + +( +curl -k -w "%{http_code}\n" -o /dev/null -s \ + -E "$AGENT_CERT_FILE":"$AGENT_CERT_PWD" \ + -X POST \ + -H "Content-Type: text/turtle" \ + --data-binary @- \ + "${END_USER_BASE_URL}generate" < . +@prefix void: . +@prefix spin: . +@prefix dh: . +@prefix ldh: . + +[] sioc:has_parent <${parent}> ; + void:class dh:Container ; + spin:query ldh:SelectChildren . +EOF +) \ +| grep -q "$STATUS_OK" diff --git a/http-tests/system/end-user/POST-ns.sh b/http-tests/system/end-user/POST-ns.sh new file mode 100755 index 000000000..5ee9b1659 --- /dev/null +++ b/http-tests/system/end-user/POST-ns.sh @@ -0,0 +1,18 @@ +#!/usr/bin/env bash +set -euo pipefail + +initialize_dataset "$END_USER_BASE_URL" "$TMP_END_USER_DATASET" "$END_USER_ENDPOINT_URL" +initialize_dataset "$ADMIN_BASE_URL" "$TMP_ADMIN_DATASET" "$ADMIN_ENDPOINT_URL" +purge_cache "$END_USER_VARNISH_SERVICE" +purge_cache "$ADMIN_VARNISH_SERVICE" +purge_cache "$FRONTEND_VARNISH_SERVICE" + +# POST /ns (SPARQL query via POST form) is publicly accessible (foaf:Agent has acl:Append via public-namespace authorization) + +curl -k -w "%{http_code}\n" -o /dev/null -s \ + -X POST \ + -H "Content-Type: application/x-www-form-urlencoded" \ + -H "Accept: application/sparql-results+xml" \ + --data-urlencode "query=SELECT * { ?s ?p ?o } LIMIT 1" \ + "${END_USER_BASE_URL}ns" \ +| grep -q "$STATUS_OK" diff --git a/http-tests/system/end-user/POST-sparql-401.sh b/http-tests/system/end-user/POST-sparql-401.sh new file mode 100755 index 000000000..95c0b4139 --- /dev/null +++ b/http-tests/system/end-user/POST-sparql-401.sh @@ -0,0 +1,19 @@ +#!/usr/bin/env bash +set -euo pipefail + +initialize_dataset "$END_USER_BASE_URL" "$TMP_END_USER_DATASET" "$END_USER_ENDPOINT_URL" +initialize_dataset "$ADMIN_BASE_URL" "$TMP_ADMIN_DATASET" "$ADMIN_ENDPOINT_URL" +purge_cache "$END_USER_VARNISH_SERVICE" +purge_cache "$ADMIN_VARNISH_SERVICE" +purge_cache "$FRONTEND_VARNISH_SERVICE" + +# POST /sparql without a certificate should return 401 +# The sparql-endpoint authorization grants acl:Append only to acl:AuthenticatedAgent, not foaf:Agent + +curl -k -w "%{http_code}\n" -o /dev/null -s \ + -X POST \ + -H "Content-Type: application/x-www-form-urlencoded" \ + -H "Accept: application/sparql-results+xml" \ + --data-urlencode "query=SELECT * { ?s ?p ?o } LIMIT 1" \ + "${END_USER_BASE_URL}sparql" \ +| grep -q 
"$STATUS_UNAUTHORIZED" diff --git a/http-tests/system/end-user/POST-sparql.sh b/http-tests/system/end-user/POST-sparql.sh new file mode 100755 index 000000000..9d50411ba --- /dev/null +++ b/http-tests/system/end-user/POST-sparql.sh @@ -0,0 +1,20 @@ +#!/usr/bin/env bash +set -euo pipefail + +initialize_dataset "$END_USER_BASE_URL" "$TMP_END_USER_DATASET" "$END_USER_ENDPOINT_URL" +initialize_dataset "$ADMIN_BASE_URL" "$TMP_ADMIN_DATASET" "$ADMIN_ENDPOINT_URL" +purge_cache "$END_USER_VARNISH_SERVICE" +purge_cache "$ADMIN_VARNISH_SERVICE" +purge_cache "$FRONTEND_VARNISH_SERVICE" + +# POST /sparql (query via POST form) with a signed-up agent (no group) should return 200 +# The sparql-endpoint authorization grants acl:Append to acl:AuthenticatedAgent regardless of group membership + +curl -k -w "%{http_code}\n" -o /dev/null -s \ + -E "$AGENT_CERT_FILE":"$AGENT_CERT_PWD" \ + -X POST \ + -H "Content-Type: application/x-www-form-urlencoded" \ + -H "Accept: application/sparql-results+xml" \ + --data-urlencode "query=SELECT * { ?s ?p ?o } LIMIT 1" \ + "${END_USER_BASE_URL}sparql" \ +| grep -q "$STATUS_OK" diff --git a/platform/context.xsl b/platform/context.xsl index 1c4b4bd78..529c95d5d 100644 --- a/platform/context.xsl +++ b/platform/context.xsl @@ -52,6 +52,9 @@ xmlns:orcid="&orcid;" + + + @@ -168,6 +171,15 @@ xmlns:orcid="&orcid;" + + + + + + + + + diff --git a/platform/datasets/admin.trig b/platform/datasets/admin.trig index 4756fa90b..9f3448235 100644 --- a/platform/datasets/admin.trig +++ b/platform/datasets/admin.trig @@ -1,15 +1,9 @@ @prefix def: . @prefix ldh: . -@prefix ac: . @prefix rdf: . -@prefix xsd: . -@prefix dh: . -@prefix sd: . -@prefix sp: . @prefix sioc: . @prefix foaf: . @prefix dct: . -@prefix spin: . <> { @@ -24,335 +18,21 @@ } -# ENDPOINTS - - -{ - - a foaf:Document ; - dct:title "SPARQL endpoint" . - -} - - -{ - - a foaf:Document ; - dct:title "Namespace endpoint" . - -} - - -{ - - a foaf:Document ; - dct:title "Add data endpoint" . - -} - - -{ - - a foaf:Document ; - dct:title "Generate data endpoint" . - -} - -# CONTAINERS - - -{ - - a dh:Container ; - dct:title "Queries" ; - dct:description "SPARQL queries" ; - rdf:_1 . - - a ldh:Object ; - rdf:value . - - a ldh:View ; - dct:title "Queries" ; - spin:query . - - a sp:Select ; - dct:title "Select query resources" ; - sp:text """PREFIX sp: - -SELECT DISTINCT ?s -WHERE - { GRAPH ?g - { { ?s a sp:Select } - UNION - { ?s a sp:Construct } - UNION - { ?s a sp:Describe } - UNION - { ?s a sp:Ask } - } - }""" . - -} - - -{ - - a dh:Item ; - sioc:has_container ; - dct:title "Select instances" ; - foaf:primaryTopic . - - a sp:Select ; - dct:title "Select instances" ; - dct:description "Selects instances of type from the default graph" ; - sp:text """SELECT DISTINCT ?s -WHERE - { ?s a $type ; - ?p ?o - }""" . - -} - - -{ - - a dh:Item ; - sioc:has_container ; - dct:title "Select instances in graphs" ; - foaf:primaryTopic . - - a sp:Select ; - dct:title "Select instances in graphs" ; - dct:description "Selects instances of type from named graphs" ; - sp:text """SELECT DISTINCT ?s -WHERE - { GRAPH ?g - { ?s a $type ; - ?p ?o - } - }""" . - -} - - -{ - - a dh:Container ; - dct:title "Files" ; - dct:description "Uploaded files" ; - rdf:_1 . - - a ldh:Object ; - rdf:value . - - a ldh:View ; - dct:title "Files" ; - spin:query . - - a sp:Select ; - dct:title "Select file resources" ; - sp:text """PREFIX nfo: - -SELECT DISTINCT ?s -WHERE - { GRAPH ?g - { ?s a nfo:FileDataObject } - }""" . 
- -} - - -{ - - a dh:Container ; - dct:title "Imports" ; - dct:description "Data imports" ; - rdf:_1 . - - a ldh:Object ; - rdf:value . - - a ldh:View ; - dct:title "Imports" ; - spin:query . - - a sp:Select ; - dct:title "Select import resources" ; - sp:text """PREFIX ldh: - -SELECT DISTINCT ?s -WHERE - { GRAPH ?g - { { ?s a ldh:CSVImport } - UNION - { ?s a ldh:RDFImport } - } - }""" . - -} - - -{ - - a dh:Item ; - dct:title "Geo" ; - dct:description "Geolocated resources" ; - rdf:_1 . - - a ldh:Object ; - rdf:value . - - a ldh:View ; - dct:title "Geo resources" ; - spin:query ; - ac:mode ac:MapMode . - - a sp:Select ; - dct:title "Select geo resources" ; - sp:text """PREFIX geo: -PREFIX dct: - -SELECT DISTINCT ?resource -WHERE -{ GRAPH ?graph - { ?resource geo:lat ?lat ; - geo:long ?long - OPTIONAL - { ?resource a ?type } - OPTIONAL - { ?resource dct:title ?title } - } -} -ORDER BY ?title""" . - -} - - -{ - - a dh:Item ; - dct:title "Latest" ; - dct:description "Latest resources" ; - rdf:_1 . - - a ldh:Object ; - rdf:value . - - a ldh:View ; - dct:title "Latest resources" ; - spin:query . - - a sp:Select ; - dct:title "Select latest" ; - sp:text """PREFIX dct: - -SELECT DISTINCT ?dated -WHERE -{ GRAPH ?graph - { ?dated dct:created ?created } -} -ORDER BY DESC(?created)""" . - -} - - -{ - - a dh:Container ; - dct:title "Charts" ; - dct:description "Saved charts" ; - rdf:_1 . - - a ldh:Object ; - rdf:value . - - a ldh:View ; - dct:title "Charts" ; - spin:query . - - a sp:Select ; - dct:title "Select chart resources" ; - sp:text """PREFIX ldh: - -SELECT DISTINCT ?s -WHERE - { GRAPH ?g - { { ?s a ldh:GraphChart } - UNION - { ?s a ldh:ResultSetChart } - } - }""" . - -} - - -{ - - a dh:Container ; - dct:title "Apps" ; - dct:description "Linked Data applications" ; - rdf:_1 . - - a ldh:Object ; - rdf:value . - - a ldh:View ; - dct:title "Applications" ; - spin:query . - - a sp:Select ; - dct:title "Select application resources" ; - sp:text """PREFIX lapp: - -SELECT DISTINCT ?s -WHERE - { GRAPH ?g - { ?s a lapp:Application } - }""" . - -} - - -{ - - a dh:Container ; - dct:title "Services" ; - dct:description "SPARQL services" ; - rdf:_1 . - - a ldh:Object ; - rdf:value . - - a ldh:View ; - dct:title "Services" ; - spin:query . - - a sp:Select ; - dct:title "Select service resources" ; - sp:text """PREFIX sd: - -SELECT DISTINCT ?s -WHERE - { GRAPH ?g - { ?s a sd:Service } - }""" . - -} - ### ADMIN-SPECIFIC @prefix lacl: . @prefix adm: . +@prefix dh: . @prefix rdfs: . @prefix owl: . @prefix acl: . @prefix cert: . -@prefix spin: . +@prefix sp: . { - a adm:SignUp ; + a foaf:Document ; dct:title "Sign up" ; rdf:_1 . @@ -570,44 +250,6 @@ WHERE } -# access endpoint - - -{ - - a dh:Item ; - sioc:has_container ; - dct:title "Access description access" ; - foaf:primaryTopic . - - a acl:Authorization ; - rdfs:label "Access description access" ; - rdfs:comment "Allows non-authenticated access" ; - acl:accessToClass ldh:Access ; - acl:mode acl:Read ; - acl:agentClass foaf:Agent, acl:AuthenticatedAgent . - -} - -# access request endpoint - - -{ - - a dh:Item ; - sioc:has_container ; - dct:title "Access request access" ; - foaf:primaryTopic . - - a acl:Authorization ; - rdfs:label "Access request access" ; - rdfs:comment "Allows non-authenticated access" ; - acl:accessToClass ldh:AccessRequest ; - acl:mode acl:Append ; - acl:agentClass foaf:Agent, acl:AuthenticatedAgent . 
- -} - # sign up @@ -621,8 +263,7 @@ WHERE a acl:Authorization ; rdfs:label "Signup access" ; rdfs:comment "Required to enable public signup" ; - acl:accessTo ; # TO-DO: only allow access by the secretary agent? - acl:accessToClass adm:SignUp ; + acl:accessTo , ; # TO-DO: only allow access by the secretary agent? acl:mode acl:Read, acl:Append ; acl:agentClass foaf:Agent . @@ -641,7 +282,7 @@ WHERE a acl:Authorization ; rdfs:label "OAuth2 login access" ; rdfs:comment "Required to enable public OAuth2 login" ; - acl:accessToClass ldh:OAuthLogin ; + acl:accessToClass , ; acl:mode acl:Read ; acl:agentClass foaf:Agent . @@ -660,7 +301,7 @@ WHERE a acl:Authorization ; rdfs:label "OAuth2 authorization" ; rdfs:comment "Required to enable public OAuth2 login" ; - acl:accessToClass ldh:OAuthAuthorize ; + acl:accessTo , ; acl:mode acl:Read ; acl:agentClass foaf:Agent . diff --git a/platform/datasets/end-user.trig b/platform/datasets/end-user.trig index 2608b6a39..351174081 100644 --- a/platform/datasets/end-user.trig +++ b/platform/datasets/end-user.trig @@ -1,15 +1,9 @@ @prefix def: . @prefix ldh: . -@prefix ac: . @prefix rdf: . -@prefix xsd: . -@prefix dh: . -@prefix sd: . -@prefix sp: . @prefix sioc: . @prefix foaf: . @prefix dct: . -@prefix spin: . <> { @@ -24,378 +18,10 @@ } -# ENDPOINTS - - -{ - - a foaf:Document ; - dct:title "SPARQL endpoint" . - -} - - -{ - - a foaf:Document ; - dct:title "Namespace endpoint" . - -} - - -{ - - a foaf:Document ; - dct:title "Add data endpoint" . - -} - - -{ - - a foaf:Document ; - dct:title "Generate data endpoint" . - -} - -# CONTAINERS - - -{ - - a dh:Container ; - dct:title "Queries" ; - dct:description "SPARQL queries" ; - rdf:_1 . - - a ldh:Object ; - rdf:value . - - a ldh:View ; - dct:title "Queries" ; - spin:query . - - a sp:Select ; - dct:title "Select query resources" ; - sp:text """PREFIX sp: - -SELECT DISTINCT ?s -WHERE - { GRAPH ?g - { { ?s a sp:Select } - UNION - { ?s a sp:Construct } - UNION - { ?s a sp:Describe } - UNION - { ?s a sp:Ask } - } - }""" . - -} - - -{ - - a dh:Item ; - sioc:has_container ; - dct:title "Select instances" ; - foaf:primaryTopic . - - a sp:Select ; - dct:title "Select instances" ; - dct:description "Selects instances of type from the default graph" ; - sp:text """SELECT DISTINCT ?s -WHERE - { ?s a $type ; - ?p ?o - }""" . - -} - - -{ - - a dh:Item ; - sioc:has_container ; - dct:title "Select instances in graphs" ; - foaf:primaryTopic . - - a sp:Select ; - dct:title "Select instances in graphs" ; - dct:description "Selects instances of type from named graphs" ; - sp:text """SELECT DISTINCT ?s -WHERE - { GRAPH ?g - { ?s a $type ; - ?p ?o - } - }""" . - -} - - -{ - - a dh:Container ; - dct:title "Files" ; - dct:description "Uploaded files" ; - rdf:_1 . - - a ldh:Object ; - rdf:value . - - a ldh:View ; - dct:title "Files" ; - spin:query . - - a sp:Select ; - dct:title "Select file resources" ; - sp:text """PREFIX nfo: - -SELECT DISTINCT ?s -WHERE - { GRAPH ?g - { ?s a nfo:FileDataObject } - }""" . - -} - - -{ - - a dh:Container ; - dct:title "Imports" ; - dct:description "Data imports" ; - rdf:_1 . - - a ldh:Object ; - rdf:value . - - a ldh:View ; - dct:title "Imports" ; - spin:query . - - a sp:Select ; - dct:title "Select import resources" ; - sp:text """PREFIX ldh: - -SELECT DISTINCT ?s -WHERE - { GRAPH ?g - { { ?s a ldh:CSVImport } - UNION - { ?s a ldh:RDFImport } - } - }""" . - -} - - -{ - - a dh:Item ; - dct:title "Geo" ; - dct:description "Geolocated resources" ; - rdf:_1 . - - a ldh:Object ; - rdf:value . 
- - a ldh:View ; - dct:title "Geo resources" ; - spin:query ; - ac:mode ac:MapMode . - - a sp:Select ; - dct:title "Select geo resources" ; - sp:text """PREFIX geo: -PREFIX dct: - -SELECT DISTINCT ?resource -WHERE -{ GRAPH ?graph - { ?resource geo:lat ?lat ; - geo:long ?long - OPTIONAL - { ?resource a ?type } - OPTIONAL - { ?resource dct:title ?title } - } -} -ORDER BY ?title""" . - -} - - -{ - - a dh:Item ; - dct:title "Latest" ; - dct:description "Latest resources" ; - rdf:_1 . - - a ldh:Object ; - rdf:value . - - a ldh:View ; - dct:title "Latest resources" ; - spin:query . - - a sp:Select ; - dct:title "Select latest" ; - sp:text """PREFIX dct: - -SELECT DISTINCT ?dated -WHERE -{ GRAPH ?graph - { ?dated dct:created ?created } -} -ORDER BY DESC(?created)""" . - -} - - -{ - - a dh:Container ; - dct:title "Charts" ; - dct:description "Saved charts" ; - rdf:_1 . - - a ldh:Object ; - rdf:value . - - a ldh:View ; - dct:title "Charts" ; - spin:query . - - a sp:Select ; - dct:title "Select chart resources" ; - sp:text """PREFIX ldh: - -SELECT DISTINCT ?s -WHERE - { GRAPH ?g - { { ?s a ldh:GraphChart } - UNION - { ?s a ldh:ResultSetChart } - } - }""" . - -} - - -{ - - a dh:Container ; - dct:title "Apps" ; - dct:description "Linked Data applications" ; - rdf:_1 . - - a ldh:Object ; - rdf:value . - - a ldh:View ; - dct:title "Applications" ; - spin:query . - - a sp:Select ; - dct:title "Select application resources" ; - sp:text """PREFIX lapp: - -SELECT DISTINCT ?s -WHERE - { GRAPH ?g - { ?s a lapp:Application } - }""" . - -} - - -{ - - a dh:Container ; - dct:title "Services" ; - dct:description "SPARQL services" ; - rdf:_1 . - - a ldh:Object ; - rdf:value . - - a ldh:View ; - dct:title "Services" ; - spin:query . - - a sp:Select ; - dct:title "Select service resources" ; - sp:text """PREFIX sd: - -SELECT DISTINCT ?s -WHERE - { GRAPH ?g - { ?s a sd:Service } - }""" . - -} - ### END-USER-SPECIFIC - -{ - - a ldh:Access ; - dct:title "Access endpoint" . - -} - - -{ - - a ldh:AccessRequest ; - dct:title "Access request endpoint" . - -} - - -{ - - a ldh:OAuthLogin ; - dct:title "OAuth 2.0 login" . - -} - - -{ - - a ldh:OAuthAuthorize ; - dct:title "Google OAuth2.0 authorization" . - -} - - -{ - - a ldh:OAuthLogin ; - dct:title "ORCID OAuth2.0 login" . - -} - - -{ - - a ldh:OAuthAuthorize ; - dct:title "ORCID OAuth2.0 authorization" . - -} - - -{ - - a foaf:Document ; - dct:title "Settings endpoint" . - -} +@prefix dh: . +@prefix sd: . 
{ diff --git a/platform/entrypoint.sh b/platform/entrypoint.sh index 1fbe571cf..1554580a1 100755 --- a/platform/entrypoint.sh +++ b/platform/entrypoint.sh @@ -186,6 +186,11 @@ if [ -z "$CONTEXT_DATASET_URL" ]; then exit 1 fi +if [ -z "$SERVICES_DATASET_URL" ]; then + echo '$SERVICES_DATASET_URL not set' + exit 1 +fi + if [ -z "$END_USER_DATASET_URL" ]; then echo '$END_USER_DATASET_URL not set' exit 1 @@ -239,9 +244,26 @@ BASE_URI=$(echo "$BASE_URI" | tr '[:upper:]' '[:lower:]') # make sure it's lower ADMIN_BASE_URI=$(echo "$ADMIN_BASE_URI" | tr '[:upper:]' '[:lower:]') # make sure it's lower-case ORIGIN=$(echo "$ORIGIN" | tr '[:upper:]' '[:lower:]') # make sure it's lower-case +if [ "$PROTOCOL" = "https" ]; then + if [ "$HTTPS_PROXY_PORT" = 443 ]; then + export ADMIN_ORIGIN="${PROTOCOL}://admin.${HOST}" + else + export ADMIN_ORIGIN="${PROTOCOL}://admin.${HOST}:${HTTPS_PROXY_PORT}" + fi +else + if [ "$HTTP_PROXY_PORT" = 80 ]; then + export ADMIN_ORIGIN="${PROTOCOL}://admin.${HOST}" + else + export ADMIN_ORIGIN="${PROTOCOL}://admin.${HOST}:${HTTP_PROXY_PORT}" + fi +fi + +ADMIN_ORIGIN=$(echo "$ADMIN_ORIGIN" | tr '[:upper:]' '[:lower:]') # make sure it's lower-case + printf "\n### Base URI: %s\n" "$BASE_URI" printf "\n### Admin Base URI: %s\n" "$ADMIN_BASE_URI" printf "\n### Origin: %s\n" "$ORIGIN" +printf "\n### Admin Origin: %s\n" "$ADMIN_ORIGIN" # functions that wait for other services to start @@ -272,10 +294,23 @@ wait_for_url() local auth_pwd="$3" local counter="$4" local accept="$5" + local auth_token="$6" i=1 - # use HTTP Basic auth if username/password are provided - if [ -n "$auth_user" ] && [ -n "$auth_pwd" ]; then + if [ -n "$auth_token" ]; then + while [ "$i" -le "$counter" ] && ! curl -s -f -X OPTIONS "$url" -H "Authorization: Bearer $auth_token" -H "Accept: ${accept}" >/dev/null 2>&1 + do + sleep 1 ; + i=$(( i+1 )) + done + + if ! curl -s -f -X OPTIONS "$url" -H "Authorization: Bearer $auth_token" -H "Accept: ${accept}" >/dev/null 2>&1 ; then + printf "\n### URL %s not responding after %s retries, exiting...\n" "$url" "$counter" + exit 1 + else + printf "\n### URL %s responded\n" "$url" + fi + elif [ -n "$auth_user" ] && [ -n "$auth_pwd" ]; then while [ "$i" -le "$counter" ] && ! 
curl -s -f -X OPTIONS "$url" --user "$auth_user":"$auth_pwd" -H "Accept: ${accept}" >/dev/null 2>&1 do sleep 1 ; @@ -313,9 +348,16 @@ append_quads() local auth_pwd="$3" local filename="$4" local content_type="$5" + local auth_token="$6" - # use HTTP Basic auth if username/password are provided - if [ -n "$auth_user" ] && [ -n "$auth_pwd" ]; then + if [ -n "$auth_token" ]; then + curl \ + -f \ + "$quad_store_url" \ + -H "Authorization: Bearer $auth_token" \ + -H "Content-Type: ${content_type}" \ + --data-binary @"$filename" + elif [ -n "$auth_user" ] && [ -n "$auth_pwd" ]; then curl \ -f \ --basic \ @@ -332,6 +374,81 @@ append_quads() fi } +gsp_append_quads() +{ + local graph_store_url="$1" + local auth_user="$2" + local auth_pwd="$3" + local filename="$4" + # $5 is content_type (ignored; hardcoded to application/n-triples internally) + local auth_token="$6" + + # Create temporary SPARQL query to extract distinct graph URIs + local query_file + query_file=$(mktemp) + cat > "$query_file" << 'EOF' +SELECT DISTINCT ?g WHERE { GRAPH ?g { ?s ?p ?o }} +EOF + + # Execute SPARQL query to get graph URIs safely + local graph_uris + graph_uris=$(sparql --data="$filename" --query="$query_file" --results=CSV | tail -n +2 | cut -d, -f1) + + # Clean up query file + rm -f "$query_file" + + # Iterate through each graph URI + while IFS= read -r graph_uri; do + if [ -n "$graph_uri" ]; then + # Remove any trailing newlines/whitespace + graph_uri=$(echo "$graph_uri" | tr -d '\n\r') + # Create temporary file for this graph's content + local temp_file + temp_file=$(mktemp) + + # Create SPARQL query to extract triples for specific graph + local extract_query + extract_query=$(mktemp) + cat > "$extract_query" << EOF +CONSTRUCT { ?s ?p ?o } WHERE { GRAPH <$graph_uri> { ?s ?p ?o } } +EOF + + # Extract triples for this specific graph as N-Triples + sparql --data="$filename" --query="$extract_query" --results=NT > "$temp_file" + + # Send the graph's triples to the graph store using standard GSP + if [ -n "$auth_token" ]; then + curl \ + -f \ + --url-query "graph=$graph_uri" \ + "$graph_store_url" \ + -H "Authorization: Bearer $auth_token" \ + -H "Content-Type: application/n-triples" \ + --data-binary @"$temp_file" + elif [ -n "$auth_user" ] && [ -n "$auth_pwd" ]; then + curl \ + -f \ + --basic \ + --user "$auth_user":"$auth_pwd" \ + --url-query "graph=$graph_uri" \ + "$graph_store_url" \ + -H "Content-Type: application/n-triples" \ + --data-binary @"$temp_file" + else + curl \ + -f \ + --url-query "graph=$graph_uri" \ + "$graph_store_url" \ + -H "Content-Type: application/n-triples" \ + --data-binary @"$temp_file" + fi + + # Clean up temporary files + rm -f "$temp_file" "$extract_query" + fi + done <<< "$graph_uris" +} + generate_cert() { local alias="$1" @@ -519,7 +636,7 @@ case "$CONTEXT_DATASET_URL" in CONTEXT_DATASET=$(echo "$CONTEXT_DATASET_URL" | cut -c 8-) # strip leading file:// printf "\n### Reading context dataset from a local file: %s\n" "$CONTEXT_DATASET" ;; - *) + *) CONTEXT_DATASET=$(mktemp) printf "\n### Downloading context dataset from a URL: %s\n" "$CONTEXT_DATASET_URL" @@ -527,7 +644,27 @@ case "$CONTEXT_DATASET_URL" in curl "$CONTEXT_DATASET_URL" > "$CONTEXT_DATASET" ;; esac -trig --base="$BASE_URI" "$CONTEXT_DATASET" > "$based_context_dataset" +case "$SERVICES_DATASET_URL" in + "file://"*) + SERVICES_DATASET=$(echo "$SERVICES_DATASET_URL" | cut -c 8-) # strip leading file:// + + printf "\n### Reading services dataset from a local file: %s\n" "$SERVICES_DATASET" ;; + *) + SERVICES_DATASET=$(mktemp) + 
+ printf "\n### Downloading services dataset from a URL: %s\n" "$SERVICES_DATASET_URL" + + curl "$SERVICES_DATASET_URL" > "$SERVICES_DATASET" ;; +esac + +CREDENTIALS_DATASET=/run/secrets/credentials + +if [ -f "$CREDENTIALS_DATASET" ]; then + printf "\n### Loading credentials dataset from: %s\n" "$CREDENTIALS_DATASET" + trig --base="$BASE_URI" "$CONTEXT_DATASET" "$SERVICES_DATASET" "$CREDENTIALS_DATASET" > "$based_context_dataset" +else + trig --base="$BASE_URI" "$CONTEXT_DATASET" "$SERVICES_DATASET" > "$based_context_dataset" +fi sparql --data="$based_context_dataset" --query="select-root-services.rq" --results=XML > root_service_metadata.xml @@ -537,105 +674,100 @@ readarray apps < <(xmlstarlet sel -B \ -N srx="http://www.w3.org/2005/sparql-results#" \ -T -t -m "/srx:sparql/srx:results/srx:result" \ -o "\"" \ - -v "srx:binding[@name = 'endUserApp']" \ - -o "\" \"" \ - -v "srx:binding[@name = 'endUserOrigin']" \ - -o "\" \"" \ - -v "srx:binding[@name = 'endUserQuadStore']" \ + -v "srx:binding[@name = 'app']" \ -o "\" \"" \ - -v "srx:binding[@name = 'endUserEndpoint']" \ + -v "srx:binding[@name = 'type']" \ -o "\" \"" \ - -v "srx:binding[@name = 'endUserAuthUser']" \ + -v "srx:binding[@name = 'origin']" \ -o "\" \"" \ - -v "srx:binding[@name = 'endUserAuthPwd']" \ + -v "srx:binding[@name = 'quadStore']" \ -o "\" \"" \ - -v "srx:binding[@name = 'endUserMaker']" \ + -v "srx:binding[@name = 'graphStore']" \ -o "\" \"" \ - -v "srx:binding[@name = 'adminApp']" \ + -v "srx:binding[@name = 'endpoint']" \ -o "\" \"" \ - -v "srx:binding[@name = 'adminOrigin']" \ + -v "srx:binding[@name = 'authUser']" \ -o "\" \"" \ - -v "srx:binding[@name = 'adminQuadStore']" \ + -v "srx:binding[@name = 'authPwd']" \ -o "\" \"" \ - -v "srx:binding[@name = 'adminEndpoint']" \ + -v "srx:binding[@name = 'authToken']" \ -o "\" \"" \ - -v "srx:binding[@name = 'adminAuthUser']" \ - -o "\" \"" \ - -v "srx:binding[@name = 'adminAuthPwd']" \ - -o "\" \"" \ - -v "srx:binding[@name = 'adminMaker']" \ + -v "srx:binding[@name = 'maker']" \ -o "\"" \ -n \ root_service_metadata.xml) for app in "${apps[@]}"; do app_array=(${app}) - end_user_app="${app_array[0]//\"/}" - end_user_origin="${app_array[1]//\"/}" - end_user_quad_store_url="${app_array[2]//\"/}" - end_user_endpoint_url="${app_array[3]//\"/}" - end_user_service_auth_user="${app_array[4]//\"/}" - end_user_service_auth_pwd="${app_array[5]//\"/}" - end_user_owner="${app_array[6]//\"/}" - admin_app="${app_array[7]//\"/}" - admin_origin="${app_array[8]//\"/}" - admin_quad_store_url="${app_array[9]//\"/}" - admin_endpoint_url="${app_array[10]//\"/}" - admin_service_auth_user="${app_array[11]//\"/}" - admin_service_auth_pwd="${app_array[12]//\"/}" - admin_owner="${app_array[13]//\"/}" - - printf "\n### Processing dataspace. End-user app: %s (origin: %s) Admin app: %s (origin: %s)\n" "$end_user_app" "$end_user_origin" "$admin_app" "$admin_origin" - - if [ -z "$end_user_app" ]; then - printf "\nEnd-user app URI could not be extracted from %s. Exiting...\n" "$CONTEXT_DATASET" - exit 1 - fi - if [ -z "$end_user_quad_store_url" ]; then - printf "\nEnd-user quad store URL could not be extracted for the <%s> app. 
Exiting...\n" "$end_user_app" + app_uri="${app_array[0]//\"/}" + app_type="${app_array[1]//\"/}" + app_origin="${app_array[2]//\"/}" + app_quad_store_url="${app_array[3]//\"/}" + app_graph_store_url="${app_array[4]//\"/}" + app_endpoint_url="${app_array[5]//\"/}" + app_service_auth_user="${app_array[6]//\"/}" + app_service_auth_pwd="${app_array[7]//\"/}" + app_service_auth_token="${app_array[8]//\"/}" + app_owner="${app_array[9]//\"/}" + + printf "\n### Processing app: %s (type: %s, origin: %s)\n" "$app_uri" "$app_type" "$app_origin" + + if [ -z "$app_uri" ]; then + printf "\nApp URI could not be extracted from %s. Exiting...\n" "$CONTEXT_DATASET" exit 1 fi - if [ -z "$admin_app" ]; then - printf "\nAdmin app URI could not be extracted for the <%s> app. Exiting...\n" "$end_user_app" + if [ -z "$app_quad_store_url" ] && [ -z "$app_graph_store_url" ]; then + printf "\nNeither quad store nor graph store URL could be extracted for the <%s> app. Exiting...\n" "$app_uri" exit 1 fi - if [ -z "$admin_origin" ]; then - printf "\nAdmin origin could not be extracted for the <%s> app. Exiting...\n" "$end_user_app" - exit 1 - fi - if [ -z "$admin_quad_store_url" ]; then - printf "\nAdmin quad store URL could not be extracted for the <%s> app. Exiting...\n" "$end_user_app" + if [ -z "$app_origin" ]; then + printf "\nOrigin could not be extracted for the <%s> app. Exiting...\n" "$app_uri" exit 1 fi - # check if this app is the root app by comparing origins - if [ "$end_user_origin" = "$ORIGIN" ]; then - root_end_user_app="$end_user_app" - #root_end_user_origin="$end_user_origin" - root_end_user_quad_store_url="$end_user_quad_store_url" - root_end_user_service_auth_user="$end_user_service_auth_user" - root_end_user_service_auth_pwd="$end_user_service_auth_pwd" - root_admin_app="$admin_app" - #root_admin_origin="$admin_origin" - root_admin_quad_store_url="$admin_quad_store_url" - root_admin_service_auth_user="$admin_service_auth_user" - root_admin_service_auth_pwd="$admin_service_auth_pwd" + # resolve the effective store URL and upload function for this app + if [ -n "$app_quad_store_url" ]; then + app_store_url="$app_quad_store_url" + app_store_content_type="application/n-quads" + app_store_fn="append_quads" + printf "\n### Quad store URL: %s\n" "$app_quad_store_url" + else + app_store_url="$app_graph_store_url" + app_store_content_type="application/n-triples" + app_store_fn="gsp_append_quads" + printf "\n### Graph store URL (GSP fallback): %s\n" "$app_graph_store_url" fi - # append ownership metadata to apps if it's not present (apps have to be URI resources!) - - if [ -z "$end_user_owner" ]; then - echo "<${end_user_app}> <${OWNER_URI}> <${end_user_app}> ." >> "$based_context_dataset" + # check if this is the root end-user or root admin app by comparing origins + if [ "$app_type" = "https://w3id.org/atomgraph/linkeddatahub/apps#EndUserApplication" ] && [ "$app_origin" = "$ORIGIN" ]; then + root_end_user_app="$app_uri" + root_end_user_quad_store_url="$app_quad_store_url" + root_end_user_store_url="$app_store_url" + root_end_user_store_content_type="$app_store_content_type" + root_end_user_endpoint_url="$app_endpoint_url" + root_end_user_service_auth_user="$app_service_auth_user" + root_end_user_service_auth_pwd="$app_service_auth_pwd" + root_end_user_service_auth_token="$app_service_auth_token" fi - if [ -z "$admin_owner" ]; then - echo "<${admin_app}> <${OWNER_URI}> <${admin_app}> ." 
>> "$based_context_dataset" + if [ "$app_type" = "https://w3id.org/atomgraph/linkeddatahub/apps#AdminApplication" ] && [ "$app_origin" = "$ADMIN_ORIGIN" ]; then + root_admin_app="$app_uri" + root_admin_quad_store_url="$app_quad_store_url" + root_admin_store_url="$app_store_url" + root_admin_store_content_type="$app_store_content_type" + root_admin_endpoint_url="$app_endpoint_url" + root_admin_service_auth_user="$app_service_auth_user" + root_admin_service_auth_pwd="$app_service_auth_pwd" + root_admin_service_auth_token="$app_service_auth_token" fi - printf "\n### Quad store URL of the root end-user service: %s\n" "$end_user_quad_store_url" - printf "\n### Quad store URL of the root admin service: %s\n" "$admin_quad_store_url" + # append ownership metadata to app if it's not present (apps have to be URI resources!) - # Create app-specific subfolder based on end-user origin - app_folder=$(echo "$end_user_origin" | sed 's|https://||' | sed 's|http://||' | sed 's|[:/]|-|g') + if [ -z "$app_owner" ]; then + echo "<${app_uri}> <${OWNER_URI}> <${app_uri}> ." >> "$based_context_dataset" + fi + + # Create app-specific subfolder based on origin + app_folder=$(echo "$app_origin" | sed 's|https://||' | sed 's|http://||' | sed 's|[:/]|-|g') # Determine whether to load datasets for this app load_datasets_for_app="$LOAD_DATASETS" @@ -649,103 +781,110 @@ for app in "${apps[@]}"; do # Check if this specific app's datasets should be loaded if [ "$load_datasets_for_app" = true ]; then - printf "\n### Loading datasets for app: %s\n" "$app_folder" + printf "\n### Loading datasets for app: %s\n" "$app_uri" mkdir -p "/var/linkeddatahub/based-datasets/${app_folder}" - # create query file by injecting environmental variables into the template + if [ "$app_type" = "https://w3id.org/atomgraph/linkeddatahub/apps#EndUserApplication" ]; then - case "$END_USER_DATASET_URL" in - "file://"*) - END_USER_DATASET=$(echo "$END_USER_DATASET_URL" | cut -c 8-) # strip leading file:// + case "$END_USER_DATASET_URL" in + "file://"*) + END_USER_DATASET=$(echo "$END_USER_DATASET_URL" | cut -c 8-) # strip leading file:// - printf "\n### Reading end-user dataset from a local file: %s\n" "$END_USER_DATASET" ;; - *) - END_USER_DATASET=$(mktemp) + printf "\n### Reading end-user dataset from a local file: %s\n" "$END_USER_DATASET" ;; + *) + END_USER_DATASET=$(mktemp) - printf "\n### Downloading end-user dataset from a URL: %s\n" "$END_USER_DATASET_URL" + printf "\n### Downloading end-user dataset from a URL: %s\n" "$END_USER_DATASET_URL" - curl "$END_USER_DATASET_URL" > "$END_USER_DATASET" ;; - esac + curl "$END_USER_DATASET_URL" > "$END_USER_DATASET" ;; + esac - case "$ADMIN_DATASET_URL" in - "file://"*) - ADMIN_DATASET=$(echo "$ADMIN_DATASET_URL" | cut -c 8-) # strip leading file:// + trig --base="${app_origin}/" "$END_USER_DATASET" > "/var/linkeddatahub/based-datasets/${app_folder}/end-user.nq" - printf "\n### Reading admin dataset from a local file: %s\n" "$ADMIN_DATASET" ;; - *) - ADMIN_DATASET=$(mktemp) + printf "\n### Waiting for %s...\n" "$app_store_url" + wait_for_url "$app_store_url" "$app_service_auth_user" "$app_service_auth_pwd" "$TIMEOUT" "$app_store_content_type" "$app_service_auth_token" - printf "\n### Downloading admin dataset from a URL: %s\n" "$ADMIN_DATASET_URL" + printf "\n### Loading end-user dataset into the triplestore...\n" + "$app_store_fn" "$app_store_url" "$app_service_auth_user" "$app_service_auth_pwd" "/var/linkeddatahub/based-datasets/${app_folder}/end-user.nq" "$app_store_content_type" 
"$app_service_auth_token" - curl "$ADMIN_DATASET_URL" > "$ADMIN_DATASET" ;; - esac + elif [ "$app_type" = "https://w3id.org/atomgraph/linkeddatahub/apps#AdminApplication" ]; then - trig --base="${end_user_origin}/" "$END_USER_DATASET" > "/var/linkeddatahub/based-datasets/${app_folder}/end-user.nq" + case "$ADMIN_DATASET_URL" in + "file://"*) + ADMIN_DATASET=$(echo "$ADMIN_DATASET_URL" | cut -c 8-) # strip leading file:// - printf "\n### Waiting for %s...\n" "$end_user_quad_store_url" - wait_for_url "$end_user_quad_store_url" "$end_user_service_auth_user" "$end_user_service_auth_pwd" "$TIMEOUT" "application/n-quads" + printf "\n### Reading admin dataset from a local file: %s\n" "$ADMIN_DATASET" ;; + *) + ADMIN_DATASET=$(mktemp) - printf "\n### Loading end-user dataset into the triplestore...\n" - append_quads "$end_user_quad_store_url" "$end_user_service_auth_user" "$end_user_service_auth_pwd" "/var/linkeddatahub/based-datasets/${app_folder}/end-user.nq" "application/n-quads" + printf "\n### Downloading admin dataset from a URL: %s\n" "$ADMIN_DATASET_URL" - trig --base="${admin_origin}/" "$ADMIN_DATASET" > "/var/linkeddatahub/based-datasets/${app_folder}/admin.nq" + curl "$ADMIN_DATASET_URL" > "$ADMIN_DATASET" ;; + esac - printf "\n### Waiting for %s...\n" "$admin_quad_store_url" - wait_for_url "$admin_quad_store_url" "$admin_service_auth_user" "$admin_service_auth_pwd" "$TIMEOUT" "application/n-quads" + trig --base="${app_origin}/" "$ADMIN_DATASET" > "/var/linkeddatahub/based-datasets/${app_folder}/admin.nq" - printf "\n### Loading admin dataset into the triplestore...\n" - append_quads "$admin_quad_store_url" "$admin_service_auth_user" "$admin_service_auth_pwd" "/var/linkeddatahub/based-datasets/${app_folder}/admin.nq" "application/n-quads" + printf "\n### Waiting for %s...\n" "$app_store_url" + wait_for_url "$app_store_url" "$app_service_auth_user" "$app_service_auth_pwd" "$TIMEOUT" "$app_store_content_type" "$app_service_auth_token" - namespace_ontology_dataset_path="/var/linkeddatahub/datasets/${app_folder}/namespace-ontology.trig" - mkdir -p "$(dirname "$namespace_ontology_dataset_path")" - export end_user_origin admin_origin - envsubst < namespace-ontology.trig.template > "$namespace_ontology_dataset_path" + printf "\n### Loading admin dataset into the triplestore...\n" + "$app_store_fn" "$app_store_url" "$app_service_auth_user" "$app_service_auth_pwd" "/var/linkeddatahub/based-datasets/${app_folder}/admin.nq" "$app_store_content_type" "$app_service_auth_token" - trig --base="${admin_origin}/" --output=nq "$namespace_ontology_dataset_path" > "/var/linkeddatahub/based-datasets/${app_folder}/namespace-ontology.nq" + # derive the corresponding end-user origin by stripping the leading 'admin.' 
from the hostname + end_user_origin=$(echo "$app_origin" | sed 's|://admin\.|://|') - printf "\n### Loading namespace ontology into the admin triplestore...\n" - append_quads "$admin_quad_store_url" "$admin_service_auth_user" "$admin_service_auth_pwd" "/var/linkeddatahub/based-datasets/${app_folder}/namespace-ontology.nq" "application/n-quads" + namespace_ontology_dataset_path="/var/linkeddatahub/datasets/${app_folder}/namespace-ontology.trig" + mkdir -p "$(dirname "$namespace_ontology_dataset_path")" + export end_user_origin + envsubst < namespace-ontology.trig.template > "$namespace_ontology_dataset_path" - # Load full owner/secretary metadata (agent + key) only for root app - if [ "$end_user_origin" = "$ORIGIN" ]; then - printf "\n### Uploading the metadata of the owner agent...\n\n" - append_quads "$admin_quad_store_url" "$admin_service_auth_user" "$admin_service_auth_pwd" /var/linkeddatahub/based-datasets/root-owner.nq "application/n-quads" + trig --base="${app_origin}/" --output=nq "$namespace_ontology_dataset_path" > "/var/linkeddatahub/based-datasets/${app_folder}/namespace-ontology.nq" - printf "\n### Uploading the metadata of the secretary agent...\n\n" - append_quads "$admin_quad_store_url" "$admin_service_auth_user" "$admin_service_auth_pwd" /var/linkeddatahub/based-datasets/root-secretary.nq "application/n-quads" - fi + printf "\n### Loading namespace ontology into the admin triplestore...\n" + "$app_store_fn" "$app_store_url" "$app_service_auth_user" "$app_service_auth_pwd" "/var/linkeddatahub/based-datasets/${app_folder}/namespace-ontology.nq" "$app_store_content_type" "$app_service_auth_token" + + # Load full owner/secretary metadata (agent + key) only for root admin app + if [ "$app_origin" = "$ADMIN_ORIGIN" ]; then + printf "\n### Uploading the metadata of the owner agent...\n\n" + "$app_store_fn" "$app_store_url" "$app_service_auth_user" "$app_service_auth_pwd" /var/linkeddatahub/based-datasets/root-owner.nq "$app_store_content_type" "$app_service_auth_token" + + printf "\n### Uploading the metadata of the secretary agent...\n\n" + "$app_store_fn" "$app_store_url" "$app_service_auth_user" "$app_service_auth_pwd" /var/linkeddatahub/based-datasets/root-secretary.nq "$app_store_content_type" "$app_service_auth_token" + fi - # Load owner/secretary authorizations for this app (with app-specific UUIDs) - # Note: OWNER_URI and SECRETARY_URI reference the root admin URIs - owner_auth_dataset_path="/var/linkeddatahub/datasets/${app_folder}/owner-authorization.trig" - mkdir -p "$(dirname "$owner_auth_dataset_path")" + # Load owner/secretary authorizations for this app (with app-specific UUIDs) + # Note: OWNER_URI and SECRETARY_URI reference the root admin URIs + owner_auth_dataset_path="/var/linkeddatahub/datasets/${app_folder}/owner-authorization.trig" + mkdir -p "$(dirname "$owner_auth_dataset_path")" - OWNER_AUTH_UUID=$(uuidgen | tr '[:upper:]' '[:lower:]') - OWNER_AUTH_DOC_URI="${admin_origin}/acl/authorizations/${OWNER_AUTH_UUID}/" - OWNER_AUTH_URI="${OWNER_AUTH_DOC_URI}#auth" + OWNER_AUTH_UUID=$(uuidgen | tr '[:upper:]' '[:lower:]') + OWNER_AUTH_DOC_URI="${app_origin}/acl/authorizations/${OWNER_AUTH_UUID}/" + OWNER_AUTH_URI="${OWNER_AUTH_DOC_URI}#auth" - export OWNER_URI OWNER_DOC_URI OWNER_KEY_DOC_URI OWNER_AUTH_DOC_URI OWNER_AUTH_URI - envsubst < root-owner-authorization.trig.template > "$owner_auth_dataset_path" + export OWNER_URI OWNER_DOC_URI OWNER_KEY_DOC_URI OWNER_AUTH_DOC_URI OWNER_AUTH_URI + envsubst < root-owner-authorization.trig.template > 
"$owner_auth_dataset_path" - trig --base="${admin_origin}/" --output=nq "$owner_auth_dataset_path" > "/var/linkeddatahub/based-datasets/${app_folder}/owner-authorization.nq" + trig --base="${app_origin}/" --output=nq "$owner_auth_dataset_path" > "/var/linkeddatahub/based-datasets/${app_folder}/owner-authorization.nq" - printf "\n### Uploading owner authorizations for this app...\n\n" - append_quads "$admin_quad_store_url" "$admin_service_auth_user" "$admin_service_auth_pwd" "/var/linkeddatahub/based-datasets/${app_folder}/owner-authorization.nq" "application/n-quads" + printf "\n### Uploading owner authorizations for this app...\n\n" + "$app_store_fn" "$app_store_url" "$app_service_auth_user" "$app_service_auth_pwd" "/var/linkeddatahub/based-datasets/${app_folder}/owner-authorization.nq" "$app_store_content_type" "$app_service_auth_token" - secretary_auth_dataset_path="/var/linkeddatahub/datasets/${app_folder}/secretary-authorization.trig" - mkdir -p "$(dirname "$secretary_auth_dataset_path")" + secretary_auth_dataset_path="/var/linkeddatahub/datasets/${app_folder}/secretary-authorization.trig" + mkdir -p "$(dirname "$secretary_auth_dataset_path")" - SECRETARY_AUTH_UUID=$(uuidgen | tr '[:upper:]' '[:lower:]') - SECRETARY_AUTH_DOC_URI="${admin_origin}/acl/authorizations/${SECRETARY_AUTH_UUID}/" - SECRETARY_AUTH_URI="${SECRETARY_AUTH_DOC_URI}#auth" + SECRETARY_AUTH_UUID=$(uuidgen | tr '[:upper:]' '[:lower:]') + SECRETARY_AUTH_DOC_URI="${app_origin}/acl/authorizations/${SECRETARY_AUTH_UUID}/" + SECRETARY_AUTH_URI="${SECRETARY_AUTH_DOC_URI}#auth" - export SECRETARY_URI SECRETARY_DOC_URI SECRETARY_KEY_DOC_URI SECRETARY_AUTH_DOC_URI SECRETARY_AUTH_URI - envsubst < root-secretary-authorization.trig.template > "$secretary_auth_dataset_path" + export SECRETARY_URI SECRETARY_DOC_URI SECRETARY_KEY_DOC_URI SECRETARY_AUTH_DOC_URI SECRETARY_AUTH_URI + envsubst < root-secretary-authorization.trig.template > "$secretary_auth_dataset_path" - trig --base="${admin_origin}/" --output=nq "$secretary_auth_dataset_path" > "/var/linkeddatahub/based-datasets/${app_folder}/secretary-authorization.nq" + trig --base="${app_origin}/" --output=nq "$secretary_auth_dataset_path" > "/var/linkeddatahub/based-datasets/${app_folder}/secretary-authorization.nq" - printf "\n### Uploading secretary authorizations for this app...\n\n" - append_quads "$admin_quad_store_url" "$admin_service_auth_user" "$admin_service_auth_pwd" "/var/linkeddatahub/based-datasets/${app_folder}/secretary-authorization.nq" "application/n-quads" + printf "\n### Uploading secretary authorizations for this app...\n\n" + "$app_store_fn" "$app_store_url" "$app_service_auth_user" "$app_service_auth_pwd" "/var/linkeddatahub/based-datasets/${app_folder}/secretary-authorization.nq" "$app_store_content_type" "$app_service_auth_token" + + fi fi done @@ -827,11 +966,12 @@ fi # if configured, generate XML sitemap: https://www.sitemaps.org/protocol.html if [ "$GENERATE_SITEMAP" = true ]; then + admin_endpoint_url="$root_admin_endpoint_url" export admin_endpoint_url envsubst < /var/linkeddatahub/sitemap/sitemap.rq.template > /var/linkeddatahub/sitemap/sitemap.rq sitemap_results=$(mktemp) - curl -k -G -H "Accept: application/sparql-results+xml" "$end_user_endpoint_url" --data-urlencode "query@/var/linkeddatahub/sitemap/sitemap.rq" -o "$sitemap_results" + curl -k -G -H "Accept: application/sparql-results+xml" "$root_end_user_endpoint_url" --data-urlencode "query@/var/linkeddatahub/sitemap/sitemap.rq" -o "$sitemap_results" xsltproc --output 
"${PWD}/webapps/ROOT/sitemap.xml" /var/linkeddatahub/sitemap/sitemap.xsl "$sitemap_results" @@ -950,6 +1090,18 @@ if [ -f "/run/secrets/orcid_client_secret" ]; then ORCID_CLIENT_SECRET_PARAM="--stringparam orcid:clientSecret '$ORCID_CLIENT_SECRET' " fi +if [ -n "$FRONTEND_PROXY" ]; then + FRONTEND_PROXY_PARAM="--stringparam 'ldhc:frontendProxy' '$FRONTEND_PROXY' " +fi + +if [ -n "$BACKEND_PROXY_ADMIN" ]; then + BACKEND_PROXY_ADMIN_PARAM="--stringparam 'ldhc:backendProxyAdmin' '$BACKEND_PROXY_ADMIN' " +fi + +if [ -n "$BACKEND_PROXY_END_USER" ]; then + BACKEND_PROXY_END_USER_PARAM="--stringparam 'ldhc:backendProxyEndUser' '$BACKEND_PROXY_END_USER' " +fi + transform="xsltproc \ --output conf/Catalina/localhost/ROOT.xml \ $CACHE_MODEL_LOADS_PARAM \ @@ -987,6 +1139,9 @@ transform="xsltproc \ $GOOGLE_CLIENT_SECRET_PARAM \ $ORCID_CLIENT_ID_PARAM \ $ORCID_CLIENT_SECRET_PARAM \ + $FRONTEND_PROXY_PARAM \ + $BACKEND_PROXY_ADMIN_PARAM \ + $BACKEND_PROXY_END_USER_PARAM \ /var/linkeddatahub/xsl/context.xsl \ conf/Catalina/localhost/ROOT.xml" @@ -1010,17 +1165,17 @@ eval "$transform" java -XX:+PrintFlagsFinal -version | grep -iE 'HeapSize|PermSize|ThreadStackSize' -# wait for the end-user GSP service +# wait for the end-user service -printf "\n### Waiting for %s...\n" "$root_end_user_quad_store_url" +printf "\n### Waiting for %s...\n" "$root_end_user_store_url" -wait_for_url "$root_end_user_quad_store_url" "$root_end_user_service_auth_user" "$root_end_user_service_auth_pwd" "$TIMEOUT" "application/n-quads" +wait_for_url "$root_end_user_store_url" "$root_end_user_service_auth_user" "$root_end_user_service_auth_pwd" "$TIMEOUT" "$root_end_user_store_content_type" "$root_end_user_service_auth_token" -# wait for the admin GSP service +# wait for the admin service -printf "\n### Waiting for %s...\n" "$root_admin_quad_store_url" +printf "\n### Waiting for %s...\n" "$root_admin_store_url" -wait_for_url "$root_admin_quad_store_url" "$root_admin_service_auth_user" "$root_admin_service_auth_pwd" "$TIMEOUT" "application/n-quads" +wait_for_url "$root_admin_store_url" "$root_admin_service_auth_user" "$root_admin_service_auth_pwd" "$TIMEOUT" "$root_admin_store_content_type" "$root_admin_service_auth_token" # run Tomcat (in debug mode if $JPDA_ADDRESS is defined) diff --git a/platform/namespace-ontology.trig.template b/platform/namespace-ontology.trig.template index a3531ccb8..b9042d5a6 100644 --- a/platform/namespace-ontology.trig.template +++ b/platform/namespace-ontology.trig.template @@ -20,10 +20,10 @@ # namespace ontology -<${admin_origin}/ontologies/namespace/> + { - <${admin_origin}/ontologies/namespace/> a dh:Item ; - sioc:has_container <${admin_origin}/ontologies/> ; + a dh:Item ; + sioc:has_container ; dct:title "Namespace" ; foaf:primaryTopic <${end_user_origin}/ns#> . @@ -37,15 +37,15 @@ # public namespace authorization -<${admin_origin}/acl/authorizations/public-namespace/> + { - <${admin_origin}/acl/authorizations/public-namespace/> a dh:Item ; - sioc:has_container <${admin_origin}/acl/authorizations/> ; + a dh:Item ; + sioc:has_container ; dct:title "Public namespace access" ; - foaf:primaryTopic <${admin_origin}/acl/authorizations/public-namespace/#this> . + foaf:primaryTopic . 
- <${admin_origin}/acl/authorizations/public-namespace/#this> a acl:Authorization ; + a acl:Authorization ; rdfs:label "Public namespace access" ; rdfs:comment "Allows non-authenticated access" ; acl:accessTo <${end_user_origin}/ns> ; # end-user ontologies are public @@ -56,15 +56,15 @@ # SPARQL endpoint authorization -<${admin_origin}/acl/authorizations/sparql-endpoint/> + { - <${admin_origin}/acl/authorizations/sparql-endpoint/> a dh:Item ; - sioc:has_container <${admin_origin}/acl/authorizations/> ; + a dh:Item ; + sioc:has_container ; dct:title "SPARQL endpoint access" ; - foaf:primaryTopic <${admin_origin}/acl/authorizations/sparql-endpoint/#this> . + foaf:primaryTopic . - <${admin_origin}/acl/authorizations/sparql-endpoint/#this> a acl:Authorization ; + a acl:Authorization ; rdfs:label "SPARQL endpoint access" ; rdfs:comment "Allows only authenticated access" ; acl:accessTo <${end_user_origin}/sparql> ; @@ -75,60 +75,98 @@ # write/append authorization -<${admin_origin}/acl/authorizations/write-append/> + { - <${admin_origin}/acl/authorizations/write-append/> a dh:Item ; - sioc:has_container <${admin_origin}/acl/authorizations/> ; + a dh:Item ; + sioc:has_container ; dct:title "Write/append access" ; - foaf:primaryTopic <${admin_origin}/acl/authorizations/write-append/#this> . + foaf:primaryTopic . - <${admin_origin}/acl/authorizations/write-append/#this> a acl:Authorization ; + a acl:Authorization ; rdfs:label "Write/append access" ; rdfs:comment "Allows write access to all documents and containers" ; acl:accessToClass dh:Item, dh:Container, def:Root ; acl:accessTo <${end_user_origin}/sparql>, <${end_user_origin}/importer>, <${end_user_origin}/add>, <${end_user_origin}/generate>, <${end_user_origin}/ns> ; acl:mode acl:Write, acl:Append ; - acl:agentGroup <${admin_origin}/acl/groups/owners/#this>, <${admin_origin}/acl/groups/writers/#this> . + acl:agentGroup , . } # full access authorization -<${admin_origin}/acl/authorizations/full-control/> + { - <${admin_origin}/acl/authorizations/full-control/> a dh:Item ; - sioc:has_container <${admin_origin}/acl/authorizations/> ; + a dh:Item ; + sioc:has_container ; dct:title "Full control" ; - foaf:primaryTopic <${admin_origin}/acl/authorizations/full-control/#this> . + foaf:primaryTopic . - <${admin_origin}/acl/authorizations/full-control/#this> a acl:Authorization ; + a acl:Authorization ; rdfs:label "Full control" ; rdfs:comment "Allows full read/write access to all application resources" ; acl:accessToClass dh:Item, dh:Container, def:Root ; acl:accessTo <${end_user_origin}/sparql>, <${end_user_origin}/importer>, <${end_user_origin}/add>, <${end_user_origin}/generate>, <${end_user_origin}/ns>, <${end_user_origin}/settings> ; acl:mode acl:Read, acl:Append, acl:Write, acl:Control ; - acl:agentGroup <${admin_origin}/acl/groups/owners/#this> . + acl:agentGroup . } # read access -<${admin_origin}/acl/authorizations/read/> + { - <${admin_origin}/acl/authorizations/read/> a dh:Item ; - sioc:has_container <${admin_origin}/acl/authorizations/> ; + a dh:Item ; + sioc:has_container ; dct:title "Read access" ; - foaf:primaryTopic <${admin_origin}/acl/authorizations/read/#this> . + foaf:primaryTopic . 
- <${admin_origin}/acl/authorizations/read/#this> a acl:Authorization ; + a acl:Authorization ; rdfs:label "Read access" ; rdfs:comment "Allows read access to all resources" ; acl:accessToClass dh:Item, dh:Container, def:Root, ; acl:accessTo <${end_user_origin}/sparql> ; acl:mode acl:Read ; - acl:agentGroup <${admin_origin}/acl/groups/owners/#this>, <${admin_origin}/acl/groups/writers/#this>, <${admin_origin}/acl/groups/readers/#this> . + acl:agentGroup , , . + +} + +# access endpoint + + +{ + + a dh:Item ; + sioc:has_container ; + dct:title "Access description access" ; + foaf:primaryTopic . + + a acl:Authorization ; + rdfs:label "Access description access" ; + rdfs:comment "Allows non-authenticated access" ; + acl:accessTo <${end_user_origin}/access> ; + acl:mode acl:Read ; + acl:agentClass foaf:Agent, acl:AuthenticatedAgent . + +} + +# access request endpoint + + +{ + + a dh:Item ; + sioc:has_container ; + dct:title "Access request access" ; + foaf:primaryTopic . + + a acl:Authorization ; + rdfs:label "Access request access" ; + rdfs:comment "Allows non-authenticated access" ; + acl:accessTo <${end_user_origin}/access/request> ; + acl:mode acl:Append ; + acl:agentClass foaf:Agent, acl:AuthenticatedAgent . } diff --git a/platform/select-agent-metadata.rq b/platform/select-agent-metadata.rq index bb01ebe55..0c6357c01 100644 --- a/platform/select-agent-metadata.rq +++ b/platform/select-agent-metadata.rq @@ -1,13 +1,14 @@ PREFIX foaf: PREFIX cert: + SELECT ?agent ?doc ?key WHERE { -GRAPH ?g1 { - ?agent a foaf:Agent . - ?agent cert:key ?key . -} -GRAPH ?g2 { - ?doc foaf:primaryTopic ?agent . -} + GRAPH ?g1 { + ?agent a foaf:Agent . + ?agent cert:key ?key . + } + GRAPH ?g2 { + ?doc foaf:primaryTopic ?agent . + } } LIMIT 1 diff --git a/platform/select-root-services.rq b/platform/select-root-services.rq index 30477551d..33854ec2d 100644 --- a/platform/select-root-services.rq +++ b/platform/select-root-services.rq @@ -2,50 +2,29 @@ PREFIX ldt: PREFIX sd: PREFIX a: PREFIX lapp: -PREFIX ldh: PREFIX foaf: -SELECT ?endUserApp ?endUserOrigin ?endUserQuadStore ?endUserEndpoint ?endUserAuthUser ?endUserAuthPwd ?endUserMaker ?adminApp ?adminOrigin ?adminQuadStore ?adminEndpoint ?adminAuthUser ?adminAuthPwd ?adminMaker +SELECT ?app ?type ?origin ?quadStore ?graphStore ?endpoint ?authUser ?authPwd ?authToken ?maker { - GRAPH ?endUserAppGraph + GRAPH ?appGraph { - ?endUserApp lapp:origin ?endUserOrigin ; - ldt:service ?endUserService ; - lapp:adminApplication ?adminApp . - - GRAPH ?endUserServiceGraph - { - ?endUserService a:quadStore ?endUserQuadStore ; - sd:endpoint ?endUserEndpoint . - OPTIONAL - { - ?endUserService a:authUser ?endUserAuthUser ; - a:authPwd ?endUserAuthPwd . - } - OPTIONAL - { - ?endUserService foaf:maker ?endUserMaker - } - } - } - GRAPH ?adminAppGraph - { - ?adminApp ldt:service ?adminService ; - lapp:origin ?adminOrigin . + ?app a ?type ; + lapp:origin ?origin ; + ldt:service ?service . + FILTER(?type IN (lapp:EndUserApplication, lapp:AdminApplication)) + OPTIONAL { ?app foaf:maker ?maker } - GRAPH ?adminServiceGraph + GRAPH ?serviceGraph { - ?adminService a:quadStore ?adminQuadStore ; - sd:endpoint ?adminEndpoint . - OPTIONAL - { - ?adminService a:authUser ?adminAuthUser ; - a:authPwd ?adminAuthPwd . - } + ?service sd:endpoint ?endpoint . + OPTIONAL { ?service a:quadStore ?quadStore } + OPTIONAL { ?service a:graphStore ?graphStore } OPTIONAL { - ?adminService foaf:maker ?adminMaker + ?service a:authUser ?authUser ; + a:authPwd ?authPwd . 
} + OPTIONAL { ?service a:authToken ?authToken } } } -} \ No newline at end of file +} diff --git a/pom.xml b/pom.xml index 32e9246e3..6f440bfff 100644 --- a/pom.xml +++ b/pom.xml @@ -3,7 +3,7 @@ com.atomgraph linkeddatahub - 5.2.1 + 5.2.2-SNAPSHOT ${packaging.type} AtomGraph LinkedDataHub @@ -46,7 +46,7 @@ https://github.com/AtomGraph/LinkedDataHub scm:git:git://github.com/AtomGraph/LinkedDataHub.git scm:git:git@github.com:AtomGraph/LinkedDataHub.git - linkeddatahub-5.2.1 + linkeddatahub-2.1.1 diff --git a/src/main/java/com/atomgraph/linkeddatahub/Application.java b/src/main/java/com/atomgraph/linkeddatahub/Application.java index 0a5851110..1ef57d6fd 100644 --- a/src/main/java/com/atomgraph/linkeddatahub/Application.java +++ b/src/main/java/com/atomgraph/linkeddatahub/Application.java @@ -16,7 +16,6 @@ */ package com.atomgraph.linkeddatahub; -import com.atomgraph.linkeddatahub.server.mapper.ResourceExistsExceptionMapper; import com.atomgraph.linkeddatahub.server.mapper.HttpHostConnectExceptionMapper; import com.atomgraph.linkeddatahub.server.mapper.InternalURLExceptionMapper; import com.atomgraph.linkeddatahub.server.mapper.MessagingExceptionMapper; @@ -51,6 +50,7 @@ import com.atomgraph.core.mapper.BadGatewayExceptionMapper; import com.atomgraph.core.provider.QueryParamProvider; import com.atomgraph.linkeddatahub.writer.factory.DataManagerFactory; +import com.atomgraph.server.vocabulary.LDT; import com.atomgraph.server.mapper.NotFoundExceptionMapper; import com.atomgraph.core.riot.RDFLanguages; import com.atomgraph.core.riot.lang.RDFPostReaderFactory; @@ -301,6 +301,10 @@ public class Application extends ResourceConfig private final Properties oidcRefreshTokens; private final URI contextDatasetURI; private final Dataset contextDataset; + private final URI frontendProxy; + private final URI backendProxyAdmin; + private final URI backendProxyEndUser; + private Map serviceContextMap; /** * Constructs system application and configures it using sevlet config. @@ -352,6 +356,9 @@ public Application(@Context ServletConfig servletConfig) throws URISyntaxExcepti servletConfig.getServletContext().getInitParameter(LDHC.supportedLanguages.getURI()) != null ? servletConfig.getServletContext().getInitParameter(LDHC.supportedLanguages.getURI()) : null, servletConfig.getServletContext().getInitParameter(LDHC.enableWebIDSignUp.getURI()) != null ? Boolean.parseBoolean(servletConfig.getServletContext().getInitParameter(LDHC.enableWebIDSignUp.getURI())) : true, servletConfig.getServletContext().getInitParameter(LDHC.oidcRefreshTokens.getURI()), + servletConfig.getServletContext().getInitParameter(LDHC.frontendProxy.getURI()) != null ? servletConfig.getServletContext().getInitParameter(LDHC.frontendProxy.getURI()) : null, + servletConfig.getServletContext().getInitParameter(LDHC.backendProxyAdmin.getURI()) != null ? servletConfig.getServletContext().getInitParameter(LDHC.backendProxyAdmin.getURI()) : null, + servletConfig.getServletContext().getInitParameter(LDHC.backendProxyEndUser.getURI()) != null ? servletConfig.getServletContext().getInitParameter(LDHC.backendProxyEndUser.getURI()) : null, servletConfig.getServletContext().getInitParameter("mail.user") != null ? servletConfig.getServletContext().getInitParameter("mail.user") : null, servletConfig.getServletContext().getInitParameter("mail.password") != null ? servletConfig.getServletContext().getInitParameter("mail.password") : null, servletConfig.getServletContext().getInitParameter("mail.smtp.host") != null ? 
servletConfig.getServletContext().getInitParameter("mail.smtp.host") : null, @@ -414,6 +421,9 @@ public Application(@Context ServletConfig servletConfig) throws URISyntaxExcepti * @param googleClientSecret client secret for Google's OAuth * @param orcidClientID client ID for ORCID's OAuth * @param orcidClientSecret client secret for ORCID's OAuth + * @param frontendProxyString frontend (Varnish) proxy URI used for cache invalidation BAN requests, or null + * @param backendProxyAdminString backend proxy URI for the admin SPARQL service (endpoint URI rewriting + cache invalidation), or null + * @param backendProxyEndUserString backend proxy URI for the end-user SPARQL service (endpoint URI rewriting + cache invalidation), or null */ public Application(final ServletConfig servletConfig, final MediaTypes mediaTypes, final Integer maxGetRequestSize, final boolean cacheModelLoads, final boolean preemptiveAuth, @@ -429,6 +439,7 @@ public Application(final ServletConfig servletConfig, final MediaTypes mediaType final Integer cookieMaxAge, final boolean enableLinkedDataProxy, final Integer maxContentLength, final Integer maxConnPerRoute, final Integer maxTotalConn, final Integer maxRequestRetries, final Integer maxImportThreads, final String notificationAddressString, final String supportedLanguageCodes, final boolean enableWebIDSignUp, final String oidcRefreshTokensPropertiesPath, + final String frontendProxyString, final String backendProxyAdminString, final String backendProxyEndUserString, final String mailUser, final String mailPassword, final String smtpHost, final String smtpPort, final String googleClientID, final String googleClientSecret, final String orcidClientID, final String orcidClientSecret) @@ -439,6 +450,9 @@ public Application(final ServletConfig servletConfig, final MediaTypes mediaType throw new ConfigurationException(LDHC.contextDataset); } this.contextDatasetURI = URI.create(contextDatasetURIString); + this.frontendProxy = frontendProxyString != null ? URI.create(frontendProxyString) : null; + this.backendProxyAdmin = backendProxyAdminString != null ? URI.create(backendProxyAdminString) : null; + this.backendProxyEndUser = backendProxyEndUserString != null ? 
URI.create(backendProxyEndUserString) : null; if (clientKeyStoreURIString == null) { @@ -737,12 +751,42 @@ public Application(final ServletConfig servletConfig, final MediaTypes mediaType BuiltinPersonalities.model.add(com.atomgraph.linkeddatahub.apps.model.Application.class, new com.atomgraph.linkeddatahub.apps.model.impl.ApplicationImplementation()); BuiltinPersonalities.model.add(com.atomgraph.linkeddatahub.apps.model.Dataset.class, new com.atomgraph.linkeddatahub.apps.model.impl.DatasetImplementation()); BuiltinPersonalities.model.add(com.atomgraph.linkeddatahub.apps.model.Package.class, new com.atomgraph.linkeddatahub.apps.model.impl.PackageImplementation()); - BuiltinPersonalities.model.add(Service.class, new com.atomgraph.linkeddatahub.model.impl.ServiceImplementation(noCertClient, mediaTypes, maxGetRequestSize)); + BuiltinPersonalities.model.add(Service.class, new com.atomgraph.linkeddatahub.model.impl.ServiceImplementation()); BuiltinPersonalities.model.add(Import.class, ImportImpl.factory); BuiltinPersonalities.model.add(RDFImport.class, RDFImportImpl.factory); BuiltinPersonalities.model.add(CSVImport.class, CSVImportImpl.factory); BuiltinPersonalities.model.add(com.atomgraph.linkeddatahub.model.File.class, FileImpl.factory); - + + // Build ServiceContext map: keyed by service URI, proxy derived from the app type that references each service. + // Iterating ldt:service statements (app → service) naturally excludes orphan services. + serviceContextMap = new HashMap<>(); + org.apache.jena.rdf.model.Model ctxUnion = contextDataset.getUnionModel(); + org.apache.jena.rdf.model.StmtIterator serviceIt = ctxUnion.listStatements(null, LDT.service, (org.apache.jena.rdf.model.RDFNode) null); + try + { + while (serviceIt.hasNext()) + { + org.apache.jena.rdf.model.Statement stmt = serviceIt.nextStatement(); + Resource app = stmt.getSubject(); + Resource svcResource = stmt.getResource(); + URI proxy; + + if (app.hasProperty(RDF.type, LAPP.AdminApplication)) + proxy = backendProxyAdmin; + else if (app.hasProperty(RDF.type, LAPP.EndUserApplication)) + proxy = backendProxyEndUser; + else + continue; + + serviceContextMap.put(svcResource.getURI(), + new com.atomgraph.linkeddatahub.model.ServiceContext(svcResource.as(com.atomgraph.linkeddatahub.model.Service.class), noCertClient, mediaTypes, maxGetRequestSize, proxy)); + } + } + finally + { + serviceIt.close(); + } + // TO-DO: config property for cacheModelLoads endUserOntModelSpecs = new HashMap<>(); dataManager = new DataManagerImpl(locationMapper, new HashMap<>(), GraphStoreClient.create(client, mediaTypes), cacheModelLoads, preemptiveAuth, resolvingUncached); @@ -1104,7 +1148,6 @@ protected void registerExceptionMappers() register(WebIDDelegationExceptionMapper.class); register(WebIDLoadingExceptionMapper.class); register(TokenExpiredExceptionMapper.class); - register(ResourceExistsExceptionMapper.class); register(QueryParseExceptionMapper.class); register(AuthenticationExceptionMapper.class); register(ForbiddenExceptionMapper.class); @@ -1438,12 +1481,12 @@ public Map getLengthMap(Map apps) */ public void submitImport(CSVImport csvImport, com.atomgraph.linkeddatahub.apps.model.Application app, Service service, Service adminService, String baseURI, GraphStoreClient gsc) { - new ImportExecutor(importThreadPool).start(service, adminService, baseURI, gsc, csvImport); + new ImportExecutor(importThreadPool).start(service, adminService, this, baseURI, gsc, csvImport); } - + /** * Submits RDF import for asynchronous execution. 
- * + * * @param rdfImport import resource * @param app current application * @param service current SPARQL service @@ -1453,7 +1496,7 @@ public void submitImport(CSVImport csvImport, com.atomgraph.linkeddatahub.apps.m */ public void submitImport(RDFImport rdfImport, com.atomgraph.linkeddatahub.apps.model.Application app, Service service, Service adminService, String baseURI, GraphStoreClient gsc) { - new ImportExecutor(importThreadPool).start(service, adminService, baseURI, gsc, rdfImport); + new ImportExecutor(importThreadPool).start(service, adminService, this, baseURI, gsc, rdfImport); } /** @@ -1770,15 +1813,38 @@ public Client getExternalClient() } /** - * Bans URL from the proxy cache. + * Returns the service context for the given service (client + proxy configuration). + * The context is keyed by the service's URI string. + * + * @param service SPARQL service + * @return service context, or {@code null} if the service is not registered + */ + public com.atomgraph.linkeddatahub.model.ServiceContext getServiceContext(com.atomgraph.linkeddatahub.model.Service service) + { + if (service == null) throw new IllegalArgumentException("Service cannot be null"); + return serviceContextMap.get(service.getURI()); + } + + /** + * Returns the frontend proxy URI used for cache invalidation BAN requests. + * + * @return frontend proxy URI, or {@code null} if not configured + */ + public URI getFrontendProxy() + { + return frontendProxy; + } + + /** + * Bans URL from the proxy cache using the given proxy URI. * - * @param proxy proxy server resource + * @param proxyURI proxy URI * @param url banned URL * @param urlEncode if true, the banned URL value will be URL-encoded - * @throws IllegalArgumentException if url is null */ - public void ban(Resource proxy, String url, boolean urlEncode) + public void ban(URI proxyURI, String url, boolean urlEncode) { + if (proxyURI == null) throw new IllegalArgumentException("Proxy URI cannot be null"); if (url == null) throw new IllegalArgumentException("URL cannot be null"); // Extract path from URL - Varnish req.url only contains the path, not the full URL @@ -1788,7 +1854,7 @@ public void ban(Resource proxy, String url, boolean urlEncode) final String urlValue = urlEncode ? UriComponent.encode(path, UriComponent.Type.UNRESERVED) : path; - try (Response cr = getClient().target(proxy.getURI()). + try (Response cr = getClient().target(proxyURI). request(). header(CacheInvalidationFilter.HEADER_NAME, urlValue). method("BAN", Response.class)) diff --git a/src/main/java/com/atomgraph/linkeddatahub/apps/model/Application.java b/src/main/java/com/atomgraph/linkeddatahub/apps/model/Application.java index 1832b7ad9..bc47f90b2 100644 --- a/src/main/java/com/atomgraph/linkeddatahub/apps/model/Application.java +++ b/src/main/java/com/atomgraph/linkeddatahub/apps/model/Application.java @@ -27,7 +27,7 @@ * * @author Martynas Jusevičius {@literal } */ -public interface Application extends Resource, com.atomgraph.core.model.Application +public interface Application extends Resource { /** @@ -84,7 +84,6 @@ public interface Application extends Resource, com.atomgraph.core.model.Applicat * * @return service resource */ - @Override Service getService(); /** @@ -101,13 +100,6 @@ public interface Application extends Resource, com.atomgraph.core.model.Applicat */ boolean isReadAllowed(); - /** - * Returns frontend proxy's cache URI resource. - * - * @return RDF resource - */ - Resource getFrontendProxy(); - /** * Returns the set of packages imported by this application. 
* diff --git a/src/main/java/com/atomgraph/linkeddatahub/apps/model/admin/impl/ApplicationImpl.java b/src/main/java/com/atomgraph/linkeddatahub/apps/model/admin/impl/ApplicationImpl.java index d4056111f..0e99f2aac 100644 --- a/src/main/java/com/atomgraph/linkeddatahub/apps/model/admin/impl/ApplicationImpl.java +++ b/src/main/java/com/atomgraph/linkeddatahub/apps/model/admin/impl/ApplicationImpl.java @@ -20,8 +20,10 @@ import com.atomgraph.linkeddatahub.apps.model.EndUserApplication; import com.atomgraph.linkeddatahub.vocabulary.Admin; import com.atomgraph.linkeddatahub.vocabulary.LAPP; +import java.net.URI; import org.apache.jena.enhanced.EnhGraph; import org.apache.jena.graph.Node; +import org.apache.jena.rdf.model.ResIterator; import org.apache.jena.rdf.model.Resource; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -50,9 +52,27 @@ public ApplicationImpl(Node n, EnhGraph g) @Override public EndUserApplication getEndUserApplication() { - Resource app = getPropertyResourceValue(LAPP.endUserApplication); - if (app != null) return app.as(EndUserApplication.class); - + URI originURI = getOriginURI(); + if (originURI == null) return null; + + // derive end-user origin by stripping the "admin." subdomain from the host + String host = originURI.getHost(); + if (!host.startsWith("admin.")) return null; + String endUserHost = host.substring("admin.".length()); + URI endUserOrigin = URI.create(originURI.getScheme() + "://" + endUserHost + + (originURI.getPort() != -1 ? ":" + originURI.getPort() : "")); + + ResIterator it = getModel().listSubjectsWithProperty(LAPP.origin, + getModel().createResource(endUserOrigin.toString())); + try + { + if (it.hasNext()) return it.next().as(EndUserApplication.class); + } + finally + { + it.close(); + } + return null; } diff --git a/src/main/java/com/atomgraph/linkeddatahub/apps/model/end_user/impl/ApplicationImpl.java b/src/main/java/com/atomgraph/linkeddatahub/apps/model/end_user/impl/ApplicationImpl.java index ab5816e88..4341f4a01 100644 --- a/src/main/java/com/atomgraph/linkeddatahub/apps/model/end_user/impl/ApplicationImpl.java +++ b/src/main/java/com/atomgraph/linkeddatahub/apps/model/end_user/impl/ApplicationImpl.java @@ -18,9 +18,10 @@ import com.atomgraph.linkeddatahub.apps.model.AdminApplication; import com.atomgraph.linkeddatahub.apps.model.EndUserApplication; +import java.net.URI; import org.apache.jena.enhanced.EnhGraph; import org.apache.jena.graph.Node; -import org.apache.jena.rdf.model.Resource; +import org.apache.jena.rdf.model.ResIterator; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import com.atomgraph.linkeddatahub.vocabulary.LAPP; @@ -49,9 +50,25 @@ public ApplicationImpl(Node n, EnhGraph g) @Override public AdminApplication getAdminApplication() { - Resource app = getPropertyResourceValue(LAPP.adminApplication); - if (app != null) return app.as(AdminApplication.class); - + URI originURI = getOriginURI(); + if (originURI == null) return null; + + // derive admin origin by prepending "admin." to the host + String adminHost = "admin." + originURI.getHost(); + URI adminOrigin = URI.create(originURI.getScheme() + "://" + adminHost + + (originURI.getPort() != -1 ? 
":" + originURI.getPort() : "")); + + ResIterator it = getModel().listSubjectsWithProperty(LAPP.origin, + getModel().createResource(adminOrigin.toString())); + try + { + if (it.hasNext()) return it.next().as(AdminApplication.class); + } + finally + { + it.close(); + } + return null; } diff --git a/src/main/java/com/atomgraph/linkeddatahub/apps/model/impl/ApplicationImpl.java b/src/main/java/com/atomgraph/linkeddatahub/apps/model/impl/ApplicationImpl.java index c10e7f31e..e0c3322e5 100644 --- a/src/main/java/com/atomgraph/linkeddatahub/apps/model/impl/ApplicationImpl.java +++ b/src/main/java/com/atomgraph/linkeddatahub/apps/model/impl/ApplicationImpl.java @@ -111,12 +111,6 @@ public Resource getStylesheet() return getPropertyResourceValue(AC.stylesheet); } - @Override - public Resource getFrontendProxy() - { - return getPropertyResourceValue(LAPP.frontendProxy); - } - @Override public boolean isReadAllowed() { diff --git a/src/main/java/com/atomgraph/linkeddatahub/imports/ImportExecutor.java b/src/main/java/com/atomgraph/linkeddatahub/imports/ImportExecutor.java index 2c2b2ad10..faef1ed0c 100644 --- a/src/main/java/com/atomgraph/linkeddatahub/imports/ImportExecutor.java +++ b/src/main/java/com/atomgraph/linkeddatahub/imports/ImportExecutor.java @@ -61,7 +61,7 @@ /** * Executor class for CSV and RDF imports. - * + * * @author Martynas Jusevičius {@literal } */ public class ImportExecutor @@ -86,60 +86,61 @@ public class ImportExecutor /** * Construct executor from thread pool. - * + * * @param execService thread pool service */ public ImportExecutor(ExecutorService execService) { this.execService = execService; } - + /** * Executes CSV import. - * + * * @param service application's SPARQL service * @param adminService admin application's SPARQL service + * @param system system application * @param appBaseURI application's base URI * @param gsc Graph Store client * @param csvImport CSV import resource */ - public void start(Service service, Service adminService, String appBaseURI, GraphStoreClient gsc, CSVImport csvImport) + public void start(Service service, Service adminService, com.atomgraph.linkeddatahub.Application system, String appBaseURI, GraphStoreClient gsc, CSVImport csvImport) { if (csvImport == null) throw new IllegalArgumentException("CSVImport cannot be null"); if (log.isDebugEnabled()) log.debug("Submitting new import to thread pool: {}", csvImport.toString()); - + Resource provImport = ModelFactory.createDefaultModel().createResource(csvImport.getURI()). 
addProperty(PROV.startedAtTime, csvImport.getModel().createTypedLiteral(Calendar.getInstance())); - + String queryBaseURI = csvImport.getFile().getURI(); // file URI becomes the query base URI QueryLoader queryLoader = new QueryLoader(URI.create(csvImport.getQuery().getURI()), queryBaseURI, Syntax.syntaxARQ, gsc); ParameterizedSparqlString pss = new ParameterizedSparqlString(queryLoader.get().toString(), queryBaseURI); pss.setIri(LDT.base.getLocalName(), appBaseURI); // app's base URI becomes $base final Query query = pss.asQuery(); - + Supplier fileSupplier = new ClientResponseSupplier(gsc, CSV_MEDIA_TYPES, URI.create(csvImport.getFile().getURI())); // skip validation because it will be done during final POST anyway - CompletableFuture.supplyAsync(fileSupplier, getExecutorService()).thenApplyAsync(getStreamRDFOutputWriter(service, adminService, + CompletableFuture.supplyAsync(fileSupplier, getExecutorService()).thenApplyAsync(getStreamRDFOutputWriter(service, adminService, system, gsc, queryBaseURI, query, csvImport), getExecutorService()). - thenAcceptAsync(success(service, csvImport, provImport), getExecutorService()). - exceptionally(failure(service, csvImport, provImport)); + thenAcceptAsync(success(service, system, csvImport, provImport), getExecutorService()). + exceptionally(failure(service, system, csvImport, provImport)); } /** * Executes RDF import. - * + * * @param service application's SPARQL service * @param adminService admin application's SPARQL service + * @param system system application * @param appBaseURI application's base URI * @param gsc Graph Store client * @param rdfImport RDF import resource */ - - public void start(Service service, Service adminService, String appBaseURI, GraphStoreClient gsc, RDFImport rdfImport) + public void start(Service service, Service adminService, com.atomgraph.linkeddatahub.Application system, String appBaseURI, GraphStoreClient gsc, RDFImport rdfImport) { if (rdfImport == null) throw new IllegalArgumentException("RDFImport cannot be null"); if (log.isDebugEnabled()) log.debug("Submitting new import to thread pool: {}", rdfImport.toString()); - + Resource provImport = ModelFactory.createDefaultModel().createResource(rdfImport.getURI()). addProperty(PROV.startedAtTime, rdfImport.getModel().createTypedLiteral(Calendar.getInstance())); @@ -154,24 +155,25 @@ public void start(Service service, Service adminService, String appBaseURI, Grap } else query = null; - + Supplier fileSupplier = new ClientResponseSupplier(gsc, RDF_MEDIA_TYPES, URI.create(rdfImport.getFile().getURI())); // skip validation because it will be done during final POST anyway - CompletableFuture.supplyAsync(fileSupplier, getExecutorService()).thenApplyAsync(getStreamRDFOutputWriter(service, adminService, + CompletableFuture.supplyAsync(fileSupplier, getExecutorService()).thenApplyAsync(getStreamRDFOutputWriter(service, adminService, system, gsc, queryBaseURI, query, rdfImport), getExecutorService()). - thenAcceptAsync(success(service, rdfImport, provImport), getExecutorService()). - exceptionally(failure(service, rdfImport, provImport)); + thenAcceptAsync(success(service, system, rdfImport, provImport), getExecutorService()). + exceptionally(failure(service, system, rdfImport, provImport)); } - + /** * Invoked when CSV import completes successfully. 
- * + * + * @param service application's SPARQL service + * @param system system application * @param csvImport import resource * @param provImport provenance resource - * @param service application's SPARQL service * @return consumer of the RDF output */ - protected Consumer success(final Service service, final CSVImport csvImport, final Resource provImport) + protected Consumer success(final Service service, final com.atomgraph.linkeddatahub.Application system, final CSVImport csvImport, final Resource provImport) { return (CSVGraphStoreOutput output) -> { @@ -181,20 +183,21 @@ protected Consumer success(final Service service, final CSV addLiteral(VoID.triples, output.getCSVGraphStoreRowProcessor().getTripleCount()). addProperty(PROV.wasGeneratedBy, provImport); // connect Response to dataset provImport.addProperty(PROV.endedAtTime, provImport.getModel().createTypedLiteral(Calendar.getInstance())); - - appendProvGraph(provImport, service.getGraphStoreClient()); + + appendProvGraph(provImport, system.getServiceContext(service).getGraphStoreClient()); }; } - + /** * Invoked when RDF import completes successfully. - * + * + * @param service application's SPARQL service + * @param system system application * @param rdfImport import resource * @param provImport provenance resource - * @param service application's SPARQL service * @return consumer of the RDF output */ - protected Consumer success(final Service service, final RDFImport rdfImport, final Resource provImport) + protected Consumer success(final Service service, final com.atomgraph.linkeddatahub.Application system, final RDFImport rdfImport, final Resource provImport) { return (RDFGraphStoreOutput output) -> { @@ -204,24 +207,25 @@ protected Consumer success(final Service service, final RDF // addLiteral(VoID.triples, output.getCSVStreamRDFProcessor().getTripleCount()). addProperty(PROV.wasGeneratedBy, provImport); // connect Response to dataset provImport.addProperty(PROV.endedAtTime, provImport.getModel().createTypedLiteral(Calendar.getInstance())); - - appendProvGraph(provImport, service.getGraphStoreClient()); + + appendProvGraph(provImport, system.getServiceContext(service).getGraphStoreClient()); }; } /** * Invoked when RDF import fails to complete. - * + * + * @param service application's SPARQL service + * @param system system application * @param importInst import resource * @param provImport provenance resource - * @param service application's SPARQL service * @return void function */ - protected Function failure(final Service service, final Import importInst, final Resource provImport) + protected Function failure(final Service service, final com.atomgraph.linkeddatahub.Application system, final Import importInst, final Resource provImport) { return (Throwable t) -> { if (log.isErrorEnabled()) log.error("Could not write Import: {}", importInst, t); - + if (t instanceof CompletionException) { // could not parse CSV @@ -232,8 +236,8 @@ protected Function failure(final Service service, final Import addLiteral(DCTerms.description, tpe.getMessage()). 
addProperty(PROV.wasGeneratedBy, provImport); // connect Response to exception provImport.addProperty(PROV.endedAtTime, importInst.getModel().createTypedLiteral(Calendar.getInstance())); - - appendProvGraph(provImport, service.getGraphStoreClient()); + + appendProvGraph(provImport, system.getServiceContext(service).getGraphStoreClient()); } // could not save RDF if (t.getCause() instanceof ImportException ie) @@ -242,20 +246,20 @@ protected Function failure(final Service service, final Import addProperty(RDF.type, PROV.Entity). addLiteral(DCTerms.description, ie.getMessage()). addProperty(PROV.wasGeneratedBy, provImport); // connect Response to exception - + provImport.addProperty(PROV.endedAtTime, importInst.getModel().createTypedLiteral(Calendar.getInstance())); - - appendProvGraph(provImport, service.getGraphStoreClient()); + + appendProvGraph(provImport, system.getServiceContext(service).getGraphStoreClient()); } } - + return null; }; } /** * Appends provenance metadata to the graph of the import. - * + * * @param provImport import resource * @param accessor GSP graph accessor */ @@ -263,52 +267,54 @@ protected void appendProvGraph(Resource provImport, DatasetAccessor accessor) { URI graphURI = UriBuilder.fromUri(provImport.getURI()).fragment(null).build(); // skip fragment from the Import URI to get its graph URI if (log.isDebugEnabled()) log.debug("Appending import metadata to graph: {}", graphURI); - + new Skolemizer(graphURI.toString()).apply(provImport.getModel()); // make sure we don't store blank nodes accessor.add(graphURI.toString(), provImport.getModel()); } /** * Returns output writer for CSV imports. - * + * * @param service SPARQL service of the application * @param adminService SPARQL service of the admin application + * @param system system application * @param gsc Graph Store client * @param baseURI base URI * @param query transformation query * @param imp import resource * @return function */ - protected Function getStreamRDFOutputWriter(Service service, Service adminService, GraphStoreClient gsc, String baseURI, Query query, CSVImport imp) + protected Function getStreamRDFOutputWriter(Service service, Service adminService, com.atomgraph.linkeddatahub.Application system, GraphStoreClient gsc, String baseURI, Query query, CSVImport imp) { - return new CSVGraphStoreOutputWriter(service, adminService, gsc, baseURI, query, imp.getDelimiter()); + return new CSVGraphStoreOutputWriter(service, adminService, system, gsc, baseURI, query, imp.getDelimiter()); } /** * Returns output writer for RDF imports. - * + * * @param service SPARQL service of the application * @param adminService SPARQL service of the admin application + * @param system system application * @param gsc Graph Store client * @param baseURI base URI * @param query transformation query * @param imp import resource * @return function */ - protected Function getStreamRDFOutputWriter(Service service, Service adminService, GraphStoreClient gsc, String baseURI, Query query, RDFImport imp) + protected Function getStreamRDFOutputWriter(Service service, Service adminService, com.atomgraph.linkeddatahub.Application system, GraphStoreClient gsc, String baseURI, Query query, RDFImport imp) { - return new StreamRDFOutputWriter(service, adminService, gsc, baseURI, query, imp.getGraphName() != null ? imp.getGraphName().getURI() : null); + return new StreamRDFOutputWriter(service, adminService, system, gsc, baseURI, query, imp.getGraphName() != null ? 
imp.getGraphName().getURI() : null); } - + /** * Returns executor service that contains a thread pool. - * + * * @return service */ protected ExecutorService getExecutorService() { return execService; } - + } diff --git a/src/main/java/com/atomgraph/linkeddatahub/imports/stream/RDFGraphStoreOutput.java b/src/main/java/com/atomgraph/linkeddatahub/imports/stream/RDFGraphStoreOutput.java index f18e3891c..31ea9e296 100644 --- a/src/main/java/com/atomgraph/linkeddatahub/imports/stream/RDFGraphStoreOutput.java +++ b/src/main/java/com/atomgraph/linkeddatahub/imports/stream/RDFGraphStoreOutput.java @@ -33,7 +33,6 @@ import org.apache.jena.query.QueryExecution; import org.apache.jena.rdf.model.Model; import org.apache.jena.rdf.model.ModelFactory; -import org.apache.jena.rdf.model.Resource; import org.apache.jena.riot.Lang; import org.apache.jena.riot.RDFDataMgr; import org.glassfish.jersey.uri.UriComponent; @@ -43,7 +42,7 @@ /** * Reads RDF from input stream and writes it into a named graph. * If a transformation query is provided, the input is transformed before writing. - * + * * @author {@literal Martynas Jusevičius } */ public class RDFGraphStoreOutput @@ -52,18 +51,20 @@ public class RDFGraphStoreOutput private static final Logger log = LoggerFactory.getLogger(RDFGraphStoreOutput.class); private final Service service, adminService; + private final com.atomgraph.linkeddatahub.Application system; private final GraphStoreClient gsc; private final String base; private final InputStream is; private final Query query; private final Lang lang; private final String graphURI; - + /** * Constructs output writer. - * + * * @param service SPARQL service of the application * @param adminService SPARQL service of the admin application + * @param system system application * @param gsc Graph Store client for RDF results * @param is RDF input stream * @param base base URI @@ -71,10 +72,11 @@ public class RDFGraphStoreOutput * @param lang RDF language * @param graphURI named graph URI */ - public RDFGraphStoreOutput(Service service, Service adminService, GraphStoreClient gsc, InputStream is, String base, Query query, Lang lang, String graphURI) + public RDFGraphStoreOutput(Service service, Service adminService, com.atomgraph.linkeddatahub.Application system, GraphStoreClient gsc, InputStream is, String base, Query query, Lang lang, String graphURI) { this.service = service; this.adminService = adminService; + this.system = system; this.gsc = gsc; this.is = is; this.base = base; @@ -82,7 +84,7 @@ public RDFGraphStoreOutput(Service service, Service adminService, GraphStoreClie this.lang = lang; this.graphURI = graphURI; } - + /** * Reads RDF and writes (possibly transformed) RDF into a named graph. * The input is transformed if the SPARQL transformation query was provided. 
@@ -103,7 +105,7 @@ public void write() dataset.listNames().forEachRemaining(graphUri -> { Model namedModel = dataset.getNamedModel(graphUri); - + if (!namedModel.isEmpty()) { // If-None-Match used with the * value can be used to save a file only if it does not already exist, @@ -111,13 +113,13 @@ public void write() // https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/If-None-Match MultivaluedMap headers = new MultivaluedHashMap(); headers.putSingle(HttpHeaders.IF_NONE_MATCH, "*"); - + try (Response putResponse = getGraphStoreClient().put(URI.create(graphUri), Entity.entity(namedModel, getGraphStoreClient().getDefaultMediaType()), new jakarta.ws.rs.core.MediaType[]{}, headers)) { if (putResponse.getStatusInfo().equals(Response.Status.PRECONDITION_FAILED)) { try (Response postResponse = getGraphStoreClient().post(URI.create(graphUri), namedModel)) - { + { if (!postResponse.getStatusInfo().getFamily().equals(Response.Status.Family.SUCCESSFUL)) { if (log.isErrorEnabled()) log.error("RDF document with URI <{}> could not be successfully created using PUT. Status code: {}", graphUri, postResponse.getStatus()); @@ -136,16 +138,16 @@ public void write() } // purge cache entries that include the graph URI - if (getService().getBackendProxy() != null) + if (getSystem().getServiceContext(getService()).getBackendProxy() != null) { - try (Response response = ban(getService().getClient(), getService().getBackendProxy(), graphUri)) + try (Response response = ban(getSystem().getServiceContext(getService()).getClient(), getSystem().getServiceContext(getService()).getBackendProxy(), graphUri)) { // Response automatically closed by try-with-resources } } - if (getAdminService() != null && getAdminService().getBackendProxy() != null) + if (getAdminService() != null && getSystem().getServiceContext(getAdminService()) != null && getSystem().getServiceContext(getAdminService()).getBackendProxy() != null) { - try (Response response = ban(getAdminService().getClient(), getAdminService().getBackendProxy(), graphUri)) + try (Response response = ban(getSystem().getServiceContext(getAdminService()).getClient(), getSystem().getServiceContext(getAdminService()).getBackendProxy(), graphUri)) { // Response automatically closed by try-with-resources } @@ -158,14 +160,14 @@ public void write() else { if (getGraphURI() == null) throw new IllegalStateException("Neither RDFImport query nor graph name is specified"); - + // If-None-Match used with the * value can be used to save a file only if it does not already exist, // guaranteeing that the upload won't accidentally overwrite another upload and lose the data of the previous PUT // https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/If-None-Match MultivaluedMap headers = new MultivaluedHashMap(); headers.putSingle(HttpHeaders.IF_NONE_MATCH, "*"); - try (Response putResponse = getGraphStoreClient().put(URI.create(getGraphURI()), Entity.entity(model, getGraphStoreClient().getDefaultMediaType()), new jakarta.ws.rs.core.MediaType[]{}, headers)) + try (Response putResponse = getGraphStoreClient().put(URI.create(getGraphURI()), Entity.entity(model, getGraphStoreClient().getDefaultMediaType()), new jakarta.ws.rs.core.MediaType[]{}, headers)) { if (putResponse.getStatusInfo().equals(Response.Status.PRECONDITION_FAILED)) { @@ -189,16 +191,16 @@ public void write() } // purge cache entries that include the graph URI - if (getService().getBackendProxy() != null) + if (getSystem().getServiceContext(getService()).getBackendProxy() != null) { - try (Response response = 
ban(getService().getClient(), getService().getBackendProxy(), getGraphURI())) + try (Response response = ban(getSystem().getServiceContext(getService()).getClient(), getSystem().getServiceContext(getService()).getBackendProxy(), getGraphURI())) { // Response automatically closed by try-with-resources } } - if (getAdminService() != null && getAdminService().getBackendProxy() != null) + if (getAdminService() != null && getSystem().getServiceContext(getAdminService()) != null && getSystem().getServiceContext(getAdminService()).getBackendProxy() != null) { - try (Response response = ban(getAdminService().getClient(), getAdminService().getBackendProxy(), getGraphURI())) + try (Response response = ban(getSystem().getServiceContext(getAdminService()).getClient(), getSystem().getServiceContext(getAdminService()).getBackendProxy(), getGraphURI())) { // Response automatically closed by try-with-resources } @@ -208,102 +210,112 @@ public void write() /** * Bans a URL from proxy cache. - * + * * @param client HTTP client - * @param proxy proxy cache endpoint + * @param proxyURI proxy cache endpoint URI * @param url request URL * @return response from cache */ - public Response ban(Client client, Resource proxy, String url) + public Response ban(Client client, URI proxyURI, String url) { if (url == null) throw new IllegalArgumentException("Resource cannot be null"); - + // create new Client instance, otherwise ApacheHttpClient reuses connection and Varnish ignores BAN request return client. - target(proxy.getURI()). + target(proxyURI). request(). header("X-Escaped-Request-URI", UriComponent.encode(url, UriComponent.Type.UNRESERVED)). method("BAN", Response.class); } - + /** * Return application's SPARQL service. - * + * * @return SPARQL service */ public Service getService() { return service; } - + /** * Return admin application's SPARQL service. - * + * * @return SPARQL service */ public Service getAdminService() { return adminService; } - + + /** + * Return system application. + * + * @return system application + */ + public com.atomgraph.linkeddatahub.Application getSystem() + { + return system; + } + /** * Returns Graph Store client. - * + * * @return client object */ public GraphStoreClient getGraphStoreClient() { return gsc; } - + /** * Returns RDF input stream. - * + * * @return input stream */ public InputStream getInputStream() { return is; } - + /** * Returns base URI. - * + * * @return base URI string */ public String getBase() { return base; } - + /** * Returns the CONSTRUCT transformation query. - * + * * @return SPARQL query or null */ public Query getQuery() { return query; } - + /** * Returns RDF language. - * + * * @return RDF lang */ public Lang getLang() { return lang; } - + /** * Returns named graph URI. - * + * * @return graph URI string */ public String getGraphURI() { return graphURI; } - + } diff --git a/src/main/java/com/atomgraph/linkeddatahub/imports/stream/StreamRDFOutputWriter.java b/src/main/java/com/atomgraph/linkeddatahub/imports/stream/StreamRDFOutputWriter.java index 422e2da06..b2a0ee616 100644 --- a/src/main/java/com/atomgraph/linkeddatahub/imports/stream/StreamRDFOutputWriter.java +++ b/src/main/java/com/atomgraph/linkeddatahub/imports/stream/StreamRDFOutputWriter.java @@ -39,33 +39,36 @@ /** * RDF stream writer. * A function that converts client response with RDF data to a stream of (optionally transformed) RDF data. 
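
The ban() signature above now takes a plain proxy URI, and both the client and the proxy come from the ServiceContext that the system application holds for the service. A minimal sketch of that purge step, assuming the lookup-and-guard order used at the call sites; the helper class name is hypothetical and the BAN method mirrors RDFGraphStoreOutput.ban().

    import com.atomgraph.linkeddatahub.Application;
    import com.atomgraph.linkeddatahub.model.Service;
    import com.atomgraph.linkeddatahub.model.ServiceContext;
    import jakarta.ws.rs.client.Client;
    import jakarta.ws.rs.core.Response;
    import java.net.URI;
    import org.glassfish.jersey.uri.UriComponent;

    // Hypothetical helper showing the post-refactoring cache invalidation pattern
    public class CachePurgeSketch
    {
        public void purge(Application system, Service service, String graphUri)
        {
            // Infrastructure (client, proxy) comes from the ServiceContext, not the Service
            ServiceContext ctx = system.getServiceContext(service);
            if (ctx == null || ctx.getBackendProxy() == null) return; // no backend proxy configured

            try (Response response = ban(ctx.getClient(), ctx.getBackendProxy(), graphUri))
            {
                // Response closed by try-with-resources; status ignored, as in the writers above
            }
        }

        // Mirrors RDFGraphStoreOutput.ban(): custom BAN method with the escaped URL in a header
        public Response ban(Client client, URI proxyURI, String url)
        {
            return client.target(proxyURI).
                    request().
                    header("X-Escaped-Request-URI", UriComponent.encode(url, UriComponent.Type.UNRESERVED)).
                    method("BAN", Response.class);
        }
    }
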
- * + * * @author Martynas Jusevičius {@literal } */ public class StreamRDFOutputWriter implements Function { - + private static final Logger log = LoggerFactory.getLogger(StreamRDFOutputWriter.class); private final Service service, adminService; + private final com.atomgraph.linkeddatahub.Application system; private final GraphStoreClient gsc; private final String baseURI, graphURI; private final Query query; /** * Constructs output writer. - * + * * @param service SPARQL service of the application * @param adminService SPARQL service of the admin application + * @param system system application * @param gsc GSP client * @param baseURI base URI * @param query transformation query or null * @param graphURI target graph URI */ - public StreamRDFOutputWriter(Service service, Service adminService, GraphStoreClient gsc, String baseURI, Query query, String graphURI) + public StreamRDFOutputWriter(Service service, Service adminService, com.atomgraph.linkeddatahub.Application system, GraphStoreClient gsc, String baseURI, Query query, String graphURI) { this.service = service; this.adminService = adminService; + this.system = system; this.gsc = gsc; this.baseURI = baseURI; this.query = query; @@ -76,7 +79,7 @@ public StreamRDFOutputWriter(Service service, Service adminService, GraphStoreCl public RDFGraphStoreOutput apply(Response rdfInput) { if (rdfInput == null) throw new IllegalArgumentException("Response cannot be null"); - + try { // buffer the RDF in a temp file before transforming it @@ -92,7 +95,7 @@ public RDFGraphStoreOutput apply(Response rdfInput) Lang lang = RDFLanguages.contentTypeToLang(mediaType.toString()); // convert media type to RDF language if (lang == null) throw new BadRequestException("Content type '" + mediaType + "' is not an RDF media type"); - RDFGraphStoreOutput output = new RDFGraphStoreOutput(getService(), getAdminService(), getGraphStoreClient(), fis, getBaseURI(), getQuery(), lang, getGraphURI()); + RDFGraphStoreOutput output = new RDFGraphStoreOutput(getService(), getAdminService(), getSystem(), getGraphStoreClient(), fis, getBaseURI(), getQuery(), lang, getGraphURI()); output.write(); return output; } @@ -110,62 +113,72 @@ public RDFGraphStoreOutput apply(Response rdfInput) /** * Return application's SPARQL service. - * + * * @return SPARQL service */ public Service getService() { return service; } - + /** * Return admin application's SPARQL service. - * + * * @return SPARQL service */ public Service getAdminService() { return adminService; } - + + /** + * Return system application. + * + * @return system application + */ + public com.atomgraph.linkeddatahub.Application getSystem() + { + return system; + } + /** * Returns the Graph Store client. - * + * * @return client object */ public GraphStoreClient getGraphStoreClient() { return gsc; } - + /** * Returns the base URI. - * + * * @return base URI string */ public String getBaseURI() { return baseURI; } - + /** * Returns the transformation query. - * + * * @return SPARQL query or null */ public Query getQuery() { return query; } - + /** * Returns the target graph URI. 
- * + * * @return named graph URI */ public String getGraphURI() { return graphURI; } - -} \ No newline at end of file + +} diff --git a/src/main/java/com/atomgraph/linkeddatahub/imports/stream/csv/CSVGraphStoreOutput.java b/src/main/java/com/atomgraph/linkeddatahub/imports/stream/csv/CSVGraphStoreOutput.java index c4dd531ba..75206f8b1 100644 --- a/src/main/java/com/atomgraph/linkeddatahub/imports/stream/csv/CSVGraphStoreOutput.java +++ b/src/main/java/com/atomgraph/linkeddatahub/imports/stream/csv/CSVGraphStoreOutput.java @@ -26,7 +26,7 @@ /** * RDF output stream. * Used to write CSV data transformed to RDF. - * + * * @author Martynas Jusevičius {@literal } * @see com.atomgraph.linkeddatahub.listener.ImportListener */ @@ -40,12 +40,13 @@ public class CSVGraphStoreOutput // extends com.atomgraph.etl.csv.stream.CSVStre private final Integer maxCharsPerColumn; private final CSVGraphStoreRowProcessor processor; private final CsvParser parser; - + /** * Constructs output writer. - * + * * @param service SPARQL service of the application * @param adminService SPARQL service of the admin application + * @param system system application * @param gsc Graph Store client * @param base base URI * @param reader CSV reader @@ -53,15 +54,15 @@ public class CSVGraphStoreOutput // extends com.atomgraph.etl.csv.stream.CSVStre * @param delimiter CSV delimiter * @param maxCharsPerColumn max number of characters per column */ - public CSVGraphStoreOutput(Service service, Service adminService, GraphStoreClient gsc, String base, Reader reader, Query query, char delimiter, Integer maxCharsPerColumn) + public CSVGraphStoreOutput(Service service, Service adminService, com.atomgraph.linkeddatahub.Application system, GraphStoreClient gsc, String base, Reader reader, Query query, char delimiter, Integer maxCharsPerColumn) { this.base = base; this.reader = reader; this.query = query; this.delimiter = delimiter; this.maxCharsPerColumn = maxCharsPerColumn; - this.processor = new CSVGraphStoreRowProcessor(service, adminService, gsc, base, query); - + this.processor = new CSVGraphStoreRowProcessor(service, adminService, system, gsc, base, query); + CsvParserSettings parserSettings = new CsvParserSettings(); parserSettings.setLineSeparatorDetectionEnabled(true); parserSettings.setProcessor(processor); @@ -71,86 +72,86 @@ public CSVGraphStoreOutput(Service service, Service adminService, GraphStoreClie parser = new CsvParser(parserSettings); } - + /** * Reads CSV and writes RDF. - * + * * First a generic CSV/RDF representation is constructed for each row. Then the row is transformed using the SPARQL query. */ public void write() { getCsvParser().parse(getReader()); } - + /** * Returns the CSV parser. - * + * * @return parser */ public CsvParser getCsvParser() { return parser; } - + /** * Returns the CSV reader. - * + * * @return reader */ public Reader getReader() { return reader; } - + /** * Returns the base URI. - * + * * @return base URI */ public String getBase() { return base; } - + /** * Returns the CONSTRUCT transformation query. - * + * * @return SPARQL query */ public Query getQuery() { return query; } - + /** * Returns the CSV delimiter. - * + * * @return delimiter character */ public char getDelimiter() { return delimiter; } - + /** * Returns the maximum number of characters per CSV column. - * + * * @return maximum number of characters */ public Integer getMaxCharsPerColumn() { return maxCharsPerColumn; } - + /** * Returns the row processor. * The processor performs the transformation on each CSV row. 
- * + * * @return processor */ public CSVGraphStoreRowProcessor getCSVGraphStoreRowProcessor() { return processor; } - + } diff --git a/src/main/java/com/atomgraph/linkeddatahub/imports/stream/csv/CSVGraphStoreOutputWriter.java b/src/main/java/com/atomgraph/linkeddatahub/imports/stream/csv/CSVGraphStoreOutputWriter.java index 5ab88cd53..73ff70511 100644 --- a/src/main/java/com/atomgraph/linkeddatahub/imports/stream/csv/CSVGraphStoreOutputWriter.java +++ b/src/main/java/com/atomgraph/linkeddatahub/imports/stream/csv/CSVGraphStoreOutputWriter.java @@ -38,7 +38,7 @@ /** * RDF stream writer. * A function that converts client response with CSV data to a stream of transformed RDF data. - * + * * @author Martynas Jusevičius {@literal } * @see com.atomgraph.linkeddatahub.listener.ImportListener */ @@ -48,31 +48,34 @@ public class CSVGraphStoreOutputWriter implements Function + + rowDataset.listNames().forEachRemaining(graphUri -> { // exceptions get swallowed by the client? TO-DO: wait for completion Model namedModel = rowDataset.getNamedModel(graphUri); if (!namedModel.isEmpty()) add(namedModel, graphUri); - + try { // purge cache entries that include the graph URI - if (getService().getBackendProxy() != null) + if (getSystem().getServiceContext(getService()).getBackendProxy() != null) { - try (Response response = ban(getService().getClient(), getService().getBackendProxy(), graphUri)) + try (Response response = ban(getSystem().getServiceContext(getService()).getClient(), getSystem().getServiceContext(getService()).getBackendProxy(), graphUri)) { // Response automatically closed by try-with-resources } } - if (getAdminService() != null && getAdminService().getBackendProxy() != null) + if (getAdminService() != null && getSystem().getServiceContext(getAdminService()) != null && getSystem().getServiceContext(getAdminService()).getBackendProxy() != null) { - try (Response response = ban(getAdminService().getClient(), getAdminService().getBackendProxy(), graphUri)) + try (Response response = ban(getSystem().getServiceContext(getAdminService()).getClient(), getSystem().getServiceContext(getAdminService()).getBackendProxy(), graphUri)) { // Response automatically closed by try-with-resources } @@ -119,10 +122,10 @@ public void rowProcessed(String[] row, ParsingContext context) } ); } - + /** * Creates a graph using PUT if it doesn't exist, otherwise appends data using POST. - * + * * @param namedModel model * @param graphURI the graph URI */ @@ -139,7 +142,7 @@ protected void add(Model namedModel, String graphURI) if (putResponse.getStatusInfo().equals(Response.Status.PRECONDITION_FAILED)) { try (Response postResponse = getGraphStoreClient().post(URI.create(graphURI), namedModel)) - { + { if (!postResponse.getStatusInfo().getFamily().equals(Response.Status.Family.SUCCESSFUL)) { if (log.isErrorEnabled()) log.error("RDF document with URI <{}> could not be successfully created using PUT. Status code: {}", graphURI, postResponse.getStatus()); @@ -157,12 +160,12 @@ protected void add(Model namedModel, String graphURI) } } } - + /** * Transforms CSV row into an an RDF graph. * First a generic CSV/RDF graph is constructed. Then the transformation query is applied on it. * Extended SPARQL syntax is used to allow the CONSTRUCT GRAPH query form. 
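
For reference, a self-contained sketch of the per-row transformation described above: a generic row graph is queried with an ARQ-extended CONSTRUCT whose template uses the GRAPH keyword, and execConstructDataset() yields the named graphs that rowProcessed() then writes with PUT/POST. The vocabulary, column names, query, and graph URI below are made up for illustration.

    import org.apache.jena.query.Dataset;
    import org.apache.jena.query.Query;
    import org.apache.jena.query.QueryExecution;
    import org.apache.jena.query.QueryExecutionFactory;
    import org.apache.jena.query.QueryFactory;
    import org.apache.jena.query.Syntax;
    import org.apache.jena.rdf.model.Model;
    import org.apache.jena.rdf.model.ModelFactory;
    import org.apache.jena.rdf.model.Resource;
    import org.apache.jena.vocabulary.RDF;

    public class RowTransformSketch
    {
        public static void main(String[] args)
        {
            // Generic CSV/RDF graph for a single row (hypothetical vocabulary)
            Model rowModel = ModelFactory.createDefaultModel();
            Resource subject = rowModel.createResource();
            subject.addProperty(RDF.type, rowModel.createResource("https://example.org/ns#Row"));
            subject.addProperty(rowModel.createProperty("https://example.org/ns#name"), "Alice");

            // Hypothetical mapping query; CONSTRUCT GRAPH requires the extended ARQ syntax
            String queryString =
                "PREFIX ex: <https://example.org/ns#>\n" +
                "CONSTRUCT { GRAPH <https://example.org/graphs/people/> { ?s ex:name ?name } }\n" +
                "WHERE { ?s a ex:Row ; ex:name ?name }";
            Query query = QueryFactory.create(queryString, Syntax.syntaxARQ);

            try (QueryExecution qex = QueryExecutionFactory.create(query, rowModel))
            {
                Dataset rowDataset = qex.execConstructDataset(); // named graphs keyed by the GRAPH URIs
                rowDataset.listNames().forEachRemaining(System.out::println);
            }
        }
    }
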
- * + * * @param row CSV row * @param context parsing context * @return RDF result @@ -172,7 +175,7 @@ public Dataset transformRow(String[] row, ParsingContext context) Model rowModel = ModelFactory.createDefaultModel(); Resource subject = rowModel.createResource(); subjectCount++; - + int cellNo = 0; for (String cell : row) { @@ -191,7 +194,7 @@ public Dataset transformRow(String[] row, ParsingContext context) return qex.execConstructDataset(); } } - + @Override public void processEnded(ParsingContext context) { @@ -199,44 +202,54 @@ public void processEnded(ParsingContext context) /** * Bans a URL from proxy cache. - * + * * @param client HTTP client - * @param proxy proxy cache endpoint + * @param proxyURI proxy cache endpoint URI * @param url request URL * @return response from cache */ - public Response ban(Client client, Resource proxy, String url) + public Response ban(Client client, URI proxyURI, String url) { if (url == null) throw new IllegalArgumentException("Resource cannot be null"); - + // create new Client instance, otherwise ApacheHttpClient reuses connection and Varnish ignores BAN request return client. - target(proxy.getURI()). + target(proxyURI). request(). header("X-Escaped-Request-URI", UriComponent.encode(url, UriComponent.Type.UNRESERVED)). method("BAN", Response.class); } - + /** * Return application's SPARQL service. - * + * * @return SPARQL service */ public Service getService() { return service; } - + /** * Return admin application's SPARQL service. - * + * * @return SPARQL service */ public Service getAdminService() { return adminService; } - + + /** + * Return system application. + * + * @return system application + */ + public com.atomgraph.linkeddatahub.Application getSystem() + { + return system; + } + /** * Returns base URI. * @return base URI string @@ -245,45 +258,45 @@ public String getBase() { return base; } - + /** * Returns the transformation query. - * + * * @return SPARQL query */ public Query getQuery() { return query; } - + /** * Returns the cumulative count of RDF subject resources. - * + * * @return subject count */ public int getSubjectCount() { return subjectCount; } - + /** * Returns the cumulative count of RDF triples. - * + * * @return triple count */ public int getTripleCount() { return tripleCount; } - + /** * Returns the Graph Store client. - * + * * @return client object */ public GraphStoreClient getGraphStoreClient() { return gsc; } - + } diff --git a/src/main/java/com/atomgraph/linkeddatahub/model/Service.java b/src/main/java/com/atomgraph/linkeddatahub/model/Service.java index 81d8189e9..2cc33f655 100644 --- a/src/main/java/com/atomgraph/linkeddatahub/model/Service.java +++ b/src/main/java/com/atomgraph/linkeddatahub/model/Service.java @@ -16,48 +16,51 @@ */ package com.atomgraph.linkeddatahub.model; -import com.atomgraph.core.MediaTypes; -import com.atomgraph.core.model.EndpointAccessor; -import jakarta.ws.rs.client.Client; import org.apache.jena.rdf.model.Resource; /** * Remote SPARQL service. - * + * Describes the data endpoints of a SPARQL service (what it is), without any infrastructure + * (clients, proxies) concerns (how to route to it). + * * @author Martynas Jusevičius {@literal } */ -public interface Service extends com.atomgraph.core.model.RemoteService, Resource +public interface Service extends Resource { - @Override - EndpointAccessor getEndpointAccessor(); + /** + * Returns the SPARQL 1.1 Protocol endpoint resource. 
+ * + * @return RDF resource + */ + Resource getSPARQLEndpoint(); /** - * Returns backend proxy's cache URI resource. - * + * Returns the Graph Store Protocol endpoint resource. + * * @return RDF resource */ - Resource getBackendProxy(); - + Resource getGraphStore(); + /** - * Returns HTTP client. - * - * @return HTTP client + * Returns the quad store endpoint resource. + * + * @return RDF resource, or null if not configured */ - Client getClient(); + Resource getQuadStore(); /** - * Returns a registry of readable/writable media types. - * - * @return media type registry + * Returns the HTTP Basic authentication username, if configured. + * + * @return username string, or null */ - MediaTypes getMediaTypes(); + String getAuthUser(); /** - * Returns the maximum size of SPARQL GET requests. - * - * @return request size in bytes + * Returns the HTTP Basic authentication password, if configured. + * + * @return password string, or null */ - Integer getMaxGetRequestSize(); - + String getAuthPwd(); + } diff --git a/src/main/java/com/atomgraph/linkeddatahub/model/ServiceContext.java b/src/main/java/com/atomgraph/linkeddatahub/model/ServiceContext.java new file mode 100644 index 000000000..ec208db35 --- /dev/null +++ b/src/main/java/com/atomgraph/linkeddatahub/model/ServiceContext.java @@ -0,0 +1,276 @@ +/** + * Copyright 2026 Martynas Jusevičius + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ +package com.atomgraph.linkeddatahub.model; + +import com.atomgraph.core.MediaTypes; +import com.atomgraph.core.client.GraphStoreClient; +import com.atomgraph.core.client.QuadStoreClient; +import com.atomgraph.core.client.SPARQLClient; +import com.atomgraph.core.model.EndpointAccessor; +import com.atomgraph.core.model.impl.remote.EndpointAccessorImpl; +import jakarta.ws.rs.client.Client; +import jakarta.ws.rs.client.WebTarget; +import jakarta.ws.rs.core.UriBuilder; +import java.net.URI; +import org.glassfish.jersey.client.authentication.HttpAuthenticationFeature; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * Deployment context for a SPARQL service. + * Pairs a pure-data {@link Service} with the infrastructure config needed to + * actually communicate with it: an HTTP client, media-type registry, and an optional + * backend-proxy URI that rewrites internal endpoint URIs before sending requests. + * + *

Instances are created and owned by + * {@link com.atomgraph.linkeddatahub.Application} during startup and exposed via + * {@code getServiceContext(Service)}. + * + * @author Martynas Jusevičius {@literal } + */ +public class ServiceContext +{ + + private static final Logger log = LoggerFactory.getLogger(ServiceContext.class); + + private final Service service; + private final Client client; + private final MediaTypes mediaTypes; + private final Integer maxGetRequestSize; + private final URI backendProxy; + + /** + * Constructs a service context without a backend proxy. + * + * @param service the SPARQL service description + * @param client HTTP client + * @param mediaTypes registry of readable/writable media types + * @param maxGetRequestSize the maximum size of SPARQL {@code GET} requests + */ + public ServiceContext(Service service, Client client, MediaTypes mediaTypes, Integer maxGetRequestSize) + { + this(service, client, mediaTypes, maxGetRequestSize, null); + } + + /** + * Constructs a service context with an optional backend proxy. + * + * @param service the SPARQL service description + * @param client HTTP client + * @param mediaTypes registry of readable/writable media types + * @param maxGetRequestSize the maximum size of SPARQL {@code GET} requests + * @param backendProxy backend proxy URI used to rewrite internal endpoint URIs, or {@code null} + */ + public ServiceContext(Service service, Client client, MediaTypes mediaTypes, Integer maxGetRequestSize, URI backendProxy) + { + if (service == null) throw new IllegalArgumentException("Service cannot be null"); + if (client == null) throw new IllegalArgumentException("Client cannot be null"); + if (mediaTypes == null) throw new IllegalArgumentException("MediaTypes cannot be null"); + this.service = service; + this.client = client; + this.mediaTypes = mediaTypes; + this.maxGetRequestSize = maxGetRequestSize; + this.backendProxy = backendProxy; + } + + /** + * Returns the SPARQL Protocol client for this service, with proxy routing applied. + * + * @return SPARQL client + */ + public SPARQLClient getSPARQLClient() + { + return getSPARQLClient(getClient().target(getProxiedURI(URI.create(getService().getSPARQLEndpoint().getURI())))); + } + + /** + * Creates a SPARQL Protocol client for the specified URI web target. + * + * @param webTarget URI web target + * @return SPARQL client + */ + public SPARQLClient getSPARQLClient(WebTarget webTarget) + { + SPARQLClient sparqlClient; + + if (getMaxGetRequestSize() != null) + sparqlClient = SPARQLClient.create(getMediaTypes(), webTarget, getMaxGetRequestSize()); + else + sparqlClient = SPARQLClient.create(getMediaTypes(), webTarget); + + if (getService().getAuthUser() != null && getService().getAuthPwd() != null) + { + HttpAuthenticationFeature authFeature = HttpAuthenticationFeature.basicBuilder(). + credentials(getService().getAuthUser(), getService().getAuthPwd()). + build(); + + sparqlClient.getEndpoint().register(authFeature); + } + + return sparqlClient; + } + + /** + * Returns the endpoint accessor for this service. + * + * @return endpoint accessor + */ + public EndpointAccessor getEndpointAccessor() + { + return new EndpointAccessorImpl(getSPARQLClient()); + } + + /** + * Returns the Graph Store Protocol client for this service, with proxy routing applied. 
+ * + * @return GSP client + */ + public GraphStoreClient getGraphStoreClient() + { + return getGraphStoreClient(getProxiedURI(URI.create(getService().getGraphStore().getURI()))); + } + + /** + * Creates a Graph Store Protocol client for the specified endpoint URI. + * + * @param endpoint endpoint URI + * @return GSP client + */ + public GraphStoreClient getGraphStoreClient(URI endpoint) + { + GraphStoreClient graphStoreClient = GraphStoreClient.create(getClient(), getMediaTypes(), endpoint); + + if (getService().getAuthUser() != null && getService().getAuthPwd() != null) + { + HttpAuthenticationFeature authFeature = HttpAuthenticationFeature.basicBuilder(). + credentials(getService().getAuthUser(), getService().getAuthPwd()). + build(); + + graphStoreClient.register(authFeature); + } + + return graphStoreClient; + } + + /** + * Returns the quad store client for this service, with proxy routing applied. + * Returns {@code null} if the service has no quad store configured. + * + * @return quad store client, or {@code null} + */ + public QuadStoreClient getQuadStoreClient() + { + if (getService().getQuadStore() != null) + return getQuadStoreClient(getClient().target(getProxiedURI(URI.create(getService().getQuadStore().getURI())))); + + return null; + } + + /** + * Creates a quad store client for the specified URI web target. + * + * @param webTarget URI web target + * @return quad store client + */ + public QuadStoreClient getQuadStoreClient(WebTarget webTarget) + { + QuadStoreClient quadStoreClient = QuadStoreClient.create(webTarget); + + if (getService().getAuthUser() != null && getService().getAuthPwd() != null) + { + HttpAuthenticationFeature authFeature = HttpAuthenticationFeature.basicBuilder(). + credentials(getService().getAuthUser(), getService().getAuthPwd()). + build(); + + quadStoreClient.getEndpoint().register(authFeature); + } + + return quadStoreClient; + } + + /** + * Rewrites the given URI by replacing its scheme/host/port with those of the backend proxy. + * If no backend proxy is configured, the URI is returned unchanged. + * + * @param uri input URI + * @return proxied URI + */ + public URI getProxiedURI(final URI uri) + { + if (getBackendProxy() != null) + { + return UriBuilder.fromUri(uri). + scheme(getBackendProxy().getScheme()). + host(getBackendProxy().getHost()). + port(getBackendProxy().getPort()). + build(); + } + + return uri; + } + + /** + * Returns the SPARQL service description. + * + * @return service + */ + public Service getService() + { + return service; + } + + /** + * Returns the HTTP client. + * + * @return HTTP client + */ + public Client getClient() + { + return client; + } + + /** + * Returns the media type registry. + * + * @return media types + */ + public MediaTypes getMediaTypes() + { + return mediaTypes; + } + + /** + * Returns the maximum size of SPARQL {@code GET} requests. + * + * @return request size in bytes, or {@code null} if not configured + */ + public Integer getMaxGetRequestSize() + { + return maxGetRequestSize; + } + + /** + * Returns the backend proxy URI, used for cache invalidation BAN requests and endpoint URI rewriting. 
+ * + * @return backend proxy URI, or {@code null} if not configured + */ + public URI getBackendProxy() + { + return backendProxy; + } + +} diff --git a/src/main/java/com/atomgraph/linkeddatahub/model/impl/ServiceImpl.java b/src/main/java/com/atomgraph/linkeddatahub/model/impl/ServiceImpl.java index e754b91e5..a9f297b0d 100644 --- a/src/main/java/com/atomgraph/linkeddatahub/model/impl/ServiceImpl.java +++ b/src/main/java/com/atomgraph/linkeddatahub/model/impl/ServiceImpl.java @@ -16,36 +16,23 @@ */ package com.atomgraph.linkeddatahub.model.impl; -import com.atomgraph.core.MediaTypes; -import com.atomgraph.core.client.GraphStoreClient; -import com.atomgraph.core.client.QuadStoreClient; -import com.atomgraph.core.client.SPARQLClient; -import com.atomgraph.core.model.DatasetAccessor; -import com.atomgraph.core.model.DatasetQuadAccessor; -import com.atomgraph.core.model.EndpointAccessor; -import com.atomgraph.core.model.impl.remote.DatasetAccessorImpl; -import com.atomgraph.core.model.impl.remote.DatasetQuadAccessorImpl; -import com.atomgraph.core.model.impl.remote.EndpointAccessorImpl; import com.atomgraph.core.vocabulary.A; import com.atomgraph.core.vocabulary.SD; import com.atomgraph.linkeddatahub.model.Service; -import com.atomgraph.linkeddatahub.vocabulary.LAPP; -import java.net.URI; -import jakarta.ws.rs.client.Client; -import jakarta.ws.rs.client.WebTarget; -import jakarta.ws.rs.core.UriBuilder; import org.apache.jena.enhanced.EnhGraph; import org.apache.jena.graph.Node; import org.apache.jena.rdf.model.Resource; import org.apache.jena.rdf.model.Statement; import org.apache.jena.rdf.model.impl.ResourceImpl; -import org.glassfish.jersey.client.authentication.HttpAuthenticationFeature; import org.slf4j.Logger; import org.slf4j.LoggerFactory; /** * SPARQL service implementation. - * + * Pure data accessor — describes what a service is (endpoints, credentials) without + * any infrastructure concerns (HTTP clients, proxy routing). + * Use {@link com.atomgraph.linkeddatahub.model.ServiceContext} to build clients. + * * @author Martynas Jusevičius {@literal } */ public class ServiceImpl extends ResourceImpl implements Service @@ -53,27 +40,17 @@ public class ServiceImpl extends ResourceImpl implements Service private static final Logger log = LoggerFactory.getLogger(ServiceImpl.class); - private final Client client; - private final MediaTypes mediaTypes; - private final Integer maxGetRequestSize; - /** - * Constructs instance from node, graph, and HTTP config. - * + * Constructs instance from node and graph. 
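
To make the new split concrete: the Service describes endpoints and credentials read from RDF, while the ServiceContext adds the HTTP client, media-type registry, request-size limit, and optional backend proxy. A construction-and-usage sketch with made-up values (the proxy URI and size limit are illustrative); in LinkedDataHub the contexts are built by the system Application at startup and retrieved with getServiceContext(Service).

    import com.atomgraph.core.MediaTypes;
    import com.atomgraph.core.client.GraphStoreClient;
    import com.atomgraph.linkeddatahub.model.Service;
    import com.atomgraph.linkeddatahub.model.ServiceContext;
    import jakarta.ws.rs.client.Client;
    import jakarta.ws.rs.client.ClientBuilder;
    import java.net.URI;

    public class ServiceContextSketch
    {
        public static ServiceContext build(Service service)
        {
            // Infrastructure config lives in the context, not in the Service description
            Client client = ClientBuilder.newClient();
            MediaTypes mediaTypes = new MediaTypes();
            Integer maxGetRequestSize = 8192;                            // hypothetical limit
            URI backendProxy = URI.create("http://varnish-backend:80/"); // hypothetical proxy; null if none

            return new ServiceContext(service, client, mediaTypes, maxGetRequestSize, backendProxy);
        }

        public static void use(ServiceContext ctx)
        {
            // Clients are derived on demand; endpoint URIs are rewritten to the backend proxy first
            GraphStoreClient gsc = ctx.getGraphStoreClient();
            System.out.println("Default media type: " + gsc.getDefaultMediaType());
            System.out.println("GSP endpoint (proxied): " +
                ctx.getProxiedURI(URI.create(ctx.getService().getGraphStore().getURI())));
        }
    }
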
+ * * @param n node * @param g graph - * @param client HTTP client - * @param mediaTypes registry of readable/writable media types - * @param maxGetRequestSize the maximum size of SPARQL GET requests */ - public ServiceImpl(Node n, EnhGraph g, Client client, MediaTypes mediaTypes, Integer maxGetRequestSize) + public ServiceImpl(Node n, EnhGraph g) { super(n, g); - this.client = client; - this.mediaTypes = mediaTypes; - this.maxGetRequestSize = maxGetRequestSize; } - + @Override public Resource getSPARQLEndpoint() { @@ -91,19 +68,13 @@ public Resource getQuadStore() { return getPropertyResourceValue(A.quadStore); } - - @Override - public Resource getBackendProxy() - { - return getPropertyResourceValue(LAPP.backendProxy); - } - + @Override public String getAuthUser() { Statement authUser = getProperty(A.authUser); if (authUser != null) return authUser.getString(); - + return null; } @@ -112,160 +83,8 @@ public String getAuthPwd() { Statement authPwd = getProperty(A.authPwd); if (authPwd != null) return authPwd.getString(); - - return null; - } - - @Override - public SPARQLClient getSPARQLClient() - { - return getSPARQLClient(getClient().target(getProxiedURI(URI.create(getSPARQLEndpoint().getURI())))); - } - - /** - * Creates SPARQL Protocol client for the specified URI web target. - * - * @param webTarget URI web target - * @return SPARQL client - */ - public SPARQLClient getSPARQLClient(WebTarget webTarget) - { - SPARQLClient sparqlClient; - - if (getMaxGetRequestSize() != null) - sparqlClient = SPARQLClient.create(getMediaTypes(), webTarget, getMaxGetRequestSize()); - else - sparqlClient = SPARQLClient.create(getMediaTypes(), webTarget); - - if (getAuthUser() != null && getAuthPwd() != null) - { - HttpAuthenticationFeature authFeature = HttpAuthenticationFeature.basicBuilder(). - credentials(getAuthUser(), getAuthPwd()). - build(); - - sparqlClient.getEndpoint().register(authFeature); - } - - return sparqlClient; - } - - @Override - public EndpointAccessor getEndpointAccessor() - { - return new EndpointAccessorImpl(getSPARQLClient()); - } - - @Override - public GraphStoreClient getGraphStoreClient() - { - return getGraphStoreClient(getProxiedURI(URI.create(getGraphStore().getURI()))); - } - - /** - * Creates Graph Store Protocol client for the specified endpoint URI. - * - * @param endpoint endpoint - * @return GSP client - */ - public GraphStoreClient getGraphStoreClient(URI endpoint) - { - GraphStoreClient graphStoreClient = GraphStoreClient.create(getClient(), getMediaTypes(), endpoint); - - if (getAuthUser() != null && getAuthPwd() != null) - { - HttpAuthenticationFeature authFeature = HttpAuthenticationFeature.basicBuilder(). - credentials(getAuthUser(), getAuthPwd()). - build(); - - graphStoreClient.register(authFeature); - } - - return graphStoreClient; - } - - @Override - @Deprecated - public DatasetAccessor getDatasetAccessor() - { - return new DatasetAccessorImpl(getGraphStoreClient()); - } - @Override - public QuadStoreClient getQuadStoreClient() - { - if (getQuadStore() != null) return getQuadStoreClient(getClient().target(getProxiedURI(URI.create(getQuadStore().getURI())))); - return null; } - - /** - * Creates Graph Store Protocol client for a given URI target. 
- * - * @param webTarget URI web target - * @return GSP client - */ - public QuadStoreClient getQuadStoreClient(WebTarget webTarget) - { - QuadStoreClient quadStoreClient = QuadStoreClient.create(webTarget); - - if (getAuthUser() != null && getAuthPwd() != null) - { - HttpAuthenticationFeature authFeature = HttpAuthenticationFeature.basicBuilder(). - credentials(getAuthUser(), getAuthPwd()). - build(); - - quadStoreClient.getEndpoint().register(authFeature); - } - - return quadStoreClient; - } - - @Override - @Deprecated - public DatasetQuadAccessor getDatasetQuadAccessor() - { - return new DatasetQuadAccessorImpl(getQuadStoreClient()); - } - - /** - * Rewrites the given URI using the backendProxy URI. - * - * @param uri input URI - * @return proxied URI - */ - protected URI getProxiedURI(final URI uri) - { - // if service proxyURI is set, change the URI host/port to proxyURI host/port - if (getBackendProxy() != null) - { - final URI proxyURI = URI.create(getBackendProxy().getURI()); - - return UriBuilder.fromUri(uri). - scheme(proxyURI.getScheme()). - host(proxyURI.getHost()). - port(proxyURI.getPort()). - build(); - } - - return uri; - } - - @Override - public Client getClient() - { - return client; - } - - @Override - public MediaTypes getMediaTypes() - { - return mediaTypes; - } - - @Override - public Integer getMaxGetRequestSize() - { - return maxGetRequestSize; - } } diff --git a/src/main/java/com/atomgraph/linkeddatahub/model/impl/ServiceImplementation.java b/src/main/java/com/atomgraph/linkeddatahub/model/impl/ServiceImplementation.java index cfc5d5f0b..93a6ea7ca 100644 --- a/src/main/java/com/atomgraph/linkeddatahub/model/impl/ServiceImplementation.java +++ b/src/main/java/com/atomgraph/linkeddatahub/model/impl/ServiceImplementation.java @@ -16,9 +16,7 @@ */ package com.atomgraph.linkeddatahub.model.impl; -import com.atomgraph.core.MediaTypes; import com.atomgraph.core.vocabulary.SD; -import jakarta.ws.rs.client.Client; import org.apache.jena.enhanced.EnhGraph; import org.apache.jena.enhanced.EnhNode; import org.apache.jena.enhanced.Implementation; @@ -27,41 +25,31 @@ import org.apache.jena.vocabulary.RDF; /** - * Jena's implementation factory. - * + * Jena's implementation factory for {@link com.atomgraph.linkeddatahub.model.Service}. + * Wraps RDF nodes typed as {@code sd:Service} into {@link ServiceImpl} instances. + * * @author Martynas Jusevičius {@literal } */ public class ServiceImplementation extends Implementation { - - private final Client client; - private final MediaTypes mediaTypes; - private final Integer maxGetRequestSize; /** - * Constructs factory from HTTP configuration. - * - * @param client HTTP client - * @param mediaTypes registry of readable/writable media types - * @param maxGetRequestSize the maximum size of SPARQL GET requests + * Constructs factory. 
*/ - public ServiceImplementation(Client client, MediaTypes mediaTypes, Integer maxGetRequestSize) + public ServiceImplementation() { - this.client = client; - this.mediaTypes = mediaTypes; - this.maxGetRequestSize = maxGetRequestSize; } - + @Override public EnhNode wrap(Node node, EnhGraph enhGraph) { if (canWrap(node, enhGraph)) { - return new ServiceImpl(node, enhGraph, getClient(), getMediaTypes(), getMaxGetRequestSize()); + return new ServiceImpl(node, enhGraph); } else { - throw new ConversionException( "Cannot convert node " + node.toString() + " to Service: it does not have rdf:type sd:Service or equivalent"); + throw new ConversionException("Cannot convert node " + node.toString() + " to Service: it does not have rdf:type sd:Service or equivalent"); } } @@ -72,35 +60,5 @@ public boolean canWrap(Node node, EnhGraph eg) return eg.asGraph().contains(node, RDF.type.asNode(), SD.Service.asNode()); } - - /** - * Returns HTTP client. - * - * @return HTTP client - */ - public Client getClient() - { - return client; - } - - /** - * Returns a registry of readable/writable media types. - * - * @return media type registry - */ - public MediaTypes getMediaTypes() - { - return mediaTypes; - } - - /** - * Returns the maximum size of SPARQL GET requests. - * - * @return request size in bytes - */ - public Integer getMaxGetRequestSize() - { - return maxGetRequestSize; - } - + } diff --git a/src/main/java/com/atomgraph/linkeddatahub/resource/Generate.java b/src/main/java/com/atomgraph/linkeddatahub/resource/Generate.java index 716289439..b732a146e 100644 --- a/src/main/java/com/atomgraph/linkeddatahub/resource/Generate.java +++ b/src/main/java/com/atomgraph/linkeddatahub/resource/Generate.java @@ -18,8 +18,6 @@ import com.atomgraph.core.MediaTypes; import com.atomgraph.linkeddatahub.apps.model.Application; -import com.atomgraph.linkeddatahub.client.GraphStoreClient; -import com.atomgraph.linkeddatahub.imports.QueryLoader; import com.atomgraph.linkeddatahub.server.model.impl.DirectGraphStoreImpl; import com.atomgraph.linkeddatahub.server.security.AgentContext; import com.atomgraph.linkeddatahub.server.util.Skolemizer; @@ -44,8 +42,10 @@ import jakarta.ws.rs.core.Response.Status; import jakarta.ws.rs.core.UriBuilder; import jakarta.ws.rs.core.UriInfo; +import org.apache.jena.ontology.Ontology; import org.apache.jena.query.ParameterizedSparqlString; import org.apache.jena.query.Query; +import org.apache.jena.query.QueryFactory; import org.apache.jena.query.Syntax; import org.apache.jena.rdf.model.Model; import org.apache.jena.rdf.model.ModelFactory; @@ -69,10 +69,11 @@ public class Generate private final UriInfo uriInfo; private final MediaTypes mediaTypes; private final Application application; + private final Ontology ontology; private final Optional agentContext; private final com.atomgraph.linkeddatahub.Application system; private final ResourceContext resourceContext; - + /** * Constructs endpoint for container generation. 
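
Stepping back to the ServiceImplementation factory above: it only takes effect once registered with Jena's enhanced-node personality, after which any resource typed sd:Service can be viewed as a Service. A sketch of that registration and the polymorphic cast, assuming registration happens at application startup (the registration site itself is not part of this diff); the service URI is made up.

    import com.atomgraph.linkeddatahub.model.Service;
    import com.atomgraph.linkeddatahub.model.impl.ServiceImplementation;
    import org.apache.jena.enhanced.BuiltinPersonalities;
    import org.apache.jena.rdf.model.Model;
    import org.apache.jena.rdf.model.ModelFactory;
    import org.apache.jena.rdf.model.Resource;
    import org.apache.jena.vocabulary.RDF;

    public class ServiceRegistrationSketch
    {
        public static void main(String[] args)
        {
            // Register the now dependency-free factory; no Client/MediaTypes required any more
            BuiltinPersonalities.model.add(Service.class, new ServiceImplementation());

            Model model = ModelFactory.createDefaultModel();
            Resource res = model.createResource("https://example.org/services/my-service") // hypothetical URI
                    .addProperty(RDF.type,
                                 model.createResource("http://www.w3.org/ns/sparql-service-description#Service"));

            // Polymorphic cast succeeds because the node is typed sd:Service
            Service service = res.as(Service.class);
            System.out.println(service.getSPARQLEndpoint()); // null until sd:endpoint is added
        }
    }
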
* @@ -80,18 +81,21 @@ public class Generate * @param uriInfo current URI info * @param mediaTypes supported media types * @param application matched application + * @param ontology ontology of the current application * @param system system application * @param agentContext authenticated agent's context * @param resourceContext resource context for creating resources */ @Inject public Generate(@Context Request request, @Context UriInfo uriInfo, MediaTypes mediaTypes, - com.atomgraph.linkeddatahub.apps.model.Application application, Optional agentContext, + com.atomgraph.linkeddatahub.apps.model.Application application, Optional ontology, Optional agentContext, com.atomgraph.linkeddatahub.Application system, @Context ResourceContext resourceContext) { + if (ontology.isEmpty()) throw new InternalServerErrorException("Ontology is not specified"); this.uriInfo = uriInfo; this.mediaTypes = mediaTypes; this.application = application; + this.ontology = ontology.get(); this.agentContext = agentContext; this.system = system; this.resourceContext = resourceContext; @@ -129,10 +133,13 @@ public Response post(Model model) Resource queryRes = part.getPropertyResourceValue(SPIN.query); if (queryRes == null) throw new BadRequestException("Container query string (spin:query) not provided"); - GraphStoreClient gsc = GraphStoreClient.create(getSystem().getClient(), getSystem().getMediaTypes()). - delegation(getUriInfo().getBaseUri(), getAgentContext().orElse(null)); - QueryLoader queryLoader = new QueryLoader(URI.create(queryRes.getURI()), getApplication().getBase().getURI(), Syntax.syntaxARQ, gsc); - Query query = queryLoader.get(); + // Lookup query in ontology + Resource queryResource = getOntology().getOntModel().getResource(queryRes.getURI()); + if (queryResource == null || !queryResource.hasProperty(SP.text)) + throw new BadRequestException("Query resource not found in ontology: " + queryRes.getURI()); + + String queryString = queryResource.getProperty(SP.text).getString(); + Query query = QueryFactory.create(queryString, Syntax.syntaxARQ); if (!query.isSelectType()) throw new BadRequestException("Container query is not of SELECT type"); ParameterizedSparqlString pss = new ParameterizedSparqlString(query.toString()); @@ -167,7 +174,7 @@ public Response post(Model model) } // ban the parent container URI from proxy cache to make sure the next query using it will be fresh (e.g. SELECT that loads children) - getSystem().ban(getApplication().getService().getBackendProxy(), parent.getURI(), true); + getSystem().ban(getSystem().getServiceContext(getApplication().getService()).getBackendProxy(), parent.getURI(), true); return Response.ok().build(); } @@ -253,6 +260,16 @@ public Application getApplication() return application; } + /** + * Returns the ontology. + * + * @return the ontology + */ + public Ontology getOntology() + { + return ontology; + } + /** * Returns the current URI info. 
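
Generate no longer dereferences the spin:query resource over HTTP; it expects the query to be present in the application's ontology with its text stored as sp:text. A standalone sketch of that lookup-and-parse step, with a hypothetical query URI and query string; sp:text is declared inline here instead of via the SP vocabulary class used in the source.

    import org.apache.jena.ontology.OntModel;
    import org.apache.jena.ontology.OntModelSpec;
    import org.apache.jena.query.Query;
    import org.apache.jena.query.QueryFactory;
    import org.apache.jena.query.Syntax;
    import org.apache.jena.rdf.model.ModelFactory;
    import org.apache.jena.rdf.model.Property;
    import org.apache.jena.rdf.model.Resource;

    public class OntologyQueryLookupSketch
    {
        // SPIN's sp:text property, declared inline for this sketch
        static final Property SP_TEXT = ModelFactory.createDefaultModel()
                .createProperty("http://spinrdf.org/sp#text");

        public static Query lookup(OntModel ontModel, String queryURI)
        {
            Resource queryResource = ontModel.getResource(queryURI);
            if (queryResource == null || !queryResource.hasProperty(SP_TEXT))
                throw new IllegalArgumentException("Query resource not found in ontology: " + queryURI);

            // Query strings are stored as sp:text literals on the query resource
            String queryString = queryResource.getProperty(SP_TEXT).getString();
            return QueryFactory.create(queryString, Syntax.syntaxARQ);
        }

        public static void main(String[] args)
        {
            OntModel ontModel = ModelFactory.createOntologyModel(OntModelSpec.OWL_MEM);
            Resource query = ontModel.createResource("https://example.org/ns#ChildrenSelect"); // hypothetical
            query.addProperty(SP_TEXT, "SELECT ?child WHERE { ?child <https://example.org/ns#parent> ?this }");

            System.out.println(lookup(ontModel, "https://example.org/ns#ChildrenSelect"));
        }
    }
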
* diff --git a/src/main/java/com/atomgraph/linkeddatahub/resource/Namespace.java b/src/main/java/com/atomgraph/linkeddatahub/resource/Namespace.java index 7f86014e9..8c234be2a 100644 --- a/src/main/java/com/atomgraph/linkeddatahub/resource/Namespace.java +++ b/src/main/java/com/atomgraph/linkeddatahub/resource/Namespace.java @@ -142,7 +142,7 @@ public Response get(@QueryParam(QUERY) Query query, String ontologyURI = getApplication().getOntology().getURI(); if (log.isDebugEnabled()) log.debug("Returning namespace ontology from OntDocumentManager: {}", ontologyURI); // not returning the injected in-memory ontology because it has inferences applied to it - OntologyModelGetter modelGetter = new OntologyModelGetter(getApplication().as(EndUserApplication.class), getSystem().getOntModelSpec(), getSystem().getOntologyQuery()); + OntologyModelGetter modelGetter = new OntologyModelGetter(getApplication().as(EndUserApplication.class), getSystem(), getSystem().getOntModelSpec(), getSystem().getOntologyQuery()); return getResponseBuilder(modelGetter.getModel(ontologyURI)).build(); } else throw new BadRequestException("SPARQL query string not provided"); diff --git a/src/main/java/com/atomgraph/linkeddatahub/resource/acl/Access.java b/src/main/java/com/atomgraph/linkeddatahub/resource/acl/Access.java index f22b7378e..f5c2b42ae 100644 --- a/src/main/java/com/atomgraph/linkeddatahub/resource/acl/Access.java +++ b/src/main/java/com/atomgraph/linkeddatahub/resource/acl/Access.java @@ -70,6 +70,7 @@ public class Access private final EndUserApplication application; private final Optional agentContext; private final ParameterizedSparqlString documentTypeQuery, documentOwnerQuery, aclQuery, ownerAclQuery; + private final com.atomgraph.linkeddatahub.Application system; /** * Constructs endpoint from the in-memory ontology model. 
@@ -92,6 +93,7 @@ public Access(@Context Request request, @Context UriInfo uriInfo, MediaTypes med this.mediaTypes = mediaTypes; this.application = application.as(EndUserApplication.class); this.agentContext = agentContext; + this.system = system; documentTypeQuery = new ParameterizedSparqlString(system.getDocumentTypeQuery().toString()); documentOwnerQuery = new ParameterizedSparqlString(system.getDocumentOwnerQuery().toString()); aclQuery = new ParameterizedSparqlString(system.getACLQuery().toString()); @@ -120,13 +122,13 @@ public Response get() ParameterizedSparqlString typePss = getDocumentTypeQuery(); typePss.setParams(thisQsm); - ResultSetRewindable docTypesResult = getEndUserService().getEndpointAccessor().select(typePss.asQuery(), List.of(), List.of()); + ResultSetRewindable docTypesResult = getSystem().getServiceContext(getEndUserService()).getEndpointAccessor().select(typePss.asQuery(), List.of(), List.of()); try { final ParameterizedSparqlString authPss = getACLQuery(); authPss.setParams(new AuthorizationParams(getApplication().getAdminApplication().getBase(), accessTo, agent).get()); - Model authModel = getApplication().getAdminApplication().getService().getSPARQLClient().loadModel(authPss.asQuery()); + Model authModel = getSystem().getServiceContext(getApplication().getAdminApplication().getService()).getSPARQLClient().loadModel(authPss.asQuery()); // filter out authorizations with acl:accessToClass foaf:Agent - all agents already have that access ResIterator agentClassIter = authModel.listSubjectsWithProperty(ACL.agentClass, FOAF.Agent); @@ -176,7 +178,7 @@ protected boolean isOwner(Resource accessTo, Resource agent) ParameterizedSparqlString pss = getDocumentOwnerQuery(); pss.setParams(qsm); - ResultSetRewindable ownerResult = getEndUserService().getEndpointAccessor().select(pss.asQuery(), List.of(), List.of()); + ResultSetRewindable ownerResult = getSystem().getServiceContext(getEndUserService()).getEndpointAccessor().select(pss.asQuery(), List.of(), List.of()); try { return ownerResult.hasNext() && agent.equals(ownerResult.next().getResource("owner")); @@ -252,7 +254,7 @@ public Request getRequest() /** * Returns the current application. - * + * * @return application resource */ public EndUserApplication getApplication() @@ -260,6 +262,16 @@ public EndUserApplication getApplication() return application; } + /** + * Returns the system application. + * + * @return system application + */ + public com.atomgraph.linkeddatahub.Application getSystem() + { + return system; + } + /** * Returns URI info for the current request. * diff --git a/src/main/java/com/atomgraph/linkeddatahub/resource/acl/AccessRequest.java b/src/main/java/com/atomgraph/linkeddatahub/resource/acl/AccessRequest.java index 859efa01f..93687800a 100644 --- a/src/main/java/com/atomgraph/linkeddatahub/resource/acl/AccessRequest.java +++ b/src/main/java/com/atomgraph/linkeddatahub/resource/acl/AccessRequest.java @@ -67,6 +67,7 @@ public class AccessRequest private final String emailSubject; private final String emailText; private final UriBuilder authRequestContainerUriBuilder; + private final com.atomgraph.linkeddatahub.Application system; /** * Constructs an AccessRequest resource handler. 
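
Access (and the resources below) now reach the SPARQL endpoint through the system application's ServiceContext rather than through the Service itself. A minimal sketch of that lookup-then-select pattern, with a hypothetical query and class name; the real endpoint uses the parameterized system queries shown above.

    import com.atomgraph.linkeddatahub.Application;
    import com.atomgraph.linkeddatahub.model.Service;
    import com.atomgraph.linkeddatahub.model.ServiceContext;
    import java.util.List;
    import org.apache.jena.query.Query;
    import org.apache.jena.query.QueryFactory;
    import org.apache.jena.query.ResultSetRewindable;

    public class EndpointSelectSketch
    {
        public boolean hasAnyDocumentType(Application system, Service endUserService, String docURI)
        {
            // Resolve the infrastructure for the service, then query its SPARQL endpoint
            ServiceContext ctx = system.getServiceContext(endUserService);

            // Hypothetical query, built inline rather than from a parameterized system query
            Query query = QueryFactory.create("SELECT ?type WHERE { <" + docURI + "> a ?type }");

            ResultSetRewindable result = ctx.getEndpointAccessor().select(query, List.of(), List.of());
            return result.hasNext(); // rewindable result is fully materialized client-side
        }
    }
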
@@ -84,7 +85,8 @@ public AccessRequest(com.atomgraph.linkeddatahub.apps.model.Application applicat if (!application.canAs(EndUserApplication.class)) throw new IllegalStateException("The " + getClass() + " endpoint is only available on end-user applications"); this.application = application.as(EndUserApplication.class); this.agentContext = agentContext; - + this.system = system; + authRequestContainerUriBuilder = this.application.getAdminApplication().getUriBuilder().path(AUTHORIZATION_REQUEST_PATH); emailSubject = servletConfig.getServletContext().getInitParameter(LDHC.requestAccessEMailSubject.getURI()); @@ -174,7 +176,7 @@ public Response post(Model model) new Skolemizer(graphUri.toString()).apply(requestModel); // store access request in the admin service - getApplication().getAdminApplication().getService().getGraphStoreClient().add(graphUri.toString(), requestModel); + getSystem().getServiceContext(getApplication().getAdminApplication().getService()).getGraphStoreClient().add(graphUri.toString(), requestModel); } return Response.ok().build(); @@ -223,12 +225,22 @@ public UriBuilder getAuthRequestContainerUriBuilder() /** * Returns the agent context of the current request. - * + * * @return optional agent context */ public Optional getAgentContext() { return agentContext; } - + + /** + * Returns the system application. + * + * @return system application + */ + public com.atomgraph.linkeddatahub.Application getSystem() + { + return system; + } + } diff --git a/src/main/java/com/atomgraph/linkeddatahub/resource/admin/ClearOntology.java b/src/main/java/com/atomgraph/linkeddatahub/resource/admin/ClearOntology.java index b064bc6bf..ba426111e 100644 --- a/src/main/java/com/atomgraph/linkeddatahub/resource/admin/ClearOntology.java +++ b/src/main/java/com/atomgraph/linkeddatahub/resource/admin/ClearOntology.java @@ -88,33 +88,36 @@ public Response post(@FormParam("uri") String ontologyURI, @HeaderParam("Referer if (log.isDebugEnabled()) log.debug("Clearing ontology with URI '{}' from memory", ontologyURI); ontModelSpec.getDocumentManager().getFileManager().removeCacheModel(ontologyURI); - URI ontologyDocURI = UriBuilder.fromUri(ontologyURI).fragment(null).build(); // skip fragment from the ontology URI to get its graph URI + URI ontologyDocURI = UriBuilder.fromUri(ontologyURI).fragment(null).build(); // skip fragment from the ontology URI to get its graph URI // purge from admin cache - if (getApplication().getFrontendProxy() != null) + URI frontendProxy = getSystem().getFrontendProxy(); + if (frontendProxy != null) { if (log.isDebugEnabled()) log.debug("Purge ontology document with URI '{}' from frontend proxy cache", ontologyDocURI); - ban(getApplication().getFrontendProxy(), ontologyDocURI.toString(), false); + ban(frontendProxy, ontologyDocURI.toString(), false); } - if (getApplication().getService().getBackendProxy() != null) + URI adminBackendProxy = getSystem().getServiceContext(getApplication().getService()).getBackendProxy(); + if (adminBackendProxy != null) { if (log.isDebugEnabled()) log.debug("Ban ontology with URI '{}' from backend proxy cache", ontologyURI); - ban(getApplication().getService().getBackendProxy(), ontologyURI); + ban(adminBackendProxy, ontologyURI); } // purge from end-user cache - if (endUserApp.getFrontendProxy() != null) + if (frontendProxy != null) { if (log.isDebugEnabled()) log.debug("Purge ontology document with URI '{}' from frontend proxy cache", ontologyDocURI); - ban(endUserApp.getFrontendProxy(), ontologyDocURI.toString(), false); + ban(frontendProxy, 
ontologyDocURI.toString(), false); } - if (endUserApp.getService().getBackendProxy() != null) + URI endUserBackendProxy = getSystem().getServiceContext(endUserApp.getService()).getBackendProxy(); + if (endUserBackendProxy != null) { if (log.isDebugEnabled()) log.debug("Ban ontology with URI '{}' from backend proxy cache", ontologyURI); - ban(endUserApp.getService().getBackendProxy(), ontologyURI); + ban(endUserBackendProxy, ontologyURI); } // !!! we need to reload the ontology model before returning a response, to make sure the next request already gets the new version !!! // same logic as in OntologyFilter. TO-DO: encapsulate? - OntologyModelGetter modelGetter = new OntologyModelGetter(endUserApp, ontModelSpec, getSystem().getOntologyQuery()); + OntologyModelGetter modelGetter = new OntologyModelGetter(endUserApp, getSystem(), ontModelSpec, getSystem().getOntologyQuery()); ontModelSpec.setImportModelGetter(modelGetter); if (log.isDebugEnabled()) log.debug("Started loading ontology with URI '{}' from the admin dataset", ontologyURI); Model baseModel = modelGetter.getModel(ontologyURI); @@ -132,21 +135,21 @@ public Response post(@FormParam("uri") String ontologyURI, @HeaderParam("Referer else return Response.ok().build(); } - public void ban(Resource proxy, String url) + public void ban(URI proxyURI, String url) { - ban(proxy, url, true); + ban(proxyURI, url, true); } - /** + /** * Bans URL from the backend proxy cache. - * - * @param proxy proxy server URL + * + * @param proxyURI proxy server URI * @param url banned URL * @param urlEncode if true, the banned URL value will be URL-encoded */ - public void ban(Resource proxy, String url, boolean urlEncode) + public void ban(URI proxyURI, String url, boolean urlEncode) { - if (url == null) throw new IllegalArgumentException("Resource cannot be null"); + if (url == null) throw new IllegalArgumentException("URL cannot be null"); // Extract path from URL - Varnish req.url only contains the path, not the full URL URI uri = URI.create(url); @@ -155,7 +158,7 @@ public void ban(Resource proxy, String url, boolean urlEncode) final String urlValue = urlEncode ? UriComponent.encode(path, UriComponent.Type.UNRESERVED) : path; - try (Response cr = getSystem().getClient().target(proxy.getURI()). + try (Response cr = getSystem().getClient().target(proxyURI). request(). header(CacheInvalidationFilter.HEADER_NAME, urlValue). 
method("BAN", Response.class)) diff --git a/src/main/java/com/atomgraph/linkeddatahub/resource/admin/SignUp.java b/src/main/java/com/atomgraph/linkeddatahub/resource/admin/SignUp.java index bb161a509..dd75de58c 100644 --- a/src/main/java/com/atomgraph/linkeddatahub/resource/admin/SignUp.java +++ b/src/main/java/com/atomgraph/linkeddatahub/resource/admin/SignUp.java @@ -194,7 +194,7 @@ public Response post(Model agentModel) ParameterizedSparqlString pss = new ParameterizedSparqlString(getAgentQuery().toString()); pss.setParam(FOAF.mbox.getLocalName(), mbox); - ResultSet rs = getAgentService().getSPARQLClient().select(pss.asQuery()); + ResultSet rs = getSystem().getServiceContext(getAgentService()).getSPARQLClient().select(pss.asQuery()); boolean agentExists = rs.hasNext(); rs.close(); if (agentExists) throw createSPINConstraintViolationException(agent, FOAF.mbox, "Agent with this mailbox already exists"); @@ -278,9 +278,10 @@ public Response post(Model agentModel) } // purge agent lookup from proxy cache - if (getAgentService().getBackendProxy() != null) + URI agentServiceBackendProxy = getSystem().getServiceContext(getAgentService()).getBackendProxy(); + if (agentServiceBackendProxy != null) { - try (Response response = ban(getAgentService().getBackendProxy(), mbox.getURI())) + try (Response response = ban(agentServiceBackendProxy, mbox.getURI())) { // Response automatically closed by try-with-resources } @@ -563,15 +564,15 @@ public Query getAgentQuery() /** * Bans URL from the backend proxy cache. * - * @param proxy proxy server URL + * @param proxyURI proxy server URI * @param url banned URL * @return proxy server response */ - public Response ban(Resource proxy, String url) + public Response ban(URI proxyURI, String url) { - if (url == null) throw new IllegalArgumentException("Resource cannot be null"); + if (url == null) throw new IllegalArgumentException("URL cannot be null"); - return getSystem().getClient().target(proxy.getURI()).request(). + return getSystem().getClient().target(proxyURI).request(). header(CacheInvalidationFilter.HEADER_NAME, UriComponent.encode(url, UriComponent.Type.UNRESERVED)). 
// the value has to be URL-encoded in order to match request URLs in Varnish method("BAN", Response.class); } diff --git a/src/main/java/com/atomgraph/linkeddatahub/resource/admin/pkg/InstallPackage.java b/src/main/java/com/atomgraph/linkeddatahub/resource/admin/pkg/InstallPackage.java index 1dd1fe494..89b729399 100644 --- a/src/main/java/com/atomgraph/linkeddatahub/resource/admin/pkg/InstallPackage.java +++ b/src/main/java/com/atomgraph/linkeddatahub/resource/admin/pkg/InstallPackage.java @@ -184,19 +184,19 @@ public Response post(@FormParam("package-uri") String packageURI, @HeaderParam(" // Purge package stylesheet from frontend proxy cache String stylesheetURL = "/static/" + packagePath + "/layout.xsl"; - if (endUserApp.getFrontendProxy() != null) + if (getSystem().getFrontendProxy() != null) { if (log.isDebugEnabled()) log.debug("Purging package stylesheet from frontend proxy cache: {}", stylesheetURL); - getSystem().ban(endUserApp.getFrontendProxy(), stylesheetURL, false); + getSystem().ban(getSystem().getFrontendProxy(), stylesheetURL, false); } regenerateMasterStylesheet(endUserApp, pkg); // Purge master stylesheet from frontend proxy cache - if (endUserApp.getFrontendProxy() != null) + if (getSystem().getFrontendProxy() != null) { if (log.isDebugEnabled()) log.debug("Purging master stylesheet from frontend proxy cache: {}", com.atomgraph.linkeddatahub.Application.MASTER_STYLESHEET_PATH); - getSystem().ban(endUserApp.getFrontendProxy(), com.atomgraph.linkeddatahub.Application.MASTER_STYLESHEET_PATH, false); + getSystem().ban(getSystem().getFrontendProxy(), com.atomgraph.linkeddatahub.Application.MASTER_STYLESHEET_PATH, false); } } diff --git a/src/main/java/com/atomgraph/linkeddatahub/resource/admin/pkg/UninstallPackage.java b/src/main/java/com/atomgraph/linkeddatahub/resource/admin/pkg/UninstallPackage.java index 7f72abe54..dc18fc22f 100644 --- a/src/main/java/com/atomgraph/linkeddatahub/resource/admin/pkg/UninstallPackage.java +++ b/src/main/java/com/atomgraph/linkeddatahub/resource/admin/pkg/UninstallPackage.java @@ -252,10 +252,10 @@ private void uninstallStylesheet(Path stylesheetFile, String packagePath, EndUse // Purge stylesheet from frontend proxy cache String stylesheetURL = "/static/" + packagePath + "/layout.xsl"; - if (endUserApp.getFrontendProxy() != null) + if (getSystem().getFrontendProxy() != null) { if (log.isDebugEnabled()) log.debug("Purging stylesheet from frontend proxy cache: {}", stylesheetURL); - getSystem().ban(endUserApp.getFrontendProxy(), stylesheetURL, false); + getSystem().ban(getSystem().getFrontendProxy(), stylesheetURL, false); } // Delete directory if empty @@ -294,10 +294,10 @@ private void regenerateMasterStylesheet(EndUserApplication app, com.atomgraph.li updater.regenerateMasterStylesheet(packagePaths); // Purge master stylesheet from cache - if (app.getFrontendProxy() != null) + if (getSystem().getFrontendProxy() != null) { if (log.isDebugEnabled()) log.debug("Purging master stylesheet from frontend proxy cache: {}", com.atomgraph.linkeddatahub.Application.MASTER_STYLESHEET_PATH); - getSystem().ban(app.getFrontendProxy(), com.atomgraph.linkeddatahub.Application.MASTER_STYLESHEET_PATH, false); + getSystem().ban(getSystem().getFrontendProxy(), com.atomgraph.linkeddatahub.Application.MASTER_STYLESHEET_PATH, false); } } diff --git a/src/main/java/com/atomgraph/linkeddatahub/resource/oauth2/LoginBase.java b/src/main/java/com/atomgraph/linkeddatahub/resource/oauth2/LoginBase.java index c04c04e08..b9942baea 100644 --- 
a/src/main/java/com/atomgraph/linkeddatahub/resource/oauth2/LoginBase.java +++ b/src/main/java/com/atomgraph/linkeddatahub/resource/oauth2/LoginBase.java @@ -218,6 +218,7 @@ public Response get(@QueryParam("code") String code, @QueryParam("state") String Resource agent; Optional existingAgent = mbox.flatMap(this::findAgentByEmail); + URI agentSvcProxy = getSystem().getServiceContext(getAgentService()).getBackendProxy(); if (existingAgent.isEmpty()) { @@ -241,11 +242,11 @@ public Response get(@QueryParam("code") String code, @QueryParam("state") String // lookup Agent resource after its URI has been skolemized agent = agentModel.createResource(agentGraphUri.toString()).getPropertyResourceValue(FOAF.primaryTopic); - getAgentService().getGraphStoreClient().putModel(agentGraphUri.toString(), agentModel); + getSystem().getServiceContext(getAgentService()).getGraphStoreClient().putModel(agentGraphUri.toString(), agentModel); // purge agent lookup from proxy cache (if email is present) - if (mbox.isPresent() && getAgentService().getBackendProxy() != null) - ban(getAgentService().getBackendProxy(), mbox.get().getURI()); + if (mbox.isPresent() && agentSvcProxy != null) + ban(agentSvcProxy, mbox.get().getURI()); Model authModel = ModelFactory.createDefaultModel(); URI authGraphUri = getAdminApplication().getUriBuilder().path(AUTHORIZATION_PATH).path("{slug}/").build(UUID.randomUUID().toString()); @@ -258,12 +259,13 @@ public Response get(@QueryParam("code") String code, @QueryParam("state") String userAccountGraphUri); new Skolemizer(authGraphUri.toString()).apply(authModel); - getAgentService().getGraphStoreClient().putModel(authGraphUri.toString(), authModel); + getSystem().getServiceContext(getAgentService()).getGraphStoreClient().putModel(authGraphUri.toString(), authModel); try { // purge agent lookup from proxy cache - if (getApplication().getService().getBackendProxy() != null) ban(getAdminApplication().getService().getBackendProxy(), jwt.getSubject()); + URI adminSvcProxy = getSystem().getServiceContext(getAdminApplication().getService()).getBackendProxy(); + if (adminSvcProxy != null) ban(adminSvcProxy, jwt.getSubject()); // remove secretary WebID from cache getSystem().getEventBus().post(new com.atomgraph.linkeddatahub.server.event.SignUp(getSystem().getSecretaryWebIDURI())); @@ -286,14 +288,14 @@ public Response get(@QueryParam("code") String code, @QueryParam("state") String agent.addProperty(FOAF.account, userAccount); agentModel.add(agentModel.createResource(getSystem().getSecretaryWebIDURI().toString()), ACL.delegates, agent); // make secretary delegate whis agent - getAgentService().getGraphStoreClient().add(agentGraph.getURI(), agentModel); + getSystem().getServiceContext(getAgentService()).getGraphStoreClient().add(agentGraph.getURI(), agentModel); } - + userAccount.addProperty(SIOC.ACCOUNT_OF, agent); - getAgentService().getGraphStoreClient().putModel(userAccountGraphUri.toString(), accountModel); + getSystem().getServiceContext(getAgentService()).getGraphStoreClient().putModel(userAccountGraphUri.toString(), accountModel); // purge user account lookup from proxy cache - if (getAgentService().getBackendProxy() != null) ban(getAgentService().getBackendProxy(), jwt.getSubject()); + if (agentSvcProxy != null) ban(agentSvcProxy, jwt.getSubject()); } URI originalReferer = URI.create(new String(Base64.getDecoder().decode(stateCookie.getValue())).split(Pattern.quote(";"))[1]); // fails if referer param was not specified @@ -317,7 +319,7 @@ protected boolean userAccountExists(String 
subjectId, String issuer) pss.setLiteral(SIOC.ID.getLocalName(), subjectId); pss.setLiteral(LACL.issuer.getLocalName(), issuer); - return !getAgentService().getSPARQLClient().loadModel(pss.asQuery()).isEmpty(); + return !getSystem().getServiceContext(getAgentService()).getSPARQLClient().loadModel(pss.asQuery()).isEmpty(); } /** @@ -334,7 +336,7 @@ protected Optional findAgentByEmail(Resource mbox) ParameterizedSparqlString pss = new ParameterizedSparqlString(getAgentQuery().toString()); pss.setParam(FOAF.mbox.getLocalName(), mbox); - ResultSet rs = getAgentService().getSPARQLClient().select(pss.asQuery()); + ResultSet rs = getSystem().getServiceContext(getAgentService()).getSPARQLClient().select(pss.asQuery()); try { if (!rs.hasNext()) return Optional.empty(); @@ -507,11 +509,11 @@ public void sendEmail(Resource agent) throws MessagingException, UnsupportedEnco * @param url banned URL * @return proxy server response */ - public Response ban(Resource proxy, String url) + public Response ban(URI proxyURI, String url) { - if (url == null) throw new IllegalArgumentException("Resource cannot be null"); - - return getSystem().getClient().target(proxy.getURI()).request(). + if (url == null) throw new IllegalArgumentException("URL cannot be null"); + + return getSystem().getClient().target(proxyURI).request(). header(CacheInvalidationFilter.HEADER_NAME, UriComponent.encode(url, UriComponent.Type.UNRESERVED)). // the value has to be URL-encoded in order to match request URLs in Varnish method("BAN", Response.class); } diff --git a/src/main/java/com/atomgraph/linkeddatahub/resource/upload/Item.java b/src/main/java/com/atomgraph/linkeddatahub/resource/upload/Item.java index 90fae8a00..6cc6ac28f 100644 --- a/src/main/java/com/atomgraph/linkeddatahub/resource/upload/Item.java +++ b/src/main/java/com/atomgraph/linkeddatahub/resource/upload/Item.java @@ -305,7 +305,7 @@ public List getWritableMediaTypes(Class clazz) public Model describe() { // TO-DO: can we avoid hardcoding the query string here? - return getService().getSPARQLClient().loadModel(QueryFactory.create("DESCRIBE <" + getURI() + ">")); + return getSystem().getServiceContext(getService()).getSPARQLClient().loadModel(QueryFactory.create("DESCRIBE <" + getURI() + ">")); } /** diff --git a/src/main/java/com/atomgraph/linkeddatahub/server/filter/request/AuthenticationFilter.java b/src/main/java/com/atomgraph/linkeddatahub/server/filter/request/AuthenticationFilter.java index a556d2a07..e12c95587 100644 --- a/src/main/java/com/atomgraph/linkeddatahub/server/filter/request/AuthenticationFilter.java +++ b/src/main/java/com/atomgraph/linkeddatahub/server/filter/request/AuthenticationFilter.java @@ -137,7 +137,8 @@ protected Model loadModel(ParameterizedSparqlString pss, QuerySolutionMap qsm, c if (service == null) throw new IllegalArgumentException("Service cannot be null"); // send query bindings separately from the query if the service supports the Sesame protocol - if (service.getSPARQLClient() instanceof SesameProtocolClient sesameProtocolClient) + com.atomgraph.linkeddatahub.model.ServiceContext serviceContext = getSystem().getServiceContext(service); + if (serviceContext.getSPARQLClient() instanceof SesameProtocolClient sesameProtocolClient) try (Response cr = sesameProtocolClient.query(pss.asQuery(), Model.class, qsm)) // register(new CacheControlFilter(CacheControl.valueOf("no-cache"))). 
// add Cache-Control: no-cache to request { return cr.readEntity(Model.class); @@ -145,7 +146,7 @@ protected Model loadModel(ParameterizedSparqlString pss, QuerySolutionMap qsm, c else { pss.setParams(qsm); - try (Response cr = service.getSPARQLClient(). // register(new CacheControlFilter(CacheControl.valueOf("no-cache"))). // add Cache-Control: no-cache to request + try (Response cr = serviceContext.getSPARQLClient(). // register(new CacheControlFilter(CacheControl.valueOf("no-cache"))). // add Cache-Control: no-cache to request query(pss.asQuery(), Model.class)) { return cr.readEntity(Model.class); diff --git a/src/main/java/com/atomgraph/linkeddatahub/server/filter/request/AuthorizationFilter.java b/src/main/java/com/atomgraph/linkeddatahub/server/filter/request/AuthorizationFilter.java index 2ddbda545..5051e25d7 100644 --- a/src/main/java/com/atomgraph/linkeddatahub/server/filter/request/AuthorizationFilter.java +++ b/src/main/java/com/atomgraph/linkeddatahub/server/filter/request/AuthorizationFilter.java @@ -43,7 +43,7 @@ import jakarta.ws.rs.container.ContainerRequestFilter; import jakarta.ws.rs.container.PreMatching; import jakarta.ws.rs.core.Response; -import java.net.URI; +import org.apache.jena.irix.IRIx; import java.util.HashSet; import java.util.Set; import org.apache.jena.query.ParameterizedSparqlString; @@ -157,8 +157,6 @@ public void filter(ContainerRequestContext request) throws IOException public Model authorize(ContainerRequestContext request, Resource agent, Resource accessMode) { Resource accessTo = ResourceFactory.createResource(request.getUriInfo().getAbsolutePath().toString()); - QuerySolutionMap thisQsm = new QuerySolutionMap(); - thisQsm.add(SPIN.THIS_VAR_NAME, accessTo); Model authorizations = ModelFactory.createDefaultModel(); // the agent is the owner of the requested document - automatically grant acl:Read/acl:Append/acl:Write access. 
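The import swap above (java.net.URI replaced by org.apache.jena.irix.IRIx) supports the change that continues below: when a PUT targets a document that does not exist yet, the ACL check falls back to the parent URI, and that parent is now computed with Jena's RFC 3986 resolver rather than java.net.URI.resolve(".."). A self-contained sketch of the resolution step, with made-up example URIs:

    import org.apache.jena.irix.IRIx;
    import org.apache.jena.rdf.model.Resource;
    import org.apache.jena.rdf.model.ResourceFactory;

    public class ParentUriFallback
    {
        /** Resolves ".." against a document URI to obtain its parent document/container URI. */
        public static Resource parentOf(String documentURI)
        {
            IRIx parent = IRIx.create(documentURI).resolve("..");
            return ResourceFactory.createResource(parent.str());
        }

        public static void main(String[] args)
        {
            // PUT to a not-yet-existing document: acl:Write is checked on the parent container instead
            System.out.println(parentOf("https://localhost:4443/queries/new-query/")); // .../queries/
            System.out.println(parentOf("https://localhost:4443/queries/"));           // base URI
        }
    }

The same fallback also redirects the ownership check in the hunk below: the owner of the parent container, not of the nonexistent child, is granted acl:Read/acl:Append/acl:Write.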
@@ -169,46 +167,53 @@ public Model authorize(ContainerRequestContext request, Resource agent, Resource createOwnerAuthorization(authorizations, accessTo, agent); } - ResultSetRewindable docTypesResult = loadResultSet(getApplication().get().getService(), getDocumentTypeQuery(), thisQsm); + QuerySolutionMap thisQsm = new QuerySolutionMap(); + thisQsm.add(SPIN.THIS_VAR_NAME, accessTo); + ResultSetRewindable docTypesResult = loadResultSet(getApplication().get().getService(), getDocumentTypeQuery(), thisQsm); try { - if (!docTypesResult.hasNext()) // if the document resource has no types, we assume the document does not exist + // special case for PUT requests: if the document does not exist, check acl:Write access on the *parent* URI instead + if (!docTypesResult.hasNext() && request.getMethod().equals(HttpMethod.PUT) && accessMode.equals(ACL.Write)) { - // special case for PUT requests to non-existing document: allow if the agent has acl:Write acess to the *parent* URI - if (request.getMethod().equals(HttpMethod.PUT) && accessMode.equals(ACL.Write)) - { - URI parentURI = URI.create(accessTo.getURI()).resolve(".."); - log.debug("Requested document <{}> not found, falling back to parent URI <{}>", accessTo, parentURI); - accessTo = ResourceFactory.createResource(parentURI.toString()); - - thisQsm = new QuerySolutionMap(); - thisQsm.add(SPIN.THIS_VAR_NAME, accessTo); - - docTypesResult.close(); - docTypesResult = loadResultSet(getApplication().get().getService(), getDocumentTypeQuery(), thisQsm); + // Use Jena's IRIx for RFC 3986-compliant resolution - java.net.URI.resolve("..") is non-compliant + // (RFC 3986 section 5.2.4 step 2D requires ".." to be removed, but java.net.URI leaves it literal) + IRIx parentURI = IRIx.create(accessTo.getURI()).resolve(".."); + Resource parent = ResourceFactory.createResource(parentURI.toString()); + log.debug("Requested document <{}> not found, falling back to parent URI <{}>", parent, parentURI); + QuerySolutionMap parentQsm = new QuerySolutionMap(); + parentQsm.add(SPIN.THIS_VAR_NAME, parent); + ResultSetRewindable parentTypesResult = loadResultSet(getApplication().get().getService(), getDocumentTypeQuery(), parentQsm); + try + { Set parentTypes = new HashSet<>(); - docTypesResult.forEachRemaining(qs -> parentTypes.add(qs.getResource("Type"))); + parentTypesResult.forEachRemaining(qs -> parentTypes.add(qs.getResource("Type"))); // only root and containers allow child documents. 
This needs to be checked before checking ownership if (Collections.disjoint(parentTypes, Set.of(Default.Root, DH.Container))) return null; - docTypesResult.reset(); // rewind result set to the beginning - it's used again later on - + // the agent is the owner of the requested document - automatically grant acl:Read/acl:Append/acl:Write access - if (agent != null && isOwner(accessTo, agent)) + if (agent != null && isOwner(parent, agent)) { - log.debug("Agent <{}> is the owner of <{}>, granting acl:Read/acl:Append/acl:Write access", agent, accessTo); - createOwnerAuthorization(authorizations, accessTo, agent); + log.debug("Agent <{}> is the owner of <{}>, granting acl:Read/acl:Append/acl:Write access", agent, parent); + createOwnerAuthorization(authorizations, parent, agent); } + + accessTo = parent; // redirect ACL query to parent URI since the document does not exist yet + } + finally + { + parentTypesResult.close(); } - // access to non-existing documents is denied if the request method is not PUT *and* the agent has no Write access - else return null; } - + ParameterizedSparqlString pss = getApplication().get().canAs(EndUserApplication.class) ? getACLQuery() : getOwnerACLQuery(); - Query query = new SetResultSetValues().apply(pss.asQuery(), docTypesResult); - pss = new ParameterizedSparqlString(query.toString()); // make sure VALUES are now part of the query string - assert pss.toString().contains("VALUES"); + if (docTypesResult.hasNext()) + { + Query query = new SetResultSetValues().apply(pss.asQuery(), docTypesResult); + pss = new ParameterizedSparqlString(query.toString()); // make sure type VALUES are now part of the query string + assert pss.toString().contains("VALUES"); + } // note we're not setting the $mode value on the ACL queries as we want to provide the AuthorizationContext with all of the agent's authorizations authorizations.add(loadModel(getAdminService(), pss, new AuthorizationParams(getAdminBase(), accessTo, agent).get())); @@ -283,7 +288,8 @@ protected Model loadModel(com.atomgraph.linkeddatahub.model.Service service, Par if (qsm == null) throw new IllegalArgumentException("QuerySolutionMap cannot be null"); // send query bindings separately from the query if the service supports the Sesame protocol - if (service.getSPARQLClient() instanceof SesameProtocolClient sesameProtocolClient) + com.atomgraph.linkeddatahub.model.ServiceContext serviceContext = getSystem().getServiceContext(service); + if (serviceContext.getSPARQLClient() instanceof SesameProtocolClient sesameProtocolClient) try (Response cr = sesameProtocolClient.query(pss.asQuery(), Model.class, qsm)) // register(new CacheControlFilter(CacheControl.valueOf("no-cache"))). // add Cache-Control: no-cache to request { return cr.readEntity(Model.class); @@ -291,7 +297,7 @@ protected Model loadModel(com.atomgraph.linkeddatahub.model.Service service, Par else { pss.setParams(qsm); - try (Response cr = service.getSPARQLClient(). // register(new CacheControlFilter(CacheControl.valueOf("no-cache"))). // add Cache-Control: no-cache to request + try (Response cr = serviceContext.getSPARQLClient(). // register(new CacheControlFilter(CacheControl.valueOf("no-cache"))). 
// add Cache-Control: no-cache to request query(pss.asQuery(), Model.class)) { return cr.readEntity(Model.class); @@ -315,7 +321,8 @@ protected ResultSetRewindable loadResultSet(com.atomgraph.linkeddatahub.model.Se if (qsm == null) throw new IllegalArgumentException("QuerySolutionMap cannot be null"); // send query bindings separately from the query if the service supports the Sesame protocol - if (service.getSPARQLClient() instanceof SesameProtocolClient sesameProtocolClient) + com.atomgraph.linkeddatahub.model.ServiceContext serviceContext = getSystem().getServiceContext(service); + if (serviceContext.getSPARQLClient() instanceof SesameProtocolClient sesameProtocolClient) try (Response cr = sesameProtocolClient.query(pss.asQuery(), ResultSet.class, qsm)) // register(new CacheControlFilter(CacheControl.valueOf("no-cache"))). // add Cache-Control: no-cache to request { return cr.readEntity(ResultSetRewindable.class); @@ -323,7 +330,7 @@ protected ResultSetRewindable loadResultSet(com.atomgraph.linkeddatahub.model.Se else { pss.setParams(qsm); - try (Response cr = service.getSPARQLClient(). // register(new CacheControlFilter(CacheControl.valueOf("no-cache"))). // add Cache-Control: no-cache to request + try (Response cr = serviceContext.getSPARQLClient(). // register(new CacheControlFilter(CacheControl.valueOf("no-cache"))). // add Cache-Control: no-cache to request query(pss.asQuery(), ResultSet.class)) { return cr.readEntity(ResultSetRewindable.class); diff --git a/src/main/java/com/atomgraph/linkeddatahub/server/filter/request/OntologyFilter.java b/src/main/java/com/atomgraph/linkeddatahub/server/filter/request/OntologyFilter.java index c996d5214..0e9e689f5 100644 --- a/src/main/java/com/atomgraph/linkeddatahub/server/filter/request/OntologyFilter.java +++ b/src/main/java/com/atomgraph/linkeddatahub/server/filter/request/OntologyFilter.java @@ -54,12 +54,43 @@ public class OntologyFilter implements ContainerRequestFilter private static final Logger log = LoggerFactory.getLogger(OntologyFilter.class); + /** + * Paths that should not trigger ontology loading to avoid circular dependencies. + * + * When an ontology contains owl:imports pointing to URIs within these paths, + * loading the ontology would trigger HTTP requests to those URIs. If those requests + * are intercepted by this filter, it creates a circular dependency: + * + * 1. Request arrives for /uploads/xyz + * 2. OntologyFilter intercepts it and loads ontology + * 3. Ontology has owl:imports for /uploads/xyz + * 4. Jena FileManager makes HTTP request to /uploads/xyz + * 5. OntologyFilter intercepts it again → infinite loop/deadlock + * + * Additionally, uploaded files are binary/RDF content that don't require + * ontology context for their serving logic. 
+ */ + private static final java.util.Set IGNORED_PATH_PREFIXES = java.util.Set.of( + "uploads/" + ); + @Inject com.atomgraph.linkeddatahub.Application system; @Override public void filter(ContainerRequestContext crc) throws IOException { + String path = crc.getUriInfo().getPath(); + + // Skip ontology loading for paths that may be referenced in owl:imports + // to prevent circular dependency deadlocks during ontology resolution + if (IGNORED_PATH_PREFIXES.stream().anyMatch(path::startsWith)) + { + if (log.isTraceEnabled()) log.trace("Skipping ontology loading for path: {}", path); + crc.setProperty(OWL.Ontology.getURI(), Optional.empty()); + return; + } + crc.setProperty(OWL.Ontology.getURI(), getOntology(crc)); } @@ -117,7 +148,7 @@ public Ontology getOntology(Application app, String uri) // only create InfModel if ontology is not already cached if (!ontModelSpec.getDocumentManager().getFileManager().hasCachedModel(uri)) { - OntologyModelGetter modelGetter = new OntologyModelGetter(app.as(EndUserApplication.class), ontModelSpec, getSystem().getOntologyQuery()); + OntologyModelGetter modelGetter = new OntologyModelGetter(app.as(EndUserApplication.class), getSystem(), ontModelSpec, getSystem().getOntologyQuery()); ontModelSpec.setImportModelGetter(modelGetter); if (log.isDebugEnabled()) log.debug("Started loading ontology with URI '{}' from the admin dataset", uri); Model baseModel = modelGetter.getModel(uri); diff --git a/src/main/java/com/atomgraph/linkeddatahub/server/filter/response/CacheInvalidationFilter.java b/src/main/java/com/atomgraph/linkeddatahub/server/filter/response/CacheInvalidationFilter.java index b3e7e1874..b1d94925b 100644 --- a/src/main/java/com/atomgraph/linkeddatahub/server/filter/response/CacheInvalidationFilter.java +++ b/src/main/java/com/atomgraph/linkeddatahub/server/filter/response/CacheInvalidationFilter.java @@ -33,7 +33,6 @@ import jakarta.ws.rs.core.Response; import java.util.Optional; import java.util.Set; -import org.apache.jena.rdf.model.Resource; import org.glassfish.jersey.uri.UriComponent; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -70,46 +69,46 @@ public void filter(ContainerRequestContext req, ContainerResponseContext resp) t URI parentURI = location.resolve("..").normalize(); URI relativeParentURI = getApplication().get().getBaseURI().relativize(parentURI); - banIfNotNull(getApplication().get().getFrontendProxy(), location.toString()); - banIfNotNull(getApplication().get().getService().getBackendProxy(), location.toString()); + banIfNotNull(getSystem().getFrontendProxy(), location.toString()); + banIfNotNull(getSystem().getServiceContext(getApplication().get().getService()).getBackendProxy(), location.toString()); // ban URI from authorization query results - banIfNotNull(getAdminApplication().getService().getBackendProxy(), location.toString()); + banIfNotNull(getSystem().getServiceContext(getAdminApplication().getService()).getBackendProxy(), location.toString()); // ban parent resource URI in order to avoid stale children data in containers - banIfNotNull(getApplication().get().getFrontendProxy(), parentURI.toString()); - banIfNotNull(getApplication().get().getService().getBackendProxy(), parentURI.toString()); + banIfNotNull(getSystem().getFrontendProxy(), parentURI.toString()); + banIfNotNull(getSystem().getServiceContext(getApplication().get().getService()).getBackendProxy(), parentURI.toString()); if (!relativeParentURI.toString().isEmpty()) // URIs can be relative in queries { - 
banIfNotNull(getApplication().get().getFrontendProxy(), relativeParentURI.toString()); - banIfNotNull(getApplication().get().getService().getBackendProxy(), relativeParentURI.toString()); + banIfNotNull(getSystem().getFrontendProxy(), relativeParentURI.toString()); + banIfNotNull(getSystem().getServiceContext(getApplication().get().getService()).getBackendProxy(), relativeParentURI.toString()); } // ban all results of queries that use forClass type if (req.getUriInfo().getQueryParameters().containsKey(AC.forClass.getLocalName())) { String forClass = req.getUriInfo().getQueryParameters().getFirst(AC.forClass.getLocalName()); - banIfNotNull(getApplication().get().getFrontendProxy(), forClass); - banIfNotNull(getApplication().get().getService().getBackendProxy(), forClass); + banIfNotNull(getSystem().getFrontendProxy(), forClass); + banIfNotNull(getSystem().getServiceContext(getApplication().get().getService()).getBackendProxy(), forClass); } } - + if (Set.of(HttpMethod.POST, HttpMethod.PUT, HttpMethod.DELETE, HttpMethod.PATCH).contains(req.getMethod())) { // ban all admin. entries when the admin dataset is changed - not perfect, but works if (!getAdminApplication().getBaseURI().relativize(req.getUriInfo().getAbsolutePath()).isAbsolute()) // URL is relative to the admin app's base URI { - banIfNotNull(getAdminApplication().getService().getBackendProxy(), getAdminApplication().getBaseURI().toString()); - banIfNotNull(getAdminApplication().getService().getBackendProxy(), "foaf:Agent"); // queries use prefixed names instead of absolute URIs - banIfNotNull(getAdminApplication().getService().getBackendProxy(), "acl:AuthenticatedAgent"); + banIfNotNull(getSystem().getServiceContext(getAdminApplication().getService()).getBackendProxy(), getAdminApplication().getBaseURI().toString()); + banIfNotNull(getSystem().getServiceContext(getAdminApplication().getService()).getBackendProxy(), "foaf:Agent"); // queries use prefixed names instead of absolute URIs + banIfNotNull(getSystem().getServiceContext(getAdminApplication().getService()).getBackendProxy(), "acl:AuthenticatedAgent"); } if (req.getUriInfo().getAbsolutePath().toString().endsWith("/")) { - banIfNotNull(getApplication().get().getFrontendProxy(), req.getUriInfo().getAbsolutePath().toString()); - banIfNotNull(getApplication().get().getService().getBackendProxy(), req.getUriInfo().getAbsolutePath().toString()); + banIfNotNull(getSystem().getFrontendProxy(), req.getUriInfo().getAbsolutePath().toString()); + banIfNotNull(getSystem().getServiceContext(getApplication().get().getService()).getBackendProxy(), req.getUriInfo().getAbsolutePath().toString()); // ban URI from authorization query results - banIfNotNull(getAdminApplication().getService().getBackendProxy(), req.getUriInfo().getAbsolutePath().toString()); + banIfNotNull(getSystem().getServiceContext(getAdminApplication().getService()).getBackendProxy(), req.getUriInfo().getAbsolutePath().toString()); // ban parent document URIs (those that have a trailing slash) in order to avoid stale children data in containers if (!req.getUriInfo().getAbsolutePath().equals(getApplication().get().getBaseURI())) @@ -118,13 +117,13 @@ public void filter(ContainerRequestContext req, ContainerResponseContext resp) t URI relativeParentURI = getApplication().get().getBaseURI().relativize(parentURI); // ban parent resource URI in order to avoid stale children data in containers - banIfNotNull(getApplication().get().getFrontendProxy(), parentURI.toString()); - 
banIfNotNull(getApplication().get().getService().getBackendProxy(), parentURI.toString()); + banIfNotNull(getSystem().getFrontendProxy(), parentURI.toString()); + banIfNotNull(getSystem().getServiceContext(getApplication().get().getService()).getBackendProxy(), parentURI.toString()); if (!relativeParentURI.toString().isEmpty()) // URIs can be relative in queries { - banIfNotNull(getApplication().get().getFrontendProxy(), relativeParentURI.toString()); - banIfNotNull(getApplication().get().getService().getBackendProxy(), relativeParentURI.toString()); + banIfNotNull(getSystem().getFrontendProxy(), relativeParentURI.toString()); + banIfNotNull(getSystem().getServiceContext(getApplication().get().getService()).getBackendProxy(), relativeParentURI.toString()); } } } @@ -135,14 +134,14 @@ public void filter(ContainerRequestContext req, ContainerResponseContext resp) t * Bans URL from proxy cache if proxy is not null. * Null-safe wrapper that handles the common pattern of banning and closing the response. * - * @param proxy proxy resource (can be null) + * @param proxyURI proxy URI (can be null) * @param url URL to be banned */ - public void banIfNotNull(Resource proxy, String url) + public void banIfNotNull(URI proxyURI, String url) { - if (proxy != null) + if (proxyURI != null) { - try (Response response = ban(proxy, url)) + try (Response response = ban(proxyURI, url)) { // Response is automatically closed by try-with-resources, ensuring connection is released } @@ -157,16 +156,16 @@ public void banIfNotNull(Resource proxy, String url) /** * Bans URL from proxy cache. * - * @param proxy proxy resource + * @param proxyURI proxy URI * @param url URL to be banned * @return response from proxy */ - public Response ban(Resource proxy, String url) + public Response ban(URI proxyURI, String url) { - if (proxy == null) throw new IllegalArgumentException("Proxy resource cannot be null"); - if (url == null) throw new IllegalArgumentException("Resource cannot be null"); + if (proxyURI == null) throw new IllegalArgumentException("Proxy URI cannot be null"); + if (url == null) throw new IllegalArgumentException("URL cannot be null"); - return getClient().target(proxy.getURI()).request(). + return getClient().target(proxyURI).request(). header(HEADER_NAME, UriComponent.encode(url, UriComponent.Type.UNRESERVED)). 
// the value has to be URL-encoded in order to match request URLs in Varnish method("BAN", Response.class); } diff --git a/src/main/java/com/atomgraph/linkeddatahub/server/filter/response/ProvenanceFilter.java b/src/main/java/com/atomgraph/linkeddatahub/server/filter/response/ProvenanceFilter.java index 46b9c0331..fab67d9ce 100644 --- a/src/main/java/com/atomgraph/linkeddatahub/server/filter/response/ProvenanceFilter.java +++ b/src/main/java/com/atomgraph/linkeddatahub/server/filter/response/ProvenanceFilter.java @@ -51,6 +51,7 @@ public class ProvenanceFilter implements ContainerResponseFilter private static final Logger log = LoggerFactory.getLogger(ProvenanceFilter.class); @Inject jakarta.inject.Provider> service; + @Inject com.atomgraph.linkeddatahub.Application system; @Override public void filter(ContainerRequestContext request, ContainerResponseContext response)throws IOException @@ -78,7 +79,7 @@ public void filter(ContainerRequestContext request, ContainerResponseContext res } if (log.isDebugEnabled()) log.debug("PUTting {} triples of provenance metadata", graph.getModel().size()); - getService().get().getGraphStoreClient().putModel(graphGraphUri, model); + system.getServiceContext(getService().get()).getGraphStoreClient().putModel(graphGraphUri, model); } } diff --git a/src/main/java/com/atomgraph/linkeddatahub/server/mapper/ResourceExistsExceptionMapper.java b/src/main/java/com/atomgraph/linkeddatahub/server/mapper/ResourceExistsExceptionMapper.java deleted file mode 100644 index 7cfe91c52..000000000 --- a/src/main/java/com/atomgraph/linkeddatahub/server/mapper/ResourceExistsExceptionMapper.java +++ /dev/null @@ -1,63 +0,0 @@ -/** - * Copyright 2019 Martynas Jusevičius - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ -package com.atomgraph.linkeddatahub.server.mapper; - -import com.atomgraph.core.MediaTypes; -import com.atomgraph.linkeddatahub.server.exception.ResourceExistsException; -import com.atomgraph.server.mapper.ExceptionMapperBase; -import jakarta.inject.Inject; -import jakarta.ws.rs.core.HttpHeaders; -import org.apache.jena.rdf.model.ResourceFactory; - -import jakarta.ws.rs.core.Response; -import jakarta.ws.rs.ext.ExceptionMapper; -import org.apache.jena.rdf.model.Resource; - -/** - * JAX-RS mapper for resource conflict exceptions. - * - * @author Martynas Jusevičius {@literal } - */ -@Deprecated -public class ResourceExistsExceptionMapper extends ExceptionMapperBase implements ExceptionMapper -{ - - /** - * Constructs mapper from media types. - * - * @param mediaTypes registry of readable/writeable media types - */ - @Inject - public ResourceExistsExceptionMapper(MediaTypes mediaTypes) - { - super(mediaTypes); - } - - @Override - public Response toResponse(ResourceExistsException ex) - { - Resource exception = toResource(ex, Response.Status.CONFLICT, - ResourceFactory.createResource("http://www.w3.org/2011/http-statusCodes#Conflict")); - ex.getModel().add(exception.getModel()); - - return getResponseBuilder(ex.getModel()). 
- status(Response.Status.CONFLICT). - header(HttpHeaders.LOCATION, ex.getURI()). - build(); - } - -} diff --git a/src/main/java/com/atomgraph/linkeddatahub/server/model/impl/DirectGraphStoreImpl.java b/src/main/java/com/atomgraph/linkeddatahub/server/model/impl/DirectGraphStoreImpl.java index 191a95d0f..fbaa849b8 100644 --- a/src/main/java/com/atomgraph/linkeddatahub/server/model/impl/DirectGraphStoreImpl.java +++ b/src/main/java/com/atomgraph/linkeddatahub/server/model/impl/DirectGraphStoreImpl.java @@ -165,7 +165,7 @@ public DirectGraphStoreImpl(@Context Request request, @Context UriInfo uriInfo, @Context SecurityContext securityContext, Optional agentContext, @Context Providers providers, com.atomgraph.linkeddatahub.Application system) { - super(request, service.get(), mediaTypes, uriInfo); + super(request, system.getServiceContext(service.get()).getGraphStoreClient(), mediaTypes, uriInfo); if (ontology.isEmpty()) throw new InternalServerErrorException("Ontology is not specified"); if (service.isEmpty()) throw new InternalServerErrorException("Service is not specified"); this.application = application; @@ -216,7 +216,7 @@ public Response post(Model model) { if (log.isTraceEnabled()) log.trace("POST Graph Store request with RDF payload: {} payload size(): {}", model, model.size()); - final Model existingModel = getService().getGraphStoreClient().getModel(getURI().toString()); + final Model existingModel = getSystem().getServiceContext(getService()).getGraphStoreClient().getModel(getURI().toString()); Response.ResponseBuilder rb = evaluatePreconditions(existingModel); if (rb != null) return rb.build(); // preconditions not met @@ -232,7 +232,7 @@ public Response post(Model model) if (log.isDebugEnabled()) log.debug("POST Model to named graph with URI: {}", getURI()); // First remove old dct:modified values from the triplestore, then add new data existingModel.createResource(getURI().toString()).removeAll(DCTerms.modified); - getService().getGraphStoreClient().putModel(getURI().toString(), existingModel.add(model)); // replace entire graph to avoid accumulating dct:modified + getSystem().getServiceContext(getService()).getGraphStoreClient().putModel(getURI().toString(), existingModel.add(model)); // replace entire graph to avoid accumulating dct:modified Model updatedModel = existingModel.add(model); submitImports(model); @@ -284,7 +284,7 @@ public Response put(Model model) Model existingModel = null; try { - existingModel = getService().getGraphStoreClient().getModel(getURI().toString()); + existingModel = getSystem().getServiceContext(getService()).getGraphStoreClient().getModel(getURI().toString()); Response.ResponseBuilder rb = evaluatePreconditions(existingModel); if (rb != null) return rb.build(); // preconditions not met @@ -317,7 +317,7 @@ public Response put(Model model) addProperty(ACL.owner, getAgentContext().get().getAgent()); if (log.isDebugEnabled()) log.debug("PUT Model into new named graph with URI: {}", getURI()); - getService().getGraphStoreClient().putModel(getURI().toString(), model); // TO-DO: catch exceptions + getSystem().getServiceContext(getService()).getGraphStoreClient().putModel(getURI().toString(), model); // TO-DO: catch exceptions submitImports(model); @@ -343,7 +343,7 @@ public Response put(Model model) addLiteral(DCTerms.modified, ResourceFactory.createTypedLiteral(GregorianCalendar.getInstance())); if (log.isDebugEnabled()) log.debug("PUT Model into existing named graph with URI: {}", getURI()); - 
getService().getGraphStoreClient().putModel(getURI().toString(), model); // TO-DO: catch exceptions + getSystem().getServiceContext(getService()).getGraphStoreClient().putModel(getURI().toString(), model); // TO-DO: catch exceptions submitImports(model); @@ -386,7 +386,7 @@ public Response patch(UpdateRequest updateRequest) // no need to set WITH since we'll be updating model in memory before persisting it final Dataset dataset; - final Model existingModel = getService().getGraphStoreClient().getModel(getURI().toString()); + final Model existingModel = getSystem().getServiceContext(getService()).getGraphStoreClient().getModel(getURI().toString()); if (existingModel == null) throw new NotFoundException("Named graph with URI <" + getURI() + "> not found"); Response.ResponseBuilder rb = evaluatePreconditions(existingModel); @@ -454,7 +454,7 @@ public Response postMultipart(FormDataMultiPart multiPart) validate(model); if (log.isTraceEnabled()) log.trace("POST Graph Store request with RDF payload: {} payload size(): {}", model, model.size()); - final boolean existingGraph = getService().getGraphStoreClient().containsModel(getURI().toString()); + final boolean existingGraph = getSystem().getServiceContext(getService()).getGraphStoreClient().containsModel(getURI().toString()); if (!existingGraph) throw new NotFoundException("Named graph with URI <" + getURI() + "> not found"); new Skolemizer(getURI().toString()).apply(model); // skolemize before writing files (they require absolute URIs) @@ -530,7 +530,7 @@ public Response delete() try { - Model existingModel = getService().getGraphStoreClient().getModel(getURI().toString()); + Model existingModel = getSystem().getServiceContext(getService()).getGraphStoreClient().getModel(getURI().toString()); Response.ResponseBuilder rb = evaluatePreconditions(existingModel); if (rb != null) return rb.build(); // preconditions not met @@ -980,7 +980,7 @@ public Set getAllowedMethods() */ public EndpointAccessor getEndpointAccessor() { - return getService().getEndpointAccessor(); + return getSystem().getServiceContext(getService()).getEndpointAccessor(); } /** diff --git a/src/main/java/com/atomgraph/linkeddatahub/server/model/impl/SPARQLEndpointImpl.java b/src/main/java/com/atomgraph/linkeddatahub/server/model/impl/SPARQLEndpointImpl.java index 89e4df665..75cdc9718 100644 --- a/src/main/java/com/atomgraph/linkeddatahub/server/model/impl/SPARQLEndpointImpl.java +++ b/src/main/java/com/atomgraph/linkeddatahub/server/model/impl/SPARQLEndpointImpl.java @@ -31,23 +31,25 @@ /** * LinkedDataHub SPARQL endpoint implementation. * We need to subclass the Core class because we're injecting an optional Service. - * + * * @author Martynas Jusevičius {@literal } */ public class SPARQLEndpointImpl extends com.atomgraph.core.model.impl.SPARQLEndpointImpl { - + /** * Constructs endpoint. 
- * + * * @param request current request * @param service SPARQL service * @param mediaTypes registry of readable/writable media types + * @param system system application */ @Inject - public SPARQLEndpointImpl(@Context Request request, Optional service, MediaTypes mediaTypes) + public SPARQLEndpointImpl(@Context Request request, Optional service, MediaTypes mediaTypes, + com.atomgraph.linkeddatahub.Application system) { - super(request, service.get(), mediaTypes); + super(request, system.getServiceContext(service.get()).getEndpointAccessor(), mediaTypes); } /** diff --git a/src/main/java/com/atomgraph/linkeddatahub/server/util/OntologyModelGetter.java b/src/main/java/com/atomgraph/linkeddatahub/server/util/OntologyModelGetter.java index 1ea63a98e..4993dd07c 100644 --- a/src/main/java/com/atomgraph/linkeddatahub/server/util/OntologyModelGetter.java +++ b/src/main/java/com/atomgraph/linkeddatahub/server/util/OntologyModelGetter.java @@ -40,19 +40,22 @@ public class OntologyModelGetter implements org.apache.jena.rdf.model.ModelGette private static final Logger log = LoggerFactory.getLogger(OntologyModelGetter.class); private final EndUserApplication app; + private final com.atomgraph.linkeddatahub.Application system; private final OntModelSpec ontModelSpec; private final Query ontologyQuery; - + /** * Constructs ontology getter for application. - * + * * @param app end-user application resource + * @param system system application * @param ontModelSpec ontology specification * @param ontologyQuery SPARQL query that loads ontology terms */ - public OntologyModelGetter(EndUserApplication app, OntModelSpec ontModelSpec, Query ontologyQuery) + public OntologyModelGetter(EndUserApplication app, com.atomgraph.linkeddatahub.Application system, OntModelSpec ontModelSpec, Query ontologyQuery) { this.app = app; + this.system = system; this.ontModelSpec = ontModelSpec; this.ontologyQuery = ontologyQuery; } @@ -63,7 +66,7 @@ public Model getModel(String uri) // attempt to load ontology model from the admin endpoint. TO-DO: is that necessary if ontologies terms are now stored in a single graph? ParameterizedSparqlString ontologyPss = new ParameterizedSparqlString(getOntologyQuery().toString()); ontologyPss.setIri(LDT.ontology.getLocalName(), uri); - Model model = getApplication().getAdminApplication().getService().getSPARQLClient().loadModel(ontologyPss.asQuery()); + Model model = getSystem().getServiceContext(getApplication().getAdminApplication().getService()).getSPARQLClient().loadModel(ontologyPss.asQuery()); if (!model.isEmpty()) return model; @@ -87,7 +90,7 @@ public Model getModel(String uri, ModelReader loadIfAbsent) /** * Returns the application. - * + * * @return application resource */ public EndUserApplication getApplication() @@ -95,6 +98,16 @@ public EndUserApplication getApplication() return app; } + /** + * Returns the system application. + * + * @return system application + */ + public com.atomgraph.linkeddatahub.Application getSystem() + { + return system; + } + /** * Returns ontology specification. 
* diff --git a/src/main/java/com/atomgraph/linkeddatahub/vocabulary/ACL.java b/src/main/java/com/atomgraph/linkeddatahub/vocabulary/ACL.java index 1a528e187..259d34a0e 100644 --- a/src/main/java/com/atomgraph/linkeddatahub/vocabulary/ACL.java +++ b/src/main/java/com/atomgraph/linkeddatahub/vocabulary/ACL.java @@ -60,6 +60,9 @@ public static String getURI() /** acl:Append access mode */ public static final OntClass Append = m_model.createClass( NS + "Append" ); + /** acl:Control access mode */ + public static final OntClass Control = m_model.createClass( NS + "Control" ); + /** acl:AuthenticatedAgent class */ public static final OntClass AuthenticatedAgent = m_model.createClass( NS + "AuthenticatedAgent" ); diff --git a/src/main/java/com/atomgraph/linkeddatahub/vocabulary/LAPP.java b/src/main/java/com/atomgraph/linkeddatahub/vocabulary/LAPP.java index 3161938c9..9e7dbf8cd 100644 --- a/src/main/java/com/atomgraph/linkeddatahub/vocabulary/LAPP.java +++ b/src/main/java/com/atomgraph/linkeddatahub/vocabulary/LAPP.java @@ -68,10 +68,10 @@ public static String getURI() public static final OntClass Package = m_model.createClass( NS + "Package" ); /** Admin application class */ - public static final ObjectProperty adminApplication = m_model.createObjectProperty( NS + "adminApplication" ); - - /** End-user application class */ - public static final ObjectProperty endUserApplication = m_model.createObjectProperty( NS + "endUserApplication" ); +// public static final ObjectProperty adminApplication = m_model.createObjectProperty( NS + "adminApplication" ); +// +// /** End-user application class */ +// public static final ObjectProperty endUserApplication = m_model.createObjectProperty( NS + "endUserApplication" ); /** Frontend proxy property */ public static final ObjectProperty frontendProxy = m_model.createObjectProperty( NS + "frontendProxy" ); diff --git a/src/main/java/com/atomgraph/linkeddatahub/vocabulary/LDHC.java b/src/main/java/com/atomgraph/linkeddatahub/vocabulary/LDHC.java index 17eca3f42..d0bee4f59 100644 --- a/src/main/java/com/atomgraph/linkeddatahub/vocabulary/LDHC.java +++ b/src/main/java/com/atomgraph/linkeddatahub/vocabulary/LDHC.java @@ -170,4 +170,13 @@ public static String getURI() /** OIDC refresh token properties property */ public static final DatatypeProperty oidcRefreshTokens = m_model.createDatatypeProperty( NS + "oidcRefreshTokens" ); + /** Frontend proxy URI property (Varnish frontend cache, used for cache invalidation) */ + public static final ObjectProperty frontendProxy = m_model.createObjectProperty( NS + "frontendProxy" ); + + /** Backend proxy URI for the admin SPARQL service (used for cache invalidation and endpoint URI rewriting) */ + public static final ObjectProperty backendProxyAdmin = m_model.createObjectProperty( NS + "backendProxyAdmin" ); + + /** Backend proxy URI for the end-user SPARQL service (used for cache invalidation and endpoint URI rewriting) */ + public static final ObjectProperty backendProxyEndUser = m_model.createObjectProperty( NS + "backendProxyEndUser" ); + } diff --git a/src/main/resources/com/atomgraph/linkeddatahub/lapp.ttl b/src/main/resources/com/atomgraph/linkeddatahub/lapp.ttl index 887c75f9c..fe0acaa79 100644 --- a/src/main/resources/com/atomgraph/linkeddatahub/lapp.ttl +++ b/src/main/resources/com/atomgraph/linkeddatahub/lapp.ttl @@ -36,24 +36,6 @@ rdfs:comment "Links a resource to an application" ; rdfs:isDefinedBy : . 
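The proxy-related changes earlier in this diff (ClearOntology, SignUp, LoginBase, CacheInvalidationFilter) converge on the same ban(URI, String) shape, and the new LDHC frontendProxy/backendProxyAdmin/backendProxyEndUser properties above are where those proxy URIs now come from. A rough sketch of the BAN request itself follows; the header value and the standalone client are stand-ins (in the codebase they come from CacheInvalidationFilter and the system application).

    import jakarta.ws.rs.client.Client;
    import jakarta.ws.rs.core.Response;
    import java.net.URI;
    import org.glassfish.jersey.uri.UriComponent;

    public class BanSketch
    {
        static final String HEADER_NAME = "X-Escaped-Request-URI"; // stand-in for CacheInvalidationFilter.HEADER_NAME

        /** Sends a BAN request for the given URL to the Varnish proxy at proxyURI. */
        public static void ban(Client client, URI proxyURI, String url)
        {
            if (proxyURI == null || url == null) throw new IllegalArgumentException("Proxy URI and URL cannot be null");

            try (Response response = client.target(proxyURI).request().
                    header(HEADER_NAME, UriComponent.encode(url, UriComponent.Type.UNRESERVED)). // encoded to match request URLs in Varnish
                    method("BAN", Response.class))
            {
                // response closed by try-with-resources so the connection is released
            }
        }
    }

Typical use would be something like ban(client, frontendProxyURI, request.getUriInfo().getAbsolutePath().toString()) after a write, mirroring banIfNotNull() above.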
-:adminApplication a owl:ObjectProperty, owl:FunctionalProperty, owl:InverseFunctionalProperty ; - rdfs:subPropertyOf :application ; - owl:inverseOf :endUserApplication ; - rdfs:domain :EndUserApplication ; - rdfs:range :AdminApplication ; - rdfs:label "Admin application" ; - rdfs:comment "Links end-user application to its administration application" ; - rdfs:isDefinedBy : . - -:endUserApplication a owl:ObjectProperty, owl:FunctionalProperty, owl:InverseFunctionalProperty ; - rdfs:subPropertyOf :application ; - owl:inverseOf :adminApplication ; - rdfs:domain :AdminApplication ; - rdfs:range :EndUserApplication ; - rdfs:label "End-user application" ; - rdfs:comment "Links administration application to its end-user application" ; - rdfs:isDefinedBy : . - :frontendProxy a owl:ObjectProperty ; rdfs:domain :Dataset ; rdfs:range rdfs:Resource ; @@ -123,10 +105,6 @@ :EndUserApplication a rdfs:Class, owl:Class ; spin:constructor :EndUserApplicationConstructor ; - spin:constraint [ a ldh:MissingPropertyValue ; - rdfs:label "Missing admin application" ; - sp:arg1 :adminApplication - ] ; rdfs:label "End-user application" ; rdfs:comment "Application with a dynamic access control and sitemap" ; rdfs:isDefinedBy : . @@ -143,7 +121,6 @@ CONSTRUCT { $this dct:title [ a xsd:string ] ; dct:description [ a xsd:string ] ; - lapp:adminApplication [ a lapp:AdminApplication ] ; ac:stylesheet [ a ldh:File ] ; lapp:public [ a xsd:boolean ] . } @@ -155,24 +132,18 @@ :AdminApplication a rdfs:Class, owl:Class ; spin:constructor :AdminApplicationConstructor ; - spin:constraint [ a ldh:MissingPropertyValue ; - rdfs:label "Missing end-user application" ; - sp:arg1 :endUserApplication - ] ; rdfs:label "Admin application" ; rdfs:comment "Meta-application that manages the access control and sitemap of the main end-user application" ; rdfs:isDefinedBy : . :AdminApplicationConstructor a ldh:Constructor ; sp:text """ - PREFIX lapp: PREFIX xsd: PREFIX dct: CONSTRUCT { $this dct:title [ a xsd:string ] ; dct:description [ a xsd:string ] ; - lapp:endUserApplication [ a lapp:EndUserApplication ] ; } WHERE {}""" ; rdfs:label "Admin application constructor" ; diff --git a/src/main/resources/com/atomgraph/linkeddatahub/ldh.ttl b/src/main/resources/com/atomgraph/linkeddatahub/ldh.ttl index 589ae75b9..511871d81 100644 --- a/src/main/resources/com/atomgraph/linkeddatahub/ldh.ttl +++ b/src/main/resources/com/atomgraph/linkeddatahub/ldh.ttl @@ -493,6 +493,28 @@ ORDER BY ?title """ ; rdfs:isDefinedBy : . +:SelectInstances a sp:Select ; + rdfs:label "Select instances" ; + dct:description "Selects instances of type from the default graph" ; + sp:text """SELECT DISTINCT ?s +WHERE + { ?s a $type ; + ?p ?o + }""" ; + rdfs:isDefinedBy : . + +:SelectInstancesInGraphs a sp:Select ; + rdfs:label "Select instances in graphs" ; + dct:description "Selects instances of type from named graphs" ; + sp:text """SELECT DISTINCT ?s +WHERE + { GRAPH ?g + { ?s a $type ; + ?p ?o + } + }""" ; + rdfs:isDefinedBy : . 
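Both queries added above parameterize the instance type through a $type variable. A short sketch of how a client might bind and execute the ldh:SelectInstances text with Jena, using an in-memory model for illustration (in LinkedDataHub the query would run against the application's SPARQL service):

    import org.apache.jena.query.ParameterizedSparqlString;
    import org.apache.jena.query.QueryExecution;
    import org.apache.jena.query.QueryExecutionFactory;
    import org.apache.jena.rdf.model.Model;
    import org.apache.jena.rdf.model.ModelFactory;
    import org.apache.jena.sparql.vocabulary.FOAF;
    import org.apache.jena.vocabulary.RDF;

    public class SelectInstancesSketch
    {
        public static void main(String[] args)
        {
            Model model = ModelFactory.createDefaultModel();
            model.createResource("https://localhost:4443/people/1#this").addProperty(RDF.type, FOAF.Person);

            // sp:text of ldh:SelectInstances, with $type bound before execution
            ParameterizedSparqlString pss = new ParameterizedSparqlString(
                    "SELECT DISTINCT ?s WHERE { ?s a $type ; ?p ?o }");
            pss.setIri("type", FOAF.Person.getURI());

            try (QueryExecution qexec = QueryExecutionFactory.create(pss.asQuery(), model))
            {
                qexec.execSelect().forEachRemaining(qs -> System.out.println(qs.getResource("s")));
            }
        }
    }

:SelectInstancesInGraphs is the same query wrapped in GRAPH ?g { ... } for setups where instances live in named graphs rather than the default graph.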
+
 :ChildrenView a :View ;
     rdfs:label "Children view" ;
     spin:query :SelectChildren ;
@@ -635,7 +657,7 @@ sp:Query spin:constructor [
     PREFIX xsd:
     PREFIX sp:
     PREFIX sd:
-    PREFIX :
+    PREFIX :
 
 CONSTRUCT {
     $this rdfs:label [ a xsd:string ] ;
diff --git a/src/main/webapp/static/com/atomgraph/linkeddatahub/css/bootstrap.css b/src/main/webapp/static/com/atomgraph/linkeddatahub/css/bootstrap.css
index 66155e480..03abdc375 100644
--- a/src/main/webapp/static/com/atomgraph/linkeddatahub/css/bootstrap.css
+++ b/src/main/webapp/static/com/atomgraph/linkeddatahub/css/bootstrap.css
@@ -37,6 +37,8 @@ button.btn.create-action { height: 30px; }
 a.external::after { content: "⤴"; padding-left: 0.2em; }
 a.btn.create-action { height: 20px; }
 .create-resource .btn.create-action { margin-top: 1em; }
+.btn-class { background: inherit; }
+.btn-class span { color: black; }
 .btn-group.open .btn.dropdown-toggle.create-action { background-image: url('../icons/ic_note_add_black_24px.svg'); }
 li button.btn-edit-constructors, li button.btn-add-data, li button.btn-add-ontology, li button.btn-generate-containers { text-align: left; width: 100%; background-color: inherit; }
 .btn-container { background-image: url('../icons/folder.svg'); }
@@ -48,8 +50,6 @@ li button.btn-edit-constructors, li button.btn-add-data, li button.btn-add-ontol
 .btn-import { background-image: url('../icons/ic_transform_black_24px.svg'); }
 .btn-chart { background-image: url('../icons/ic_show_chart_black_24px.svg'); }
 .btn-view { background-image: url('../icons/ic_view_list_black_24px.svg'); }
-.btn-latest { background-image: url('../icons/ic_new_releases_black_24px.svg'); }
-.btn-geo { background-image: url('../icons/ic_location_on_black_24px.svg'); }
 .btn-logo { background-position: left; background-repeat: no-repeat; padding-left: 32px; }
 .dropdown-menu .btn-logo { background-position: 12px center; padding-left: 40px; }
 .btn.btn-toggle-content { font-size: 0; color: transparent; background-image: url('../icons/baseline-expand_less-24px.svg'); background-position: center center; background-repeat: no-repeat; width: 48px; }
@@ -82,17 +82,20 @@ li button.btn-edit-constructors, li button.btn-add-data, li button.btn-add-ontol
 .dropdown-menu > li > a.btn-list { background-image: url('../icons/view_list_black_24dp.svg'); background-position: 12px center; background-repeat: no-repeat; padding: 5px 5px 5px 40px; }
 .dropdown-menu > li > a.btn-table { background-image: url('../icons/ic_border_all_black_24px.svg'); background-position: 12px center; background-repeat: no-repeat; padding: 5px 5px 5px 40px; }
 .dropdown-menu > li > a.btn-grid { background-image: url('../icons/ic_grid_on_black_24px.svg'); background-position: 12px center; background-repeat: no-repeat; padding: 5px 5px 5px 40px; }
-#doc-tree { display: none; width: 15%; position: fixed; left: 0; top: 106px; height: calc(100% - 106px); }
+#left-sidebar { display: none; width: 15%; position: fixed; left: 0; top: 106px; height: calc(100% - 106px); }
 @media (max-width: 979px) {
     body { padding-top: 0; }
-    #doc-tree { display: block; width: auto; position: unset; top: unset; height: auto; }
-    #doc-tree .nav { max-height: 20em; overflow: auto; }
+    #left-sidebar { display: block; width: auto; position: unset; top: unset; height: auto; }
+    #left-sidebar .nav { max-height: 20em; overflow: auto; }
 }
-#doc-tree .nav-list > li > a { margin-left: 0; margin-right: 0; }
-#doc-tree .nav-list > li > a.btn-container, #doc-tree .nav-list > li > a.btn-app, #doc-tree .nav-list > li > a.btn-chart, #doc-tree .nav-list > li > a.btn-file, #doc-tree .nav-list > li > a.btn-geo, #doc-tree .nav-list > li > a.btn-import, #doc-tree .nav-list > li > a.btn-latest, #doc-tree .nav-list > li > a.btn-query, #doc-tree .nav-list > li > a.btn-service { padding-left: 24px; }
-#doc-tree li { max-height: 20em; overflow: auto; }
-#doc-tree li > a { display: inline-block; }
+#left-sidebar .nav-list > li > a.btn-container { padding-left: 24px; }
+#left-sidebar .nav-list > li > a { margin-left: 0; margin-right: 0; }
+#left-sidebar ul { max-height: 22em; overflow: auto; }
+#left-sidebar li > a { display: inline-block; }
+#left-sidebar .btn-latest { background-image: url('../icons/ic_new_releases_black_24px.svg'); background-color: inherit; }
+#left-sidebar .btn-geo { background-image: url('../icons/ic_location_on_black_24px.svg'); background-color: inherit; }
+
 .btn.btn-expand-tree { height: 24px; width: 24px; background-image: url('../icons/expand_more_black_24dp.svg'); }
 .btn.btn-expand-tree:hover, .btn.btn-expand-tree:focus { background-position: 0 0; }
 .btn.btn-expanded-tree { height: 24px; width: 24px; background-image: url('../icons/chevron_right_black_24dp.svg'); }
diff --git a/src/main/webapp/static/com/atomgraph/linkeddatahub/xsl/bootstrap/2.3.2/client/block.xsl b/src/main/webapp/static/com/atomgraph/linkeddatahub/xsl/bootstrap/2.3.2/client/block.xsl
index 1060d7ba2..745d5aab7 100644
--- a/src/main/webapp/static/com/atomgraph/linkeddatahub/xsl/bootstrap/2.3.2/client/block.xsl
+++ b/src/main/webapp/static/com/atomgraph/linkeddatahub/xsl/bootstrap/2.3.2/client/block.xsl
[XSLT hunks not recoverable from this copy]
diff --git a/src/main/webapp/static/com/atomgraph/linkeddatahub/xsl/bootstrap/2.3.2/client/block/chart.xsl b/src/main/webapp/static/com/atomgraph/linkeddatahub/xsl/bootstrap/2.3.2/client/block/chart.xsl
index 5541a334b..53307ebbc 100644
--- a/src/main/webapp/static/com/atomgraph/linkeddatahub/xsl/bootstrap/2.3.2/client/block/chart.xsl
+++ b/src/main/webapp/static/com/atomgraph/linkeddatahub/xsl/bootstrap/2.3.2/client/block/chart.xsl
[XSLT hunks not recoverable from this copy]
diff --git a/src/main/webapp/static/com/atomgraph/linkeddatahub/xsl/bootstrap/2.3.2/client/block/query.xsl b/src/main/webapp/static/com/atomgraph/linkeddatahub/xsl/bootstrap/2.3.2/client/block/query.xsl
index 550630534..fc2c10194 100644
--- a/src/main/webapp/static/com/atomgraph/linkeddatahub/xsl/bootstrap/2.3.2/client/block/query.xsl
+++ b/src/main/webapp/static/com/atomgraph/linkeddatahub/xsl/bootstrap/2.3.2/client/block/query.xsl
[XSLT hunks not recoverable from this copy]
diff --git a/src/main/webapp/static/com/atomgraph/linkeddatahub/xsl/bootstrap/2.3.2/client/block/view.xsl b/src/main/webapp/static/com/atomgraph/linkeddatahub/xsl/bootstrap/2.3.2/client/block/view.xsl
index 3ebcc4766..e5d3cd2db 100644
--- a/src/main/webapp/static/com/atomgraph/linkeddatahub/xsl/bootstrap/2.3.2/client/block/view.xsl
+++ b/src/main/webapp/static/com/atomgraph/linkeddatahub/xsl/bootstrap/2.3.2/client/block/view.xsl
[XSLT hunks not recoverable from this copy]
diff --git a/src/main/webapp/static/com/atomgraph/linkeddatahub/xsl/bootstrap/2.3.2/client/functions.xsl b/src/main/webapp/static/com/atomgraph/linkeddatahub/xsl/bootstrap/2.3.2/client/functions.xsl
index 5731d47c9..27531e32f 100644
--- a/src/main/webapp/static/com/atomgraph/linkeddatahub/xsl/bootstrap/2.3.2/client/functions.xsl
+++ b/src/main/webapp/static/com/atomgraph/linkeddatahub/xsl/bootstrap/2.3.2/client/functions.xsl
[XSLT hunks not recoverable from this copy]
diff --git a/src/main/webapp/static/com/atomgraph/linkeddatahub/xsl/bootstrap/2.3.2/client/map.xsl b/src/main/webapp/static/com/atomgraph/linkeddatahub/xsl/bootstrap/2.3.2/client/map.xsl
index 0fd72e1b5..8c2a46433 100644
--- a/src/main/webapp/static/com/atomgraph/linkeddatahub/xsl/bootstrap/2.3.2/client/map.xsl
+++ b/src/main/webapp/static/com/atomgraph/linkeddatahub/xsl/bootstrap/2.3.2/client/map.xsl
[XSLT hunks not recoverable from this copy]

diff --git a/src/main/webapp/static/com/atomgraph/linkeddatahub/xsl/bootstrap/2.3.2/translations.rdf b/src/main/webapp/static/com/atomgraph/linkeddatahub/xsl/bootstrap/2.3.2/translations.rdf
index a7dfaff31..df130e8df 100644
--- a/src/main/webapp/static/com/atomgraph/linkeddatahub/xsl/bootstrap/2.3.2/translations.rdf
+++ b/src/main/webapp/static/com/atomgraph/linkeddatahub/xsl/bootstrap/2.3.2/translations.rdf
@@ -104,6 +104,10 @@
     Geo
     Geo
+    Other
+    Otro
     Files
     Archivos
diff --git a/src/main/webapp/static/com/atomgraph/linkeddatahub/xsl/client.xsl b/src/main/webapp/static/com/atomgraph/linkeddatahub/xsl/client.xsl
index 052116f39..3f0ec1505 100644
--- a/src/main/webapp/static/com/atomgraph/linkeddatahub/xsl/client.xsl
+++ b/src/main/webapp/static/com/atomgraph/linkeddatahub/xsl/client.xsl
[XSLT hunks not recoverable from this copy]