diff --git a/Dockerfile b/Dockerfile
new file mode 100644
index 00000000000..5f492ea0594
--- /dev/null
+++ b/Dockerfile
@@ -0,0 +1 @@
+# See `conf/docker` for Docker images
diff --git a/conf/docker/build.sh b/conf/docker/build.sh
new file mode 100755
index 00000000000..a4828ba607f
--- /dev/null
+++ b/conf/docker/build.sh
@@ -0,0 +1,42 @@
+#!/bin/sh
+# Creates images and pushes them to Docker Hub.
+# The "kick-the-tires" tag should be relatively stable. No breaking changes.
+# Push to custom tags or tags based on branch names to iterate on the images.
+if [ -z "$1" ]; then
+    echo "No argument supplied. Please specify \"branch\" or \"custom my-custom-tag\" for experiments or \"stable\" if your change won't break anything."
+    exit 1
+fi
+
+if [ "$1" = 'branch' ]; then
+    echo "We'll push a tag to the branch you're on."
+    GIT_BRANCH=$(git rev-parse --abbrev-ref HEAD)
+    TAG=$GIT_BRANCH
+elif [ "$1" = 'stable' ]; then
+    echo "We'll push a tag to the most stable tag (which isn't saying much!)."
+    TAG=kick-the-tires
+elif [ "$1" = 'custom' ]; then
+    if [ -z "$2" ]; then
+        echo "You must provide a custom tag as the second argument."
+        exit 1
+    else
+        echo "We'll push a custom tag."
+        TAG=$2
+    fi
+else
+    echo "Unexpected argument: $1. Exiting. Run with no arguments for help."
+    exit 1
+fi
+echo Images will be pushed to Docker Hub with the tag \"$TAG\".
+# Use "conf" directory as context so we can copy schema.xml into Solr image.
+docker build -t iqss/dataverse-solr:$TAG -f solr/Dockerfile ../../conf
+docker push iqss/dataverse-solr:$TAG
+# TODO: Think about if we really need dataverse.war because it's in dvinstall.zip.
+# FIXME: Automate the building of dataverse.war and dvinstall.zip. Think about https://github.com/IQSS/dataverse/issues/3974 and https://github.com/IQSS/dataverse/pull/3975
+cp ../../target/dataverse*.war dataverse-glassfish/dataverse.war
+cp ../../scripts/installer/dvinstall.zip dataverse-glassfish
+cp ../../doc/sphinx-guides/source/_static/util/default.config dataverse-glassfish
+cp ../../downloads/glassfish-4.1.zip dataverse-glassfish
+cp ../../downloads/weld-osgi-bundle-2.2.10.Final-glassfish4.jar dataverse-glassfish
+docker build -t iqss/dataverse-glassfish:$TAG dataverse-glassfish
+# FIXME: Check the output of `docker build` and only push on success.
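A minimal sketch of one way the FIXME above could be handled, checking the exit status of `docker build` before pushing; this is only an illustration, not part of the patch, and it assumes TAG is already set as in build.sh::

    # Only push the Glassfish image if its build succeeded.
    if docker build -t iqss/dataverse-glassfish:$TAG dataverse-glassfish; then
        docker push iqss/dataverse-glassfish:$TAG
    else
        echo "docker build failed; not pushing iqss/dataverse-glassfish:$TAG" >&2
        exit 1
    fi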
+docker push iqss/dataverse-glassfish:$TAG diff --git a/conf/docker/dataverse-glassfish/.gitignore b/conf/docker/dataverse-glassfish/.gitignore new file mode 100644 index 00000000000..b0e6e38894f --- /dev/null +++ b/conf/docker/dataverse-glassfish/.gitignore @@ -0,0 +1,5 @@ +glassfish-4.1.zip +weld-osgi-bundle-2.2.10.Final-glassfish4.jar +dvinstall.zip +dataverse.war +default.config diff --git a/conf/docker/dataverse-glassfish/Dockerfile b/conf/docker/dataverse-glassfish/Dockerfile new file mode 100644 index 00000000000..939ce98fb72 --- /dev/null +++ b/conf/docker/dataverse-glassfish/Dockerfile @@ -0,0 +1,98 @@ +FROM centos:7.2.1511 +MAINTAINER Dataverse (support@dataverse.org) + +COPY glassfish-4.1.zip /tmp +COPY weld-osgi-bundle-2.2.10.Final-glassfish4.jar /tmp +COPY default.config /tmp +COPY dvinstall.zip /tmp + +# Install dependencies +#RUN yum install -y unzip +RUN yum install -y \ + cronie \ + git \ + java-1.8.0-openjdk-devel \ + nc \ + perl \ + postgresql \ + sha1sum \ + unzip \ + wget + +ENV GLASSFISH_DOWNLOAD_SHA1 d1a103d06682eb08722fbc9a93089211befaa080 +ENV GLASSFISH_DIRECTORY "/usr/local/glassfish4" +ENV HOST_DNS_ADDRESS "localhost" +ENV POSTGRES_DB "dvndb" +ENV POSTGRES_USER "dvnapp" +ENV RSERVE_USER "rserve" +ENV RSERVE_PASSWORD "rserve" + +#RUN ls /tmp +# +RUN find /tmp +# +#RUN exitEarly + +# Install Glassfish 4.1 + +RUN cd /tmp \ + && unzip glassfish-4.1.zip \ + && mv glassfish4 /usr/local \ + && cd /usr/local/glassfish4/glassfish/modules \ + && rm weld-osgi-bundle.jar \ + && cp /tmp/weld-osgi-bundle-2.2.10.Final-glassfish4.jar . \ + #FIXME: Patch Grizzly too! + && echo "Done installing and patching Glassfish" + +RUN chmod g=u /etc/passwd + +RUN mkdir -p /home/glassfish +RUN chgrp -R 0 /home/glassfish && \ + chmod -R g=u /home/glassfish + +RUN mkdir -p /usr/local/glassfish4 +RUN chgrp -R 0 /usr/local/glassfish4 && \ + chmod -R g=u /usr/local/glassfish4 + + +#RUN exitEarlyBeforeJq +RUN yum -y install epel-release +RUN yum install -y jq + +# Install jq +#RUN cd /tmp \ +# && wget https://github.com/stedolan/jq/releases/download/jq-1.5/jq-linux64 \ +# && mv jq-linux64 /usr/local/bin \ +# && chmod +x /usr/local/bin/jq-linux64 \ +# && ln -s /usr/local/bin/jq-linux64 /usr/local/bin/jq + +# Customized persistence xml to avoid database recreation +#RUN mkdir -p /tmp/WEB-INF/classes/META-INF/ +#COPY WEB-INF/classes/META-INF/persistence.xml /tmp/WEB-INF/classes/META-INF/ + +# Install iRods iCommands +#RUN cd /tmp \ +# && yum -y install epel-release \ +# && yum -y install ftp://ftp.renci.org/pub/irods/releases/4.1.6/centos7/irods-icommands-4.1.6-centos7-x86_64.rpm + +#COPY config-glassfish /root/dvinstall +#COPY restart-glassfish /root/dvinstall +#COPY config-dataverse /root/dvinstall + +#RUN cd /root/dvinstall && ./config-dataverse + +COPY ./entrypoint.sh / +#COPY ./ddl /root/dvinstall +#COPY ./init-postgres /root/dvinstall +#COPY ./init-glassfish /root/dvinstall +#COPY ./init-dataverse /root/dvinstall +#COPY ./setup-all.sh /root/dvinstall +#COPY ./setup-irods.sh /root/dvinstall +COPY ./Dockerfile / + +VOLUME /usr/local/glassfish4/glassfish/domains/domain1/files + +EXPOSE 8080 + +ENTRYPOINT ["/entrypoint.sh"] +CMD ["dataverse"] diff --git a/conf/docker/dataverse-glassfish/entrypoint.sh b/conf/docker/dataverse-glassfish/entrypoint.sh new file mode 100755 index 00000000000..bc1b7eb3f93 --- /dev/null +++ b/conf/docker/dataverse-glassfish/entrypoint.sh @@ -0,0 +1,135 @@ +#!/bin/bash -x + +# Entrypoint script for Dataverse web application. 
This script waits +# for dependent services (Rserve, Postgres, Solr) to start before +# initializing Glassfish. + +echo "whoami before..." +whoami +if ! whoami &> /dev/null; then + if [ -w /etc/passwd ]; then + # Make `whoami` return the glassfish user. # See https://docs.openshift.org/3.6/creating_images/guidelines.html#openshift-origin-specific-guidelines + # Fancy bash magic from https://github.com/RHsyseng/container-rhel-examples/blob/1208dcd7d4f431fc6598184dba6341b9465f4197/starter-arbitrary-uid/bin/uid_entrypoint#L4 + echo "${USER_NAME:-glassfish}:x:$(id -u):0:${USER_NAME:-glassfish} user:/home/glassfish:/bin/bash" >> /etc/passwd + fi +fi +echo "whoami after" +whoami + +set -e + +if [ "$1" = 'dataverse' ]; then + + export GLASSFISH_DIRECTORY=/usr/local/glassfish4 + export HOST_DNS_ADDRESS=localhost + + TIMEOUT=30 + + if [ -n "$RSERVE_SERVICE_HOST" ]; then + RSERVE_HOST=$RSERVE_SERVICE_HOST + elif [ -n "$RSERVE_PORT_6311_TCP_ADDR" ]; then + RSERVE_HOST=$RSERVE_PORT_6311_TCP_ADDR + elif [ -z "$RSERVE_HOST" ]; then + RSERVE_HOST="localhost" + fi + export RSERVE_HOST + + if [ -n "$RSERVE_SERVICE_PORT" ]; then + RSERVE_PORT=$RSERVE_SERVICE_PORT + elif [ -n "$RSERVE_PORT_6311_TCP_PORT" ]; then + RSERVE_PORT=$RSERVE_PORT_6311_TCP_PORT + elif [ -z "$RSERVE_PORT" ]; then + RSERVE_PORT="6311" + fi + export RSERVE_PORT + + echo "Using Rserve at $RSERVE_HOST:$RSERVE_PORT" + + if ncat $RSERVE_HOST $RSERVE_PORT -w $TIMEOUT --send-only < /dev/null > /dev/null 2>&1 ; then + echo Rserve running; + else + echo Optional service Rserve not running. + fi + + + # postgres + if [ -n "$POSTGRES_SERVICE_HOST" ]; then + POSTGRES_HOST=$POSTGRES_SERVICE_HOST + elif [ -n "$POSTGRES_PORT_5432_TCP_ADDR" ]; then + POSTGRES_HOST=$POSTGRES_PORT_5432_TCP_ADDR + elif [ -z "$POSTGRES_HOST" ]; then + POSTGRES_HOST="localhost" + fi + export POSTGRES_HOST + + if [ -n "$POSTGRES_SERVICE_PORT" ]; then + POSTGRES_PORT=$POSTGRES_SERVICE_PORT + elif [ -n "$POSTGRES_PORT_5432_TCP_PORT" ]; then + POSTGRES_PORT=$POSTGRES_PORT_5432_TCP_PORT + else + POSTGRES_PORT=5432 + fi + export POSTGRES_PORT + + echo "Using Postgres at $POSTGRES_HOST:$POSTGRES_PORT" + + if ncat $POSTGRES_HOST $POSTGRES_PORT -w $TIMEOUT --send-only < /dev/null > /dev/null 2>&1 ; then + echo Postgres running; + else + echo Required service Postgres not running. Have you started the required services? + exit 1 + fi + + # solr + if [ -n "$SOLR_SERVICE_HOST" ]; then + SOLR_HOST=$SOLR_SERVICE_HOST + elif [ -n "$SOLR_PORT_8983_TCP_ADDR" ]; then + SOLR_HOST=$SOLR_PORT_8983_TCP_ADDR + elif [ -z "$SOLR_HOST" ]; then + SOLR_HOST="localhost" + fi + export SOLR_HOST + + if [ -n "$SOLR_SERVICE_PORT" ]; then + SOLR_PORT=$SOLR_SERVICE_PORT + elif [ -n "$SOLR_PORT_8983_TCP_PORT" ]; then + SOLR_PORT=$SOLR_PORT_8983_TCP_PORT + else + SOLR_PORT=8983 + fi + export SOLR_PORT + + echo "Using Solr at $SOLR_HOST:$SOLR_PORT" + + if ncat $SOLR_HOST $SOLR_PORT -w $TIMEOUT --send-only < /dev/null > /dev/null 2>&1 ; then + echo Solr running; + else + echo Required service Solr not running. Have you started the required services? + exit 1 + fi + + GLASSFISH_INSTALL_DIR="/usr/local/glassfish4" + cd $GLASSFISH_INSTALL_DIR + cp /tmp/dvinstall.zip $GLASSFISH_INSTALL_DIR + unzip dvinstall.zip + cd dvinstall + echo Copying the non-interactive file into place + cp /tmp/default.config . 
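A hedged sketch of how the host/port detection above might be exercised outside OpenShift, using plain `docker run` environment variables; the container names, the network name, and the choice of PostgreSQL image are illustrative assumptions, not something this patch defines::

    # Illustrative only: start Solr and the Glassfish image on a shared
    # network and point the entrypoint at its dependencies via the
    # *_HOST/*_PORT variables it already checks. A PostgreSQL container
    # (e.g. centos/postgresql-94-centos7, as in openshift.json) must be
    # running and reachable as "postgres" for the entrypoint to proceed.
    docker network create dataverse-net
    docker run -d --name solr --network dataverse-net iqss/dataverse-solr:kick-the-tires solr
    docker run -d --name dataverse --network dataverse-net -p 8080:8080 \
        -e POSTGRES_HOST=postgres -e POSTGRES_PORT=5432 \
        -e SOLR_HOST=solr -e SOLR_PORT=8983 \
        -e RSERVE_HOST=rserve \
        iqss/dataverse-glassfish:kick-the-tires dataverse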
+ echo Looking at first few lines of default.config + head default.config + # non-interactive install + echo Running non-interactive install + #./install -y -f > install.out 2> install.err + ./install -y -f + +# if [ -n "$DVICAT_PORT_1247_TCP_PORT" ]; then +# ./setup-irods.sh +# fi + + echo -e "\n\nDataverse started" + + sleep infinity +else + exec "$@" +fi + diff --git a/conf/docker/postgresql/Dockerfile b/conf/docker/postgresql/Dockerfile new file mode 100644 index 00000000000..81ecf0fdeb8 --- /dev/null +++ b/conf/docker/postgresql/Dockerfile @@ -0,0 +1,3 @@ +# PostgreSQL for Dataverse (but consider switching to the image from CentOS) +# +# See also conf/docker/dataverse-glassfish/Dockerfile diff --git a/conf/docker/solr/Dockerfile b/conf/docker/solr/Dockerfile new file mode 100644 index 00000000000..99114ce6a6d --- /dev/null +++ b/conf/docker/solr/Dockerfile @@ -0,0 +1,28 @@ +FROM centos:7.2.1511 +MAINTAINER Dataverse (support@dataverse.org) + +RUN yum install -y wget unzip perl git java-1.8.0-openjdk-devel postgresql.x86_64 + +# Install Solr 4.6.0 +# The context of the build is the "conf" directory. +COPY solr/4.6.0/schema.xml /tmp + +RUN cd /tmp && wget https://archive.apache.org/dist/lucene/solr/4.6.0/solr-4.6.0.tgz && \ + tar xvzf solr-4.6.0.tgz && \ + mv solr-4.6.0 /usr/local/ && \ + cd /usr/local/solr-4.6.0/example/solr/collection1/conf/ && \ + mv schema.xml schema.xml.backup && \ + cp /tmp/schema.xml . && \ + rm /tmp/solr-4.6.0.tgz + +RUN ln -s /usr/local/solr-4.6.0/example/logs /var/log/solr + +VOLUME /usr/local/solr-4.6.0/example/solr/collection1/data + +EXPOSE 8983 + +COPY docker/solr/Dockerfile /Dockerfile +COPY docker/solr/entrypoint.sh / + +ENTRYPOINT ["/entrypoint.sh"] +CMD ["solr"] diff --git a/conf/docker/solr/entrypoint.sh b/conf/docker/solr/entrypoint.sh new file mode 100755 index 00000000000..7fd8d6380c2 --- /dev/null +++ b/conf/docker/solr/entrypoint.sh @@ -0,0 +1,10 @@ +#!/bin/bash + +if [ "$1" = 'solr' ]; then + cd /usr/local/solr-4.6.0/example/ + java -jar start.jar +elif [ "$1" = 'usage' ]; then + echo 'docker run -d iqss/dataverse-solr solr' +else + exec "$@" +fi diff --git a/conf/openshift/openshift.json b/conf/openshift/openshift.json new file mode 100644 index 00000000000..ec0442d401c --- /dev/null +++ b/conf/openshift/openshift.json @@ -0,0 +1,237 @@ +{ + "kind": "Template", + "apiVersion": "v1", + "metadata": { + "name": "dataverse", + "labels": { + "name": "dataverse" + }, + "annotations": { + "openshift.io/description": "Dataverse is open source research data repository software: https://dataverse.org", + "openshift.io/display-name": "Dataverse" + } + }, + "objects": [ + { + "kind": "Service", + "apiVersion": "v1", + "metadata": { + "name": "dataverse-glassfish-service" + }, + "spec": { + "selector": { + "name": "iqss-dataverse-glassfish" + }, + "ports": [ + { + "name": "web", + "protocol": "TCP", + "port": 8080, + "targetPort": 8080 + } + ] + } + }, + { + "kind": "ImageStream", + "apiVersion": "v1", + "metadata": { + "name": "dataverse-plus-glassfish" + }, + "spec": { + "dockerImageRepository": "iqss/dataverse-glassfish" + } + }, + { + "kind": "ImageStream", + "apiVersion": "v1", + "metadata": { + "name": "centos-postgresql-94-centos7" + }, + "spec": { + "dockerImageRepository": "centos/postgresql-94-centos7" + } + }, + { + "kind": "ImageStream", + "apiVersion": "v1", + "metadata": { + "name": "iqss-dataverse-solr" + }, + "spec": { + "dockerImageRepository": "iqss/dataverse-solr" + } + }, + { + "kind": "DeploymentConfig", + "apiVersion": "v1", + 
"metadata": { + "name": "deploy-dataverse-glassfish", + "annotations": { + "template.alpha.openshift.io/wait-for-ready": "true" + } + }, + "spec": { + "template": { + "metadata": { + "labels": { + "name": "iqss-dataverse-glassfish" + } + }, + "spec": { + "containers": [ + { + "name": "dataverse-plus-glassfish", + "image": "dataverse-plus-glassfish", + "ports": [ + { + "containerPort": 8080, + "protocol": "TCP" + } + ], + "env": [ + { + "name": "ADMIN_PASSWORD", + "value": "admin" + }, + { + "name": "SMTP_HOST", + "value": "localhost" + }, + { + "name": "POSTGRES_USER", + "value": "dvnapp" + }, + { + "name": "POSTGRES_PASSWORD", + "value": "dvnappPassword" + }, + { + "name": "POSTGRES_DATABASE", + "value": "dvndb" + } + ], + "imagePullPolicy": "IfNotPresent", + "securityContext": { + "capabilities": {}, + "privileged": false + } + }, + { + "name": "centos-postgresql-94-centos7", + "image": "centos-postgresql-94-centos7", + "ports": [ + { + "containerPort": 5432, + "protocol": "TCP" + } + ], + "env": [ + { + "name": "POSTGRESQL_USER", + "value": "pgUserValue" + }, + { + "name": "POSTGRESQL_PASSWORD", + "value": "pgPasswordValue" + }, + { + "name": "POSTGRESQL_DATABASE", + "value": "pgDatabaseValue" + } + ], + "resources": { + "limits": { + "memory": "256Mi" + } + }, + "imagePullPolicy": "IfNotPresent", + "securityContext": { + "capabilities": {}, + "privileged": false + } + }, + { + "name": "iqss-dataverse-solr", + "image": "iqss-dataverse-solr", + "ports": [ + { + "containerPort": 8983, + "protocol": "TCP" + } + ], + "resources": { + "limits": { + "memory": "256Mi" + } + }, + "imagePullPolicy": "IfNotPresent", + "securityContext": { + "capabilities": {}, + "privileged": false + } + } + ] + } + }, + "strategy": { + "type": "Rolling", + "rollingParams": { + "updatePeriodSeconds": 1, + "intervalSeconds": 1, + "timeoutSeconds": 300 + }, + "resources": {} + }, + "triggers": [ + { + "type": "ImageChange", + "imageChangeParams": { + "automatic": true, + "containerNames": [ + "dataverse-plus-glassfish" + ], + "from": { + "kind": "ImageStreamTag", + "name": "dataverse-plus-glassfish:kick-the-tires" + } + } + }, + { + "type": "ImageChange", + "imageChangeParams": { + "automatic": true, + "containerNames": [ + "centos-postgresql-94-centos7" + ], + "from": { + "kind": "ImageStreamTag", + "name": "centos-postgresql-94-centos7:latest" + } + } + }, + { + "type": "ImageChange", + "imageChangeParams": { + "automatic": true, + "containerNames": [ + "iqss-dataverse-solr" + ], + "from": { + "kind": "ImageStreamTag", + "name": "iqss-dataverse-solr:kick-the-tires" + } + } + }, + { + "type": "ConfigChange" + } + ], + "replicas": 1, + "selector": { + "name": "iqss-dataverse-glassfish" + } + } + } + ] +} diff --git a/doc/sphinx-guides/source/_static/api/dataset-update-metadata.json b/doc/sphinx-guides/source/_static/api/dataset-update-metadata.json new file mode 100644 index 00000000000..6e499d4e164 --- /dev/null +++ b/doc/sphinx-guides/source/_static/api/dataset-update-metadata.json @@ -0,0 +1,86 @@ +{ + "metadataBlocks": { + "citation": { + "displayName": "Citation Metadata", + "fields": [ + { + "typeName": "title", + "multiple": false, + "typeClass": "primitive", + "value": "newTitle" + }, + { + "typeName": "author", + "multiple": true, + "typeClass": "compound", + "value": [ + { + "authorName": { + "typeName": "authorName", + "multiple": false, + "typeClass": "primitive", + "value": "Spruce, Sabrina" + } + } + ] + }, + { + "typeName": "datasetContact", + "multiple": true, + "typeClass": "compound", + "value": [ 
+ { + "datasetContactName": { + "typeName": "datasetContactName", + "multiple": false, + "typeClass": "primitive", + "value": "Spruce, Sabrina" + }, + "datasetContactEmail": { + "typeName": "datasetContactEmail", + "multiple": false, + "typeClass": "primitive", + "value": "spruce@mailinator.com" + } + } + ] + }, + { + "typeName": "dsDescription", + "multiple": true, + "typeClass": "compound", + "value": [ + { + "dsDescriptionValue": { + "typeName": "dsDescriptionValue", + "multiple": false, + "typeClass": "primitive", + "value": "test" + } + } + ] + }, + { + "typeName": "subject", + "multiple": true, + "typeClass": "controlledVocabulary", + "value": [ + "Other" + ] + }, + { + "typeName": "depositor", + "multiple": false, + "typeClass": "primitive", + "value": "Spruce, Sabrina" + }, + { + "typeName": "dateOfDeposit", + "multiple": false, + "typeClass": "primitive", + "value": "2017-04-19" + } + ] + } + } +} diff --git a/doc/sphinx-guides/source/_static/util/check_timer.bash b/doc/sphinx-guides/source/_static/util/check_timer.bash new file mode 100755 index 00000000000..e75ea686496 --- /dev/null +++ b/doc/sphinx-guides/source/_static/util/check_timer.bash @@ -0,0 +1,18 @@ +#!/usr/bin/env bash + +# example monitoring script for EBJ timers. +# currently assumes that there are two timers +# real monitoring commands should replace the echo statements for production use + +r0=`curl -s http://localhost:8080/ejb-timer-service-app/timer` + +if [ $? -ne 0 ]; then + echo "alert - no timer service" # put real alert command here +fi + +r1=`echo $r0 | grep -c "There are 2 active persistent timers on this container"` + +if [ "1" -ne "$r1" ]; then + echo "alert - no active timers" # put real alert command here +fi + diff --git a/doc/sphinx-guides/source/_static/util/createsequence.sql b/doc/sphinx-guides/source/_static/util/createsequence.sql index 2af1e06d45d..2677832abd8 100644 --- a/doc/sphinx-guides/source/_static/util/createsequence.sql +++ b/doc/sphinx-guides/source/_static/util/createsequence.sql @@ -19,7 +19,7 @@ CACHE 1; ALTER TABLE datasetidentifier_seq OWNER TO "dvnapp"; --- And now create a PostgresQL FUNCTION, for JPA to +-- And now create a PostgreSQL FUNCTION, for JPA to -- access as a NamedStoredProcedure: CREATE OR REPLACE FUNCTION generateIdentifierAsSequentialNumber( diff --git a/doc/sphinx-guides/source/admin/monitoring.rst b/doc/sphinx-guides/source/admin/monitoring.rst index 5e2eb95abca..d2c98e39a31 100644 --- a/doc/sphinx-guides/source/admin/monitoring.rst +++ b/doc/sphinx-guides/source/admin/monitoring.rst @@ -9,3 +9,7 @@ In production you'll want to monitor the usual suspects such as CPU, memory, fre https://github.com/IQSS/dataverse/issues/2595 contains some information on enabling monitoring of Glassfish, which is disabled by default. There is a database table called ``actionlogrecord`` that captures events that may be of interest. See https://github.com/IQSS/dataverse/issues/2729 for more discussion around this table. + +Should you be interested in monitoring the EJB timers, this script may be used as an example: + +.. 
literalinclude:: ../_static/util/check_timer.bash diff --git a/doc/sphinx-guides/source/admin/timers.rst b/doc/sphinx-guides/source/admin/timers.rst index f118604654b..3c1ff40f935 100644 --- a/doc/sphinx-guides/source/admin/timers.rst +++ b/doc/sphinx-guides/source/admin/timers.rst @@ -24,21 +24,23 @@ The following JVM option instructs the application to act as the dedicated timer **IMPORTANT:** Note that this option is automatically set by the Dataverse installer script. That means that when **configuring a multi-server cluster**, it will be the responsibility of the installer to remove the option from the :fixedwidthplain:`domain.xml` of every node except the one intended to be the timer server. We also recommend that the following entry in the :fixedwidthplain:`domain.xml`: ```` is changed back to ```` on all the non-timer server nodes. Similarly, this option is automatically set by the installer script. Changing it back to the default setting on a server that doesn't need to run the timer will prevent a potential race condition, where multiple servers try to get a lock on the timer database. +**Note** that for the timer to work, the version of the PostgreSQL JDBC driver your instance is using must match the version of your PostgreSQL database. See the 'Timer not working' section of the :doc:`/admin/troubleshooting` guide. + Harvesting Timers ----------------- These timers are created when scheduled harvesting is enabled by a local admin user (via the "Manage Harvesting Clients" page). -In a multi-node cluster, all these timers will be created on the dedicated timer node (and not necessarily on the node where the harvesting clients was created and/or saved). +In a multi-node cluster, all these timers will be created on the dedicated timer node (and not necessarily on the node where the harvesting clients were created and/or saved). -A timer will be automatically removed, when a harvesting client with an active schedule is deleted, or if the schedule is turned off for an existing client. +A timer will be automatically removed when a harvesting client with an active schedule is deleted, or if the schedule is turned off for an existing client. Metadata Export Timer --------------------- This timer is created automatically whenever the application is deployed or restarted. There is no admin user-accessible configuration for this timer. -This timer runs a daily job that tries to export all the local, published datasets that haven't been exported yet, in all the supported metdata formats, and cache the results on the filesystem. (Note that, normally, an export will happen automatically whenever a dataset is published. So this scheduled job is there to catch any datasets for which that export did not succeed, for one reason or another). Also, since this functionality has been added in version 4.5: if you are upgrading from a previous version, none of your datasets are exported yet. So the first time this job runs, it will attempt to export them all. +This timer runs a daily job that tries to export all the local, published datasets that haven't been exported yet, in all supported metadata formats, and cache the results on the filesystem. (Note that normally an export will happen automatically whenever a dataset is published. This scheduled job is there to catch any datasets for which that export did not succeed, for one reason or another). Also, since this functionality has been added in version 4.5: if you are upgrading from a previous version, none of your datasets are exported yet. 
So the first time this job runs, it will attempt to export them all. This daily job will also update all the harvestable OAI sets configured on your server, adding new and/or newly published datasets or marking deaccessioned datasets as "deleted" in the corresponding sets as needed. @@ -47,4 +49,4 @@ This job is automatically scheduled to run at 2AM local time every night. If rea Known Issues ------------ -We've got several reports of an intermittent issue where the applicaiton fails to deploy with the error message "EJB Timer Service is not available." Please see the :doc:`/admin/troubleshooting` section of this guide for a workaround. \ No newline at end of file +We've received several reports of an intermittent issue where the application fails to deploy with the error message "EJB Timer Service is not available." Please see the :doc:`/admin/troubleshooting` section of this guide for a workaround. diff --git a/doc/sphinx-guides/source/admin/troubleshooting.rst b/doc/sphinx-guides/source/admin/troubleshooting.rst index fb7ed8a8326..662060b7438 100644 --- a/doc/sphinx-guides/source/admin/troubleshooting.rst +++ b/doc/sphinx-guides/source/admin/troubleshooting.rst @@ -38,3 +38,23 @@ Note that it may or may not work on your system, so it is provided as an example .. literalinclude:: ../_static/util/clear_timer.sh +Timer not working +----------------- + +Dataverse relies on EJB timers to perform scheduled tasks: harvesting from remote servers, updating the local OAI sets and running metadata exports. (See :doc:`timers` for details.) If these scheduled jobs are not running on your server, this may be the result of the incompatibility between the version of PostgreSQL database you are using, and PostgreSQL JDBC driver in use by your instance of Glassfish. The symptoms: + +If you are seeing the following in your server.log... + +:fixedwidthplain:`Handling timeout on` ... + +followed by an Exception stack trace with these lines in it: + +:fixedwidthplain:`Internal Exception: java.io.StreamCorruptedException: invalid stream header` ... + +:fixedwidthplain:`Exception Description: Could not deserialize object from byte array` ... + + +... it most likely means that it is the JDBC driver incompatibility that's preventing the timer from working correctly. +Make sure you install the correct version of the driver. For example, if you are running the version 9.3 of PostgreSQL, make sure you have the driver postgresql-9.3-1104.jdbc4.jar in your :fixedwidthplain:`/glassfish/lib` directory. Go `here `_ +to download the correct version of the driver. If you have an older driver in glassfish/lib, make sure to remove it, replace it with the new version and restart Glassfish. (You may need to remove the entire contents of :fixedwidthplain:`/glassfish/domains/domain1/generated` before you start Glassfish). 
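A hedged way to compare the two versions on a typical installation; the path and the `dvnapp`/`dvndb` credentials are the installer defaults, so adjust them for your site::

    # Show which PostgreSQL JDBC driver Glassfish is using and which server
    # version the database reports; the major.minor versions should match.
    ls /usr/local/glassfish4/glassfish/lib/postgresql*.jar
    psql -h localhost -U dvnapp -d dvndb -c 'SHOW server_version;'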
+ diff --git a/doc/sphinx-guides/source/api/native-api.rst b/doc/sphinx-guides/source/api/native-api.rst index 7cce55b81db..756966a610c 100644 --- a/doc/sphinx-guides/source/api/native-api.rst +++ b/doc/sphinx-guides/source/api/native-api.rst @@ -437,13 +437,6 @@ Place this ``user-add.json`` file in your current directory and run the followin curl -d @user-add.json -H "Content-type:application/json" "$SERVER_URL/api/builtin-users?password=$NEWUSER_PASSWORD&key=$BUILTIN_USERS_KEY" -Retrieving the API Token of a Builtin User -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -To retrieve the API token of a builtin user, given that user's password, use the curl command below:: - - curl "$SERVER_URL/api/builtin-users/$DV_USER_NAME/api-token?password=$DV_USER_PASSWORD" - Roles ~~~~~ diff --git a/doc/sphinx-guides/source/conf.py b/doc/sphinx-guides/source/conf.py index 0efeed88168..4f40290c5cf 100755 --- a/doc/sphinx-guides/source/conf.py +++ b/doc/sphinx-guides/source/conf.py @@ -64,9 +64,9 @@ # built documents. # # The short X.Y version. -version = '4.8.1' +version = '4.8.2' # The full version, including alpha/beta/rc tags. -release = '4.8.1' +release = '4.8.2' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. diff --git a/doc/sphinx-guides/source/developers/dev-environment.rst b/doc/sphinx-guides/source/developers/dev-environment.rst index 167cc7ba153..73cf0925549 100755 --- a/doc/sphinx-guides/source/developers/dev-environment.rst +++ b/doc/sphinx-guides/source/developers/dev-environment.rst @@ -29,7 +29,7 @@ As a `Java Enterprise Edition `_. +These guides are for the most recent version of Dataverse. For the guides for **version 4.8.1** please go `here `_. .. toctree:: :glob: diff --git a/doc/sphinx-guides/source/installation/config.rst b/doc/sphinx-guides/source/installation/config.rst index 31bbed45c07..90db8d55afb 100644 --- a/doc/sphinx-guides/source/installation/config.rst +++ b/doc/sphinx-guides/source/installation/config.rst @@ -598,6 +598,11 @@ dataverse.handlenet.admprivphrase +++++++++++++++++++++++++++++++++ This JVM setting is also part of **handles** configuration. The Handle.Net installer lets you choose whether to encrypt the admcredfile private key or not. If you do encrypt it, this is the pass phrase that it's encrypted with. +dataverse.timerServer ++++++++++++++++++++++ + +This JVM option is only relevant if you plan to run multiple Glassfish servers for redundancy. Only one Glassfish server can act as the dedicated timer server and for details on promoting or demoting a Glassfish server to handle this responsibility, see :doc:`/admin/timers`. + Database Settings ----------------- @@ -1204,3 +1209,10 @@ You can replace the default dataset metadata fields that are displayed above fil ``curl http://localhost:8080/api/admin/settings/:CustomDatasetSummaryFields -X PUT -d 'producer,subtitle,alternativeTitle'`` You have to put the datasetFieldType name attribute in the :CustomDatasetSummaryFields setting for this to work. + +:AllowApiTokenLookupViaApi +++++++++++++++++++++++++++ + +Dataverse 4.8.1 and below allowed API Token lookup via API but for better security this has been disabled by default. Set this to true if you really want the old behavior. 
+ +``curl -X PUT -d 'true' http://localhost:8080/api/admin/settings/:AllowApiTokenLookupViaApi`` diff --git a/doc/sphinx-guides/source/installation/prep.rst b/doc/sphinx-guides/source/installation/prep.rst index 9662b5c40b6..035106e62be 100644 --- a/doc/sphinx-guides/source/installation/prep.rst +++ b/doc/sphinx-guides/source/installation/prep.rst @@ -14,6 +14,13 @@ We'll try to get you up and running as quickly as possible, but we thought you m Choose Your Own Installation Adventure -------------------------------------- +NDS Labs Workbench (for Testing Only) ++++++++++++++++++++++++++++++++++++++ + +The National Data Service (NDS) is community-driven effort guided by the National Data Service Consortium. NDS Labs has packaged Dataverse as `one of many data management tools `_ that can be quickly deployed for evaluation purposes in their tool based on Kubernetes called NDS Labs Workbench. To get started, visit http://www.nationaldataservice.org/projects/labs.html . + +Please note that the version of Dataverse in NDS Labs Workbench may lag behind the latest release. Craig Willis from NDS Labs did an excellent job of adding Dataverse 4 to NDS Labs Workbench and the Dataverse team hopes to some day take over the creation of Docker images so the latest version of Dataverse can be evaluated in the workbench. + Vagrant (for Testing Only) ++++++++++++++++++++++++++ diff --git a/doc/sphinx-guides/source/user/data-exploration/worldmap.rst b/doc/sphinx-guides/source/user/data-exploration/worldmap.rst index fe6be851be8..fe2a6e25785 100644 --- a/doc/sphinx-guides/source/user/data-exploration/worldmap.rst +++ b/doc/sphinx-guides/source/user/data-exploration/worldmap.rst @@ -9,7 +9,7 @@ WorldMap: Geospatial Data Exploration Dataverse and WorldMap ====================== -`WorldMap `_ is developed by the Center for Geographic Analysis (CGA) at Harvard and is open source software that helps researchers visualize and explore their data in maps. The WorldMap and Dataverse collaboration allows researchers to upload shapefiles or tabular files to Dataverse for long term storage and receive a persistent identifier (through DOI), then easily navigate into WorldMap to interact with the data and save to WorldMap as well. +`WorldMap `_ is developed by the Center for Geographic Analysis (CGA) at Harvard and is open source software that helps researchers visualize and explore their data in maps. The WorldMap and Dataverse collaboration allows researchers to upload shapefiles or tabular files to Dataverse for long term storage and receive a persistent identifier (through DOI), then easily navigate into WorldMap to interact with the data. Note: WorldMap hosts their own `user guide `_ that covers some of the same material as this page. @@ -33,11 +33,15 @@ Once you have uploaded your .zip shapefile, a Map Data button will appear next t To get started with visualizing your shapefile, click on the blue "Visualize on WorldMap" button in Geoconnect. It may take up to 45 seconds for the data to be sent to WorldMap and then back to Geoconnect. -Once this process has finished, you will be taken to a new page where you can style your map through Attribute, Classification Method, Number of Intervals, and Colors. Clicking "View on WorldMap" will open WorldMap in a new tab, allowing you to see how your map will be displayed there. +Once this process has finished, you will be taken to a new page where you can style your map through Attribute, Classification Method, Number of Intervals, and Colors. 
Clicking "Apply Changes" will send your map to both Dataverse and WorldMap, creating a preview of your map that will be visible on your file page and your dataset page. -After styling your map, you can either save it by clicking "Return to Dataverse" or delete it with the "Delete" button. If you decide to delete the map, it will no longer appear on WorldMap. Returning to Dataverse will send the styled map layer to both Dataverse and WorldMap. A preview of your map will now be visible on your file page and your dataset page. +Clicking "View on WorldMap" will open WorldMap in a new tab, allowing you to see how your map will be displayed there. -To replace your shapefile's map with a new one, simply click the Map Data button again. +You can delete your map with the "Delete" button. If you decide to delete the map, it will no longer appear on WorldMap, and your dataset in Dataverse will no longer display the map preview. + +When you're satisfied with your map, you may click "Return to the Dataverse" to go back to Dataverse. + +In the future, to replace your shapefile's map with a new one, simply click the Map Data button on the dataset or file page to return to the Geoconnect edit map page. Mapping tabular files with Geoconnect ===================================== @@ -121,9 +125,9 @@ Now that you have created your map: - Dataverse will contain a preview of the map and links to the larger version on WorldMap. -The map editor (pictured above) provides a set of options you can use to style your map. The "Return to the Dataverse" button saves your map and brings you back to Dataverse. "View on WorldMap" takes you to the map's page on WorldMap, which offers additional views and options. +The map editor (pictured above) provides a set of options you can use to style your map. Clicking "Apply Changes" saves the current version of your map to Dataverse and Worldmap. The "Return to the Dataverse" button brings you back to Dataverse. "View on WorldMap" takes you to the map's page on WorldMap, which offers additional views and options. -If you'd like to make future changes to your map, you can return to the editor by clicking the "Map Data" button on your file. +If you'd like to make further changes to your map in the future, you can return to the editor by clicking the "Map Data" button on your file. Removing your map ================= diff --git a/doc/sphinx-guides/source/user/dataset-management.rst b/doc/sphinx-guides/source/user/dataset-management.rst index cf6465e3e55..f0e9bd8a6e5 100755 --- a/doc/sphinx-guides/source/user/dataset-management.rst +++ b/doc/sphinx-guides/source/user/dataset-management.rst @@ -225,14 +225,14 @@ your own custom Terms of Use for your Datasets. Custom Terms of Use for Datasets -------------------------------- -If you are unable to use a CC0 waiver for your datasets you are able to set your own custom terms of use. To do so, select "No, do not apply CC0 - "Public Domain Dedication" and a Terms of Use textbox will show up allowing you to enter your own custom terms of use for your dataset. To add more information about the Terms of Use, click on "Additional Information \[+]". +If you are unable to use a CC0 waiver for your datasets you are able to set your own custom terms of use. To do so, select "No, do not apply CC0 - "Public Domain Dedication" and a Terms of Use textbox will show up allowing you to enter your own custom terms of use for your dataset. 
To add more information about the Terms of Use, we have provided fields like Special Permissions, Restrictions, Citation Requirements, etc. Here is an `example of a Data Usage Agreement `_ for datasets that have de-identified human subject data. Restricted Files + Terms of Access ---------------------------------- -If you restrict any files in your dataset, you will be prompted by a pop-up to enter Terms of Access for the data. This can also be edited in the Terms tab or selecting Terms in the "Edit" dropdown button in the dataset. You may also allow users to request access for your restricted files by enabling "Request Access". To add more information about the Terms of Access, click on "Additional Information \[+]". +If you restrict any files in your dataset, you will be prompted by a pop-up to enter Terms of Access for the data. This can also be edited in the Terms tab or selecting Terms in the "Edit" dropdown button in the dataset. You may also allow users to request access for your restricted files by enabling "Request Access". To add more information about the Terms of Access, we have provided fields like Data Access Place, Availability Status, Contact for Access, etc. **Note:** Some Dataverse installations do not allow for file restriction. diff --git a/pom.xml b/pom.xml index 06da0203cf1..50782bab444 100644 --- a/pom.xml +++ b/pom.xml @@ -4,7 +4,7 @@ edu.harvard.iq dataverse - 4.8.1 + 4.8.2 war dataverse diff --git a/scripts/api/setup-optional-harvard.sh b/scripts/api/setup-optional-harvard.sh index 1763fc33adf..a5553a64197 100755 --- a/scripts/api/setup-optional-harvard.sh +++ b/scripts/api/setup-optional-harvard.sh @@ -36,7 +36,7 @@ echo "- Enabling Geoconnect" curl -s -X PUT -d true "$SERVER/admin/settings/:GeoconnectCreateEditMaps" curl -s -X PUT -d true "$SERVER/admin/settings/:GeoconnectViewMaps" echo "- Setting system email" -curl -X PUT -d "Harvard Dataverse Support " http://localhost:8080/api/admin/settings/:SystemEmail +curl -X PUT -d "Harvard Dataverse Support " http://localhost:8080/api/admin/settings/:SystemEmail curl -X PUT -d ", The President & Fellows of Harvard College" http://localhost:8080/api/admin/settings/:FooterCopyright echo "- Setting up the Harvard Shibboleth institutional group" curl -s -X POST -H 'Content-type:application/json' --upload-file data/shibGroupHarvard.json "$SERVER/admin/groups/shib?key=$adminKey" diff --git a/scripts/installer/install b/scripts/installer/install index a620cb00eaa..9edb8d61059 100755 --- a/scripts/installer/install +++ b/scripts/installer/install @@ -155,15 +155,14 @@ my $API_URL = "http://localhost:8080/api"; # doesn't get paranoid) my %POSTGRES_DRIVERS = ( - # "8_4", "postgresql-8.3-603.jdbc4.jar", "8_4", "postgresql-8.4-703.jdbc4.jar", "9_0", "postgresql-9.0-802.jdbc4.jar", "9_1", "postgresql-9.1-902.jdbc4.jar", - "9_2", "postgresql-9.1-902.jdbc4.jar", - "9_3", "postgresql-9.1-902.jdbc4.jar", - "9_4", "postgresql-9.1-902.jdbc4.jar", - "9_5", "postgresql-9.1-902.jdbc4.jar", - "9_6", "postgresql-9.1-902.jdbc4.jar" + "9_2", "postgresql-9.2-1004.jdbc4.jar", + "9_3", "postgresql-9.3-1104.jdbc4.jar", + "9_4", "postgresql-9.4.1212.jar", + "9_5", "postgresql-42.1.4.jar", + "9_6", "postgresql-42.1.4.jar" ); # A few preliminary checks: @@ -967,6 +966,35 @@ if ( -e "/proc/meminfo" && open MEMINFO, "/proc/meminfo" ) { close MEMINFO; +# TODO: Figure out how to determine the amount of memory when running in Docker +# because we're wondering if Dataverse can run in the free OpenShift Online +# offering that only gives you 1 GB of memory. 
Obviously, if this is someone's +# first impression of Dataverse, we want to to run well! What if you try to +# ingest a large file or perform other memory-intensive operations? For more +# context, see https://github.com/IQSS/dataverse/issues/4040#issuecomment-331282286 + if ( -e "/sys/fs/cgroup/memory/memory.limit_in_bytes" && open CGROUPMEM, "/sys/fs/cgroup/memory/memory.limit_in_bytes" ) { + print "We must be running in Docker! Fancy!\n"; + while ( my $limitline = ) { + # The goal of this cgroup check is for + # "Setting the heap limit for Glassfish to 750MB" + # to change to some other value, based on memory available. + print "/sys/fs/cgroup/memory/memory.limit_in_bytes: $limitline\n"; + my $limit_in_kb = $limitline / 1024; + print "Docker limit_in_kb = $limit_in_kb but ignoring\n"; + # In openshift.json, notice how PostgreSQL and Solr have + # resources.limits.memory set to "256Mi". + # If you try to give the Dataverse/Glassfish container twice + # as much memory (512 MB) and allow $sys_mem_total to + # be set below, you should see the following: + # "Setting the heap limit for Glassfish to 192MB." + # FIXME: dataverse.war will not deploy with only 512 GB of memory. + # Again, the goal is 1 GB total (512MB + 256MB + 256MB) for + # Glassfish, PostgreSQL, and Solr to fit in the free OpenShift tier. + #print "setting sys_mem_total to: $limit_in_kb\n"; + #$sys_mem_total = $limit_in_kb; + } + close CGROUPMEM; + } } elsif ( -x "/usr/sbin/sysctl" ) { # MacOS X, probably... diff --git a/scripts/installer/pgdriver/postgresql-42.1.4.jar b/scripts/installer/pgdriver/postgresql-42.1.4.jar new file mode 100644 index 00000000000..08a54b105f8 Binary files /dev/null and b/scripts/installer/pgdriver/postgresql-42.1.4.jar differ diff --git a/scripts/installer/pgdriver/postgresql-9.2-1004.jdbc4.jar b/scripts/installer/pgdriver/postgresql-9.2-1004.jdbc4.jar new file mode 100644 index 00000000000..b9270d21b21 Binary files /dev/null and b/scripts/installer/pgdriver/postgresql-9.2-1004.jdbc4.jar differ diff --git a/scripts/installer/pgdriver/postgresql-9.3-1104.jdbc4.jar b/scripts/installer/pgdriver/postgresql-9.3-1104.jdbc4.jar new file mode 100644 index 00000000000..a79525d7a00 Binary files /dev/null and b/scripts/installer/pgdriver/postgresql-9.3-1104.jdbc4.jar differ diff --git a/scripts/installer/pgdriver/postgresql-9.4.1212.jar b/scripts/installer/pgdriver/postgresql-9.4.1212.jar new file mode 100644 index 00000000000..b0de752d880 Binary files /dev/null and b/scripts/installer/pgdriver/postgresql-9.4.1212.jar differ diff --git a/scripts/search/data/tabular/50by1000.dta.zip b/scripts/search/data/tabular/50by1000.dta.zip new file mode 100644 index 00000000000..4280a0608fa Binary files /dev/null and b/scripts/search/data/tabular/50by1000.dta.zip differ diff --git a/src/main/java/Bundle.properties b/src/main/java/Bundle.properties index dc0219f519c..a4e3fa63c58 100755 --- a/src/main/java/Bundle.properties +++ b/src/main/java/Bundle.properties @@ -158,7 +158,7 @@ notification.createDataset={0} was created in {1}. To learn more about what you notification.dataset.management.title=Dataset Management - Dataset User Guide notification.wasSubmittedForReview={0} was submitted for review to be published in {1}. Don''t forget to publish it or send it back to the contributor\! notification.wasReturnedByReviewer={0} was returned by the curator of {1}. -notification.wasPublished={0}, was published in {1}. +notification.wasPublished={0} was published in {1}. 
notification.worldMap.added={0}, dataset had WorldMap layer data added to it. notification.maplayer.deletefailed=Failed to delete the map layer associated with the restricted file {0} from WorldMap. Please try again, or contact WorldMap and/or Dataverse support. (Dataset: {1}) notification.generic.objectDeleted=The dataverse, dataset, or file for this notification has been deleted. @@ -1117,9 +1117,9 @@ dataset.publish.header=Publish Dataset dataset.rejectBtn=Return to Author dataset.submitBtn=Submit for Review dataset.disabledSubmittedBtn=Submitted for Review -dataset.submitMessage=Submit this dataset for review by the curator of this dataverse for possible publishing. +dataset.submitMessage=You will not be able to make changes to this dataset while it is in review. dataset.submit.success=Your dataset has been submitted for review. -dataset.inreview.infoMessage=This dataset has been submitted for review. +dataset.inreview.infoMessage=\u2013 This dataset is currently under review prior to publication. dataset.submit.failure=Dataset Submission Failed - {0} dataset.submit.failure.null=Can't submit for review. Dataset is null. dataset.submit.failure.isReleased=Latest version of dataset is already released. Only draft versions can be submitted for review. @@ -1155,6 +1155,7 @@ dataset.share.datasetShare=Share Dataset dataset.share.datasetShare.tip=Share this dataset on your favorite social media networks. dataset.share.datasetShare.shareText=View this dataset. dataset.locked.message=Dataset Locked +dataset.locked.inReview.message=Submitted for Review dataset.publish.error=This dataset may not be published because the {0} Service is currently inaccessible. Please try again. Does the issue continue to persist? dataset.publish.error.doi=This dataset may not be published because the DOI update failed. dataset.delete.error=Could not deaccession the dataset because the {0} update failed. @@ -1183,7 +1184,10 @@ dataset.asterisk.tip=Asterisks indicate required fields dataset.message.uploadFiles=Upload Dataset Files - You can drag and drop files from your desktop, directly into the upload widget. dataset.message.editMetadata=Edit Dataset Metadata - Add more metadata about this dataset to help others easily find it. dataset.message.editTerms=Edit Dataset Terms - Update this dataset's terms of use. -dataset.message.locked=Dataset Locked +dataset.message.locked.editNotAllowedInReview=Dataset cannot be edited due to In Review dataset lock. +dataset.message.locked.downloadNotAllowedInReview=Dataset file(s) may not be downloaded due to In Review dataset lock. +dataset.message.locked.downloadNotAllowed=Dataset file(s) may not be downloaded due to dataset lock. +dataset.message.locked.editNotAllowed=Dataset cannot be edited due to dataset lock. dataset.message.createSuccess=This dataset has been created. dataset.message.linkSuccess= {0} has been successfully linked to {1}. dataset.message.metadataSuccess=The metadata for this dataset has been updated. @@ -1275,6 +1279,7 @@ file.restrict=Restrict file.unrestrict=Unrestrict file.restricted.success=Files "{0}" will be restricted once you click on the Save Changes button. 
file.download.header=Download +file.download.subset.header=Download Data Subset file.preview=Preview: file.previewMap=Preview Map:o file.fileName=File Name @@ -1519,10 +1524,10 @@ file.results.btn.sort.option.oldest=Oldest file.results.btn.sort.option.size=Size file.results.btn.sort.option.type=Type file.compute.fileRestricted=File Restricted -file.compute.fileAccessDenied=You cannot compute on this restricted file because you don't have permission to access it. +file.compute.fileAccessDenied=You cannot compute on this restricted file because you do not have permission to access it. dataset.compute.datasetCompute=Dataset Compute Not Supported -dataset.compute.datasetAccessDenied=You cannot compute on this dataset because you don't have permission to access all of the restricted files. -dataset.compute.datasetComputeDisabled=You cannot compute on this dataset because this functionality is not enabled yet. Please click on a file to access computing capalibities. +dataset.compute.datasetAccessDenied=You cannot compute on this dataset because you do not have permission to access all of the restricted files. +dataset.compute.datasetComputeDisabled=You cannot compute on this dataset because this functionality is not enabled yet. Please click on a file to access computing features. # dataset-widgets.xhtml dataset.widgets.title=Dataset Thumbnail + Widgets diff --git a/src/main/java/edu/harvard/iq/dataverse/DOIDataCiteRegisterService.java b/src/main/java/edu/harvard/iq/dataverse/DOIDataCiteRegisterService.java index 4224b565159..17a6b1759eb 100644 --- a/src/main/java/edu/harvard/iq/dataverse/DOIDataCiteRegisterService.java +++ b/src/main/java/edu/harvard/iq/dataverse/DOIDataCiteRegisterService.java @@ -15,11 +15,9 @@ import java.util.List; import java.util.logging.Level; import java.util.logging.Logger; -import javax.annotation.PreDestroy; import javax.ejb.Stateless; import javax.persistence.EntityManager; import javax.persistence.PersistenceContext; -import javax.persistence.Query; import javax.persistence.TypedQuery; import org.jsoup.Jsoup; import org.jsoup.nodes.Document; @@ -57,7 +55,7 @@ public String createIdentifier(String identifier, HashMap metada metadataTemplate.setPublisherYear(metadata.get("datacite.publicationyear")); String xmlMetadata = metadataTemplate.generateXML(); - logger.fine("XML to send to DataCite: " + xmlMetadata); + logger.log(Level.FINE, "XML to send to DataCite: {0}", xmlMetadata); String status = metadata.get("_status").trim(); String target = metadata.get("_target"); @@ -92,8 +90,14 @@ public String createIdentifier(String identifier, HashMap metada try (DataCiteRESTfullClient client = openClient()) { retString = client.postMetadata(xmlMetadata); client.postUrl(identifier.substring(identifier.indexOf(":") + 1), target); + } catch (UnsupportedEncodingException ex) { - Logger.getLogger(DOIDataCiteRegisterService.class.getName()).log(Level.SEVERE, null, ex); + logger.log(Level.SEVERE, null, ex); + + } catch ( RuntimeException rte ) { + logger.log(Level.SEVERE, "Error creating DOI at DataCite: {0}", rte.getMessage()); + logger.log(Level.SEVERE, "Exception", rte); + } } } else if (status.equals("unavailable")) { diff --git a/src/main/java/edu/harvard/iq/dataverse/DataCiteRESTfullClient.java b/src/main/java/edu/harvard/iq/dataverse/DataCiteRESTfullClient.java index 93607a56541..a329f663fb5 100644 --- a/src/main/java/edu/harvard/iq/dataverse/DataCiteRESTfullClient.java +++ b/src/main/java/edu/harvard/iq/dataverse/DataCiteRESTfullClient.java @@ -169,11 +169,11 @@ public 
boolean testDOIExists(String doi) { * @param metadata * @return */ - public String postMetadata(String metadata) throws UnsupportedEncodingException { + public String postMetadata(String metadata) { HttpPost httpPost = new HttpPost(this.url + "/metadata"); httpPost.setHeader("Content-Type", "application/xml;charset=UTF-8"); - httpPost.setEntity(new StringEntity(metadata, "utf-8")); try { + httpPost.setEntity(new StringEntity(metadata, "utf-8")); HttpResponse response = httpClient.execute(httpPost,context); String data = EntityUtils.toString(response.getEntity(), encoding); @@ -183,6 +183,7 @@ public String postMetadata(String metadata) throws UnsupportedEncodingException throw new RuntimeException(errMsg); } return data; + } catch (IOException ioe) { logger.log(Level.SEVERE, "IOException when post metadata"); throw new RuntimeException("IOException when post metadata", ioe); diff --git a/src/main/java/edu/harvard/iq/dataverse/Dataset.java b/src/main/java/edu/harvard/iq/dataverse/Dataset.java index 144285299ad..84b4a4934bc 100644 --- a/src/main/java/edu/harvard/iq/dataverse/Dataset.java +++ b/src/main/java/edu/harvard/iq/dataverse/Dataset.java @@ -9,9 +9,10 @@ import java.util.ArrayList; import java.util.Collection; import java.util.Date; +import java.util.HashSet; import java.util.List; import java.util.Objects; -import java.util.logging.Logger; +import java.util.Set; import javax.persistence.CascadeType; import javax.persistence.Column; import javax.persistence.Entity; @@ -73,7 +74,6 @@ sequence. Used when the Dataverse is (optionally) configured to use @Index(columnList = "thumbnailfile_id")}, uniqueConstraints = @UniqueConstraint(columnNames = {"authority,protocol,identifier,doiseparator"})) public class Dataset extends DvObjectContainer { - private static final Logger logger = Logger.getLogger(Dataset.class.getCanonicalName()); public static final String TARGET_URL = "/citation?persistentId="; private static final long serialVersionUID = 1L; @@ -100,8 +100,8 @@ public class Dataset extends DvObjectContainer { @OrderBy("versionNumber DESC, minorVersionNumber DESC") private List versions = new ArrayList<>(); - @OneToOne(mappedBy = "dataset", cascade = {CascadeType.REMOVE, CascadeType.MERGE, CascadeType.PERSIST}, orphanRemoval = true) - private DatasetLock datasetLock; + @OneToMany(mappedBy = "dataset", cascade = CascadeType.ALL, orphanRemoval = true) + private Set datasetLocks; @OneToOne(cascade = {CascadeType.MERGE, CascadeType.PERSIST}) @JoinColumn(name = "thumbnailfile_id") @@ -154,7 +154,63 @@ public Dataset() { datasetVersion.setMinorVersionNumber((long) 0); versions.add(datasetVersion); } + + /** + * Checks whether {@code this} dataset is locked for a given reason. + * @param reason the reason we test for. + * @return {@code true} iff the data set is locked for {@code reason}. + */ + public boolean isLockedFor( DatasetLock.Reason reason ) { + for ( DatasetLock l : getLocks() ) { + if ( l.getReason() == reason ) { + return true; + } + } + return false; + } + + /** + * Retrieves the dataset lock for the passed reason. + * @param reason + * @return the dataset lock, or {@code null}. + */ + public DatasetLock getLockFor( DatasetLock.Reason reason ) { + for ( DatasetLock l : getLocks() ) { + if ( l.getReason() == reason ) { + return l; + } + } + return null; + } + + public Set getLocks() { + // lazy set creation + if ( datasetLocks == null ) { + setLocks( new HashSet<>() ); + } + return datasetLocks; + } + /** + * JPA use only! 
+ * @param datasetLocks + */ + void setLocks(Set datasetLocks) { + this.datasetLocks = datasetLocks; + } + + public void addLock(DatasetLock datasetLock) { + getLocks().add(datasetLock); + } + + public void removeLock( DatasetLock aDatasetLock ) { + getLocks().remove( aDatasetLock ); + } + + public boolean isLocked() { + return !getLocks().isEmpty(); + } + public String getProtocol() { return protocol; } @@ -240,18 +296,6 @@ public void setFiles(List files) { this.files = files; } - public DatasetLock getDatasetLock() { - return datasetLock; - } - - public void setDatasetLock(DatasetLock datasetLock) { - this.datasetLock = datasetLock; - } - - public boolean isLocked() { - return (getDatasetLock()!=null); - } - public boolean isDeaccessioned() { // return true, if all published versions were deaccessioned boolean hasDeaccessionedVersions = false; diff --git a/src/main/java/edu/harvard/iq/dataverse/DatasetField.java b/src/main/java/edu/harvard/iq/dataverse/DatasetField.java index 68b086e75e3..7bea9250279 100644 --- a/src/main/java/edu/harvard/iq/dataverse/DatasetField.java +++ b/src/main/java/edu/harvard/iq/dataverse/DatasetField.java @@ -299,7 +299,10 @@ public List getValues_nondisplay() List returnList = new ArrayList(); if (!datasetFieldValues.isEmpty()) { for (DatasetFieldValue dsfv : datasetFieldValues) { - returnList.add(dsfv.getValue()); + String value = dsfv.getValue(); + if (value != null) { + returnList.add(value); + } } } else { for (ControlledVocabularyValue cvv : controlledVocabularyValues) { diff --git a/src/main/java/edu/harvard/iq/dataverse/DatasetLock.java b/src/main/java/edu/harvard/iq/dataverse/DatasetLock.java index 8e572cd3c39..3114ab6dc45 100644 --- a/src/main/java/edu/harvard/iq/dataverse/DatasetLock.java +++ b/src/main/java/edu/harvard/iq/dataverse/DatasetLock.java @@ -20,6 +20,7 @@ package edu.harvard.iq.dataverse; +import static edu.harvard.iq.dataverse.DatasetLock.Reason.Workflow; import edu.harvard.iq.dataverse.authorization.users.AuthenticatedUser; import java.util.Date; import java.io.Serializable; @@ -33,7 +34,6 @@ import javax.persistence.Index; import javax.persistence.JoinColumn; import javax.persistence.ManyToOne; -import javax.persistence.OneToOne; import javax.persistence.Table; import javax.persistence.Temporal; import javax.persistence.TemporalType; @@ -52,7 +52,7 @@ @Table(indexes = {@Index(columnList="user_id"), @Index(columnList="dataset_id")}) @NamedQueries( @NamedQuery(name="DatasetLock.getLocksByDatasetId", - query="SELECT l FROM DatasetLock l WHERE l.dataset.id=:datasetId") + query="SELECT lock FROM DatasetLock lock WHERE lock.dataset.id=:datasetId") ) public class DatasetLock implements Serializable { @@ -79,13 +79,13 @@ public enum Reason { @Temporal(value = TemporalType.TIMESTAMP) private Date startTime; - @OneToOne + @ManyToOne @JoinColumn(nullable=false) private Dataset dataset; @ManyToOne @JoinColumn(nullable=false) - private AuthenticatedUser user; + private AuthenticatedUser user; @Enumerated(EnumType.STRING) @Column(nullable=false) @@ -119,7 +119,7 @@ public DatasetLock(Reason aReason, AuthenticatedUser aUser, String infoMessage) startTime = new Date(); user = aUser; info = infoMessage; - + } /** diff --git a/src/main/java/edu/harvard/iq/dataverse/DatasetPage.java b/src/main/java/edu/harvard/iq/dataverse/DatasetPage.java index 030553916d3..2cae4aae9f6 100644 --- a/src/main/java/edu/harvard/iq/dataverse/DatasetPage.java +++ b/src/main/java/edu/harvard/iq/dataverse/DatasetPage.java @@ -79,6 +79,7 @@ import java.util.logging.Level; import 
edu.harvard.iq.dataverse.datasetutility.TwoRavensHelper; import edu.harvard.iq.dataverse.datasetutility.WorldMapPermissionHelper; +import edu.harvard.iq.dataverse.engine.command.exception.IllegalCommandException; import edu.harvard.iq.dataverse.engine.command.impl.RequestRsyncScriptCommand; import edu.harvard.iq.dataverse.engine.command.impl.PublishDatasetResult; import edu.harvard.iq.dataverse.engine.command.impl.RestrictFileCommand; @@ -1503,12 +1504,17 @@ private String init(boolean initFull) { // Various info messages, when the dataset is locked (for various reasons): if (dataset.isLocked()) { - if (dataset.getDatasetLock().getReason().equals(DatasetLock.Reason.DcmUpload)) { - JH.addMessage(FacesMessage.SEVERITY_WARN, BundleUtil.getStringFromBundle("file.rsyncUpload.inProgressMessage.summary"), BundleUtil.getStringFromBundle("file.rsyncUpload.inProgressMessage.details")); - } else if (dataset.getDatasetLock().getReason().equals(DatasetLock.Reason.Workflow)) { - JH.addMessage(FacesMessage.SEVERITY_WARN, BundleUtil.getStringFromBundle("dataset.locked.message"), BundleUtil.getStringFromBundle("dataset.publish.workflow.inprogress")); - } else if (dataset.getDatasetLock().getReason().equals(DatasetLock.Reason.InReview)) { - JH.addMessage(FacesMessage.SEVERITY_WARN, BundleUtil.getStringFromBundle("dataset.locked.message"), BundleUtil.getStringFromBundle("dataset.inreview.infoMessage")); + if (dataset.isLockedFor(DatasetLock.Reason.Workflow)) { + JH.addMessage(FacesMessage.SEVERITY_WARN, BundleUtil.getStringFromBundle("dataset.locked.message"), + BundleUtil.getStringFromBundle("dataset.publish.workflow.inprogress")); + } + if (dataset.isLockedFor(DatasetLock.Reason.InReview)) { + JH.addMessage(FacesMessage.SEVERITY_WARN, BundleUtil.getStringFromBundle("dataset.locked.inReview.message"), + BundleUtil.getStringFromBundle("dataset.inreview.infoMessage")); + } + if (dataset.isLockedFor(DatasetLock.Reason.DcmUpload)) { + JH.addMessage(FacesMessage.SEVERITY_WARN, BundleUtil.getStringFromBundle("file.rsyncUpload.inProgressMessage.summary"), + BundleUtil.getStringFromBundle("file.rsyncUpload.inProgressMessage.details")); } } @@ -1862,7 +1868,7 @@ private String releaseDataset(boolean minor) { // has been published. If a publishing workflow is configured, this may have sent the // dataset into a workflow limbo, potentially waiting for a third party system to complete // the process. So it may be premature to show the "success" message at this point. - if (dataset.isLocked() && dataset.getDatasetLock().getReason().equals(DatasetLock.Reason.Workflow)) { + if (dataset.isLockedFor(DatasetLock.Reason.Workflow)) { JH.addMessage(FacesMessage.SEVERITY_WARN, BundleUtil.getStringFromBundle("dataset.locked.message"), BundleUtil.getStringFromBundle("dataset.publish.workflow.inprogress")); } else { JsfHelper.addSuccessMessage(BundleUtil.getStringFromBundle("dataset.message.publishSuccess")); @@ -2615,12 +2621,26 @@ public void refreshLock() { //requestContext.execute("refreshPage();"); } } + + public void refreshIngestLock() { + //RequestContext requestContext = RequestContext.getCurrentInstance(); + logger.fine("checking ingest lock"); + if (isStillLockedForIngest()) { + logger.fine("(still locked)"); + } else { + // OK, the dataset is no longer locked. 
+ // let's tell the page to refresh: + logger.fine("no longer locked!"); + stateChanged = true; + //requestContext.execute("refreshPage();"); + } + } /* public boolean isLockedInProgress() { if (dataset != null) { - logger.fine("checking lock status of dataset " + dataset.getId()); + logger.log(Level.FINE, "checking lock status of dataset {0}", dataset.getId()); if (dataset.isLocked()) { return true; } @@ -2629,19 +2649,18 @@ public boolean isLockedInProgress() { }*/ public boolean isDatasetLockedInWorkflow() { - if (dataset != null) { - if (dataset.isLocked()) { - if (dataset.getDatasetLock().getReason().equals(DatasetLock.Reason.Workflow)) { - return true; - } - } - } - return false; + return (dataset != null) + ? dataset.isLockedFor(DatasetLock.Reason.Workflow) + : false; } public boolean isStillLocked() { + if (dataset != null && dataset.getId() != null) { - logger.fine("checking lock status of dataset " + dataset.getId()); + logger.log(Level.FINE, "checking lock status of dataset {0}", dataset.getId()); + if(dataset.getLocks().size() == 1 && dataset.getLockFor(DatasetLock.Reason.InReview) != null){ + return false; + } if (datasetService.checkDatasetLock(dataset.getId())) { return true; } @@ -2649,6 +2668,21 @@ public boolean isStillLocked() { return false; } + + public boolean isStillLockedForIngest() { + if (dataset.getId() != null) { + Dataset testDataset = datasetService.find(dataset.getId()); + if (testDataset != null && testDataset.getId() != null) { + logger.log(Level.FINE, "checking lock status of dataset {0}", dataset.getId()); + + if (testDataset.getLockFor(DatasetLock.Reason.Ingest) != null) { + return true; + } + } + } + return false; + } + public boolean isLocked() { if (stateChanged) { return false; @@ -2662,11 +2696,57 @@ public boolean isLocked() { return false; } + public boolean isLockedForIngest() { + if (dataset.getId() != null) { + Dataset testDataset = datasetService.find(dataset.getId()); + if (stateChanged) { + return false; + } + + if (testDataset != null) { + if (testDataset.getLockFor(DatasetLock.Reason.Ingest) != null) { + return true; + } + } + } + return false; + } + + /** + * Authors are not allowed to edit but curators are allowed - when Dataset is inReview + * For all other locks edit should be locked for all editors. + */ + public boolean isLockedFromEdits() { + + try { + permissionService.checkEditDatasetLock(dataset, dvRequestService.getDataverseRequest(), new UpdateDatasetCommand(dataset, dvRequestService.getDataverseRequest())); + } catch (IllegalCommandException ex) { + return true; + } + return false; + } + + public boolean isLockedFromDownload(){ + + try { + permissionService.checkDownloadFileLock(dataset, dvRequestService.getDataverseRequest(), new CreateDatasetCommand(dataset, dvRequestService.getDataverseRequest())); + } catch (IllegalCommandException ex) { + return true; + } + return false; + + } + public void setLocked(boolean locked) { // empty method, so that we can use DatasetPage.locked in a hidden // input on the page. } + public void setLockedForIngest(boolean locked) { + // empty method, so that we can use DatasetPage.locked in a hidden + // input on the page. + } + public boolean isStateChanged() { return stateChanged; } @@ -3949,9 +4029,9 @@ public void downloadRsyncScript() { String lockInfoMessage = "script downloaded"; DatasetLock lock = datasetService.addDatasetLock(dataset.getId(), DatasetLock.Reason.DcmUpload, session.getUser() != null ? 
((AuthenticatedUser)session.getUser()).getId() : null, lockInfoMessage); if (lock != null) { - dataset.setDatasetLock(lock); + dataset.addLock(lock); } else { - logger.warning("Failed to lock the dataset (dataset id="+dataset.getId()+")"); + logger.log(Level.WARNING, "Failed to lock the dataset (dataset id={0})", dataset.getId()); } } @@ -3978,6 +4058,7 @@ public String finishRsyncScriptAction() { * It returns the default summary fields( subject, description, keywords, related publications and notes) * if the custom summary datafields has not been set, otherwise will set the custom fields set by the sysadmins * + * @return the dataset fields to be shown in the dataset summary */ public List getDatasetSummaryFields() { customFields = settingsWrapper.getValueForKey(SettingsServiceBean.Key.CustomDatasetSummaryFields); diff --git a/src/main/java/edu/harvard/iq/dataverse/DatasetServiceBean.java b/src/main/java/edu/harvard/iq/dataverse/DatasetServiceBean.java index da24256fea7..d10bb7912a3 100644 --- a/src/main/java/edu/harvard/iq/dataverse/DatasetServiceBean.java +++ b/src/main/java/edu/harvard/iq/dataverse/DatasetServiceBean.java @@ -1,8 +1,3 @@ -/* - * To change this license header, choose License Headers in Project Properties. - * To change this template file, choose Tools | Templates - * and open the template in the editor. - */ package edu.harvard.iq.dataverse; import edu.harvard.iq.dataverse.authorization.AuthenticationServiceBean; @@ -24,6 +19,7 @@ import java.util.ArrayList; import java.util.Date; import java.util.HashMap; +import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.Set; @@ -36,14 +32,10 @@ import javax.ejb.Stateless; import javax.ejb.TransactionAttribute; import javax.ejb.TransactionAttributeType; -import javax.inject.Inject; import javax.inject.Named; import javax.persistence.EntityManager; -import javax.persistence.NamedStoredProcedureQuery; -import javax.persistence.ParameterMode; import javax.persistence.PersistenceContext; import javax.persistence.Query; -import javax.persistence.StoredProcedureParameter; import javax.persistence.StoredProcedureQuery; import javax.persistence.TypedQuery; import javax.xml.stream.XMLOutputFactory; @@ -506,24 +498,13 @@ public boolean checkDatasetLock(Long datasetId) { return lock.size()>0; } - public String checkDatasetLockInfo(Long datasetId) { - String nativeQuery = "SELECT sl.info FROM DatasetLock sl WHERE sl.dataset_id = " + datasetId + " LIMIT 1;"; - String infoMessage; - try { - infoMessage = (String)em.createNativeQuery(nativeQuery).getSingleResult(); - } catch (Exception ex) { - infoMessage = null; - } - - return infoMessage; - } - @TransactionAttribute(TransactionAttributeType.REQUIRES_NEW) - public DatasetLock addDatasetLock(Dataset dataset, DatasetLock lock) { - dataset.setDatasetLock(lock); - em.persist(lock); - return lock; + lock.setDataset(dataset); + dataset.addLock(lock); + em.persist(lock); + em.merge(dataset); + return lock; } @TransactionAttribute(TransactionAttributeType.REQUIRES_NEW) /*?*/ @@ -552,29 +533,25 @@ public DatasetLock addDatasetLock(Long datasetId, DatasetLock.Reason reason, Lon return addDatasetLock(dataset, lock); } + /** + * Removes all {@link DatasetLock}s for the dataset whose id is passed and reason + * is {@code aReason}. + * @param datasetId Id of the dataset whose locks will b removed. + * @param aReason The reason of the locks that will be removed. 
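The service methods introduced here pair naturally: addDatasetLock(datasetId, reason, userId, info) attaches a reason-specific lock, and removeDatasetLocks(datasetId, reason) later clears only the locks held for that reason. A minimal usage sketch, assuming those signatures as shown in this diff; the wrapper class is hypothetical:

import edu.harvard.iq.dataverse.Dataset;
import edu.harvard.iq.dataverse.DatasetLock;
import edu.harvard.iq.dataverse.DatasetServiceBean;

// Hypothetical caller showing the intended add/remove pairing for one lock reason.
public class IngestLockExample {

    private final DatasetServiceBean datasetService;

    public IngestLockExample(DatasetServiceBean datasetService) {
        this.datasetService = datasetService;
    }

    public void runLocked(Dataset dataset, Long userId, Runnable ingestJob) {
        // The info string is free-form and is stored in the lock's info field.
        datasetService.addDatasetLock(dataset.getId(),
                DatasetLock.Reason.Ingest, userId, "ingest in progress");
        try {
            ingestJob.run();
        } finally {
            // Clears every Ingest lock but leaves locks held for other
            // reasons (InReview, Workflow, DcmUpload) untouched.
            datasetService.removeDatasetLocks(dataset.getId(), DatasetLock.Reason.Ingest);
        }
    }
}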
+ */ @TransactionAttribute(TransactionAttributeType.REQUIRES_NEW) - public void removeDatasetLock(Long datasetId) { + public void removeDatasetLocks(Long datasetId, DatasetLock.Reason aReason) { Dataset dataset = em.find(Dataset.class, datasetId); - //em.refresh(dataset); (?) - DatasetLock lock = dataset.getDatasetLock(); - if (lock != null) { - AuthenticatedUser user = lock.getUser(); - dataset.setDatasetLock(null); - user.getDatasetLocks().remove(lock); - /* - * TODO - ? - * throw an exception if for whatever reason we can't remove the lock? - try { - */ - em.remove(lock); - /* - } catch (TransactionRequiredException te) { - ... - } catch (IllegalArgumentException iae) { - ... - } - */ - } + new HashSet<>(dataset.getLocks()).stream() + .filter( l -> l.getReason() == aReason ) + .forEach( lock -> { + dataset.removeLock(lock); + + AuthenticatedUser user = lock.getUser(); + user.getDatasetLocks().remove(lock); + + em.remove(lock); + }); } /* @@ -611,7 +588,7 @@ public String getTitleFromLatestVersion(Long datasetId, boolean includeDraft){ + ";").getSingleResult(); } catch (Exception ex) { - logger.info("exception trying to get title from latest version: " + ex); + logger.log(Level.INFO, "exception trying to get title from latest version: {0}", ex); return ""; } diff --git a/src/main/java/edu/harvard/iq/dataverse/DatasetVersion.java b/src/main/java/edu/harvard/iq/dataverse/DatasetVersion.java index 9e97e8d475a..030a10244a2 100644 --- a/src/main/java/edu/harvard/iq/dataverse/DatasetVersion.java +++ b/src/main/java/edu/harvard/iq/dataverse/DatasetVersion.java @@ -221,8 +221,7 @@ public void setDatasetFields(List datasetFields) { */ public boolean isInReview() { if (versionState != null && versionState.equals(VersionState.DRAFT)) { - DatasetLock l = getDataset().getDatasetLock(); - return (l != null) && l.getReason()==DatasetLock.Reason.InReview; + return getDataset().isLockedFor(DatasetLock.Reason.InReview); } else { return false; } diff --git a/src/main/java/edu/harvard/iq/dataverse/Dataverse.java b/src/main/java/edu/harvard/iq/dataverse/Dataverse.java index af009ec2063..df656bb61b1 100644 --- a/src/main/java/edu/harvard/iq/dataverse/Dataverse.java +++ b/src/main/java/edu/harvard/iq/dataverse/Dataverse.java @@ -162,7 +162,8 @@ public void setDefaultContributorRole(DataverseRole defaultContributorRole) { private boolean metadataBlockRoot; private boolean facetRoot; - private boolean themeRoot; + // By default, themeRoot should be true, as new dataverses should start with the default theme + private boolean themeRoot = true; private boolean templateRoot; diff --git a/src/main/java/edu/harvard/iq/dataverse/DataverseHeaderFragment.java b/src/main/java/edu/harvard/iq/dataverse/DataverseHeaderFragment.java index d1a2e7ab9dd..d3607a27093 100644 --- a/src/main/java/edu/harvard/iq/dataverse/DataverseHeaderFragment.java +++ b/src/main/java/edu/harvard/iq/dataverse/DataverseHeaderFragment.java @@ -223,11 +223,11 @@ public String logout() { redirectPage = URLDecoder.decode(redirectPage, "UTF-8"); } catch (UnsupportedEncodingException ex) { Logger.getLogger(LoginPage.class.getName()).log(Level.SEVERE, null, ex); - redirectPage = "dataverse.xhtml&alias=" + dataverseService.findRootDataverse().getAlias(); + redirectPage = redirectToRoot(); } if (StringUtils.isEmpty(redirectPage)) { - redirectPage = "dataverse.xhtml&alias=" + dataverseService.findRootDataverse().getAlias(); + redirectPage = redirectToRoot(); } logger.log(Level.INFO, "Sending user to = " + redirectPage); @@ -236,6 +236,10 @@ public String 
logout() { private Boolean signupAllowed = null; + private String redirectToRoot(){ + return "dataverse.xhtml?alias=" + dataverseService.findRootDataverse().getAlias(); + } + public boolean isSignupAllowed() { if (signupAllowed != null) { return signupAllowed; diff --git a/src/main/java/edu/harvard/iq/dataverse/DataversePage.java b/src/main/java/edu/harvard/iq/dataverse/DataversePage.java index c10bc5724a0..07a21c8c557 100644 --- a/src/main/java/edu/harvard/iq/dataverse/DataversePage.java +++ b/src/main/java/edu/harvard/iq/dataverse/DataversePage.java @@ -639,7 +639,7 @@ public String save() { JsfHelper.addSuccessMessage(message); editMode = null; - return "/dataverse.xhtml?alias=" + dataverse.getAlias() + "&faces-redirect=true"; + return returnRedirect(); } catch (CommandException ex) { @@ -732,7 +732,7 @@ public String saveLinkedDataverse() { String msg = "Only authenticated users can link a dataverse."; logger.severe(msg); JsfHelper.addErrorMessage(msg); - return "/dataverse.xhtml?alias=" + dataverse.getAlias() + "&faces-redirect=true"; + return returnRedirect(); } linkingDataverse = dataverseService.find(linkingDataverseId); @@ -745,7 +745,7 @@ public String saveLinkedDataverse() { String msg = "Unable to link " + dataverse.getDisplayName() + " to " + linkingDataverse.getDisplayName() + ". An internal error occurred."; logger.log(Level.SEVERE, "{0} {1}", new Object[]{msg, ex}); JsfHelper.addErrorMessage(msg); - return "/dataverse.xhtml?alias=" + dataverse.getAlias() + "&faces-redirect=true"; + return returnRedirect(); } SavedSearch savedSearchOfChildren = createSavedSearchForChildren(savedSearchCreator); @@ -758,20 +758,20 @@ public String saveLinkedDataverse() { DataverseRequest dataverseRequest = new DataverseRequest(savedSearchCreator, SavedSearchServiceBean.getHttpServletRequest()); savedSearchService.makeLinksForSingleSavedSearch(dataverseRequest, savedSearchOfChildren, debug); JsfHelper.addSuccessMessage(BundleUtil.getStringFromBundle("dataverse.linked.success", getSuccessMessageArguments())); - return "/dataverse.xhtml?alias=" + dataverse.getAlias() + "&faces-redirect=true"; + return returnRedirect(); } catch (SearchException | CommandException ex) { // error: solr is down, etc. can't link children right now JsfHelper.addErrorMessage(BundleUtil.getStringFromBundle("dataverse.linked.internalerror", getSuccessMessageArguments())); String msg = dataverse.getDisplayName() + " has been successfully linked to " + linkingDataverse.getDisplayName() + " but contents will not appear until an internal error has been fixed."; logger.log(Level.SEVERE, "{0} {1}", new Object[]{msg, ex}); //JsfHelper.addErrorMessage(msg); - return "/dataverse.xhtml?alias=" + dataverse.getAlias() + "&faces-redirect=true"; + return returnRedirect(); } } else { // defer: please wait for the next timer/cron job //JsfHelper.addSuccessMessage(dataverse.getDisplayName() + " has been successfully linked to " + linkingDataverse.getDisplayName() + ". 
Please wait for its contents to appear."); JsfHelper.addSuccessMessage(BundleUtil.getStringFromBundle("dataverse.linked.success.wait", getSuccessMessageArguments())); - return "/dataverse.xhtml?alias=" + dataverse.getAlias() + "&faces-redirect=true"; + return returnRedirect(); } } @@ -819,7 +819,7 @@ public String saveSavedSearch() { String msg = "Only authenticated users can save a search."; logger.severe(msg); JsfHelper.addErrorMessage(msg); - return "/dataverse.xhtml?alias=" + dataverse.getAlias() + "&faces-redirect=true"; + return returnRedirect(); } SavedSearch savedSearch = new SavedSearch(searchIncludeFragment.getQuery(), linkingDataverse, savedSearchCreator); @@ -843,12 +843,12 @@ public String saveSavedSearch() { arguments.add(linkString); String successMessageString = BundleUtil.getStringFromBundle("dataverse.saved.search.success", arguments); JsfHelper.addSuccessMessage(successMessageString); - return "/dataverse.xhtml?alias=" + dataverse.getAlias() + "&faces-redirect=true"; + return returnRedirect(); } catch (CommandException ex) { String msg = "There was a problem linking this search to yours: " + ex; logger.severe(msg); JsfHelper.addErrorMessage(BundleUtil.getStringFromBundle("dataverse.saved.search.failure") + " " + ex); - return "/dataverse.xhtml?alias=" + dataverse.getAlias() + "&faces-redirect=true"; + return returnRedirect(); } } @@ -876,7 +876,7 @@ public String releaseDataverse() { } else { JsfHelper.addErrorMessage(BundleUtil.getStringFromBundle("dataverse.publish.not.authorized")); } - return "/dataverse.xhtml?alias=" + dataverse.getAlias() + "&faces-redirect=true"; + return returnRedirect(); } @@ -1016,5 +1016,9 @@ public void validateAlias(FacesContext context, UIComponent toValidate, Object v } } } + + private String returnRedirect(){ + return "/dataverse.xhtml?alias=" + dataverse.getAlias() + "&faces-redirect=true"; + } } diff --git a/src/main/java/edu/harvard/iq/dataverse/DataverseServiceBean.java b/src/main/java/edu/harvard/iq/dataverse/DataverseServiceBean.java index 8adc5832040..026af897c6b 100644 --- a/src/main/java/edu/harvard/iq/dataverse/DataverseServiceBean.java +++ b/src/main/java/edu/harvard/iq/dataverse/DataverseServiceBean.java @@ -468,7 +468,40 @@ public Map getAllHarvestedDataverseDescriptions(){ return ret; }*/ + + public String getParentAliasString(SolrSearchResult solrSearchResult){ + Long dvId = solrSearchResult.getEntityId(); + String retVal = ""; + + if (dvId == null) { + return retVal; + } + + String searchResult; + try { + System.out.print("select t0.ALIAS FROM DATAVERSE t0, DVOBJECT t1, DVOBJECT t2 WHERE (t0.ID = t1.ID) AND (t2.OWNER_ID = t1.ID) AND (t2.ID =" + dvId + ")"); + searchResult = (String) em.createNativeQuery("select t0.ALIAS FROM DATAVERSE t0, DVOBJECT t1, DVOBJECT t2 WHERE (t0.ID = t1.ID) AND (t2.OWNER_ID = t1.ID) AND (t2.ID =" + dvId + ")").getSingleResult(); + + } catch (Exception ex) { + System.out.print("catching exception"); + System.out.print("catching exception" + ex.getMessage()); + return retVal; + } + if (searchResult == null) { + System.out.print("searchResult == null"); + return retVal; + } + + if (searchResult != null) { + System.out.print(searchResult); + return searchResult; + } + + return retVal; + } + + public void populateDvSearchCard(SolrSearchResult solrSearchResult) { Long dvId = solrSearchResult.getEntityId(); diff --git a/src/main/java/edu/harvard/iq/dataverse/EditDatafilesPage.java b/src/main/java/edu/harvard/iq/dataverse/EditDatafilesPage.java index 562a60f6e21..8c5aa3b0414 100644 --- 
a/src/main/java/edu/harvard/iq/dataverse/EditDatafilesPage.java +++ b/src/main/java/edu/harvard/iq/dataverse/EditDatafilesPage.java @@ -2084,19 +2084,16 @@ private boolean isFileAlreadyUploaded(DataFile dataFile) { public boolean isLocked() { if (dataset != null) { - logger.fine("checking lock status of dataset " + dataset.getId()); + logger.log(Level.FINE, "checking lock status of dataset {0}", dataset.getId()); if (dataset.isLocked()) { // refresh the dataset and version, if the current working // version of the dataset is locked: } Dataset lookedupDataset = datasetService.find(dataset.getId()); - DatasetLock datasetLock = null; - if (lookedupDataset != null) { - datasetLock = lookedupDataset.getDatasetLock(); - if (datasetLock != null) { - logger.fine("locked!"); - return true; - } + + if ( (lookedupDataset!=null) && lookedupDataset.isLocked() ) { + logger.fine("locked!"); + return true; } } return false; @@ -2127,12 +2124,12 @@ public void setFileMetadataSelected(FileMetadata fm){ public void setFileMetadataSelected(FileMetadata fm, String guestbook) { fileMetadataSelected = fm; - logger.fine("set the file for the advanced options popup (" + fileMetadataSelected.getLabel() + ")"); + logger.log(Level.FINE, "set the file for the advanced options popup ({0})", fileMetadataSelected.getLabel()); } public FileMetadata getFileMetadataSelected() { if (fileMetadataSelected != null) { - logger.fine("returning file metadata for the advanced options popup (" + fileMetadataSelected.getLabel() + ")"); + logger.log(Level.FINE, "returning file metadata for the advanced options popup ({0})", fileMetadataSelected.getLabel()); } else { logger.fine("file metadata for the advanced options popup is null."); } @@ -2226,7 +2223,7 @@ public void saveAsDesignatedThumbnail() { } public void deleteDatasetLogoAndUseThisDataFileAsThumbnailInstead() { - logger.fine("For dataset id " + dataset.getId() + " the current thumbnail is from a dataset logo rather than a dataset file, blowing away the logo and using this FileMetadata id instead: " + fileMetadataSelectedForThumbnailPopup); + logger.log(Level.FINE, "For dataset id {0} the current thumbnail is from a dataset logo rather than a dataset file, blowing away the logo and using this FileMetadata id instead: {1}", new Object[]{dataset.getId(), fileMetadataSelectedForThumbnailPopup}); /** * @todo Rather than deleting and merging right away, try to respect how * this page seems to stage actions and giving the user a chance to diff --git a/src/main/java/edu/harvard/iq/dataverse/FilePage.java b/src/main/java/edu/harvard/iq/dataverse/FilePage.java index 43b5ac99396..6b156725b90 100644 --- a/src/main/java/edu/harvard/iq/dataverse/FilePage.java +++ b/src/main/java/edu/harvard/iq/dataverse/FilePage.java @@ -14,6 +14,8 @@ import edu.harvard.iq.dataverse.datasetutility.WorldMapPermissionHelper; import edu.harvard.iq.dataverse.engine.command.Command; import edu.harvard.iq.dataverse.engine.command.exception.CommandException; +import edu.harvard.iq.dataverse.engine.command.exception.IllegalCommandException; +import edu.harvard.iq.dataverse.engine.command.impl.CreateDatasetCommand; import edu.harvard.iq.dataverse.engine.command.impl.RestrictFileCommand; import edu.harvard.iq.dataverse.engine.command.impl.UpdateDatasetCommand; import edu.harvard.iq.dataverse.export.ExportException; @@ -502,17 +504,28 @@ public String save() { return ""; } + private Boolean thumbnailAvailable = null; + public boolean isThumbnailAvailable(FileMetadata fileMetadata) { // new and optimized logic: // - 
check download permission here (should be cached - so it's free!) // - only then ask the file service if the thumbnail is available/exists. - // the service itself no longer checks download permissions. + // the service itself no longer checks download permissions. + // (Also, cache the result the first time the check is performed... + // remember - methods referenced in "rendered=..." attributes are + // called *multiple* times as the page is loading!) + if (thumbnailAvailable != null) { + return thumbnailAvailable; + } + if (!fileDownloadHelper.canDownloadFile(fileMetadata)) { - return false; + thumbnailAvailable = false; + } else { + thumbnailAvailable = datafileService.isThumbnailAvailable(fileMetadata.getDataFile()); } - - return datafileService.isThumbnailAvailable(fileMetadata.getDataFile()); + + return thumbnailAvailable; } private String returnToDatasetOnly(){ @@ -692,6 +705,31 @@ public boolean isReplacementFile(){ public boolean isPubliclyDownloadable() { return FileUtil.isPubliclyDownloadable(fileMetadata); } + + /** + * Authors are not allowed to edit but curators are allowed - when Dataset is inReview + * For all other locks edit should be locked for all editors. + */ + public boolean isLockedFromEdits() { + Dataset testDataset = fileMetadata.getDataFile().getOwner(); + + try { + permissionService.checkEditDatasetLock(testDataset, dvRequestService.getDataverseRequest(), new UpdateDatasetCommand(testDataset, dvRequestService.getDataverseRequest())); + } catch (IllegalCommandException ex) { + return true; + } + return false; + } + + public boolean isLockedFromDownload(){ + Dataset testDataset = fileMetadata.getDataFile().getOwner(); + try { + permissionService.checkDownloadFileLock(testDataset, dvRequestService.getDataverseRequest(), new CreateDatasetCommand(testDataset, dvRequestService.getDataverseRequest())); + } catch (IllegalCommandException ex) { + return true; + } + return false; + } public String getPublicDownloadUrl() { try { diff --git a/src/main/java/edu/harvard/iq/dataverse/LoginPage.java b/src/main/java/edu/harvard/iq/dataverse/LoginPage.java index 99a1af7571a..6bde3ba6775 100644 --- a/src/main/java/edu/harvard/iq/dataverse/LoginPage.java +++ b/src/main/java/edu/harvard/iq/dataverse/LoginPage.java @@ -21,12 +21,15 @@ import java.util.Iterator; import java.util.LinkedList; import java.util.List; +import java.util.Random; import java.util.logging.Level; import java.util.logging.Logger; import javax.ejb.EJB; import javax.faces.application.FacesMessage; +import javax.faces.component.UIComponent; import javax.faces.context.FacesContext; import javax.faces.event.AjaxBehaviorEvent; +import javax.faces.validator.ValidatorException; import javax.faces.view.ViewScoped; import javax.inject.Inject; import javax.inject.Named; @@ -101,6 +104,11 @@ public enum EditMode {LOGIN, SUCCESS, FAILED}; private String redirectPage = "dataverse.xhtml"; private AuthenticationProvider authProvider; + private int numFailedLoginAttempts; + Random random; + long op1; + long op2; + Long userSum; public void init() { Iterator credentialsIterator = authSvc.getAuthenticationProviderIdsOfType( CredentialsAuthenticationProvider.class ).iterator(); @@ -109,6 +117,7 @@ public void init() { } resetFilledCredentials(null); authProvider = authSvc.getAuthenticationProvider(systemConfig.getDefaultAuthProvider()); + random = new Random(); } public List listCredentialsAuthenticationProviders() { @@ -164,14 +173,14 @@ public String login() { session.setUser(r); if ("dataverse.xhtml".equals(redirectPage)) { - 
redirectPage = redirectPage + "&alias=" + dataverseService.findRootDataverse().getAlias(); + redirectPage = redirectToRoot(); } try { redirectPage = URLDecoder.decode(redirectPage, "UTF-8"); } catch (UnsupportedEncodingException ex) { Logger.getLogger(LoginPage.class.getName()).log(Level.SEVERE, null, ex); - redirectPage = "dataverse.xhtml&alias=" + dataverseService.findRootDataverse().getAlias(); + redirectPage = redirectToRoot(); } logger.log(Level.FINE, "Sending user to = {0}", redirectPage); @@ -179,6 +188,9 @@ public String login() { } catch (AuthenticationFailedException ex) { + numFailedLoginAttempts++; + op1 = new Long(random.nextInt(10)); + op2 = new Long(random.nextInt(10)); AuthenticationResponse response = ex.getResponse(); switch ( response.getStatus() ) { case FAIL: @@ -202,6 +214,10 @@ public String login() { } } + + private String redirectToRoot(){ + return "dataverse.xhtml?alias=" + dataverseService.findRootDataverse().getAlias(); + } public String getCredentialsAuthProviderId() { return credentialsAuthProviderId; @@ -256,4 +272,46 @@ public String getLoginButtonText() { return BundleUtil.getStringFromBundle("login.button", Arrays.asList("???")); } } + + public int getNumFailedLoginAttempts() { + return numFailedLoginAttempts; + } + + public boolean isRequireExtraValidation() { + if (numFailedLoginAttempts > 2) { + return true; + } else { + return false; + } + } + + public long getOp1() { + return op1; + } + + public long getOp2() { + return op2; + } + + public Long getUserSum() { + return userSum; + } + + public void setUserSum(Long userSum) { + this.userSum = userSum; + } + + // TODO: Consolidate with SendFeedbackDialog.validateUserSum? + public void validateUserSum(FacesContext context, UIComponent component, Object value) throws ValidatorException { + // The FacesMessage text is on the xhtml side. + FacesMessage msg = new FacesMessage(""); + ValidatorException validatorException = new ValidatorException(msg); + if (value == null) { + throw validatorException; + } + if (op1 + op2 != (Long) value) { + throw validatorException; + } + } + } diff --git a/src/main/java/edu/harvard/iq/dataverse/PermissionServiceBean.java b/src/main/java/edu/harvard/iq/dataverse/PermissionServiceBean.java index 2b6666771a1..4d0dad2411c 100644 --- a/src/main/java/edu/harvard/iq/dataverse/PermissionServiceBean.java +++ b/src/main/java/edu/harvard/iq/dataverse/PermissionServiceBean.java @@ -26,6 +26,11 @@ import javax.persistence.PersistenceContext; import static edu.harvard.iq.dataverse.engine.command.CommandHelper.CH; import edu.harvard.iq.dataverse.engine.command.DataverseRequest; +import edu.harvard.iq.dataverse.engine.command.exception.IllegalCommandException; +import edu.harvard.iq.dataverse.engine.command.impl.CreateDatasetCommand; +import edu.harvard.iq.dataverse.engine.command.impl.PublishDatasetCommand; +import edu.harvard.iq.dataverse.engine.command.impl.UpdateDatasetCommand; +import edu.harvard.iq.dataverse.util.BundleUtil; import java.util.Arrays; import java.util.HashMap; import java.util.LinkedList; @@ -539,6 +544,48 @@ public List getDvObjectIdsUserHasRoleOn(User user, List rol return dataversesUserHasPermissionOn; } + public void checkEditDatasetLock(Dataset dataset, DataverseRequest dataverseRequest, Command command) throws IllegalCommandException { + if (dataset.isLocked()) { + if (dataset.isLockedFor(DatasetLock.Reason.InReview)) { + // The "InReview" lock is not really a lock for curators. They can still make edits. 
+ if (!isUserAllowedOn(dataverseRequest.getUser(), new PublishDatasetCommand(dataset, dataverseRequest, true), dataset)) { + throw new IllegalCommandException(BundleUtil.getStringFromBundle("dataset.message.locked.editNotAllowedInReview"), command); + } + } + if (dataset.isLockedFor(DatasetLock.Reason.Ingest)) { + throw new IllegalCommandException(BundleUtil.getStringFromBundle("dataset.message.locked.editNotAllowed"), command); + } + // TODO: Do we need to check for "Workflow"? Should the message be more specific? + if (dataset.isLockedFor(DatasetLock.Reason.Workflow)) { + throw new IllegalCommandException(BundleUtil.getStringFromBundle("dataset.message.locked.editNotAllowed"), command); + } + // TODO: Do we need to check for "DcmUpload"? Should the message be more specific? + if (dataset.isLockedFor(DatasetLock.Reason.DcmUpload)) { + throw new IllegalCommandException(BundleUtil.getStringFromBundle("dataset.message.locked.editNotAllowed"), command); + } + } + } - + public void checkDownloadFileLock(Dataset dataset, DataverseRequest dataverseRequest, Command command) throws IllegalCommandException { + if (dataset.isLocked()) { + if (dataset.isLockedFor(DatasetLock.Reason.InReview)) { + // The "InReview" lock is not really a lock for curators or contributors. They can still download. + if (!isUserAllowedOn(dataverseRequest.getUser(), new UpdateDatasetCommand(dataset, dataverseRequest), dataset)) { + throw new IllegalCommandException(BundleUtil.getStringFromBundle("dataset.message.locked.downloadNotAllowedInReview"), command); + } + } + if (dataset.isLockedFor(DatasetLock.Reason.Ingest)) { + throw new IllegalCommandException(BundleUtil.getStringFromBundle("dataset.message.locked.downloadNotAllowed"), command); + } + // TODO: Do we need to check for "Workflow"? Should the message be more specific? + if (dataset.isLockedFor(DatasetLock.Reason.Workflow)) { + throw new IllegalCommandException(BundleUtil.getStringFromBundle("dataset.message.locked.downloadNotAllowed"), command); + } + // TODO: Do we need to check for "DcmUpload"? Should the message be more specific? + if (dataset.isLockedFor(DatasetLock.Reason.DcmUpload)) { + throw new IllegalCommandException(BundleUtil.getStringFromBundle("dataset.message.locked.downloadNotAllowed"), command); + } + } + } + } diff --git a/src/main/java/edu/harvard/iq/dataverse/SendFeedbackDialog.java b/src/main/java/edu/harvard/iq/dataverse/SendFeedbackDialog.java index d68a610bd1a..67d6e673438 100644 --- a/src/main/java/edu/harvard/iq/dataverse/SendFeedbackDialog.java +++ b/src/main/java/edu/harvard/iq/dataverse/SendFeedbackDialog.java @@ -31,6 +31,7 @@ public class SendFeedbackDialog implements java.io.Serializable { private String userMessage = ""; private String messageSubject = ""; private String messageTo = ""; + // FIXME: Remove "support@thedata.org". There's no reason to email the Dataverse *project*. People should email the *installation* instead. private String defaultRecipientEmail = "support@thedata.org"; Long op1, op2, userSum; // Either the dataverse or the dataset that the message is pertaining to @@ -161,6 +162,7 @@ public void validateUserSum(FacesContext context, UIComponent component, Object if (op1 + op2 !=(Long)value) { + // TODO: Remove this English "Sum is incorrect" string. contactFormFragment.xhtml uses contact.sum.invalid instead. 
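LoginPage.validateUserSum above and SendFeedbackDialog.validateUserSum (continuing below) implement the same op1 + op2 human check, and the TODO in LoginPage asks about consolidating them. One possible shape for a shared helper, sketched here; the class name and the idea of passing the message text in are assumptions, not part of this change:

import javax.faces.application.FacesMessage;
import javax.faces.validator.ValidatorException;

// Hypothetical shared validator for the op1 + op2 challenge used by both pages.
public final class SumChallengeValidator {

    private SumChallengeValidator() {
    }

    // Throws ValidatorException when the answer is missing or wrong,
    // mirroring what both page-level validators do today.
    public static void validate(long op1, long op2, Long answer, String messageText) {
        FacesMessage msg = new FacesMessage(messageText);
        msg.setSeverity(FacesMessage.SEVERITY_ERROR);
        if (answer == null || op1 + op2 != answer) {
            throw new ValidatorException(msg);
        }
    }
}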
FacesMessage msg = new FacesMessage("Sum is incorrect, please try again."); msg.setSeverity(FacesMessage.SEVERITY_ERROR); diff --git a/src/main/java/edu/harvard/iq/dataverse/Shib.java b/src/main/java/edu/harvard/iq/dataverse/Shib.java index 9945d10c916..b5a8b6d6a51 100644 --- a/src/main/java/edu/harvard/iq/dataverse/Shib.java +++ b/src/main/java/edu/harvard/iq/dataverse/Shib.java @@ -13,6 +13,7 @@ import edu.harvard.iq.dataverse.authorization.users.AuthenticatedUser; import edu.harvard.iq.dataverse.util.BundleUtil; import edu.harvard.iq.dataverse.util.JsfHelper; +import edu.harvard.iq.dataverse.util.SystemConfig; import java.io.IOException; import java.sql.Timestamp; import java.util.ArrayList; @@ -441,9 +442,9 @@ public String getPrettyFacesHomePageString(boolean includeFacetDashRedirect) { String rootDvAlias = getRootDataverseAlias(); if (includeFacetDashRedirect) { if (rootDvAlias != null) { - return plainHomepageString + "?alias=" + rootDvAlias + "&faces-redirect=true"; + return plainHomepageString + "?alias=" + rootDvAlias + "&faces-redirect=true"; } else { - return plainHomepageString + "?faces-redirect=true"; + return plainHomepageString + "?faces-redirect=true"; } } else if (rootDvAlias != null) { /** diff --git a/src/main/java/edu/harvard/iq/dataverse/ThemeWidgetFragment.java b/src/main/java/edu/harvard/iq/dataverse/ThemeWidgetFragment.java index 8f9c79f4dc8..17b59eb27fd 100644 --- a/src/main/java/edu/harvard/iq/dataverse/ThemeWidgetFragment.java +++ b/src/main/java/edu/harvard/iq/dataverse/ThemeWidgetFragment.java @@ -262,7 +262,7 @@ public void resetForm() { } public String cancel() { - return "dataverse?faces-redirect=true&alias="+editDv.getAlias(); // go to dataverse page + return "dataverse.xhtml?faces-redirect=true&alias="+editDv.getAlias(); // go to dataverse page } @@ -285,7 +285,7 @@ public String save() { this.cleanupTempDirectory(); } JsfHelper.addSuccessMessage(JH.localize("dataverse.theme.success")); - return "dataverse?faces-redirect=true&alias="+editDv.getAlias(); // go to dataverse page + return "dataverse.xhtml?faces-redirect=true&alias="+editDv.getAlias(); // go to dataverse page } } diff --git a/src/main/java/edu/harvard/iq/dataverse/api/BuiltinUsers.java b/src/main/java/edu/harvard/iq/dataverse/api/BuiltinUsers.java index 233d57e1b45..633623719a4 100644 --- a/src/main/java/edu/harvard/iq/dataverse/api/BuiltinUsers.java +++ b/src/main/java/edu/harvard/iq/dataverse/api/BuiltinUsers.java @@ -10,6 +10,7 @@ import edu.harvard.iq.dataverse.authorization.providers.builtin.PasswordEncryption; import edu.harvard.iq.dataverse.authorization.users.ApiToken; import edu.harvard.iq.dataverse.authorization.users.AuthenticatedUser; +import edu.harvard.iq.dataverse.settings.SettingsServiceBean; import java.sql.Timestamp; import java.util.Calendar; import java.util.logging.Level; @@ -53,6 +54,14 @@ public class BuiltinUsers extends AbstractApiBean { @GET @Path("{username}/api-token") public Response getApiToken( @PathParam("username") String username, @QueryParam("password") String password ) { + boolean disabled = true; + boolean lookupAllowed = settingsSvc.isTrueForKey(SettingsServiceBean.Key.AllowApiTokenLookupViaApi, false); + if (lookupAllowed) { + disabled = false; + } + if (disabled) { + return error(Status.FORBIDDEN, "This API endpoint has been disabled."); + } BuiltinUser u = null; if (retrievingApiTokenViaEmailEnabled) { u = builtinUserSvc.findByUsernameOrEmail(username); diff --git a/src/main/java/edu/harvard/iq/dataverse/api/Datasets.java 
b/src/main/java/edu/harvard/iq/dataverse/api/Datasets.java index 7b072aadd3a..8fcf97a6de1 100644 --- a/src/main/java/edu/harvard/iq/dataverse/api/Datasets.java +++ b/src/main/java/edu/harvard/iq/dataverse/api/Datasets.java @@ -405,26 +405,6 @@ public Response publishDataseUsingGetDeprecated( @PathParam("id") String id, @Qu return publishDataset(id, type); } - // TODO SBG: Delete me - @EJB - WorkflowServiceBean workflows; - - @PUT - @Path("{id}/actions/wf/{wfid}") - public Response DELETEME(@PathParam("id") String id, @PathParam("wfid") String wfid) { - try { - Workflow wf = workflows.getWorkflow(Long.parseLong(wfid)).get(); - Dataset ds = findDatasetOrDie(id); - WorkflowContext ctxt = new WorkflowContext(createDataverseRequest(findUserOrDie()), ds, 0, 0, WorkflowContext.TriggerType.PostPublishDataset, "DataCite"); - workflows.start(wf, ctxt); - return ok("Started workflow " + wf.getName() + " on dataset " + ds.getId() ); - - } catch (WrappedResponse ex) { - return ex.getResponse(); - } - } - // TODO SBG: /Delete me - @POST @Path("{id}/actions/:publish") public Response publishDataset(@PathParam("id") String id, @QueryParam("type") String type) { @@ -655,7 +635,7 @@ public Response getRsync(@PathParam("identifier") String id) { @POST @Path("{identifier}/dataCaptureModule/checksumValidation") public Response receiveChecksumValidationResults(@PathParam("identifier") String id, JsonObject jsonFromDcm) { - logger.fine("jsonFromDcm: " + jsonFromDcm); + logger.log(Level.FINE, "jsonFromDcm: {0}", jsonFromDcm); AuthenticatedUser authenticatedUser = null; try { authenticatedUser = findAuthenticatedUserOrDie(); @@ -712,13 +692,7 @@ public Response submitForReview(@PathParam("id") String idSupplied) { Dataset updatedDataset = execCommand(new SubmitDatasetForReviewCommand(createDataverseRequest(findUserOrDie()), findDatasetOrDie(idSupplied))); JsonObjectBuilder result = Json.createObjectBuilder(); - boolean inReview = false; - try{ - inReview = updatedDataset.getDatasetLock().getReason().equals(DatasetLock.Reason.InReview); - } catch (Exception e){ - System.out.print("submit exception: " + e.getMessage()); - // if there's no lock then it can't be in review by definition - } + boolean inReview = updatedDataset.isLockedFor(DatasetLock.Reason.InReview); result.add("inReview", inReview); result.add("message", "Dataset id " + updatedDataset.getId() + " has been submitted for review."); @@ -747,12 +721,7 @@ public Response returnToAuthor(@PathParam("id") String idSupplied, String jsonBo } AuthenticatedUser authenticatedUser = findAuthenticatedUserOrDie(); Dataset updatedDataset = execCommand(new ReturnDatasetToAuthorCommand(createDataverseRequest(authenticatedUser), dataset, reasonForReturn )); - boolean inReview = false; - try{ - inReview = updatedDataset.getDatasetLock().getReason().equals(DatasetLock.Reason.InReview); - } catch (Exception e){ - // if there's no lock then it can't be in review by definition - } + boolean inReview = updatedDataset.isLockedFor(DatasetLock.Reason.InReview); JsonObjectBuilder result = Json.createObjectBuilder(); result.add("inReview", inReview); @@ -767,9 +736,8 @@ public Response returnToAuthor(@PathParam("id") String idSupplied, String jsonBo * Add a File to an existing Dataset * * @param idSupplied - * @param datasetId * @param jsonData - * @param testFileInputStream + * @param fileInputStream * @param contentDispositionHeader * @param formDataBodyPart * @return diff --git a/src/main/java/edu/harvard/iq/dataverse/api/Dataverses.java 
b/src/main/java/edu/harvard/iq/dataverse/api/Dataverses.java index bb13ced99c6..97f3925b5ec 100644 --- a/src/main/java/edu/harvard/iq/dataverse/api/Dataverses.java +++ b/src/main/java/edu/harvard/iq/dataverse/api/Dataverses.java @@ -245,8 +245,11 @@ public Response createDataset( String jsonBody, @PathParam("identifier") String } Dataset managedDs = execCommand(new CreateDatasetCommand(ds, createDataverseRequest(u))); - return created( "/datasets/" + managedDs.getId(), - Json.createObjectBuilder().add("id", managedDs.getId()) ); + return created("/datasets/" + managedDs.getId(), + Json.createObjectBuilder() + .add("id", managedDs.getId()) + .add("persistentId", managedDs.getGlobalId()) + ); } catch ( WrappedResponse ex ) { return ex.getResponse(); diff --git a/src/main/java/edu/harvard/iq/dataverse/api/datadeposit/ContainerManagerImpl.java b/src/main/java/edu/harvard/iq/dataverse/api/datadeposit/ContainerManagerImpl.java index 3409f419969..5301024afa1 100644 --- a/src/main/java/edu/harvard/iq/dataverse/api/datadeposit/ContainerManagerImpl.java +++ b/src/main/java/edu/harvard/iq/dataverse/api/datadeposit/ContainerManagerImpl.java @@ -129,7 +129,6 @@ public DepositReceipt replaceMetadata(String uri, Deposit deposit, AuthCredentia String globalId = urlManager.getTargetIdentifier(); Dataset dataset = datasetService.findByGlobalId(globalId); if (dataset != null) { - SwordUtil.datasetLockCheck(dataset); Dataverse dvThatOwnsDataset = dataset.getOwner(); UpdateDatasetCommand updateDatasetCommand = new UpdateDatasetCommand(dataset, dvReq); if (!permissionService.isUserAllowedOn(user, updateDatasetCommand, dataset)) { @@ -222,7 +221,6 @@ public void deleteContainer(String uri, AuthCredentials authCredentials, SwordCo if (!permissionService.isUserAllowedOn(user, deleteDatasetVersionCommand, dataset)) { throw new SwordError(UriRegistry.ERROR_BAD_REQUEST, "User " + user.getDisplayInfo().getTitle() + " is not authorized to modify " + dvThatOwnsDataset.getAlias()); } - SwordUtil.datasetLockCheck(dataset); DatasetVersion.VersionState datasetVersionState = dataset.getLatestVersion().getVersionState(); if (dataset.isReleased()) { if (datasetVersionState.equals(DatasetVersion.VersionState.DRAFT)) { diff --git a/src/main/java/edu/harvard/iq/dataverse/api/datadeposit/MediaResourceManagerImpl.java b/src/main/java/edu/harvard/iq/dataverse/api/datadeposit/MediaResourceManagerImpl.java index 714883c9c33..c79e8660329 100644 --- a/src/main/java/edu/harvard/iq/dataverse/api/datadeposit/MediaResourceManagerImpl.java +++ b/src/main/java/edu/harvard/iq/dataverse/api/datadeposit/MediaResourceManagerImpl.java @@ -159,7 +159,6 @@ public void deleteMediaResource(String uri, AuthCredentials authCredentials, Swo DataFile fileToDelete = dataFileService.find(fileIdLong); if (fileToDelete != null) { Dataset dataset = fileToDelete.getOwner(); - SwordUtil.datasetLockCheck(dataset); Dataset datasetThatOwnsFile = fileToDelete.getOwner(); Dataverse dataverseThatOwnsFile = datasetThatOwnsFile.getOwner(); /** @@ -216,7 +215,6 @@ DepositReceipt replaceOrAddFiles(String uri, Deposit deposit, AuthCredentials au if (!permissionService.isUserAllowedOn(user, updateDatasetCommand, dataset)) { throw new SwordError(UriRegistry.ERROR_BAD_REQUEST, "user " + user.getDisplayInfo().getTitle() + " is not authorized to modify dataset with global ID " + dataset.getGlobalId()); } - SwordUtil.datasetLockCheck(dataset); //--------------------------------------- // Make sure that the upload type is not rsync diff --git 
a/src/main/java/edu/harvard/iq/dataverse/api/datadeposit/StatementManagerImpl.java b/src/main/java/edu/harvard/iq/dataverse/api/datadeposit/StatementManagerImpl.java index 5089204f854..f6c9bcca18c 100644 --- a/src/main/java/edu/harvard/iq/dataverse/api/datadeposit/StatementManagerImpl.java +++ b/src/main/java/edu/harvard/iq/dataverse/api/datadeposit/StatementManagerImpl.java @@ -14,7 +14,9 @@ import java.util.HashMap; import java.util.List; import java.util.Map; +import java.util.Optional; import java.util.logging.Logger; +import static java.util.stream.Collectors.joining; import javax.ejb.EJB; import javax.inject.Inject; import javax.servlet.http.HttpServletRequest; @@ -91,14 +93,16 @@ public Statement getStatement(String editUri, Map map, AuthCrede states.put("latestVersionState", dataset.getLatestVersion().getVersionState().toString()); Boolean isMinorUpdate = dataset.getLatestVersion().isMinorUpdate(); states.put("isMinorUpdate", isMinorUpdate.toString()); - DatasetLock lock = dataset.getDatasetLock(); - if (lock != null) { + + if ( dataset.isLocked() ) { states.put("locked", "true"); - states.put("lockedDetail", lock.getInfo()); - states.put("lockedStartTime", lock.getStartTime().toString()); + states.put("lockedDetail", dataset.getLocks().stream().map( l-> l.getInfo() ).collect( joining(",")) ); + Optional earliestLock = dataset.getLocks().stream().min((l1, l2) -> (int)Math.signum(l1.getStartTime().getTime()-l2.getStartTime().getTime()) ); + states.put("lockedStartTime", earliestLock.get().getStartTime().toString()); } else { states.put("locked", "false"); } + statement.setStates(states); List fileMetadatas = dataset.getLatestVersion().getFileMetadatas(); for (FileMetadata fileMetadata : fileMetadatas) { diff --git a/src/main/java/edu/harvard/iq/dataverse/api/datadeposit/SwordUtil.java b/src/main/java/edu/harvard/iq/dataverse/api/datadeposit/SwordUtil.java index a35acfb200e..39575fb6fdb 100644 --- a/src/main/java/edu/harvard/iq/dataverse/api/datadeposit/SwordUtil.java +++ b/src/main/java/edu/harvard/iq/dataverse/api/datadeposit/SwordUtil.java @@ -1,7 +1,5 @@ package edu.harvard.iq.dataverse.api.datadeposit; -import edu.harvard.iq.dataverse.Dataset; -import edu.harvard.iq.dataverse.DatasetLock; import org.swordapp.server.SwordError; import org.swordapp.server.UriRegistry; @@ -12,7 +10,7 @@ public class SwordUtil { static String DCTERMS = "http://purl.org/dc/terms/"; - /** + /* * @todo get rid of this method */ public static SwordError throwSpecialSwordErrorWithoutStackTrace(String SwordUriRegistryError, String error) { @@ -28,7 +26,7 @@ public static SwordError throwSpecialSwordErrorWithoutStackTrace(String SwordUri return swordError; } - /** + /* * @todo get rid of this method */ public static SwordError throwRegularSwordErrorWithoutStackTrace(String error) { @@ -41,12 +39,4 @@ public static SwordError throwRegularSwordErrorWithoutStackTrace(String error) { return swordError; } - public static void datasetLockCheck(Dataset dataset) throws SwordError { - DatasetLock datasetLock = dataset.getDatasetLock(); - if (datasetLock != null) { - String message = "Please try again later. 
Unable to perform operation due to dataset lock: " + datasetLock.getInfo(); - throw new SwordError(UriRegistry.ERROR_BAD_REQUEST, message); - } - } - } diff --git a/src/main/java/edu/harvard/iq/dataverse/authorization/AuthenticationServiceBean.java b/src/main/java/edu/harvard/iq/dataverse/authorization/AuthenticationServiceBean.java index 401a5d0a932..9e3a438b11b 100644 --- a/src/main/java/edu/harvard/iq/dataverse/authorization/AuthenticationServiceBean.java +++ b/src/main/java/edu/harvard/iq/dataverse/authorization/AuthenticationServiceBean.java @@ -114,7 +114,8 @@ public void startup() { registerProviderFactory( new BuiltinAuthenticationProviderFactory(builtinUserServiceBean, passwordValidatorService) ); registerProviderFactory( new ShibAuthenticationProviderFactory() ); registerProviderFactory( new OAuth2AuthenticationProviderFactory() ); - } catch (AuthorizationSetupException ex) { + + } catch (AuthorizationSetupException ex) { logger.log(Level.SEVERE, "Exception setting up the authentication provider factories: " + ex.getMessage(), ex); } diff --git a/src/main/java/edu/harvard/iq/dataverse/authorization/providers/builtin/DataverseUserPage.java b/src/main/java/edu/harvard/iq/dataverse/authorization/providers/builtin/DataverseUserPage.java index 37c3a75cddd..df2db501035 100644 --- a/src/main/java/edu/harvard/iq/dataverse/authorization/providers/builtin/DataverseUserPage.java +++ b/src/main/java/edu/harvard/iq/dataverse/authorization/providers/builtin/DataverseUserPage.java @@ -340,14 +340,14 @@ public String save() { // go back to where user came from if ("dataverse.xhtml".equals(redirectPage)) { - redirectPage = redirectPage + "&alias=" + dataverseService.findRootDataverse().getAlias(); + redirectPage = redirectPage + "?alias=" + dataverseService.findRootDataverse().getAlias(); } try { redirectPage = URLDecoder.decode(redirectPage, "UTF-8"); } catch (UnsupportedEncodingException ex) { logger.log(Level.SEVERE, "Server does not support 'UTF-8' encoding.", ex); - redirectPage = "dataverse.xhtml&alias=" + dataverseService.findRootDataverse().getAlias(); + redirectPage = "dataverse.xhtml?alias=" + dataverseService.findRootDataverse().getAlias(); } logger.log(Level.FINE, "Sending user to = {0}", redirectPage); diff --git a/src/main/java/edu/harvard/iq/dataverse/batch/jobs/importer/filesystem/FileRecordJobListener.java b/src/main/java/edu/harvard/iq/dataverse/batch/jobs/importer/filesystem/FileRecordJobListener.java index 6d41582017a..f58f6d1f0af 100644 --- a/src/main/java/edu/harvard/iq/dataverse/batch/jobs/importer/filesystem/FileRecordJobListener.java +++ b/src/main/java/edu/harvard/iq/dataverse/batch/jobs/importer/filesystem/FileRecordJobListener.java @@ -236,9 +236,12 @@ public void afterJob() throws Exception { } // remove dataset lock - if (dataset != null && dataset.getId() != null) { - datasetServiceBean.removeDatasetLock(dataset.getId()); - } + // Disabled now, see L.A.'s comment at beforeJob() +// if (dataset != null && dataset.getId() != null) { +// datasetServiceBean.removeDatasetLock(dataset.getId(), DatasetLock.Reason.Ingest); +// } + + getJobLogger().log(Level.INFO, "Removing dataset lock."); // job step info diff --git a/src/main/java/edu/harvard/iq/dataverse/dataaccess/ImageThumbConverter.java b/src/main/java/edu/harvard/iq/dataverse/dataaccess/ImageThumbConverter.java index ef580feb4af..f40bedc21eb 100644 --- a/src/main/java/edu/harvard/iq/dataverse/dataaccess/ImageThumbConverter.java +++ b/src/main/java/edu/harvard/iq/dataverse/dataaccess/ImageThumbConverter.java @@ 
-309,11 +309,11 @@ private static boolean generateWorldMapThumbnail(StorageIO storageIO, return false; } } catch (FileNotFoundException fnfe) { - logger.fine("No .img file for this worldmap file yet; giving up."); + logger.fine("No .img file for this worldmap file yet; giving up. Original Error: " + fnfe); return false; } catch (IOException ioex) { - logger.warning("caught IOException trying to open an input stream for worldmap .img file (" + storageIO.getDataFile().getStorageIdentifier() + ")"); + logger.warning("caught IOException trying to open an input stream for worldmap .img file (" + storageIO.getDataFile().getStorageIdentifier() + "). Original Error: " + ioex); return false; } diff --git a/src/main/java/edu/harvard/iq/dataverse/dataaccess/S3AccessIO.java b/src/main/java/edu/harvard/iq/dataverse/dataaccess/S3AccessIO.java index 4729051e1ba..4d0c730da33 100644 --- a/src/main/java/edu/harvard/iq/dataverse/dataaccess/S3AccessIO.java +++ b/src/main/java/edu/harvard/iq/dataverse/dataaccess/S3AccessIO.java @@ -25,8 +25,12 @@ import edu.harvard.iq.dataverse.Dataverse; import edu.harvard.iq.dataverse.DvObject; import edu.harvard.iq.dataverse.datavariable.DataVariable; +import edu.harvard.iq.dataverse.util.FileUtil; +import java.io.ByteArrayInputStream; import java.io.File; +import java.io.FileInputStream; import java.io.FileNotFoundException; +import java.io.FileOutputStream; import java.io.IOException; import java.io.InputStream; import java.io.OutputStream; @@ -34,9 +38,11 @@ import java.nio.channels.Channels; import java.nio.channels.WritableByteChannel; import java.nio.file.Path; +import java.nio.file.Paths; import java.util.ArrayList; import java.util.Date; import java.util.List; +import java.util.Random; import java.util.logging.Logger; import org.apache.commons.io.IOUtils; @@ -186,6 +192,7 @@ public void savePath(Path fileSystemPath) throws IOException { File inputFile = fileSystemPath.toFile(); if (dvObject instanceof DataFile) { s3.putObject(new PutObjectRequest(bucketName, key, inputFile)); + newFileSize = inputFile.length(); } else { throw new IOException("DvObject type other than datafile is not yet supported"); @@ -205,6 +212,25 @@ public void savePath(Path fileSystemPath) throws IOException { setSize(newFileSize); } + /** + * Implements the StorageIO saveInputStream() method. + * This implementation is somewhat problematic, because S3 cannot save an object of + * an unknown length. This effectively nullifies any benefits of streaming; + * as we cannot start saving until we have read the entire stream. + * One way of solving this would be to buffer the entire stream as byte[], + * in memory, then save it... Which of course would be limited by the amount + * of memory available, and thus would not work for streams larger than that. + * So we have eventually decided to save save the stream to a temp file, then + * save to S3. This is slower, but guaranteed to work on any size stream. + * An alternative we may want to consider is to not implement this method + * in the S3 driver, and make it throw the UnsupportedDataAccessOperationException, + * similarly to how we handle attempts to open OutputStreams, in this and the + * Swift driver. + * + * @param inputStream InputStream we want to save + * @param auxItemTag String representing this Auxiliary type ("extension") + * @throws IOException if anything goes wrong. 
+ */ @Override public void saveInputStream(InputStream inputStream, Long filesize) throws IOException { if (filesize == null || filesize < 0) { @@ -235,24 +261,23 @@ public void saveInputStream(InputStream inputStream) throws IOException { if (!this.canWrite()) { open(DataAccessOption.WRITE_ACCESS); } - //TODO? Copying over the object to a byte array is farily inefficient. - // We need the length of the data to upload inputStreams (see our putObject calls). - // There may be ways to work around this, see https://github.com/aws/aws-sdk-java/issues/474 to start. - // This is out of scope of creating the S3 driver and referenced in issue #4064! - byte[] bytes = IOUtils.toByteArray(inputStream); - long length = bytes.length; - ObjectMetadata metadata = new ObjectMetadata(); - metadata.setContentLength(length); + String directoryString = FileUtil.getFilesTempDirectory(); + + Random rand = new Random(); + Path tempPath = Paths.get(directoryString, Integer.toString(rand.nextInt(Integer.MAX_VALUE))); + File tempFile = createTempFile(tempPath, inputStream); + try { - s3.putObject(bucketName, key, inputStream, metadata); + s3.putObject(bucketName, key, tempFile); } catch (SdkClientException ioex) { String failureMsg = ioex.getMessage(); if (failureMsg == null) { failureMsg = "S3AccessIO: Unknown exception occured while uploading a local file into S3 Storage."; } - + tempFile.delete(); throw new IOException(failureMsg); } + tempFile.delete(); setSize(s3.getObjectMetadata(bucketName, key).getContentLength()); } @@ -336,7 +361,7 @@ public void savePathAsAux(Path fileSystemPath, String auxItemTag) throws IOExcep String destinationKey = getDestinationKey(auxItemTag); try { File inputFile = fileSystemPath.toFile(); - s3.putObject(new PutObjectRequest(bucketName, destinationKey, inputFile)); + s3.putObject(new PutObjectRequest(bucketName, destinationKey, inputFile)); } catch (AmazonClientException ase) { logger.warning("Caught an AmazonServiceException in S3AccessIO.savePathAsAux(): " + ase.getMessage()); throw new IOException("S3AccessIO: Failed to save path as an auxiliary object."); @@ -367,31 +392,71 @@ public void saveInputStreamAsAux(InputStream inputStream, String auxItemTag, Lon } } - //todo: add new method with size? - //or just check the data file content size? - // this method copies a local InputStream into this DataAccess Auxiliary location: + /** + * Implements the StorageIO saveInputStreamAsAux() method. + * This implementation is problematic, because S3 cannot save an object of + * an unknown length. This effectively nullifies any benefits of streaming; + * as we cannot start saving until we have read the entire stream. + * One way of solving this would be to buffer the entire stream as byte[], + * in memory, then save it... Which of course would be limited by the amount + * of memory available, and thus would not work for streams larger than that. + * So we have eventually decided to save save the stream to a temp file, then + * save to S3. This is slower, but guaranteed to work on any size stream. + * An alternative we may want to consider is to not implement this method + * in the S3 driver, and make it throw the UnsupportedDataAccessOperationException, + * similarly to how we handle attempts to open OutputStreams, in this and the + * Swift driver. + * + * @param inputStream InputStream we want to save + * @param auxItemTag String representing this Auxiliary type ("extension") + * @throws IOException if anything goes wrong. 
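The comments above describe the constraint driving this rewrite: the S3 SDK needs a known content length, so a stream of unknown length is first spooled to a local temp file and the file is uploaded. The same idea can be sketched with standard-library calls; this is an illustration of the pattern under that assumption, not the helper this diff adds, and the bucket/key values are placeholders:

import com.amazonaws.services.s3.AmazonS3;
import java.io.IOException;
import java.io.InputStream;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.StandardCopyOption;

// Minimal "spool to disk, then upload" sketch for a stream of unknown length.
public class S3SpoolUploadSketch {

    public static void upload(AmazonS3 s3, String bucket, String key, InputStream in)
            throws IOException {
        // Spool the stream to a temp file so the upload has a known length.
        Path tmp = Files.createTempFile("s3-upload-", ".tmp");
        try {
            Files.copy(in, tmp, StandardCopyOption.REPLACE_EXISTING);
            s3.putObject(bucket, key, tmp.toFile());
        } finally {
            Files.deleteIfExists(tmp); // always remove the spool file
        }
    }
}

The trade-off is the one the javadoc spells out: an extra full write and read of the payload on local disk, in exchange for working with streams of any size.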
+ */ @Override public void saveInputStreamAsAux(InputStream inputStream, String auxItemTag) throws IOException { if (!this.canWrite()) { open(DataAccessOption.WRITE_ACCESS); } + + String directoryString = FileUtil.getFilesTempDirectory(); + + Random rand = new Random(); + String pathNum = Integer.toString(rand.nextInt(Integer.MAX_VALUE)); + Path tempPath = Paths.get(directoryString, pathNum); + File tempFile = createTempFile(tempPath, inputStream); + String destinationKey = getDestinationKey(auxItemTag); - byte[] bytes = IOUtils.toByteArray(inputStream); - long length = bytes.length; - ObjectMetadata metadata = new ObjectMetadata(); - metadata.setContentLength(length); + try { - s3.putObject(bucketName, destinationKey, inputStream, metadata); + s3.putObject(bucketName, destinationKey, tempFile); } catch (SdkClientException ioex) { String failureMsg = ioex.getMessage(); if (failureMsg == null) { failureMsg = "S3AccessIO: Unknown exception occured while saving a local InputStream as S3Object"; } + tempFile.delete(); throw new IOException(failureMsg); } + tempFile.delete(); } - + + //Helper method for supporting saving streams with unknown length to S3 + //We save those streams to a file and then upload the file + private File createTempFile(Path path, InputStream inputStream) throws IOException { + + File targetFile = new File(path.toUri()); //File needs a name + OutputStream outStream = new FileOutputStream(targetFile); + + byte[] buffer = new byte[8 * 1024]; + int bytesRead; + while ((bytesRead = inputStream.read(buffer)) != -1) { + outStream.write(buffer, 0, bytesRead); + } + IOUtils.closeQuietly(inputStream); + IOUtils.closeQuietly(outStream); + return targetFile; + } + @Override public List listAuxObjects() throws IOException { if (!this.canWrite()) { @@ -405,7 +470,7 @@ public List listAuxObjects() throws IOException { List storedAuxFilesSummary = storedAuxFilesList.getObjectSummaries(); try { while (storedAuxFilesList.isTruncated()) { - logger.fine("S3 listAuxObjects: going to second page of list"); + logger.fine("S3 listAuxObjects: going to next page of list"); storedAuxFilesList = s3.listNextBatchOfObjects(storedAuxFilesList); storedAuxFilesSummary.addAll(storedAuxFilesList.getObjectSummaries()); } @@ -416,7 +481,7 @@ public List listAuxObjects() throws IOException { for (S3ObjectSummary item : storedAuxFilesSummary) { String destinationKey = item.getKey(); - String fileName = destinationKey.substring(destinationKey.lastIndexOf("/")); + String fileName = destinationKey.substring(destinationKey.lastIndexOf(".") + 1); logger.fine("S3 cached aux object fileName: " + fileName); ret.add(fileName); } diff --git a/src/main/java/edu/harvard/iq/dataverse/dataaccess/StorageIO.java b/src/main/java/edu/harvard/iq/dataverse/dataaccess/StorageIO.java index 00e1222c68d..0e53430f5ba 100644 --- a/src/main/java/edu/harvard/iq/dataverse/dataaccess/StorageIO.java +++ b/src/main/java/edu/harvard/iq/dataverse/dataaccess/StorageIO.java @@ -106,6 +106,27 @@ public boolean canWrite() { public abstract void savePath(Path fileSystemPath) throws IOException; // same, for an InputStream: + /** + * This method copies a local InputStream into this DataAccess location. + * Note that the S3 driver implementation of this abstract method is problematic, + * because S3 cannot save an object of an unknown length. This effectively + * nullifies any benefits of streaming; as we cannot start saving until we + * have read the entire stream. 
+ * One way of solving this would be to buffer the entire stream as byte[], + * in memory, then save it... Which of course would be limited by the amount + * of memory available, and thus would not work for streams larger than that. + * So we have eventually decided to save the stream to a temp file, then + * save to S3. This is slower, but guaranteed to work on any size stream. + * An alternative we may want to consider is to not implement this method + * in the S3 driver, and make it throw the UnsupportedDataAccessOperationException, + * similarly to how we handle attempts to open OutputStreams, in this and the + * Swift driver. + * (Not an issue in either FileAccessIO or SwiftAccessIO implementations) + * + * @param inputStream InputStream we want to save + * @throws IOException if anything goes wrong. + */ public abstract void saveInputStream(InputStream inputStream) throws IOException; public abstract void saveInputStream(InputStream inputStream, Long filesize) throws IOException; @@ -133,7 +154,27 @@ public boolean canWrite() { // this method copies a local filesystem Path into this DataAccess Auxiliary location: public abstract void savePathAsAux(Path fileSystemPath, String auxItemTag) throws IOException; - // this method copies a local InputStream into this DataAccess Auxiliary location: + /** + * This method copies a local InputStream into this DataAccess Auxiliary location. + * Note that the S3 driver implementation of this abstract method is problematic, + * because S3 cannot save an object of an unknown length. This effectively + * nullifies any benefits of streaming; as we cannot start saving until we + * have read the entire stream. + * One way of solving this would be to buffer the entire stream as byte[], + * in memory, then save it... Which of course would be limited by the amount + * of memory available, and thus would not work for streams larger than that. + * So we have eventually decided to save the stream to a temp file, then + * save to S3. This is slower, but guaranteed to work on any size stream. + * An alternative we may want to consider is to not implement this method + * in the S3 driver, and make it throw the UnsupportedDataAccessOperationException, + * similarly to how we handle attempts to open OutputStreams, in this and the + * Swift driver. + * (Not an issue in either FileAccessIO or SwiftAccessIO implementations) + * + * @param inputStream InputStream we want to save + * @param auxItemTag String representing this Auxiliary type ("extension") + * @throws IOException if anything goes wrong.
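// Illustrative sketch only (not part of the patch): when the caller does know the stream length
// (the Long-filesize overloads above), the SDK can stream straight to S3 by declaring the content
// length, avoiding the temp-file buffer entirely; this mirrors the ObjectMetadata pattern the old
// byte[]-based code used. The class and method names here are hypothetical.
import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.AmazonS3ClientBuilder;
import com.amazonaws.services.s3.model.ObjectMetadata;
import java.io.InputStream;

public class KnownLengthS3Uploader {
    private final AmazonS3 s3 = AmazonS3ClientBuilder.defaultClient();

    public void upload(String bucketName, String key, InputStream inputStream, long filesize) {
        ObjectMetadata metadata = new ObjectMetadata();
        metadata.setContentLength(filesize);          // tells S3 how many bytes to expect
        s3.putObject(bucketName, key, inputStream, metadata);
    }
}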
+ */ public abstract void saveInputStreamAsAux(InputStream inputStream, String auxItemTag) throws IOException; public abstract void saveInputStreamAsAux(InputStream inputStream, String auxItemTag, Long filesize) throws IOException; diff --git a/src/main/java/edu/harvard/iq/dataverse/engine/command/impl/AbstractPublishDatasetCommand.java b/src/main/java/edu/harvard/iq/dataverse/engine/command/impl/AbstractPublishDatasetCommand.java index 38708a8efac..9f04f64e0b6 100644 --- a/src/main/java/edu/harvard/iq/dataverse/engine/command/impl/AbstractPublishDatasetCommand.java +++ b/src/main/java/edu/harvard/iq/dataverse/engine/command/impl/AbstractPublishDatasetCommand.java @@ -21,11 +21,7 @@ public AbstractPublishDatasetCommand(Dataset datasetIn, DataverseRequest aReques } protected WorkflowContext buildContext( String doiProvider, WorkflowContext.TriggerType triggerType) { - return new WorkflowContext(getRequest(), theDataset, - theDataset.getLatestVersion().getVersionNumber(), - theDataset.getLatestVersion().getMinorVersionNumber(), - triggerType, - doiProvider); + return new WorkflowContext(getRequest(), theDataset, doiProvider, triggerType); } } diff --git a/src/main/java/edu/harvard/iq/dataverse/engine/command/impl/AddLockCommand.java b/src/main/java/edu/harvard/iq/dataverse/engine/command/impl/AddLockCommand.java index 1f9ee1e96c2..3001d1532e1 100644 --- a/src/main/java/edu/harvard/iq/dataverse/engine/command/impl/AddLockCommand.java +++ b/src/main/java/edu/harvard/iq/dataverse/engine/command/impl/AddLockCommand.java @@ -28,8 +28,9 @@ public AddLockCommand(DataverseRequest aRequest, Dataset aDataset, DatasetLock a @Override public DatasetLock execute(CommandContext ctxt) throws CommandException { - lock.setDataset(dataset); + ctxt.datasets().addDatasetLock(dataset, lock); + return lock; } diff --git a/src/main/java/edu/harvard/iq/dataverse/engine/command/impl/CreateDataverseCommand.java b/src/main/java/edu/harvard/iq/dataverse/engine/command/impl/CreateDataverseCommand.java index c64995a6958..b78c2f316d2 100644 --- a/src/main/java/edu/harvard/iq/dataverse/engine/command/impl/CreateDataverseCommand.java +++ b/src/main/java/edu/harvard/iq/dataverse/engine/command/impl/CreateDataverseCommand.java @@ -78,8 +78,6 @@ public Dataverse execute(CommandContext ctxt) throws CommandException { created.setDefaultContributorRole(ctxt.roles().findBuiltinRoleByAlias(DataverseRole.EDITOR)); } - // By default, themeRoot should be true - created.setThemeRoot(true); // @todo for now we are saying all dataverses are permission root created.setPermissionRoot(true); diff --git a/src/main/java/edu/harvard/iq/dataverse/engine/command/impl/DeleteDatasetVersionCommand.java b/src/main/java/edu/harvard/iq/dataverse/engine/command/impl/DeleteDatasetVersionCommand.java index 5ff5b71b836..c4d53466f82 100644 --- a/src/main/java/edu/harvard/iq/dataverse/engine/command/impl/DeleteDatasetVersionCommand.java +++ b/src/main/java/edu/harvard/iq/dataverse/engine/command/impl/DeleteDatasetVersionCommand.java @@ -36,6 +36,7 @@ public DeleteDatasetVersionCommand(DataverseRequest aRequest, Dataset dataset) { @Override protected void executeImpl(CommandContext ctxt) throws CommandException { + ctxt.permissions().checkEditDatasetLock(doomed, getRequest(), this); // if you are deleting a dataset that only has 1 draft, we are actually destroying the dataset if (doomed.getVersions().size() == 1) { diff --git a/src/main/java/edu/harvard/iq/dataverse/engine/command/impl/FinalizeDatasetPublicationCommand.java 
b/src/main/java/edu/harvard/iq/dataverse/engine/command/impl/FinalizeDatasetPublicationCommand.java index acc07284404..faa7d3885f9 100644 --- a/src/main/java/edu/harvard/iq/dataverse/engine/command/impl/FinalizeDatasetPublicationCommand.java +++ b/src/main/java/edu/harvard/iq/dataverse/engine/command/impl/FinalizeDatasetPublicationCommand.java @@ -23,13 +23,10 @@ import edu.harvard.iq.dataverse.privateurl.PrivateUrl; import edu.harvard.iq.dataverse.settings.SettingsServiceBean; import edu.harvard.iq.dataverse.util.BundleUtil; -import edu.harvard.iq.dataverse.workflow.Workflow; import edu.harvard.iq.dataverse.workflow.WorkflowContext.TriggerType; import java.io.IOException; import java.sql.Timestamp; import java.util.Date; -import java.util.Optional; -import java.util.ResourceBundle; import java.util.logging.Level; import java.util.logging.Logger; @@ -100,16 +97,25 @@ public Dataset execute(CommandContext ctxt) throws CommandException { } theDataset.getEditVersion().setVersionState(DatasetVersion.VersionState.RELEASED); - exportMetadata(ctxt.settings()); boolean doNormalSolrDocCleanUp = true; ctxt.index().indexDataset(theDataset, doNormalSolrDocCleanUp); ctxt.solrIndex().indexPermissionsForOneDvObject(theDataset); - ctxt.engine().submit(new RemoveLockCommand(getRequest(), theDataset)); + // Remove locks + ctxt.engine().submit(new RemoveLockCommand(getRequest(), theDataset, DatasetLock.Reason.Workflow)); + if ( theDataset.isLockedFor(DatasetLock.Reason.InReview) ) { + ctxt.engine().submit( + new RemoveLockCommand(getRequest(), theDataset, DatasetLock.Reason.InReview) ); + } - ctxt.workflows().getDefaultWorkflow(TriggerType.PostPublishDataset) - .ifPresent(wf -> ctxt.workflows().start(wf, buildContext(doiProvider, TriggerType.PostPublishDataset))); + ctxt.workflows().getDefaultWorkflow(TriggerType.PostPublishDataset).ifPresent(wf -> { + try { + ctxt.workflows().start(wf, buildContext(doiProvider, TriggerType.PostPublishDataset)); + } catch (CommandException ex) { + logger.log(Level.SEVERE, "Error invoking post-publish workflow: " + ex.getMessage(), ex); + } + }); Dataset resultSet = ctxt.em().merge(theDataset); @@ -258,7 +264,7 @@ private void notifyUsersDatasetPublish(CommandContext ctxt, DvObject subject) { } /** - * Whether it's EZID or DataCiteif, if the registration is + * Whether it's EZID or DataCite, if the registration is * refused because the identifier already exists, we'll generate another one * and try to register again... 
but only up to some * reasonably high number of times - so that we don't diff --git a/src/main/java/edu/harvard/iq/dataverse/engine/command/impl/PublishDatasetCommand.java b/src/main/java/edu/harvard/iq/dataverse/engine/command/impl/PublishDatasetCommand.java index 22732ea34f7..430a2778d80 100644 --- a/src/main/java/edu/harvard/iq/dataverse/engine/command/impl/PublishDatasetCommand.java +++ b/src/main/java/edu/harvard/iq/dataverse/engine/command/impl/PublishDatasetCommand.java @@ -1,14 +1,8 @@ package edu.harvard.iq.dataverse.engine.command.impl; -import edu.harvard.iq.dataverse.DataFile; import edu.harvard.iq.dataverse.Dataset; import edu.harvard.iq.dataverse.DatasetLock; -import edu.harvard.iq.dataverse.DatasetVersionUser; -import edu.harvard.iq.dataverse.DvObject; -import edu.harvard.iq.dataverse.UserNotification; -import edu.harvard.iq.dataverse.*; import edu.harvard.iq.dataverse.authorization.Permission; -import edu.harvard.iq.dataverse.authorization.users.AuthenticatedUser; import edu.harvard.iq.dataverse.engine.command.CommandContext; import edu.harvard.iq.dataverse.engine.command.DataverseRequest; import edu.harvard.iq.dataverse.engine.command.RequiredPermissions; @@ -17,14 +11,11 @@ import edu.harvard.iq.dataverse.settings.SettingsServiceBean; import edu.harvard.iq.dataverse.workflow.Workflow; import edu.harvard.iq.dataverse.workflow.WorkflowContext.TriggerType; -import edu.harvard.iq.dataverse.util.BundleUtil; -import java.io.IOException; -import java.sql.Timestamp; -import java.util.Date; import java.util.Optional; +import static java.util.stream.Collectors.joining; /** - * Kick-off a dataset publication process. The process may complete immediatly, + * Kick-off a dataset publication process. The process may complete immediately, * but may also result in a workflow being started and pending on some external * response. Either way, the process will be completed by an instance of * {@link FinalizeDatasetPublicationCommand}. @@ -64,20 +55,17 @@ public PublishDatasetResult execute(CommandContext ctxt) throws CommandException theDataset.getEditVersion().setVersionNumber(new Long(theDataset.getVersionNumber())); theDataset.getEditVersion().setMinorVersionNumber(new Long(theDataset.getMinorVersionNumber() + 1)); - } else /* major, non-first release */ { + } else { + // major, non-first release theDataset.getEditVersion().setVersionNumber(new Long(theDataset.getVersionNumber() + 1)); theDataset.getEditVersion().setMinorVersionNumber(new Long(0)); } theDataset = ctxt.em().merge(theDataset); - //Move remove lock to after merge... SEK 9/1/17 (why? -- L.A.) 
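// Illustrative sketch only (not part of the patch): a minimal stand-in model of the per-reason
// dataset lock handling used across these commands (isLockedFor, RemoveLockCommand with a
// DatasetLock.Reason, and the joined reason string in the publish check). Everything below is
// hypothetical and only demonstrates the pattern, not the real Dataset/DatasetLock classes.
import java.util.EnumSet;
import java.util.Set;
import static java.util.stream.Collectors.joining;

public class LockCheckSketch {
    enum Reason { Workflow, Ingest, InReview }   // mirrors DatasetLock.Reason

    public static void main(String[] args) {
        Set<Reason> locks = EnumSet.of(Reason.Workflow, Reason.InReview);

        // Publishing is blocked only by Workflow and Ingest locks; an InReview lock alone is fine.
        if (locks.contains(Reason.Workflow) || locks.contains(Reason.Ingest)) {
            String reasons = locks.stream().map(Enum::name).collect(joining(","));
            System.out.println("This dataset is locked. Reason: " + reasons + ". Please try publishing later.");
        }

        // Finalizing publication removes the Workflow lock, plus InReview if it is present.
        locks.remove(Reason.Workflow);
        locks.remove(Reason.InReview);
        System.out.println("Remaining locks: " + locks);
    }
}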
- ctxt.engine().submit( new RemoveLockCommand(getRequest(), theDataset)); - Optional prePubWf = ctxt.workflows().getDefaultWorkflow(TriggerType.PrePublishDataset); if ( prePubWf.isPresent() ) { // We start a workflow - ctxt.engine().submit( new AddLockCommand(getRequest(), theDataset, new DatasetLock(DatasetLock.Reason.Workflow, getRequest().getAuthenticatedUser()))); ctxt.workflows().start(prePubWf.get(), buildContext(doiProvider, TriggerType.PrePublishDataset) ); return new PublishDatasetResult(theDataset, false); @@ -100,9 +88,11 @@ private void verifyCommandArguments() throws IllegalCommandException { throw new IllegalCommandException("This dataset may not be published because its host dataverse (" + theDataset.getOwner().getAlias() + ") has not been published.", this); } - if (theDataset.isLocked() && !theDataset.getDatasetLock().getReason().equals(DatasetLock.Reason.InReview)) { - - throw new IllegalCommandException("This dataset is locked. Reason: " + theDataset.getDatasetLock().getReason().toString() + ". Please try publishing later.", this); + if ( theDataset.isLockedFor(DatasetLock.Reason.Workflow) + || theDataset.isLockedFor(DatasetLock.Reason.Ingest) ) { + throw new IllegalCommandException("This dataset is locked. Reason: " + + theDataset.getLocks().stream().map(l -> l.getReason().name()).collect( joining(",") ) + + ". Please try publishing later.", this); } if (theDataset.getLatestVersion().isReleased()) { diff --git a/src/main/java/edu/harvard/iq/dataverse/engine/command/impl/RemoveLockCommand.java b/src/main/java/edu/harvard/iq/dataverse/engine/command/impl/RemoveLockCommand.java index 669e00ea9ba..b9c2f20f37c 100644 --- a/src/main/java/edu/harvard/iq/dataverse/engine/command/impl/RemoveLockCommand.java +++ b/src/main/java/edu/harvard/iq/dataverse/engine/command/impl/RemoveLockCommand.java @@ -1,6 +1,7 @@ package edu.harvard.iq.dataverse.engine.command.impl; import edu.harvard.iq.dataverse.Dataset; +import edu.harvard.iq.dataverse.DatasetLock; import edu.harvard.iq.dataverse.authorization.Permission; import edu.harvard.iq.dataverse.engine.command.AbstractVoidCommand; import edu.harvard.iq.dataverse.engine.command.CommandContext; @@ -16,15 +17,17 @@ public class RemoveLockCommand extends AbstractVoidCommand { private final Dataset dataset; + private final DatasetLock.Reason reason; - public RemoveLockCommand(DataverseRequest aRequest, Dataset aDataset) { + public RemoveLockCommand(DataverseRequest aRequest, Dataset aDataset, DatasetLock.Reason aReason) { super(aRequest, aDataset); dataset = aDataset; + reason = aReason; } @Override protected void executeImpl(CommandContext ctxt) throws CommandException { - ctxt.datasets().removeDatasetLock(dataset.getId()); + ctxt.datasets().removeDatasetLocks(dataset.getId(), reason); } } diff --git a/src/main/java/edu/harvard/iq/dataverse/engine/command/impl/ReturnDatasetToAuthorCommand.java b/src/main/java/edu/harvard/iq/dataverse/engine/command/impl/ReturnDatasetToAuthorCommand.java index 3ee601bde30..fc5272dc406 100644 --- a/src/main/java/edu/harvard/iq/dataverse/engine/command/impl/ReturnDatasetToAuthorCommand.java +++ b/src/main/java/edu/harvard/iq/dataverse/engine/command/impl/ReturnDatasetToAuthorCommand.java @@ -1,6 +1,7 @@ package edu.harvard.iq.dataverse.engine.command.impl; import edu.harvard.iq.dataverse.Dataset; +import edu.harvard.iq.dataverse.DatasetLock; import edu.harvard.iq.dataverse.DatasetVersionUser; import edu.harvard.iq.dataverse.UserNotification; import edu.harvard.iq.dataverse.authorization.Permission; @@ -44,7 
+45,7 @@ public Dataset execute(CommandContext ctxt) throws CommandException { throw new IllegalCommandException("You must enter a reason for returning a dataset to the author(s).", this); } */ - ctxt.engine().submit( new RemoveLockCommand(getRequest(), theDataset)); + ctxt.engine().submit( new RemoveLockCommand(getRequest(), theDataset, DatasetLock.Reason.InReview)); Dataset updatedDataset = save(ctxt); return updatedDataset; @@ -56,7 +57,15 @@ public Dataset save(CommandContext ctxt) throws CommandException { theDataset.getEditVersion().setLastUpdateTime(updateTime); // We set "in review" to false because now the ball is back in the author's court. theDataset.setModificationTime(updateTime); - theDataset.setDatasetLock(null); + // TODO: ctxt.datasets().removeDatasetLocks() doesn't work. Try RemoveLockCommand? + AuthenticatedUser authenticatedUser = null; + for (DatasetLock lock : theDataset.getLocks()) { + if (DatasetLock.Reason.InReview.equals(lock.getReason())) { + theDataset.removeLock(lock); + // TODO: Are we supposed to remove the dataset lock from the user? What's going on here? + authenticatedUser = lock.getUser(); + } + } Dataset savedDataset = ctxt.em().merge(theDataset); ctxt.em().flush(); diff --git a/src/main/java/edu/harvard/iq/dataverse/engine/command/impl/UpdateDatasetCommand.java b/src/main/java/edu/harvard/iq/dataverse/engine/command/impl/UpdateDatasetCommand.java index e5a7fde5cdb..fb3824f541c 100644 --- a/src/main/java/edu/harvard/iq/dataverse/engine/command/impl/UpdateDatasetCommand.java +++ b/src/main/java/edu/harvard/iq/dataverse/engine/command/impl/UpdateDatasetCommand.java @@ -13,7 +13,6 @@ import edu.harvard.iq.dataverse.engine.command.DataverseRequest; import edu.harvard.iq.dataverse.engine.command.RequiredPermissions; import edu.harvard.iq.dataverse.engine.command.exception.CommandException; -import edu.harvard.iq.dataverse.engine.command.exception.CommandExecutionException; import edu.harvard.iq.dataverse.engine.command.exception.IllegalCommandException; import edu.harvard.iq.dataverse.settings.SettingsServiceBean; import java.sql.Timestamp; @@ -78,6 +77,7 @@ public void setValidateLenient(boolean validateLenient) { @Override public Dataset execute(CommandContext ctxt) throws CommandException { + ctxt.permissions().checkEditDatasetLock(theDataset, getRequest(), this); // first validate // @todo for now we run through an initFields method that creates empty fields for anything without a value // that way they can be checked for required diff --git a/src/main/java/edu/harvard/iq/dataverse/engine/command/impl/UpdateDatasetVersionCommand.java b/src/main/java/edu/harvard/iq/dataverse/engine/command/impl/UpdateDatasetVersionCommand.java index 05bd79c275d..1d1c31315c0 100644 --- a/src/main/java/edu/harvard/iq/dataverse/engine/command/impl/UpdateDatasetVersionCommand.java +++ b/src/main/java/edu/harvard/iq/dataverse/engine/command/impl/UpdateDatasetVersionCommand.java @@ -36,6 +36,7 @@ public UpdateDatasetVersionCommand(DataverseRequest aRequest, DatasetVersion the public DatasetVersion execute(CommandContext ctxt) throws CommandException { Dataset ds = newVersion.getDataset(); + ctxt.permissions().checkEditDatasetLock(ds, getRequest(), this); DatasetVersion latest = ds.getLatestVersion(); if ( latest == null ) { diff --git a/src/main/java/edu/harvard/iq/dataverse/ingest/IngestMessageBean.java b/src/main/java/edu/harvard/iq/dataverse/ingest/IngestMessageBean.java index 46fa7370d3f..c9886dcab13 100644 --- 
a/src/main/java/edu/harvard/iq/dataverse/ingest/IngestMessageBean.java +++ b/src/main/java/edu/harvard/iq/dataverse/ingest/IngestMessageBean.java @@ -20,30 +20,23 @@ package edu.harvard.iq.dataverse.ingest; -import edu.harvard.iq.dataverse.DatasetVersion; import edu.harvard.iq.dataverse.DatasetServiceBean; import edu.harvard.iq.dataverse.DataFileServiceBean; import edu.harvard.iq.dataverse.DataFile; import edu.harvard.iq.dataverse.Dataset; -import edu.harvard.iq.dataverse.ingest.IngestServiceBean; +import edu.harvard.iq.dataverse.DatasetLock; -import java.io.File; -import java.util.ArrayList; import java.util.Iterator; -import java.util.List; import java.util.logging.Logger; import javax.ejb.ActivationConfigProperty; import javax.ejb.EJB; import javax.ejb.MessageDriven; import javax.ejb.TransactionAttribute; import javax.ejb.TransactionAttributeType; -import javax.faces.application.FacesMessage; import javax.jms.JMSException; import javax.jms.Message; import javax.jms.MessageListener; import javax.jms.ObjectMessage; -import javax.naming.Context; -import javax.naming.InitialContext; /** * @@ -135,7 +128,7 @@ public void onMessage(Message message) { if (datafile != null) { Dataset dataset = datafile.getOwner(); if (dataset != null && dataset.getId() != null) { - datasetService.removeDatasetLock(dataset.getId()); + datasetService.removeDatasetLocks(dataset.getId(), DatasetLock.Reason.Ingest); } } } diff --git a/src/main/java/edu/harvard/iq/dataverse/mydata/DataRetrieverAPI.java b/src/main/java/edu/harvard/iq/dataverse/mydata/DataRetrieverAPI.java index c369c1f52e0..efcfdbdae96 100644 --- a/src/main/java/edu/harvard/iq/dataverse/mydata/DataRetrieverAPI.java +++ b/src/main/java/edu/harvard/iq/dataverse/mydata/DataRetrieverAPI.java @@ -4,6 +4,7 @@ package edu.harvard.iq.dataverse.mydata; import edu.harvard.iq.dataverse.DataverseRoleServiceBean; +import edu.harvard.iq.dataverse.DataverseServiceBean; import edu.harvard.iq.dataverse.DataverseSession; import edu.harvard.iq.dataverse.DvObjectServiceBean; import edu.harvard.iq.dataverse.RoleAssigneeServiceBean; @@ -22,6 +23,7 @@ import edu.harvard.iq.dataverse.search.SearchException; import edu.harvard.iq.dataverse.search.SearchFields; import edu.harvard.iq.dataverse.search.SortBy; +import java.math.BigDecimal; import java.util.List; import java.util.Map; import java.util.Random; @@ -67,6 +69,8 @@ public class DataRetrieverAPI extends AbstractApiBean { SearchServiceBean searchService; @EJB AuthenticationServiceBean authenticationService; + @EJB + DataverseServiceBean dataverseService; //@EJB //MyDataQueryHelperServiceBean myDataQueryHelperServiceBean; @EJB @@ -522,6 +526,12 @@ private JsonArrayBuilder formatSolrDocs(SolrQueryResponse solrResponse, RoleTagR // ------------------------------------------- myDataCardInfo = doc.getJsonForMyData(); + if (!doc.getEntity().isInstanceofDataFile()){ + String parentAlias = dataverseService.getParentAliasString(doc); + System.out.print("parentAlias: " + parentAlias); + myDataCardInfo.add("parent_alias",parentAlias); + } + // ------------------------------------------- // (b) Add role info // ------------------------------------------- diff --git a/src/main/java/edu/harvard/iq/dataverse/passwordreset/PasswordResetPage.java b/src/main/java/edu/harvard/iq/dataverse/passwordreset/PasswordResetPage.java index 228c6741a80..a0b3ec437c2 100644 --- a/src/main/java/edu/harvard/iq/dataverse/passwordreset/PasswordResetPage.java +++ b/src/main/java/edu/harvard/iq/dataverse/passwordreset/PasswordResetPage.java @@ -13,6 +13,7 
@@ import edu.harvard.iq.dataverse.authorization.users.AuthenticatedUser; import edu.harvard.iq.dataverse.settings.SettingsServiceBean; import edu.harvard.iq.dataverse.util.BundleUtil; +import edu.harvard.iq.dataverse.util.SystemConfig; import java.util.logging.Level; import java.util.logging.Logger; import javax.ejb.EJB; diff --git a/src/main/java/edu/harvard/iq/dataverse/search/SearchServiceBean.java b/src/main/java/edu/harvard/iq/dataverse/search/SearchServiceBean.java index 660b5cc80e7..96b51ed3240 100644 --- a/src/main/java/edu/harvard/iq/dataverse/search/SearchServiceBean.java +++ b/src/main/java/edu/harvard/iq/dataverse/search/SearchServiceBean.java @@ -455,7 +455,7 @@ public SolrQueryResponse search(DataverseRequest dataverseRequest, Dataverse dat */ if (type.equals("dataverses")) { solrSearchResult.setName(name); - solrSearchResult.setHtmlUrl(baseUrl + "/dataverse/" + identifier); + solrSearchResult.setHtmlUrl(baseUrl + SystemConfig.DATAVERSE_PATH + identifier); // Do not set the ImageUrl, let the search include fragment fill in // the thumbnail, similarly to how the dataset and datafile cards // are handled. diff --git a/src/main/java/edu/harvard/iq/dataverse/settings/SettingsServiceBean.java b/src/main/java/edu/harvard/iq/dataverse/settings/SettingsServiceBean.java index 29376f04d97..94024bf5949 100644 --- a/src/main/java/edu/harvard/iq/dataverse/settings/SettingsServiceBean.java +++ b/src/main/java/edu/harvard/iq/dataverse/settings/SettingsServiceBean.java @@ -32,6 +32,7 @@ public class SettingsServiceBean { * So there. */ public enum Key { + AllowApiTokenLookupViaApi, /** * Ordered, comma-separated list of custom fields to show above the fold * on dataset page such as "data_type,sample,pdb" diff --git a/src/main/java/edu/harvard/iq/dataverse/util/SystemConfig.java b/src/main/java/edu/harvard/iq/dataverse/util/SystemConfig.java index ee26d1e19c5..c99d99a4b05 100644 --- a/src/main/java/edu/harvard/iq/dataverse/util/SystemConfig.java +++ b/src/main/java/edu/harvard/iq/dataverse/util/SystemConfig.java @@ -42,6 +42,8 @@ public class SystemConfig { @EJB AuthenticationServiceBean authenticationService; + + public static final String DATAVERSE_PATH = "/dataverse/"; /** * A JVM option for the advertised fully qualified domain name (hostname) of diff --git a/src/main/java/edu/harvard/iq/dataverse/workflow/PendingWorkflowInvocation.java b/src/main/java/edu/harvard/iq/dataverse/workflow/PendingWorkflowInvocation.java index c335436f5b7..b2f4171a190 100644 --- a/src/main/java/edu/harvard/iq/dataverse/workflow/PendingWorkflowInvocation.java +++ b/src/main/java/edu/harvard/iq/dataverse/workflow/PendingWorkflowInvocation.java @@ -20,7 +20,7 @@ /** * A workflow whose current step waits for an external system to complete a - * (probably lengthy) process. Meanwhile, it sits in the database, pending. + * (probably lengthy) process. Meanwhile, it sits in the database, pending away. 
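// Illustrative sketch only (not part of the patch): how a shared path constant like the
// SystemConfig.DATAVERSE_PATH introduced above keeps URL building consistent across search
// results and cards. The UrlSketch class and the example base URL are hypothetical.
public class UrlSketch {
    public static final String DATAVERSE_PATH = "/dataverse/";

    public static String dataverseHtmlUrl(String baseUrl, String identifier) {
        // One definition of the path segment, reused wherever a dataverse link is rendered.
        return baseUrl + DATAVERSE_PATH + identifier;
    }

    public static void main(String[] args) {
        System.out.println(dataverseHtmlUrl("https://demo.dataverse.org", "myDataverseAlias"));
    }
}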
* * @author michael */ @@ -38,6 +38,7 @@ public class PendingWorkflowInvocation implements Serializable { @OneToOne Dataset dataset; + long nextVersionNumber; long nextMinorVersionNumber; @@ -165,5 +166,4 @@ public int getTypeOrdinal() { public void setTypeOrdinal(int typeOrdinal) { this.typeOrdinal = typeOrdinal; } - } diff --git a/src/main/java/edu/harvard/iq/dataverse/workflow/WorkflowContext.java b/src/main/java/edu/harvard/iq/dataverse/workflow/WorkflowContext.java index 09129a6d796..0cca2bd64a9 100644 --- a/src/main/java/edu/harvard/iq/dataverse/workflow/WorkflowContext.java +++ b/src/main/java/edu/harvard/iq/dataverse/workflow/WorkflowContext.java @@ -6,8 +6,8 @@ import java.util.UUID; /** - * The context in which the workflow is performed. Contains information steps might - * need, such as the dataset being worked on an version data. + * The context in which a workflow is performed. Contains information steps might + * need, such as the dataset being worked on and version data. * * Design-wise, this class allows us to add parameters to {@link WorkflowStep} without * changing its method signatures, which would break break client code. @@ -29,7 +29,16 @@ public enum TriggerType { private String invocationId = UUID.randomUUID().toString(); - public WorkflowContext(DataverseRequest request, Dataset dataset, long nextVersionNumber, long nextMinorVersionNumber, TriggerType type, String doiProvider) { + public WorkflowContext( DataverseRequest aRequest, Dataset aDataset, String doiProvider, TriggerType aTriggerType ) { + this( aRequest, aDataset, + aDataset.getLatestVersion().getVersionNumber(), + aDataset.getLatestVersion().getMinorVersionNumber(), + aTriggerType, + doiProvider); + } + + public WorkflowContext(DataverseRequest request, Dataset dataset, long nextVersionNumber, + long nextMinorVersionNumber, TriggerType type, String doiProvider) { this.request = request; this.dataset = dataset; this.nextVersionNumber = nextVersionNumber; @@ -74,5 +83,4 @@ public TriggerType getType() { return type; } - } diff --git a/src/main/java/edu/harvard/iq/dataverse/workflow/WorkflowServiceBean.java b/src/main/java/edu/harvard/iq/dataverse/workflow/WorkflowServiceBean.java index 3791e9f3851..4b581883274 100644 --- a/src/main/java/edu/harvard/iq/dataverse/workflow/WorkflowServiceBean.java +++ b/src/main/java/edu/harvard/iq/dataverse/workflow/WorkflowServiceBean.java @@ -1,5 +1,7 @@ package edu.harvard.iq.dataverse.workflow; +import edu.harvard.iq.dataverse.DatasetLock; +import edu.harvard.iq.dataverse.DatasetServiceBean; import edu.harvard.iq.dataverse.EjbDataverseEngine; import edu.harvard.iq.dataverse.RoleAssigneeServiceBean; import edu.harvard.iq.dataverse.engine.command.exception.CommandException; @@ -17,15 +19,16 @@ import java.util.List; import java.util.Map; import java.util.Optional; -import java.util.ServiceConfigurationError; -import java.util.ServiceLoader; import java.util.logging.Level; import java.util.logging.Logger; import javax.ejb.Asynchronous; import javax.ejb.EJB; import javax.ejb.Stateless; +import javax.ejb.TransactionAttribute; +import javax.ejb.TransactionAttributeType; import javax.persistence.EntityManager; import javax.persistence.PersistenceContext; +import javax.persistence.Query; /** * Service bean for managing and executing {@link Workflow}s @@ -38,8 +41,11 @@ public class WorkflowServiceBean { private static final Logger logger = Logger.getLogger(WorkflowServiceBean.class.getName()); private static final String WORKFLOW_ID_KEY = "WorkflowServiceBean.WorkflowId:"; - 
@PersistenceContext + @PersistenceContext(unitName = "VDCNet-ejbPU") EntityManager em; + + @EJB + DatasetServiceBean datasets; @EJB SettingsServiceBean settings; @@ -76,9 +82,13 @@ public WorkflowServiceBean() { * * @param wf the workflow to execute. * @param ctxt the context in which the workflow is executed. + * @throws CommandException If the dataset could not be locked. */ - public void start(Workflow wf, WorkflowContext ctxt) { - forward(wf, ctxt, 0); + @Asynchronous + public void start(Workflow wf, WorkflowContext ctxt) throws CommandException { + ctxt = refresh(ctxt); + lockDataset(ctxt); + forward(wf, ctxt); } /** @@ -92,37 +102,22 @@ public void start(Workflow wf, WorkflowContext ctxt) { * #doResume(edu.harvard.iq.dataverse.workflow.PendingWorkflowInvocation, * java.lang.String) */ + @Asynchronous public void resume(PendingWorkflowInvocation pending, String body) { em.remove(em.merge(pending)); doResume(pending, body); } + @Asynchronous - private void forward(Workflow wf, WorkflowContext ctxt, int idx) { - WorkflowStepData wsd = wf.getSteps().get(idx); - WorkflowStep step = createStep(wsd); - WorkflowStepResult res = step.run(ctxt); - - if (res == WorkflowStepResult.OK) { - if (idx == wf.getSteps().size() - 1) { - workflowCompleted(wf, ctxt); - } else { - forward(wf, ctxt, ++idx); - } - - } else if (res instanceof Failure) { - logger.log(Level.WARNING, "Workflow {0} failed: {1}", new Object[]{ctxt.getInvocationId(), ((Failure) res).getReason()}); - rollback(wf, ctxt, (Failure) res, idx - 1); - - } else if (res instanceof Pending) { - pauseAndAwait(wf, ctxt, (Pending) res, idx); - } + private void forward(Workflow wf, WorkflowContext ctxt) { + executeSteps(wf, ctxt, 0); } - @Asynchronous private void doResume(PendingWorkflowInvocation pending, String body) { Workflow wf = pending.getWorkflow(); List stepsLeft = wf.getSteps().subList(pending.getPendingStepIdx(), wf.getSteps().size()); + WorkflowStep pendingStep = createStep(stepsLeft.get(0)); final WorkflowContext ctxt = pending.reCreateContext(roleAssignees); @@ -132,52 +127,129 @@ private void doResume(PendingWorkflowInvocation pending, String body) { } else if (res instanceof Pending) { pauseAndAwait(wf, ctxt, (Pending) res, pending.getPendingStepIdx()); } else { - forward(wf, ctxt, pending.getPendingStepIdx() + 1); + executeSteps(wf, ctxt, pending.getPendingStepIdx() + 1); } } @Asynchronous - private void rollback(Workflow wf, WorkflowContext ctxt, Failure failure, int idx) { - WorkflowStepData wsd = wf.getSteps().get(idx); - logger.log(Level.INFO, "{0} rollback of step {1}", new Object[]{ctxt.getInvocationId(), idx}); - try { - createStep(wsd).rollback(ctxt, failure); - } finally { - if (idx > 0) { - rollback(wf, ctxt, failure, --idx); - } else { - unlockDataset(ctxt); + private void rollback(Workflow wf, WorkflowContext ctxt, Failure failure, int lastCompletedStepIdx) { + ctxt = refresh(ctxt); + final List steps = wf.getSteps(); + + for ( int stepIdx = lastCompletedStepIdx; stepIdx >= 0; --stepIdx ) { + WorkflowStepData wsd = steps.get(stepIdx); + WorkflowStep step = createStep(wsd); + + try { + logger.log(Level.INFO, "Workflow {0} step {1}: Rollback", new Object[]{ctxt.getInvocationId(), stepIdx}); + rollbackStep(step, ctxt, failure); + + } catch (Exception e) { + logger.log(Level.WARNING, "Workflow " + ctxt.getInvocationId() + + " step " + stepIdx + ": Rollback error: " + e.getMessage(), e); } + + } + + logger.log( Level.INFO, "Removing workflow lock"); + try { + engine.submit( new RemoveLockCommand(ctxt.getRequest(), 
ctxt.getDataset(), DatasetLock.Reason.Workflow) ); + + // Corner case - delete locks generated within this same transaction. + Query deleteQuery = em.createQuery("DELETE from DatasetLock l WHERE l.dataset.id=:id AND l.reason=:reason"); + deleteQuery.setParameter("id", ctxt.getDataset().getId() ); + deleteQuery.setParameter("reason", DatasetLock.Reason.Workflow ); + deleteQuery.executeUpdate(); + + } catch (CommandException ex) { + logger.log(Level.SEVERE, "Error restoring dataset locks state after rollback: " + ex.getMessage(), ex); } } /** - * Unlocks the dataset after the workflow is over. - * @param ctxt + * Execute the passed workflow, starting from {@code initialStepIdx}. + * @param wf The workflow to run. + * @param ctxt Execution context to run the workflow in. + * @param initialStepIdx 0-based index of the first step to run. */ - @Asynchronous - private void unlockDataset( WorkflowContext ctxt ) { - try { - engine.submit( new RemoveLockCommand(ctxt.getRequest(), ctxt.getDataset()) ); - } catch (CommandException ex) { - logger.log(Level.SEVERE, "Cannot unlock dataset after rollback: " + ex.getMessage(), ex); + private void executeSteps(Workflow wf, WorkflowContext ctxt, int initialStepIdx ) { + final List steps = wf.getSteps(); + + for ( int stepIdx = initialStepIdx; stepIdx < steps.size(); stepIdx++ ) { + WorkflowStepData wsd = steps.get(stepIdx); + WorkflowStep step = createStep(wsd); + WorkflowStepResult res = runStep(step, ctxt); + + try { + if (res == WorkflowStepResult.OK) { + logger.log(Level.INFO, "Workflow {0} step {1}: OK", new Object[]{ctxt.getInvocationId(), stepIdx}); + + } else if (res instanceof Failure) { + logger.log(Level.WARNING, "Workflow {0} failed: {1}", new Object[]{ctxt.getInvocationId(), ((Failure) res).getReason()}); + rollback(wf, ctxt, (Failure) res, stepIdx-1 ); + return; + + } else if (res instanceof Pending) { + pauseAndAwait(wf, ctxt, (Pending) res, stepIdx); + return; + } + + } catch ( Exception e ) { + logger.log(Level.WARNING, "Workflow {0} step {1}: Uncaught exception: {2}", new Object[]{ctxt.getInvocationId(), stepIdx, e.getMessage()}); + logger.log(Level.WARNING, "Trace:", e); + rollback(wf, ctxt, (Failure) res, stepIdx-1 ); + return; + } } + + workflowCompleted(wf, ctxt); + + } + + ////////////////////////////////////////////////////////////// + // Internal methods to run each step in its own transaction.
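// Illustrative sketch only (not part of the patch): the container-managed transaction pattern the
// methods below rely on — an @Asynchronous driver delegating each unit of work to a REQUIRES_NEW
// method so it commits (or rolls back) in its own transaction. StepRunnerSketch and doOneStep()
// are hypothetical names; here the call is routed through the container proxy (via SessionContext)
// so the transaction attribute is honored.
import javax.annotation.Resource;
import javax.ejb.Asynchronous;
import javax.ejb.SessionContext;
import javax.ejb.Stateless;
import javax.ejb.TransactionAttribute;
import javax.ejb.TransactionAttributeType;

@Stateless
public class StepRunnerSketch {

    @Resource
    private SessionContext ctx;

    @Asynchronous
    public void runAll(int stepCount) {
        // Obtain a container-managed reference so REQUIRES_NEW on doOneStep() takes effect.
        StepRunnerSketch self = ctx.getBusinessObject(StepRunnerSketch.class);
        for (int i = 0; i < stepCount; i++) {
            self.doOneStep(i);
        }
    }

    @TransactionAttribute(TransactionAttributeType.REQUIRES_NEW)
    public void doOneStep(int stepIdx) {
        // Runs in its own transaction: a failure in a later step does not
        // roll back work already committed by earlier steps.
    }
}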
+ // + + @TransactionAttribute(TransactionAttributeType.REQUIRES_NEW) + WorkflowStepResult runStep( WorkflowStep step, WorkflowContext ctxt ) { + return step.run(ctxt); + } + + @TransactionAttribute(TransactionAttributeType.REQUIRES_NEW) + WorkflowStepResult resumeStep( WorkflowStep step, WorkflowContext ctxt, Map localData, String externalData ) { + return step.resume(ctxt, localData, externalData); + } + + @TransactionAttribute(TransactionAttributeType.REQUIRES_NEW) + void rollbackStep( WorkflowStep step, WorkflowContext ctxt, Failure reason ) { + step.rollback(ctxt, reason); + } + + @TransactionAttribute(TransactionAttributeType.REQUIRES_NEW) + void lockDataset( WorkflowContext ctxt ) throws CommandException { + final DatasetLock datasetLock = new DatasetLock(DatasetLock.Reason.Workflow, ctxt.getRequest().getAuthenticatedUser()); +// engine.submit(new AddLockCommand(ctxt.getRequest(), ctxt.getDataset(), datasetLock)); + datasetLock.setDataset(ctxt.getDataset()); + em.persist(datasetLock); + em.flush(); } + // + // + ////////////////////////////////////////////////////////////// + private void pauseAndAwait(Workflow wf, WorkflowContext ctxt, Pending pendingRes, int idx) { PendingWorkflowInvocation pending = new PendingWorkflowInvocation(wf, ctxt, pendingRes); pending.setPendingStepIdx(idx); em.persist(pending); } - @Asynchronous private void workflowCompleted(Workflow wf, WorkflowContext ctxt) { logger.log(Level.INFO, "Workflow {0} completed.", ctxt.getInvocationId()); if ( ctxt.getType() == TriggerType.PrePublishDataset ) { try { engine.submit( new FinalizeDatasetPublicationCommand(ctxt.getDataset(), ctxt.getDoiProvider(), ctxt.getRequest()) ); - unlockDataset(ctxt); - + } catch (CommandException ex) { logger.log(Level.SEVERE, "Exception finalizing workflow " + ctxt.getInvocationId() +": " + ex.getMessage(), ex); rollback(wf, ctxt, new Failure("Exception while finalizing the publication: " + ex.getMessage()), wf.steps.size()-1); @@ -273,5 +345,11 @@ private WorkflowStep createStep(WorkflowStepData wsd) { } return provider.getStep(wsd.getStepType(), wsd.getStepParameters()); } + + private WorkflowContext refresh( WorkflowContext ctxt ) { + return new WorkflowContext( ctxt.getRequest(), + datasets.find( ctxt.getDataset().getId() ), ctxt.getNextVersionNumber(), + ctxt.getNextMinorVersionNumber(), ctxt.getType(), ctxt.getDoiProvider() ); + } } diff --git a/src/main/java/edu/harvard/iq/dataverse/workflow/internalspi/HttpSendReceiveClientStep.java b/src/main/java/edu/harvard/iq/dataverse/workflow/internalspi/HttpSendReceiveClientStep.java index 8d882de5303..3bbd294ee72 100644 --- a/src/main/java/edu/harvard/iq/dataverse/workflow/internalspi/HttpSendReceiveClientStep.java +++ b/src/main/java/edu/harvard/iq/dataverse/workflow/internalspi/HttpSendReceiveClientStep.java @@ -54,7 +54,7 @@ public WorkflowStepResult run(WorkflowContext context) { } } catch (Exception ex) { - logger.log(Level.SEVERE, "Error communicating with remote server: " + ex.getMessage(), ex ); + logger.log(Level.SEVERE, "Error communicating with remote server: " + ex.getMessage(), ex); return new Failure("Error executing request: " + ex.getLocalizedMessage(), "Cannot communicate with remote server."); } } @@ -66,6 +66,7 @@ public WorkflowStepResult resume(WorkflowContext context, Map in if ( pat.matcher(response).matches() ) { return OK; } else { + logger.log(Level.WARNING, "Remote system returned a bad response: {0}", externalData); return new Failure("Response from remote server did not match expected one (response:" +
response + ")"); } } diff --git a/src/main/resources/META-INF/persistence.xml b/src/main/resources/META-INF/persistence.xml index 9303aa98ea4..8b4e33858ac 100644 --- a/src/main/resources/META-INF/persistence.xml +++ b/src/main/resources/META-INF/persistence.xml @@ -15,8 +15,9 @@ [XML hunk content lost in extraction; only stripped-tag residue remained.]
[Markup-stripped residue of an XHTML diff followed here in the source; the only recoverable fragments are the EL conditions "or !empty termsOfUseAndAccess.conditions or !empty termsOfUseAndAccess.disclaimer" and, in a second hunk (@@ -303,17 +300,13 @@), "or !empty termsOfUseAndAccess.studyCompletion".]
diff --git a/src/main/webapp/dataset.xhtml b/src/main/webapp/dataset.xhtml index 78955d3ddae..b425fbc8de1 100755 --- a/src/main/webapp/dataset.xhtml +++ b/src/main/webapp/dataset.xhtml @@ -136,7 +136,7 @@ [hunk content lost in extraction]