Commit af41ae98 authored by Nikos Marinos

dockerised ftp

parent 30c158dc
......@@ -15,17 +15,18 @@ RUN cd /opt && wget http://ftp.mozilla.org/pub/firefox/releases/57.0.4/linux-x86
# RUN apt-get install -y default-jdk libstdc++6
RUN apt-get install -y default-jdk
COPY node-libxml node-libxml
WORKDIR node-libxml
RUN ["npm", "install"]
#COPY node-libxml node-libxml
#WORKDIR node-libxml
#RUN ["npm", "install"]
WORKDIR ${HOME}
COPY package.json yarn.lock ./
ENV NODE_ENV "development"
RUN npm install -g node-pre-gyp
# We do a development install because react-styleguidist is a dev dependency and we want to run tests
RUN [ "yarn", "install", "--check-files", "--frozen-lockfile" ]
RUN [ "yarn", "install" ]
# Remove cache and offline mirror
RUN [ "yarn", "cache", "clean"]
......@@ -40,6 +41,7 @@ COPY static static
COPY test test
COPY webpack webpack
COPY server server
COPY ./*.sh ./
ENV NODE_ENV ${NODE_ENV}
......
......@@ -75,11 +75,22 @@ Create the file `local-development.json` inside the `config` folder.
"editoria-typescript": "< editoria-typescript-recipe-id >"
}
},
"eutils-api-key": "< your-ncbi-eutils-api-key >",
"ftp_root": "/var/ftpdata/",
"ftp-users" : ["ftpuser1", "ftpuser2", "ftpuser3"]
"users" : [
{
"username": "ftpuser1",
"email": "john@example.com",
"password": "xxxx"
},
{
"username": "ftpuser2",
"email": "bloke@example.com",
"password": "yyyy"
}
]
}
```
......@@ -88,11 +99,7 @@ This will give your database a secret, as well as enable manuscript docx to HTML
It will accompany any NCBI eUtils calls with an API key ("eutils-api-key"), which allows privileged handling by the NCBI hosts. This is optional.
It will create PubSweet "dummy" user accounts ("ftp-users")
that will be the owners of the manuscripts uploaded by the FTP Bulk Upload service. They should
have the same username as the ftp users. Note that these are not the FTP credentials.
The FTP accounts are created separately on the FTP service (container). The FTP Bulk upload service
will monitor the "ftp_root" local folder for changes (file uploads).
It will also create PubSweet user accounts ("users").
Run the docker container for the database.
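For instance, using the `postgres` service defined in `docker-compose.yml` (the same service the `start:services` script brings up):

```bash
# Start only the database service in the background
docker-compose up -d postgres
```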
......
version: '3'
services:
app:
env_file:
- .env.local
ftpd_server:
volumes: # remember to replace /folder_on_disk/ with the path to where you want to store the files on the host machine
- "~/ftpdata:/home/ftpusers/"
- "~/passwd:/etc/pure-ftpd/passwd"
postgres:
ports:
- "5432:5432"
......@@ -6,13 +6,11 @@ services:
build:
context: .
dockerfile: ./Dockerfile
command: sh -c "./scripts/wait-for-it.sh postgres:5432 -s -t 40 -- npx pubsweet server"
# volumes:
# - ./:/home/xpub
command: sh -c "./scripts/wait-for-it.sh postgres:5432 -s -t 40 -- ./wrapperScript.sh"
volumes: # remember to replace /folder_on_disk/ with the path to where you want to store the files on the host machine
- "ftpdata:/home/xpub/ftpdata/"
depends_on:
# - java
- postgres
- ftpd_server
- minio
environment:
PGHOST: postgres
......@@ -60,8 +58,10 @@ services:
# ADDED_FLAGS: "--tls=2"
restart: always
volumes: # remember to replace /folder_on_disk/ with the path to where you want to store the files on the host machine
- "~/ftpdata/:/home/ftpusers/"
- "ftpdata:/home/ftpusers/"
- "~/passwd:/etc/pure-ftpd/passwd"
# env_file:
# - .env.ftp
ports:
- "21:21"
- "30000-30009:30000-30009"
......@@ -97,3 +97,4 @@ volumes:
postgres-volume:
minio_data: {}
minio_config: {}
ftpdata:
......@@ -36,7 +36,8 @@
"minio": "^7.0.1",
"moment": "^2.18.1",
"node-fetch": "^2.2.0",
"node-libxml": "file:node-libxml",
"node-libxml": "^3.2.3",
"node-pre-gyp": "^0.11.0",
"pdfjs-dist": "^2.0.489",
"prop-types": "^15.5.10",
"properties-reader": "0.0.16",
......@@ -72,7 +73,6 @@
"typeface-lora": "^0.0.54",
"typeface-open-sans": "^0.0.54",
"wax-editor-react": "^0.2.5",
"winston": "^2.4.0",
"xmldom": "^0.1.27",
"xpath": "0.0.27",
"xpub-journal": "^0.0.6",
......@@ -143,8 +143,9 @@
"start": "docker-compose up --build",
"start:production": "docker-compose -f ./docker-compose.yml -f ./docker-compose.prod.yml up --build",
"start:services": "docker-compose up postgres ftpd_server minio annotator-server",
"ftp:listener": "./wrapperScript.sh &",
"server": "yarn seed && yarn ftp:listener && pubsweet server",
"start:services:local": "docker-compose -f ./docker-compose.yml -f ./docker-compose.local.yml up postgres ftpd_server minio annotator-server",
"ftp:listener": "./runFtpMonitor.sh &",
"server": "pubsweet server",
"build": "NODE_ENV=production pubsweet build",
"rebuild:node-libxml": "./rebuild-node-libxml.sh"
},
......
#!/bin/bash
# Seed the PubSweet user accounts, then start the FTP bulk-upload monitor
yarn seed
./runFtpMonitor.sh
#!/bin/bash
cmd="node server/bulk/api"
# Start the first process
ps aux | grep "$cmd" | grep -q -v grep
PROCESS_1_STATUS=$?
if [ $PROCESS_1_STATUS -ne 0 ]; then
$cmd
status=$?
if [ $status -ne 0 ]; then
echo "Failed to start node server/bulk/api: $status"
exit $status
fi
else
echo "Process already running"
exit 1
fi
# Start the second process
#./my_second_process -D
#status=$?
#if [ $status -ne 0 ]; then
# echo "Failed to start my_second_process: $status"
# exit $status
#fi
# Naive check runs checks once a minute to see if either of the processes exited.
# This illustrates part of the heavy lifting you need to do if you want to run
# more than one service in a container. The container exits with an error
# if it detects that either of the processes has exited.
# Otherwise it loops forever, waking up every 60 seconds
while sleep 60; do
ps aux |grep "node server/bulk/api" |grep -q -v grep
PROCESS_1_STATUS=$?
# ps aux |grep my_second_process |grep -q -v grep
# PROCESS_2_STATUS=$?
# If the greps above find anything, they exit with 0 status
# If they are not both 0, then something is wrong
if [ $PROCESS_1_STATUS -ne 0 ]; then
# if [ $PROCESS_1_STATUS -ne 0 -o $PROCESS_2_STATUS -ne 0 ]; then
echo "One of the processes has already exited."
exit 1
fi
done
\ No newline at end of file
......@@ -6,7 +6,7 @@ const config = require('config')
const seed = () => {
try {
const ftpUsers = config.get('ftp-users')
const ftpUsers = config.get('users')
let counter = 0
ftpUsers.forEach(async user => {
await new User(user).save().catch(() => {
......
......@@ -35,9 +35,6 @@ We will be asked for the password (twice).
More information, along with more commands for managing users, can be found here: https://github.com/stilliard/docker-pure-ftpd
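For example, assuming the container is named `ftpd_server` as in `docker-compose.yml`, an account can be added with (a sketch; substitute your own username and home directory):

```bash
# Run pure-pw inside the running FTP container; -m also updates the password database
docker exec -it ftpd_server pure-pw useradd ftpuser1 \
  -f /etc/pure-ftpd/passwd/pureftpd.passwd -m -u ftpuser -d /home/ftpusers/ftpuser1
```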
##### Important:
The FTP service currently accepts only "active" FTP connections; "passive" connections are not accepted.
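Clients must therefore be configured for active mode. As an illustration (hypothetical host and credentials), curl selects active mode with `--ftp-port`:

```bash
# --ftp-port - forces active mode, reusing the control connection's address
curl --ftp-port - -u ftpuser1:xxxx ftp://localhost/
```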
### FTP user for tagging
Part of the application is the construction of a tar package for every manuscript to be sent
to the taggers. We need to create an FTP account, as above, so that the packages are made
......@@ -45,4 +42,12 @@ available to that location for the tagger to download from.
`pure-pw useradd tagger -f /etc/pure-ftpd/passwd/pureftpd.passwd -m -u ftpuser -d /home/ftpusers/tagger`
The `ftp_tagger: 'tagger'` value (currently in `development.js`) should match this FTP username.
\ No newline at end of file
The `ftp_tagger: 'tagger'` value (currently in `development.js`) should match this FTP username.
## Database user creation
The uploaded manuscripts should belong to PubSweet users, so these users need to be created first.
This can be accomplished with the user seeding feature described in the [main README file](../../../README.md).
They should have the same usernames as the FTP users. Note that these are not the FTP credentials;
the FTP accounts are created separately on the FTP service (container). The FTP Bulk Upload service
will monitor the "ftp_root" local folder for changes (file uploads).
\ No newline at end of file
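A minimal sketch of that seeding step, as wired into `wrapperScript.sh`:

```bash
# Creates the PubSweet accounts listed under "users" in the config
yarn seed
```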
......@@ -15,12 +15,12 @@ const config = require('config')
// global.globalPath = ''
// global.globalTmpPath = ''
// global.manId = ''
// global.token = ''
global.token = ''
// Initialize watcher.
const rootPath = `${process.env.HOME}/${config.get('ftp_directory')}/`
const pubsweetServer = config.get('pubsweet-server.baseUrl')
const ftpUsers = config.get('ftp-users')
const ftpUsers = config.get('users')
const ignoreTaggerFolder = `${rootPath}${config.get('ftp_tagger')}/*`
const watcher = chokidar.watch(`${rootPath}**/*.tar.gz`, {
......@@ -34,7 +34,7 @@ const watcher = chokidar.watch(`${rootPath}**/*.tar.gz`, {
function tidyUp(filePath, tmpPath, manId, emsid, isError) {
try {
deleteManuscript(manId)
if (isError) deleteManuscript(manId)
const parentDir = path.dirname(filePath)
const fileName = path.basename(filePath)
const fileNameExt = isError ? 'ERROR.' : `loaded.mid-${emsid}.`
......
......@@ -7,5 +7,19 @@
"test": "echo \"Error: no test specified\" && exit 1"
},
"author": "",
"license": "ISC"
"license": "ISC",
"dependencies": {
"child_process": "^1.0.2",
"chokidar": "^2.0.4",
"form-data": "^2.3.3",
"fs": "^0.0.1-security",
"lodash": "^4.17.11",
"mime-types": "^2.1.21",
"node-fetch": "^2.2.1",
"path": "^0.12.7",
"tmp": "^0.0.33",
"winston": "^3.1.0",
"xmldom": "^0.1.27",
"xpath": "^0.0.27"
}
}
......@@ -12,9 +12,7 @@ const admin = config.get('dbManager')
const pubsweetServer = config.get('pubsweet-server.baseUrl')
let manId
global.destination = ''
const ftpLocation = `${process.env.HOME}/${config.get(
'ftp_directory',
)}/${config.get('ftp_tagger')}`
const ftpLocation = `${process.env.FTP_DIR}/${process.env.ftp_tagger}`
const manuscriptIndex = Math.floor(Math.random() * Math.floor(9999999))
const authBearer = passport.authenticate('bearer', { session: false })
......
#!/bin/bash
# Start the first process
ps aux |grep "node server/bulk/api" |grep -q -v grep
PROCESS_1_STATUS=$?
if [ $PROCESS_1_STATUS -ne 0 ]; then
node server/bulk/api
status=$?
if [ $status -ne 0 ]; then
echo "Failed to start node server/bulk/api: $status"
exit $status
fi
else
echo "Process already running"
exit 1
fi
# Start the second process
#./my_second_process -D
#status=$?
#if [ $status -ne 0 ]; then
# echo "Failed to start my_second_process: $status"
# exit $status
#fi
# Naive check runs checks once a minute to see if either of the processes exited.
# This illustrates part of the heavy lifting you need to do if you want to run
# more than one service in a container. The container exits with an error
# if it detects that either of the processes has exited.
# Otherwise it loops forever, waking up every 60 seconds
while sleep 60; do
ps aux |grep "node server/bulk/api" |grep -q -v grep
PROCESS_1_STATUS=$?
# ps aux |grep my_second_process |grep -q -v grep
# PROCESS_2_STATUS=$?
# If the greps above find anything, they exit with 0 status
# If they are not both 0, then something is wrong
if [ $PROCESS_1_STATUS -ne 0 ]; then
# if [ $PROCESS_1_STATUS -ne 0 -o $PROCESS_2_STATUS -ne 0 ]; then
echo "One of the processes has already exited."
exit 1
fi
done
\ No newline at end of file
#sleep 30
cmd="npx pubsweet server"
$cmd &
# Wait up to 5 minutes (15 attempts) for the server to respond, checking every 20 seconds
COUNTER=0
while [ $COUNTER -lt 15 ]; do
RUNNING=$(curl --silent --connect-timeout 20 "http://localhost:3000/" | grep "/assets/")
if [ -n "$RUNNING" ] ; then
echo "xPub is running"
#echo "Creating the Users"
#node scripts/adduser.js rakeshnambiar rakeshnbr@ebi.ac.uk Password_01 false
./post_init.sh
exit 0
fi
echo "Waiting for xPub..."
sleep 20
let COUNTER=COUNTER+1
done
echo "ERROR: xPub is not running"
exit 1