diff --git a/transfer_service/config/vos_ts.conf b/transfer_service/config/vos_ts.conf
deleted file mode 100644
index 167bf8d77d70c12f3b79d8657c069a6f28b73df0..0000000000000000000000000000000000000000
--- a/transfer_service/config/vos_ts.conf
+++ /dev/null
@@ -1,112 +0,0 @@
-###################
-# System services #
-###################
-
-# Postgres
-[file_catalog]
-; hostname or IP address of the machine that hosts the Postgres database
-host = file_catalog
-; port at which the database is available, default is 5432 TCP
-port = 5432
-; database name
-db = vospace_testdb
-; user name, default is 'postgres'
-user = postgres
-; password, default is 'postgres'
-password = postgres
-
-# Redis
-[job_cache]
-; hostname or IP address of the machine that hosts the Redis cache system
-host = job_cache
-; port at which the cache service is available, default is 6379 TCP
-port = 6379
-; db index representing the db that stores the scheduling queues, default is 0
-db_sched = 0
-
-# Spectrum Archive
-[spectrum_archive]
-; hostname or IP address of the tape library frontend
-host = tape-fe.ia2.inaf.it
-; SSH port
-port = 22
-; login user
-user = root
-; SSH private key file absolute path
-pkey_file_path = /root/.ssh/tape_rsa
-; tape pool name
-tape_pool = pl_generic_rw_01
-
-
-############################
-# VOSpace backend settings #
-############################
-
-[checksum]
-; suffix for files containing MD5 checksums
-md5_file_suffix = -md5sum.txt
-; buffer size in bytes when reading a chunk of data, default is 4096 B
-file_buffer_size = 4096 B
-
-[file_grouper]
-; minimum number of files contained by a 'leaf' directory, default is 1000
-min_num_files = 1000
-; maximum size for a 'leaf' directory, default is 100 GB
-max_dir_size = 100 GB
-
-[scheduling]
-; maximum number of jobs within a 'pending' queue, default is 32
-max_pending_jobs = 32
-; maximum number of jobs within a 'ready' queue, default is 4
-max_ready_jobs = 4
-; maximum number of jobs within a 'terminated' queue, default is 8
-max_terminated_jobs = 8
-; minimum time interval in seconds between two consecutive checks on
-; job queues, default is 15 s, minimum allowed is 10 s
-exec_wait_time = 15
-
-[transfer]
-; split data to be retrieved in blocks of a given size
-block_size = 1 TB
-
-[cleanup]
-; Physically delete from disk all nodes previously deleted by the user via ui,
-; that are older than a given amount of time specified here below
-days = 0
-hours = 0
-minutes = 1
-seconds = 30
-
-[mail]
-smtp_server = smtp2.oats.inaf.it
-smtp_port = 25
-no_reply_email = noreply-vospace@inaf.it
-admin_email = cristiano.urban@inaf.it
-
-[logging]
-; log level, allowed values are: DEBUG, INFO, WARNING, ERROR, CRITICAL
-; Default level is INFO
-log_level = DEBUG
-; format of log records
-log_format = %(asctime)s - %(name)s - %(levelname)s - %(message)s
-; log queue name
-log_queue = vos_ts_logs
-; physical path on disk where log files are stored
-log_dir = /var/log/vos_ts
-; log file name
-log_file = vos_ts.log
-; dir to store results of storage and import operations
-res_dir = ${log_dir}/results
-
-
-####################
-# Storage settings #
-####################
-
-[transfer_node]
-; user folder
-base_path = /home/{username}
-; data entry point
-store_path = ${base_path}/store
-; data exit point
-retrieve_path = ${base_path}/retrieve
diff --git a/transfer_service/db_connector.py b/transfer_service/db_connector.py
index a47254ce1452deee0d0d2813c4fb9d0ae8653b0f..f9ae0dec66726c3f4289035e18d2c7580e23e31a 100644
--- a/transfer_service/db_connector.py
+++ b/transfer_service/db_connector.py
@@ -401,6 +401,18 @@ class DbConnector(object):
                 if not conn.closed:
                     conn.rollback()
         return result[0]["user_name"]
+
+    def getUserEmail(self, userId):
+        """Returns the user email address for a given user id."""
+        with self.getConnection() as conn:
+            try:
+                cursor = conn.cursor(cursor_factory = RealDictCursor)
+                cursor.execute("SELECT e_mail FROM users WHERE user_id = %s;", (userId,))
+                result = cursor.fetchall()
+            except Exception as e:
+                if not conn.closed:
+                    conn.rollback()
+        return result[0]["e_mail"]
 
     ##### Storage #####
 
@@ -916,3 +928,26 @@ class DbConnector(object):
                 if not conn.closed:
                     conn.rollback()
         return True
+
+    ##### Users #####
+
+    def insertUser(self, userId, username, email):
+        """Inserts users data."""
+        with self.getConnection() as conn:
+            try:
+                cursor = conn.cursor(cursor_factory = RealDictCursor)
+                cursor.execute("""
+                               INSERT INTO users(user_id,
+                                                 user_name,
+                                                 e_mail)
+                               VALUES (%s, %s, %s)
+                               ON CONFLICT (user_id)
+                               DO NOTHING;
+                               """,
+                               (userId,
+                                username,
+                                email,))
+                conn.commit()
+            except Exception as e:
+                if not conn.closed:
+                    conn.rollback()
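
Usage note (not part of the patch): a minimal sketch of how the two new DbConnector helpers could be combined by calling code. The wrapper function name, its arguments, and the example values are hypothetical; only insertUser and getUserEmail come from this diff, and how the DbConnector instance itself is constructed is not shown here.

    def register_and_fetch_email(db, user_id, user_name, user_email):
        """Hypothetical caller: store the user, then read the address back.

        db is an already constructed DbConnector instance; its connection
        parameters and pooling are outside the scope of this sketch.
        """
        # Safe to call repeatedly: insertUser uses ON CONFLICT (user_id) DO NOTHING.
        db.insertUser(user_id, user_name, user_email)
        # getUserEmail returns the e_mail column for the given user_id.
        return db.getUserEmail(user_id)

    # Example (illustrative values only):
    # email = register_and_fetch_email(db, "1234", "someuser", "someuser@example.org")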