Merge pull request #2817 from antgonza/py3.6-pet
python3.6 qiita_pet
charles-cowart authored Feb 22, 2019
2 parents c9f7338 + 64f27d5 commit 57fc48f
Showing 28 changed files with 182 additions and 157 deletions.
qiita_db/metadata_template/test/test_prep_template.py (2 changes: 1 addition & 1 deletion)
@@ -1204,7 +1204,7 @@ def test_qiime_map_fp(self):
pt = qdb.metadata_template.prep_template.PrepTemplate(1)
exp = join(qdb.util.get_mountpoint('templates')[0][1],
'1_prep_1_qiime_[0-9]*-[0-9]*.txt')
- self.assertRegexpMatches(pt.qiime_map_fp, exp)
+ self.assertRegex(pt.qiime_map_fp, exp)

def test_check_restrictions(self):
obs = self.tester.check_restrictions(
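
The only change in this file is the Python 3 rename of unittest's regex assertion: assertRegexpMatches was deprecated in favor of assertRegex in Python 3.2 and removed in later releases. A minimal sketch of the new spelling, with an illustrative filename rather than a real Qiita path:

```python
import unittest


class RegexAssertionExample(unittest.TestCase):
    def test_qiime_map_filename(self):
        # Py2-era spelling: self.assertRegexpMatches(text, pattern)
        # Py3 spelling:     self.assertRegex(text, pattern)
        self.assertRegex('1_prep_1_qiime_19700101-000000.txt',
                         r'1_prep_1_qiime_[0-9]*-[0-9]*\.txt')


if __name__ == '__main__':
    unittest.main()
```
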
qiita_db/metadata_template/test/test_sample_template.py (6 changes: 4 additions & 2 deletions)
@@ -1349,7 +1349,9 @@ def test_update_numpy(self):
['%s.Sample1' % self.new_study.id, {
'bool_col': 'false', 'date_col': '2015-09-01 00:00:00'}],
['qiita_sample_column_names', {
- 'columns': ['bool_col', 'date_col']}]]
+ 'columns': sorted(['bool_col', 'date_col'])}]]
+ # making sure they are always in the same order
+ obs[2][1]['columns'] = sorted(obs[2][1]['columns'])
self.assertEqual(sorted(obs), sorted(exp))

def test_generate_files(self):
@@ -2232,7 +2234,7 @@ def test_delete_column_specimen_id(self):
self.metadata, self.new_study)
self.new_study.specimen_id_column = 'latitude'

- with self.assertRaisesRegexp(
+ with self.assertRaisesRegex(
qdb.exceptions.QiitaDBOperationNotPermittedError,
'"latitude" cannot be deleted, this column is currently '
r'selected as the tube identifier \(specimen_id_column\)'):
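
Many of the test fixes in this commit share one pattern: Python 3 randomizes hash seeds per run, so any ordering that falls out of a set (or, before 3.7, a dict) is not reproducible, and positional comparisons flake. The cure used throughout is to pass both the observed and the expected collections through sorted(). A small sketch of the idea:

```python
# Order coming out of a set is not reproducible across interpreter
# runs under Python 3's hash randomization.
obs_columns = list({'date_col', 'bool_col'})
exp_columns = sorted(['bool_col', 'date_col'])

# Sorting both sides makes the assertion order-independent.
assert sorted(obs_columns) == exp_columns
```
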
qiita_db/portal.py (2 changes: 1 addition & 1 deletion)
@@ -213,7 +213,7 @@ def _check_studies(self, studies):
sql = "SELECT study_id FROM qiita.study WHERE study_id IN %s"
qdb.sql_connection.TRN.add(sql, [tuple(studies)])
existing = qdb.sql_connection.TRN.execute_fetchflatten()
- if len(existing) != len(studies):
+ if len(existing) != len(list(studies)):
bad = map(str, set(studies).difference(existing))
raise qdb.exceptions.QiitaDBError(
"The following studies do not exist: %s" % ", ".join(bad))
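
In Python 3, map() returns a lazy iterator with no __len__, so len(studies) raises TypeError whenever a caller hands in a map object; wrapping it in list() restores the Python 2 behaviour. A minimal reproduction:

```python
studies = map(int, ['1', '2', '3'])   # lazy iterator in Python 3

try:
    len(studies)
except TypeError:
    print('map objects have no len() in Python 3')

# list() materializes the iterator so it can be counted -- note that
# this also consumes it, so real code should materialize only once.
print(len(list(studies)))  # 3
```
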
qiita_db/study.py (3 changes: 2 additions & 1 deletion)
@@ -1018,7 +1018,8 @@ def tags(self):
sql = """SELECT study_tag
FROM qiita.study_tags
LEFT JOIN qiita.per_study_tags USING (study_tag)
- WHERE study_id = {0}""".format(self._id)
+ WHERE study_id = {0}
+ ORDER BY study_tag""".format(self._id)
qdb.sql_connection.TRN.add(sql)
return [t[0] for t in qdb.sql_connection.TRN.execute_fetchindex()]

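
Without an explicit ORDER BY, PostgreSQL makes no promise about row order, so a test that compares the tag list positionally can pass or fail from run to run; the added ORDER BY study_tag pins it down. A self-contained illustration, with sqlite3 standing in for Qiita's PostgreSQL connection:

```python
import sqlite3

conn = sqlite3.connect(':memory:')
conn.execute('CREATE TABLE study_tags (study_id INTEGER, study_tag TEXT)')
conn.executemany('INSERT INTO study_tags VALUES (?, ?)',
                 [(1, 'soil'), (1, 'cannabis'), (1, 'microbiome')])

# ORDER BY makes the result deterministic regardless of insertion or
# storage order, which is what the flaky test needed.
rows = conn.execute('SELECT study_tag FROM study_tags '
                    'WHERE study_id = ? ORDER BY study_tag', (1,))
print([tag for (tag,) in rows])  # ['cannabis', 'microbiome', 'soil']
```
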
qiita_db/test/test_processing_job.py (8 changes: 4 additions & 4 deletions)
@@ -1007,10 +1007,10 @@ class ProcessingJobDuplicated(TestCase):
def test_create_duplicated(self):
job = _create_job()
job._set_status('success')
- with self.assertRaisesRegexp(ValueError, 'Cannot create job because '
- 'the parameters are the same as jobs '
- 'that are queued, running or already '
- 'have succeeded:') as context:
+ with self.assertRaisesRegex(ValueError, 'Cannot create job because '
+ 'the parameters are the same as jobs '
+ 'that are queued, running or already '
+ 'have succeeded:') as context:
_create_job(False)

# If it failed it's because we have jobs in non finished status so
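
assertRaisesRegexp gets the same Python 3 rename, and the context-manager form carries over unchanged, so the code after the with block can still inspect context.exception. Sketch:

```python
import unittest


class RaisesRegexExample(unittest.TestCase):
    def test_duplicate_job(self):
        with self.assertRaisesRegex(ValueError,
                                    'Cannot create job') as context:
            raise ValueError('Cannot create job because the parameters '
                             'are the same as jobs that are queued')
        # The raised exception remains available for further checks.
        self.assertIn('queued', str(context.exception))


if __name__ == '__main__':
    unittest.main()
```
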
@@ -111,9 +111,9 @@ def test_post_create_analysis_handler(self):
args = {'name': 'New Test Analysis',
'description': 'Test Analysis Description'}
response = self.post('/analysis/create/', args)
- self.assertRegexpMatches(
+ self.assertRegex(
response.effective_url,
- r"http://localhost:\d+/analysis/description/\d+/")
+ r"http://127.0.0.1:\d+/analysis/description/\d+/")
self.assertEqual(response.code, 200)

# The new analysis id is located at the -2 position (see regex above)
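
Besides the assertion rename, the expected host changes from localhost to 127.0.0.1, presumably matching what the upgraded Tornado test client reports in effective_url. The "-2 position" comment refers to pulling the analysis id out of the redirect URL; a sketch with a made-up URL and port:

```python
import re

url = 'http://127.0.0.1:40123/analysis/description/5/'
assert re.search(r'http://127.0.0.1:\d+/analysis/description/\d+/', url)

# With a trailing slash, split('/') ends in '', so the id sits at -2.
analysis_id = int(url.split('/')[-2])
print(analysis_id)  # 5
```
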
qiita_pet/handlers/api_proxy/prep_template.py (2 changes: 1 addition & 1 deletion)
@@ -372,7 +372,7 @@ def prep_template_post_req(study_id, user_id, prep_template, data_type,
# join all the warning messages into one. Note that this info
# will be ignored if an exception is raised
if warns:
- msg = '\n'.join(set(str(w) for w in warns))
+ msg = '\n'.join(set(str(w.message) for w in warns))
status = 'warning'
except Exception as e:
# Some error occurred while processing the prep template
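
warns here holds warnings.WarningMessage records (as produced by warnings.catch_warnings(record=True)); str() on the wrapper yields a verbose field-by-field summary rather than the warning text, hence the switch to w.message. Runnable sketch:

```python
import warnings

with warnings.catch_warnings(record=True) as warns:
    warnings.simplefilter('always')
    warnings.warn('column "foo" is all empty values')

w = warns[0]
print(str(w))          # verbose WarningMessage summary, not just the text
print(str(w.message))  # column "foo" is all empty values

msg = '\n'.join(set(str(w.message) for w in warns))
print(msg)
```
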
qiita_pet/handlers/api_proxy/tests/test_artifact.py (18 changes: 9 additions & 9 deletions)
@@ -207,15 +207,15 @@ def test_artifact_patch_request_errors(self):
def test_artifact_get_prep_req(self):
obs = artifact_get_prep_req('test@foo.bar', [4])
exp = {'status': 'success', 'msg': '', 'data': {
- 4: ['1.SKB2.640194', '1.SKM4.640180', '1.SKB3.640195',
- '1.SKB6.640176', '1.SKD6.640190', '1.SKM6.640187',
- '1.SKD9.640182', '1.SKM8.640201', '1.SKM2.640199',
- '1.SKD2.640178', '1.SKB7.640196', '1.SKD4.640185',
- '1.SKB8.640193', '1.SKM3.640197', '1.SKD5.640186',
- '1.SKB1.640202', '1.SKM1.640183', '1.SKD1.640179',
- '1.SKD3.640198', '1.SKB5.640181', '1.SKB4.640189',
- '1.SKB9.640200', '1.SKM9.640192', '1.SKD8.640184',
- '1.SKM5.640177', '1.SKM7.640188', '1.SKD7.640191']}}
+ 4: ['1.SKB1.640202', '1.SKB2.640194', '1.SKB3.640195',
+ '1.SKB4.640189', '1.SKB5.640181', '1.SKB6.640176',
+ '1.SKB7.640196', '1.SKB8.640193', '1.SKB9.640200',
+ '1.SKD1.640179', '1.SKD2.640178', '1.SKD3.640198',
+ '1.SKD4.640185', '1.SKD5.640186', '1.SKD6.640190',
+ '1.SKD7.640191', '1.SKD8.640184', '1.SKD9.640182',
+ '1.SKM1.640183', '1.SKM2.640199', '1.SKM3.640197',
+ '1.SKM4.640180', '1.SKM5.640177', '1.SKM6.640187',
+ '1.SKM7.640188', '1.SKM8.640201', '1.SKM9.640192']}}
self.assertEqual(obs, exp)

obs = artifact_get_prep_req('demo@microbio.me', [4])
qiita_pet/handlers/api_proxy/tests/test_prep_template.py (35 changes: 17 additions & 18 deletions)
@@ -119,7 +119,7 @@ def test_prep_template_get_req(self):
list(obs.keys()), ['status', 'message', 'template'])
self.assertEqual(obs['status'], 'success')
self.assertEqual(obs['message'], '')
- self.assertEqual(obs['template'].keys(), [
+ self.assertCountEqual(obs['template'].keys(), [
'1.SKB2.640194', '1.SKM4.640180', '1.SKB3.640195', '1.SKB6.640176',
'1.SKD6.640190', '1.SKM6.640187', '1.SKD9.640182', '1.SKM8.640201',
'1.SKM2.640199', '1.SKD2.640178', '1.SKB7.640196', '1.SKD4.640185',
@@ -404,8 +404,8 @@ def test_prep_template_jobs_get_req(self):
self._wait_for_parallel_job('prep_template_%s' % pt.id)
obs = prep_template_jobs_get_req(pt.id, 'test@foo.bar')
self.assertEqual(len(obs), 1)
- self.assertEqual(obs.values(),
- [{'error': '', 'status': 'success', 'step': None}])
+ self.assertCountEqual(
+ obs.values(), [{'error': '', 'status': 'success', 'step': None}])

obs = prep_template_jobs_get_req(pt.id, 'demo@microbio.me')
exp = {'status': 'error',
@@ -435,21 +435,20 @@ def test_prep_template_post_req(self):
'16S', name=" ")
exp = {'status': 'warning',
'message': [
- ('Some columns required to generate a QIIME-compliant '
- 'mapping file are not present in the template. A '
- 'placeholder value (XXQIITAXX) has been used to populate '
- 'these columns. Missing columns: BarcodeSequence, '
- 'LinkerPrimerSequence'),
- ('Some functionality will be disabled due to missing '
- 'columns:'),
- ('\tDemultiplexing with multiple input files disabled.: '
- 'barcode, primer, run_prefix;'),
- '\tDemultiplexing disabled.: barcode;',
- ('\tEBI submission disabled: center_name, '
- 'experiment_design_description, instrument_model, '
- 'library_construction_protocol, platform.'),
- ('See the Templates tutorial for a description of these '
- 'fields.')],
+ 'Both a converter and dtype were specified for column '
+ 'sample_name - only the converter will be used', 'Some '
+ 'functionality will be disabled due to missing columns:',
+ '\tEBI submission disabled: center_name, '
+ 'experiment_design_description, instrument_model, '
+ 'library_construction_protocol, platform;',
+ '\tDemultiplexing disabled.: barcode;', '\tDemultiplexing '
+ 'with multiple input files disabled.: barcode, primer, '
+ 'run_prefix.', 'See the Templates tutorial for a '
+ 'description of these fields.', 'Some columns required to '
+ 'generate a QIIME-compliant mapping file are not present '
+ 'in the template. A placeholder value (XXQIITAXX) '
+ 'has been used to populate these columns. Missing columns: '
+ 'BarcodeSequence, LinkerPrimerSequence'],
'file': 'update.txt',
'id': 'ignored in test'}

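
Two Python 3 behaviours meet in this file: dict.keys() now returns a view, which never compares equal to a list, and key order is not guaranteed, so assertCountEqual (Python 3's name for assertItemsEqual) is used to compare contents regardless of order. The reworked warning list in test_prep_template_post_req also picks up a converter/dtype message, apparently emitted by the newer pandas on this stack. A sketch of the keys comparison:

```python
import unittest


class CountEqualExample(unittest.TestCase):
    def test_template_keys(self):
        template = {'1.SKB2.640194': {}, '1.SKM4.640180': {}}
        # assertEqual would fail here: a dict_keys view is never == a list.
        self.assertCountEqual(template.keys(),
                              ['1.SKM4.640180', '1.SKB2.640194'])


if __name__ == '__main__':
    unittest.main()
```
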
qiita_pet/handlers/api_proxy/tests/test_processing.py (2 changes: 1 addition & 1 deletion)
@@ -114,7 +114,7 @@ def test_workflow_handler_post_req(self):
'"rev_comp_mapping_barcodes": false, '
'"min_per_read_length_fraction": 0.75, "sequence_max_n": 0}')
obs = workflow_handler_post_req("test@foo.bar", 1, params)
- self.assertRegexpMatches(
+ self.assertRegex(
obs.pop('message'), 'Cannot create job because the parameters are '
'the same as jobs that are queued, running or already have '
'succeeded:\n')
qiita_pet/handlers/api_proxy/tests/test_studies.py (30 changes: 21 additions & 9 deletions)
@@ -347,8 +347,11 @@ def test_study_files_get_req_multiple(self):
'remaining': ['uploaded_file.txt'], 'message': '',
'file_types': [
('raw_forward_seqs', True,
- ['test_2.R1.fastq.gz', 'test_1.R1.fastq.gz']),
+ sorted(['test_2.R1.fastq.gz', 'test_1.R1.fastq.gz'])),
('raw_reverse_seqs', False, [])]}
+ # making sure they are always in the same order
+ oft = obs['file_types'][0]
+ obs['file_types'][0] = (oft[0], oft[1], sorted(oft[2]))
self.assertEqual(obs, exp)

# let's add reverse
@@ -361,10 +364,15 @@
'shared@foo.bar', 1, pt.id, 'per_sample_FASTQ')
exp = {'status': 'success', 'num_prefixes': 2, 'artifacts': [],
'remaining': ['uploaded_file.txt'], 'message': '',
- 'file_types': [('raw_forward_seqs', True,
- ['test_2.R1.fastq.gz', 'test_1.R1.fastq.gz']),
- ('raw_reverse_seqs', False,
- ['test_2.R2.fastq.gz', 'test_1.R2.fastq.gz'])]}
+ 'file_types': [
+ ('raw_forward_seqs', True, sorted(
+ ['test_2.R1.fastq.gz', 'test_1.R1.fastq.gz'])),
+ ('raw_reverse_seqs', False, sorted(
+ ['test_2.R2.fastq.gz', 'test_1.R2.fastq.gz']))]}
+ # making sure they are always in the same order
+ oft = obs['file_types']
+ obs['file_types'][0] = (oft[0][0], oft[0][1], sorted(oft[0][2]))
+ obs['file_types'][1] = (oft[1][0], oft[1][1], sorted(oft[1][2]))
self.assertEqual(obs, exp)

# let's add an extra file that matches
@@ -395,11 +403,15 @@ def test_study_files_get_req_multiple(self):
'artifacts': [(1, 'Identification of the Microbiomes for '
'Cannabis Soils (1) - Raw data 1 (1)')],
'file_types': [
- ('raw_barcodes', True,
- ['test_2.R1.fastq.gz', 'test_1.R1.fastq.gz']),
- ('raw_forward_seqs', True,
- ['test_2.R2.fastq.gz', 'test_1.R2.fastq.gz']),
+ ('raw_barcodes', True, sorted(
+ ['test_2.R1.fastq.gz', 'test_1.R1.fastq.gz'])),
+ ('raw_forward_seqs', True, sorted(
+ ['test_2.R2.fastq.gz', 'test_1.R2.fastq.gz'])),
('raw_reverse_seqs', False, ['test_1.R3.fastq.gz'])]}
+ # making sure they are always in the same order
+ oft = obs['file_types']
+ obs['file_types'][0] = (oft[0][0], oft[0][1], sorted(oft[0][2]))
+ obs['file_types'][1] = (oft[1][0], oft[1][1], sorted(oft[1][2]))
self.assertEqual(obs, exp)

PREP.delete(pt.id)
@@ -393,7 +393,8 @@ def test_get_artifact_summary_handler(self):
summary = relpath(a.html_summary_fp[1], qiita_config.base_data_dir)
response = self.get('/artifact/html_summary/%s' % summary)
self.assertEqual(response.code, 200)
- self.assertEqual(response.body, '<b>HTML TEST - not important</b>\n')
+ self.assertEqual(response.body.decode('ascii'),
+ '<b>HTML TEST - not important</b>\n')


if __name__ == '__main__':
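
Under Python 3, Tornado returns response.body as bytes, and bytes never compare equal to str, so the body is decoded before the comparison. Minimal illustration:

```python
body = b'<b>HTML TEST - not important</b>\n'  # what the handler served

assert body != '<b>HTML TEST - not important</b>\n'   # bytes != str in Py3
assert body.decode('ascii') == '<b>HTML TEST - not important</b>\n'
```
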
qiita_pet/handlers/rest/study_preparation.py (4 changes: 2 additions & 2 deletions)
@@ -41,7 +41,7 @@ def post(self, study_id, *args, **kwargs):
p = PrepTemplate.create(data, study_id, data_type,
investigation_type)
except QiitaError as e:
- self.fail(e.message, 406)
+ self.fail(str(e), 406)
return

self.write({'id': p.id})
@@ -80,7 +80,7 @@ def post(self, study_id, prep_id):
artifact_deets['artifact_name'],
p)
except QiitaError as e:
- self.fail(e.message, 406)
+ self.fail(str(e), 406)
return

self.write({'id': art.id})
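
Exception objects lost the .message attribute in Python 3 (it was deprecated as far back as 2.6); str(e) is the spelling that works on both lines. Sketch:

```python
try:
    raise ValueError('prep template information is malformed')
except ValueError as e:
    # e.message would raise AttributeError on Python 3.
    print(str(e))  # prep template information is malformed
```
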
qiita_pet/handlers/study_handlers/edit_handlers.py (50 changes: 23 additions & 27 deletions)
@@ -70,9 +70,7 @@ def __init__(self, study=None, **kwargs):

# Get people from the study_person table to populate the PI and
# lab_person fields
- choices = [(sp.id, u"%s, %s"
- % (sp.name.decode('utf-8'),
- sp.affiliation.decode('utf-8')))
+ choices = [(sp.id, u"%s, %s" % (sp.name, sp.affiliation))
for sp in StudyPerson.iter()]
choices.insert(0, ('', ''))

@@ -83,21 +81,19 @@
if study:
study_info = study.info

- self.study_title.data = study.title.decode('utf-8')
- self.study_alias.data = study_info['study_alias'].decode('utf-8')
+ self.study_title.data = study.title
+ self.study_alias.data = study_info['study_alias']
dois = []
pids = []
for p, is_doi in study.publications:
if is_doi:
dois.append(p)
else:
pids.append(p)
- self.publication_doi.data = ",".join(dois).decode('utf-8')
- self.publication_pid.data = ",".join(pids).decode('utf-8')
- self.study_abstract.data = study_info[
- 'study_abstract'].decode('utf-8')
- self.study_description.data = study_info[
- 'study_description'].decode('utf-8')
+ self.publication_doi.data = ",".join(dois)
+ self.publication_pid.data = ",".join(pids)
+ self.study_abstract.data = study_info['study_abstract']
+ self.study_description.data = study_info['study_description']
self.principal_investigator.data = study_info[
'principal_investigator'].id
self.lab_person.data = (study_info['lab_person'].id
@@ -254,19 +250,20 @@ def post(self, study=None):
lab_person = None

# TODO: MIXS compliant? Always true, right?
+ fd = form_data.data
info = {
'lab_person_id': lab_person,
'principal_investigator_id': PI,
'metadata_complete': False,
'mixs_compliant': True,
- 'study_description': form_data.data['study_description'][0],
- 'study_alias': form_data.data['study_alias'][0],
- 'study_abstract': form_data.data['study_abstract'][0]}
+ 'study_description': fd['study_description'][0].decode('utf-8'),
+ 'study_alias': fd['study_alias'][0].decode('utf-8'),
+ 'study_abstract': fd['study_abstract'][0].decode('utf-8')}

- if 'timeseries' in form_data.data and form_data.data['timeseries']:
- info['timeseries_type_id'] = form_data.data['timeseries'][0]
+ if 'timeseries' in fd and fd['timeseries']:
+ info['timeseries_type_id'] = fd['timeseries'][0].decode('utf-8')

- study_title = form_data.data['study_title'][0]
+ study_title = fd['study_title'][0].decode('utf-8')

if the_study:
# We are under editing, so just update the values
@@ -275,35 +272,34 @@

msg = ('Study <a href="%s/study/description/%d">%s</a> '
'successfully updated' %
- (qiita_config.portal_dir, the_study.id,
- form_data.data['study_title'][0]))
+ (qiita_config.portal_dir, the_study.id, study_title))
else:
# create the study
# TODO: Fix this EFO once ontology stuff from emily is added
the_study = Study.create(self.current_user, study_title, info=info)

msg = ('Study <a href="%s/study/description/%d">%s</a> '
'successfully created' %
- (qiita_config.portal_dir, the_study.id,
- form_data.data['study_title'][0]))
+ (qiita_config.portal_dir, the_study.id, study_title))

# Add the environmental packages, this attribute can only be edited
# if the study is not public, otherwise this cannot be changed
if isinstance(form_data, StudyEditorExtendedForm):
- the_study.environmental_packages = form_data.data[
- 'environmental_packages']
+ vals = [
+ eval(v).decode('utf-8') for v in fd['environmental_packages']]
+ the_study.environmental_packages = vals

pubs = []
- dois = form_data.data['publication_doi']
+ dois = fd['publication_doi']
if dois and dois[0]:
# The user can provide a comma-separated list
- dois = dois[0].split(',')
+ dois = dois[0].decode('utf-8').split(',')
# Make sure that we strip the spaces from the DOIs
pubs.extend([(doi.strip(), True) for doi in dois])
- pids = form_data.data['publication_pid']
+ pids = fd['publication_pid']
if pids and pids[0]:
# The user can provide a comma-separated list
- pids = pids[0].split(',')
+ pids = pids[0].decode('utf-8').split(',')
# Make sure that we strip the spaces from the pubmed ids
pubs.extend([(pid.strip(), False) for pid in pids])
the_study.publications = pubs
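
This handler collects both halves of the str/bytes migration: values that are already str in Python 3 must not be decoded again (str has no .decode()), while Tornado form arguments still arrive as lists of bytes and now need explicit decoding. A sketch of both cases; the field names mirror the handler, the values are invented:

```python
title = 'Identification of the Microbiomes for Cannabis Soils'
# Py2 habit: title.decode('utf-8') -- on Py3 str has no .decode(),
# which is why the old decode calls above were simply dropped.

# Tornado's request.arguments maps field names to lists of bytes:
fd = {'study_title': [b'Identification of the Microbiomes for '
                      b'Cannabis Soils'],
      'publication_doi': [b'10.100/123456, 10.100/7891011']}

study_title = fd['study_title'][0].decode('utf-8')
dois = [d.strip()
        for d in fd['publication_doi'][0].decode('utf-8').split(',')]
print(study_title == title, dois)
```
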
qiita_pet/handlers/study_handlers/listing_handlers.py (3 changes: 2 additions & 1 deletion)
@@ -62,7 +62,8 @@ def get(self):
text = self.get_argument('text')
vals = r_client.execute_command('zrangebylex', 'qiita-usernames',
u'[%s' % text, u'[%s\xff' % text)
- self.write({'results': [{'id': s, 'text': s} for s in vals]})
+ self.write({'results': [{'id': s.decode('utf-8'),
+ 'text': s.decode('utf-8')} for s in vals]})


class ShareStudyAJAX(BaseHandler):
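
redis-py similarly returns bytes on Python 3 unless the client is created with decode_responses=True, and bytes values are not JSON-serializable, so each username is decoded before being written out. Illustration with a stubbed query result in place of a live Redis:

```python
import json

# What a redis-py zrangebylex call returns on Python 3:
vals = [b'admin@foo.bar', b'demo@microbio.me']

results = [{'id': s.decode('utf-8'), 'text': s.decode('utf-8')}
           for s in vals]
print(json.dumps({'results': results}))
# Leaving the values as bytes would make json.dumps raise TypeError.
```
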