[odb-api] 21/21: Patch for python3
Alastair McKinstry
mckinstry at moszumanska.debian.org
Wed Aug 30 06:42:58 UTC 2017
This is an automated email from the git hooks/post-receive script.
mckinstry pushed a commit to branch debian/master
in repository odb-api.
commit 5b5cf9d4f9db28b1691dbd8ea3c28926ccb3f2cd
Author: Alastair McKinstry <mckinstry at debian.org>
Date: Tue Aug 29 20:07:32 2017 +0100
Patch for python3
---
debian/patches/python3.patch | 714 +++++++++++++++++++++++++++++++++++++++++++
debian/patches/series | 1 +
2 files changed, 715 insertions(+)
diff --git a/debian/patches/python3.patch b/debian/patches/python3.patch
new file mode 100644
index 0000000..313a7be
--- /dev/null
+++ b/debian/patches/python3.patch
@@ -0,0 +1,714 @@
+Description: <short summary of the patch>
+ TODO: Put a short summary on the line above and replace this paragraph
+ with a longer explanation of this change. Complete the meta-information
+ with other relevant fields (see below for details). To make it easier, the
+ information below has been extracted from the changelog. Adjust it or drop
+ it.
+ .
+ odb-api (0.17.1-1) UNRELEASED; urgency=medium
+ .
+ * Initial release. (Closes: #873067)
+Author: Alastair McKinstry <mckinstry at debian.org>
+Bug-Debian: https://bugs.debian.org/873067
+
+---
+The information above should follow the Patch Tagging Guidelines, please
+checkout http://dep.debian.net/deps/dep3/ to learn about the format. Here
+are templates for supplementary fields that you might want to add:
+
+Origin: <vendor|upstream|other>, <url of original patch>
+Bug: <url in upstream bugtracker>
+Bug-Debian: https://bugs.debian.org/<bugnumber>
+Bug-Ubuntu: https://launchpad.net/bugs/<bugnumber>
+Forwarded: <no|not-needed|url proving that it has been forwarded>
+Reviewed-By: <name and email of someone who approved the patch>
+Last-Update: 2017-08-29
+
+--- odb-api-0.17.1.orig/odb-tools/src/migrator/pyodbdump_example.py
++++ odb-api-0.17.1/odb-tools/src/migrator/pyodbdump_example.py
+@@ -1,3 +1,4 @@
++from __future__ import print_function
+ import pyodbdump
+ db = '/tmp/new_migrator/ECMA.conv'
+ sql = open('/tmp/new_migrator/ECMA.conv/bigger_query.sql').read()
+@@ -6,7 +7,7 @@ sql = open('/tmp/new_migrator/ECMA.conv/
+
+ def columns(db, sql):
+ for r in pyodbdump.ODBReader(db, sql):
+- print dir(r)
++ print (dir(r))
+ return r
+
+-print columns(db, sql)
++print (columns(db, sql))
+--- odb-api-0.17.1.orig/odb_api/src/api/odbql_python_example.py
++++ odb-api-0.17.1/odb_api/src/api/odbql_python_example.py
+@@ -14,7 +14,7 @@ Examples of usage of ODB API Python inte
+
+ @author Piotr Kuchta, ECMWF, August 2016
+ """
+-
++from __future__ import print_function
+ import odb
+
+ ### Example 1. Create a new ODB file.
+@@ -67,7 +67,7 @@ c.execute('select * from foo;')
+ # and print its rows using simple print
+
+ for row in c.fetchall():
+- print ",".join(str(v) for v in row)
++ print (",".join(str(v) for v in row))
+
+
+ ### Example 3. Read a file into Pandas DataFrame object.
+@@ -78,14 +78,14 @@ d = pd.DataFrame.from_records(c.fetchall
+ columns = [d[0] for d in c.description],
+ exclude = ['v at foo'])
+
+-print 'Pandas DataFrame:\n', d
++print ('Pandas DataFrame:\n', d)
+
+ ### Example 4. Create numpy array.
+ import numpy as np
+
+ c.execute('select x, y, status.f1, status.f2 from foo;')
+ a = np.array(c.fetchall())
+-print 'numpy array:\n', a
++print ('numpy array:\n', a)
+
+
+ ### Example 5. Load some data from MARS or ODB Server into Pandas DataFrame
+@@ -103,6 +103,6 @@ c.execute('select *;')
+ d = pd.DataFrame.from_records(c.fetchall(),
+ columns = [d[0] for d in c.description],
+ exclude = ['expver','class','stream'])
+-print d
++print (d)
+
+-print "That's all, folks!"
++print ("That's all, folks!")
+--- odb-api-0.17.1.orig/odb_api/src/odb_api/fwrap.py
++++ odb-api-0.17.1/odb_api/src/odb_api/fwrap.py
+@@ -1,5 +1,5 @@
+ #!/usr/bin/env python
+-
++from __future__ import print_function
+ import re
+
+ PARAM_TYPE_COLUMN = 43
+@@ -12,7 +12,7 @@ def formatParameter(typ, name):
+
+ def declarations(source_cc = 'odbql.cc'):
+ decls = [line for line in [l.strip() for l in open(source_cc).read().splitlines()]
+- if line.find('odbql_') <> -1
++ if line.find('odbql_') != -1
+ and not line.startswith('//')
+ and line.find('return') == -1
+ and line.find('virtual') == -1
+@@ -44,7 +44,7 @@ def translate_value_and_comment(value_an
+ if len(value_and_possibly_comment.split('/*')) > 1:
+ comment = value_and_possibly_comment.split('/*')[1].split('*/')[0]
+
+- if value.find('|') <> -1:
++ if value.find('|') != -1:
+ comment = value + ' ' + comment + evaluated_expression(value_and_possibly_comment)
+ l,r = [x.strip(' ()') for x in value.split('|')]
+ i, shift = [x.strip() for x in r.split('<<')]
+@@ -62,7 +62,7 @@ def generateParameter(define):
+ name, value_and_possibly_comment = define
+
+ typ = 'integer'
+- if value_and_possibly_comment.find('"') <> -1:
++ if value_and_possibly_comment.find('"') != -1:
+ typ = 'character(len=*)'
+
+ if name == 'ODBQL_TRANSIENT':
+@@ -120,7 +120,7 @@ def parseDeclaration(decl):
+ return (decl, (return_type, fun_name, params))
+
+ def translate_type_for_binding(t):
+- #print 'translate_type_for_binding:', t
++ #print ('translate_type_for_binding:', t)
+ if t == 'const char*': return 'character(kind=C_CHAR), dimension(*)'
+ if t == 'const char**': return 'character(kind=C_CHAR), dimension(*)' # TODO
+ if t == 'double': return 'real(kind=C_DOUBLE), value'
+@@ -243,7 +243,7 @@ def actual_parameter(p):
+
+ def generateWrapper(signature, comment, template):
+
+- print 'generateWrapper:', signature, comment, template
++ print ('generateWrapper:', signature, comment, template)
+ global status_handling_code
+
+ return_type, function_name, params = signature
+--- odb-api-0.17.1.orig/odb_api/src/odb_api/test.py
++++ odb-api-0.17.1/odb_api/src/odb_api/test.py
+@@ -1,3 +1,4 @@
++from __future__ import print_function
+ import odb
+ import pyodbapi
+ import unittest
+@@ -35,7 +36,7 @@ class TestPython(unittest.TestCase):
+ md = r.columns()
+ self.assertTrue(49, len(md))
+ for c in md:
+- print c.name(), c.type(), c.missingValue()
++ print (c.name(), c.type(), c.missingValue())
+ self.assertTrue(c.name() == 'expver')
+ self.assertTrue(c.type() == 3)
+ self.assertTrue(c.missingValue() == -2147483647.0)
+@@ -50,8 +51,8 @@ class TestPython(unittest.TestCase):
+ TODO:
+ """
+ for r in odb.open(self.fn):
+- print 'elements: ', ",".join([ str(i) + ':' + str(r[i]) for i in self.bitfields])
+- print r[0:44]
++ print ('elements: ', ",".join([ str(i) + ':' + str(r[i]) for i in self.bitfields]))
++ print (r[0:44])
+ break
+
+ if __name__ == '__main__':
+--- odb-api-0.17.1.orig/odb_api/src/python/legacy_odb_api_python_examples.py
++++ odb-api-0.17.1/odb_api/src/python/legacy_odb_api_python_examples.py
+@@ -1,3 +1,4 @@
++from __future__ import print_function
+ import odb
+
+ # See also https://software.ecmwf.int/wiki/display/ODB/Python+interface
+@@ -21,7 +22,7 @@ def sql_select(file_name = test_file):
+ from "%s"
+ order by 2 desc''' % file_name
+ for row in odb.sql(s):
+- print ', '.join(map(str, row[:]))
++ print (', '.join(map(str, row[:])))
+
+ def indexing_result_set_row(file_name = test_file):
+ """ Various ways of indexing row object."""
+@@ -29,16 +30,16 @@ def indexing_result_set_row(file_name =
+ for row in odb.sql('select * from "%s"' % file_name):
+ # Row can be indexed with a tuple containing column names
+ expver, analysis_date, analysis_time = row['expver', 'andate', 'antime']
+- print 'expver: "%s", analysis date: %d analysis time: %d' % (expver, analysis_date, analysis_time)
++ print ('expver: "%s", analysis date: %d analysis time: %d' % (expver, analysis_date, analysis_time))
+ break
+
+ for row in odb.sql('select lat,lon,varno,obsvalue from "%s"' % file_name):
+ # Row can be indexed with integers representing position of the column in the result set,
+ # starting from 0
+- print row[0], row[1], row[2], row[3]
++ print (row[0], row[1], row[2], row[3])
+ # We can also index with a tuple containing several integers;
+ # the result will be a tuple of values.
+- print row[0,1,2,3]
++ print (row[0,1,2,3])
+ break
+
+ def reading_metadata_of_result_set(file_name = test_file):
+--- odb-api-0.17.1.orig/odb_api/src/python/legacy_test_python_odb_api.py
++++ odb-api-0.17.1/odb_api/src/python/legacy_test_python_odb_api.py
+@@ -1,7 +1,8 @@
+ #!/usr/bin/env python
+
++from __future__ import print_function
+ import os
+-print '*** ', __file__, 'CWD:', os.getcwd()
++print ('*** ', __file__, 'CWD:', os.getcwd())
+
+ import odb
+ import unittest
+--- odb-api-0.17.1.orig/odb_api/src/python/odb/__init__.py
++++ odb-api-0.17.1/odb_api/src/python/odb/__init__.py
+@@ -1,2 +1,2 @@
+-from odb import *
++from .odb import *
+
+--- odb-api-0.17.1.orig/odb_api/src/python/odb/odb.py
++++ odb-api-0.17.1/odb_api/src/python/odb/odb.py
+@@ -17,7 +17,7 @@ Functions sql and open are a legacy, uns
+
+ """
+
+-from odbql import *
++from .odbql import *
+
+ # Disable SWIG based interface for now
+ '''
+--- odb-api-0.17.1.orig/odb_api/src/python/odb/odbql.py
++++ odb-api-0.17.1/odb_api/src/python/odb/odbql.py
+@@ -144,8 +144,7 @@ class fetchall_generator(object):
+ def __init__(self, cursor):
+ self.cursor = cursor
+ def __iter__(self): return self
+- def __next__(self): return self.next()
+- def next(self):
++ def __next__(self):
+ v = self.cursor.fetchone()
+ if not v:
+ raise StopIteration()
+@@ -190,7 +189,7 @@ class Cursor:
+
+ operation = self.__add_semicolon_if_needed(operation)
+ rc = odbql_prepare_v2(db, operation, -1, byref(self.stmt), byref(tail))
+- if rc <> ODBQL_OK:
++ if rc != ODBQL_OK:
+ err_msg = odbql_errmsg(db).strip()
+ #print 'execute: odbql_prepare_v2 failed with error message: "%s"' % err_msg
+ if err_msg == "syntax error":
+@@ -208,14 +207,14 @@ class Cursor:
+ self.number_of_columns = odbql_column_count(self.stmt)
+ self.names = [odbql_column_name(self.stmt, i) for i in range(self.number_of_columns)]
+ self.types = [odbql_column_type(self.stmt, i) for i in range(self.number_of_columns)]
+- self.description = map (self.__column_info, self.names, self.types)
++ self.description = list(map (self.__column_info, self.names, self.types))
+
+ def fetchall(self):
+ return fetchall_generator(self)
+
+ def __iter__(self): return self
+
+- def next(self):
++ def __next__(self):
+ r = self.fetchone()
+ if r:
+ return r
+@@ -286,12 +285,16 @@ class Cursor:
+ def __marsify(self, procname, keyword_parameters):
+
+ def marslist(l):
+- if type(l) in (types.GeneratorType, types.ListType, types.TupleType):
+- return '/'.join([str(x) for x in l])
++ try:
++ if type(l) in (types.GeneratorType, types.ListType, types.TupleType):
++ return '/'.join([str(x) for x in l])
++ except: # on python3
++ if type(l) in (types.GeneratorType, list, tuple):
++ return '/'.join([str(x) for x in l])
+ else:
+ return str(l)
+
+- r = procname + "".join ( [ ',' + k + '=' + marslist(v) for k,v in keyword_parameters.iteritems()] )
++ r = procname + "".join ( [ ',' + k + '=' + marslist(v) for k,v in list(keyword_parameters.items())] )
+ return r
+
+
+@@ -326,8 +329,7 @@ class Cursor:
+ class __new_sql_generator:
+ def __init__(self, cursor): self.cursor = cursor
+ def __iter__(self): return self
+- def __next__(self): return self.next()
+- def next(self):
++ def __next__(self):
+ v = self.cursor.fetchone()
+ if not v:
+ raise StopIteration()
+--- odb-api-0.17.1.orig/odb_api/src/python/odb269.py
++++ odb-api-0.17.1/odb_api/src/python/odb269.py
+@@ -1,3 +1,4 @@
++from __future__ import print_function
+ import odb
+
+ conn = odb.connect('')
+@@ -16,8 +17,8 @@ c.executemany('INSERT INTO foo (x,y,v,st
+ conn.commit()
+
+ for i in range(100000):
+- print str(i) + '.'
++ print (str(i) + '.')
+ c.execute('SELECT count(*) from foo')
+ for row in c.fetchall():
+- print row[0]
++ print (row[0])
+ c.close()
+--- odb-api-0.17.1.orig/odb_api/src/python/odbless.py
++++ odb-api-0.17.1/odb_api/src/python/odbless.py
+@@ -1,3 +1,4 @@
++from __future__ import print_function
+ import sys
+ import odb
+
+@@ -12,7 +13,7 @@ def genStatSQL(columnNames, fs = statsFu
+ return 'select %s' % select_list
+
+ def chunks(l, n):
+- for i in xrange(0, len(l), n):
++ for i in range(0, len(l), n):
+ yield l[i:i+n]
+
+ def getColumns(dataFile):
+@@ -27,15 +28,15 @@ def printStats(dataFile, fs = statsFunct
+ valueFormat = '%20s'
+ sql = genStatSQL([c[0] for c in columns], fs) + ' from ' + quote(dataFile)
+ for r in odb.sql(sql):
+- print columnFormat % 'column', "".join([valueFormat % v for v in fs])
++ print (columnFormat % 'column', "".join([valueFormat % v for v in fs]))
+ values = [str(v) for v in r[:]]
+ for (c, vs) in zip(columns, chunks(values, len(fs))):
+ #if c[1] in [3,4]: vs = ['NA' for v in vs]
+- print columnFormat % c[0], "".join([valueFormat % v for v in vs])
++ print (columnFormat % c[0], "".join([valueFormat % v for v in vs]))
+
+ if __name__ == '__main__':
+ if len(sys.argv) == 1:
+- print 'Usage:\n\t', sys.argv[0], ' <fileName>+'
++ print ('Usage:\n\t', sys.argv[0], ' <fileName>+')
+ else:
+ for fn in sys.argv[1:]:
+ printStats(fn)
+--- odb-api-0.17.1.orig/odb_api/src/python/psql.py
++++ odb-api-0.17.1/odb_api/src/python/psql.py
+@@ -1,3 +1,4 @@
++from __future__ import print_function
+ import os, subprocess, multiprocessing
+
+ exe = "/tmp/p4/source/main/build/Debug/bin/odb"
+@@ -18,7 +19,7 @@ def mergeBlocks(blocks, maxBlockSize):
+ def getBlocks(fileName):
+ l = [exe, "header", "-offsets", fileName]
+ blocks = subprocess.check_output(l)
+- blocks = [map(int, l.split()) for l in blocks.split('\n') if l]
++ blocks = [list(map(int, l.split())) for l in blocks.split('\n') if l]
+ return blocks
+
+ def divideFile(fileName, maxBlockSize = None, numberOfProcessors = None):
+@@ -32,12 +33,12 @@ def outputFileName(offset, length, nrows
+ return 'out_' + offset + '_' + length + '.odb'
+
+ def filterPartOfFile(inputFile, fileChunk, select = '*', where = ''):
+- offset, length, nrows, ncolumns = map(str, fileChunk)
++ offset, length, nrows, ncolumns = list(map(str, fileChunk))
+ outputFile = outputFileName(offset, length, nrows)
+ sql = "select " + select + " into " + '"' + outputFile + '"'
+ if where: sql += ' where ' + where
+ l = [exe, "sql", "-i", inputFile, "-offset", offset, "-length", length, sql]
+- print 'filterBlock: running ', l
++ print ('filterBlock: running ', l)
+ subprocess.call(l)
+ return outputFile
+
+--- odb-api-0.17.1.orig/odb_api/src/python/test_python_odb_api.py
++++ odb-api-0.17.1/odb_api/src/python/test_python_odb_api.py
+@@ -1,5 +1,6 @@
+ #!/usr/bin/env python
+
++from __future__ import print_function
+ import sys
+ sys.path.append('/tmp/build/bundle/debug/odb_api/src/python/odb')
+
+@@ -126,13 +127,13 @@ class TestODBQL(unittest.TestCase):
+
+ number_of_rows = 0
+ rc = None
+- while rc <> ODBQL_DONE:
++ while rc != ODBQL_DONE:
+
+ if number_of_rows == 0 or rc == ODBQL_METADATA_CHANGED:
+ number_of_columns = odbql_column_count(stmt)
+- print 'number_of_columns=', number_of_columns
++ print ('number_of_columns=', number_of_columns)
+ for i in range(0,number_of_columns):
+- print i, odbql_column_name(stmt, i) + ':' + type_name(odbql_column_type(stmt, i))
++ print (i, odbql_column_name(stmt, i) + ':' + type_name(odbql_column_type(stmt, i)))
+ else:
+ self.assertEqual(rc, ODBQL_ROW)
+
+@@ -149,7 +150,7 @@ class TestODBQL(unittest.TestCase):
+ #ODBQL_TEXT = 3
+ return odbql_column_text(stmt, column)
+
+- print ','.join([str(value(column)) for column in range(number_of_columns)])
++ print (','.join([str(value(column)) for column in range(number_of_columns)]))
+ number_of_rows += 1
+ rc = odbql_step(stmt)
+
+@@ -161,7 +162,7 @@ class TestODBQL(unittest.TestCase):
+
+
+ def test_bitfields(self): # ODB-97
+- print "TestODBQL.test_bitfields"
++ print ("TestODBQL.test_bitfields")
+ db, stmt, tail = c_voidp(), c_voidp(), c_char_p()
+ rc = odbql_open( "", byref(db))
+ self.assertEqual(rc, ODBQL_OK)
+@@ -185,7 +186,7 @@ class TestODBQL(unittest.TestCase):
+ self.assertEqual(rc, ODBQL_OK)
+
+ expected = [4198786,4198806,4202498,4202518,4202626,4202646,4210690,4210710,4210818,4210838,4210946,4211074,4211094,4243458,4243478,4243586,4243606,4243714,4243842,4243862,4264086,4264322,4264342,4268034,4268054,4268162,4268182,4276226,4276246,4276354,4276374,4276482,4276610,4308994,4309014,4309122,4309142,4309250,4309270,4309378,4309398,8404994,8405014,8405122,8405142,8405378,8405398,8458626,8458646,8462338,8462358,8470530,8470550,8470658,8470678]
+- print 'values =', values
++ print ('values =', values)
+ self.assertEqual(values, expected)
+
+ def test_stored_procedure(self):
+@@ -201,7 +202,7 @@ class TestODBQL(unittest.TestCase):
+ rc = odbql_step(stmt)
+ self.assertEqual(rc, ODBQL_DONE)
+
+- print 'test_stored_procedure: OK!'
++ print ('test_stored_procedure: OK!')
+
+
+ class TestPEP249(unittest.TestCase):
+@@ -217,7 +218,7 @@ class TestPEP249(unittest.TestCase):
+
+ def test_insert_data(self):
+ c = self.conn.cursor()
+- print self.data
++ print (self.data)
+ #c.execute(TEST_DDL)
+ c.executemany(TEST_DDL + TEST_INSERT, self.data)
+ c.close()
+@@ -228,9 +229,9 @@ class TestPEP249(unittest.TestCase):
+
+ def test_select_data_fetchone(self):
+
+- print 'calling legacy API:'
++ print ('calling legacy API:')
+ legacy = self.read_with_legacy_api()
+- print 'legacy:', legacy
++ print ('legacy:', legacy)
+
+ c = self.conn.cursor()
+ c.execute(TEST_DDL)
+@@ -246,7 +247,7 @@ class TestPEP249(unittest.TestCase):
+ #self.assertEqual (original, legacy [number_of_rows])
+ self.assertEqual (original, row)
+
+- print row
++ print (row)
+ number_of_rows += 1
+
+ self.assertEqual ( number_of_rows, 4 )
+@@ -278,7 +279,7 @@ class TestPEP249(unittest.TestCase):
+ """
+ ODB-97 (old, Swig based API), ODB-250 (new API): Bitfield values read incorrectly.
+ """
+- print "TestPEP249.test_bitfields"
++ print ("TestPEP249.test_bitfields")
+ # !odb sql select qcflags_info_1dvar -i ATOVS.trimmed.odb | sort | uniq
+ expected = [0,1,4,5,8,9,12,13,130,131,134,135,138,139,142,143,642]
+
+@@ -295,7 +296,7 @@ class TestPEP249(unittest.TestCase):
+ c = conn.cursor()
+ c.execute("""CREATE TABLE foo AS (statid string,x integer) ON 'test_sorting_string_columns.odb';""")
+ data = [[w] for w in """12345678 abcdefgh dfgsdfgs DFADSFAD sdffffff aaaaaaaa""".split()]
+- print data
++ print (data)
+ c.executemany('INSERT INTO foo (statid,x) VALUES (?,?);', data)
+ conn.commit()
+
+@@ -317,7 +318,7 @@ class TestPEP249(unittest.TestCase):
+ vals = s.split(',')
+ return int(vals[0]), float(vals[1])
+ data = [cast(w) for w in """1,0.1 2,0.2 3,0.3 4,0.4 1,0.11""".split()]
+- print data
++ print (data)
+ c.executemany('INSERT INTO test_sql_variables (varno,obsvalue) VALUES (?,?);', data)
+ conn.commit()
+
+--- odb-api-0.17.1.orig/odb_api/src/python/to_sqlite.py
++++ odb-api-0.17.1/odb_api/src/python/to_sqlite.py
+@@ -1,3 +1,4 @@
++from __future__ import print_function
+ import sys, os
+ import sqlite3
+
+@@ -37,17 +38,17 @@ def convert(inputFileName, outputFileNam
+ ct = createTable(tableName, columns)
+ try:
+ c.execute(ct)
+- print ct
++ print (ct)
+ except:
+ dt = 'DELETE FROM ' + tableName
+ c.execute(dt)
+- print dt
++ print (dt)
+
+ def rows():
+ for row in odb.open(inputFileName):
+ yield row[:]
+
+- print 'Copying data from', inputFileName, 'into', outputFileName
++ print ('Copying data from', inputFileName, 'into', outputFileName)
+
+ c.executemany('INSERT INTO ' + tableName + ' VALUES (' + ','.join('?' for c in columns) + ')', rows())
+ conn.commit()
+--- odb-api-0.17.1.orig/odb_api/tests/dhshome/scripts/prestage.py
++++ odb-api-0.17.1/odb_api/tests/dhshome/scripts/prestage.py
+@@ -1,10 +1,14 @@
+ #!/usr/bin/env python
+
++from __future__ import print_function
+ import sys, os, re
+-import urllib2
++try:
++ import urllib.request, urllib.error, urllib.parse
++except:
++ import urllib2
+
+ if all(os.path.exists(fn) for fn in sys.argv[1:]):
+- print sys.argv[0] +': all files', sys.argv[1:], 'exist'
++ print (sys.argv[0] +': all files', sys.argv[1:], 'exist' )
+ os._exit(0)
+
+ """
+@@ -33,7 +37,7 @@ if all(os.path.exists(fn) for fn in sys.
+ """
+ emosBackup = "/emos_backup/an/{expver}/{date}{time}/ECMA.{marsname}.tar" # marsname is group's short name
+
+-#print 'Hello from ', sys.argv[0], ". I'm not going to do anything with file(s) ", sys.argv[1:]
++#print 'Hello from ', sys.argv[0], ". I'm not going to do anything with file(s) ", sys.argv[1:])
+
+ def dhshome():
+ return os.getenv('DHSHOME') or os.getenv('TEST_DHSHOME')
+@@ -48,7 +52,7 @@ def odbPathNameSchema():
+ schema = ":{class}:{stream}:{expver}:{date}/:{time}:{type}:{groupid}/{reportype}.odb"
+ cfg = localConfig()
+ if cfg:
+- lines = [l for l in open(cfg).readlines() if l.find('odbPathNameSchema') <> -1]
++ lines = [l for l in open(cfg).readlines() if l.find('odbPathNameSchema') != -1]
+ schema = lines[0].split(':')[1].strip().strip('"')
+ return schema
+
+@@ -57,15 +61,15 @@ def decodeFileName(fileName, pathNameSch
+ r = re.sub('[{]', '(?P<', r)
+ r = re.sub('[}]', '>.*)', r)
+ r = '.*/' + r
+- print 'fileName:', fileName
+- print 'RE:', r
++ print ('fileName:', fileName)
++ print ('RE:', r)
+ d = {'class': 'od', 'type' : 'ofb', 'stream' : 'oper', 'expver' : '0001'}
+ d.update(re.search(r, fileName).groupdict())
+ return d
+
+ def encodeFileName (d, template):
+ s = template
+- for k,v in d.iteritems():
++ for k,v in list(d.items()):
+ s = re.sub('{%s}' % k, str(v), s)
+ return s
+
+@@ -78,7 +82,7 @@ def reportType2group(rt, fn = '/usr/loca
+ for line in open(fn).readlines():
+ r = [v.strip() for v in line.split(';')]
+ if r[0] == str(rt):
+- print 'reportType2group("'+ rt + '") => ', r[1]
++ print ('reportType2group("'+ rt + '") => ', r[1])
+ return r[1] # group
+
+ # http://data-portal.ecmwf.int/odbgov/csv/Group/
+@@ -90,11 +94,11 @@ def group2marsname(group, fn = '/usr/loc
+ for line in open(fn).readlines():
+ r = [v.strip() for v in line.split(';')]
+ if r[1] == str(group):
+- print 'group2marsname("'+ group + '") => ', r[0]
++ print ('group2marsname("'+ group + '") => ', r[0])
+ return r[3] # marsname
+
+ for fn in sys.argv[1:]:
+ d = decodeFileName(fn)
+ d['marsname'] = group2marsname(reportType2group(d['reportype'])).lower()
+ ecfs = encodeFileName(d, emosBackup)
+- print fn, '=> (', d, ')', ecfs
++ print (fn, '=> (', d, ')', ecfs)
+--- odb-api-0.17.1.orig/odb_tools/src/migrator/pyodbdump_example.py
++++ odb-api-0.17.1/odb_tools/src/migrator/pyodbdump_example.py
+@@ -1,3 +1,4 @@
++from __future__ import print_function
+ import pyodbdump
+ db = '/tmp/new_migrator/ECMA.conv'
+ sql = open('/tmp/new_migrator/ECMA.conv/bigger_query.sql').read()
+@@ -6,7 +7,7 @@ sql = open('/tmp/new_migrator/ECMA.conv/
+
+ def columns(db, sql):
+ for r in pyodbdump.ODBReader(db, sql):
+- print dir(r)
++ print (dir(r))
+ return r
+
+-print columns(db, sql)
++print (columns(db, sql))
+--- odb-api-0.17.1.orig/pkgpy.py
++++ odb-api-0.17.1/pkgpy.py
+@@ -1,3 +1,4 @@
++from __future__ import print_function
+ import sys, json, re
+
+ {
+@@ -31,7 +32,7 @@ def parseCommand(cmd):
+ w = l[i]
+ if w.startswith('-'):
+ if not paramOption(w) and not flagOption(w):
+- print 'Unknown option ' + w + " in \n" + cmd['command']
++ print ('Unknown option ' + w + " in \n" + cmd['command'])
+ sys.exit(1)
+
+ param = paramOption(w)
+@@ -66,7 +67,7 @@ def fileHasMain(fn):
+ if main_re .search(line):
+ rhs = line.split('main')[1]
+ if '(' in rhs and ')' in rhs:
+- #print fn,':', line
++ #print (fn,':', line)
+ return True
+ return False
+
+@@ -81,4 +82,4 @@ for cmd in cmds:
+
+ #if not output.endswith('.o'): print output
+ if not fileHasMain(input):
+- print input
++ print (input )
+--- odb-api-0.17.1.orig/python_package/pkgpy.py
++++ odb-api-0.17.1/python_package/pkgpy.py
+@@ -1,6 +1,7 @@
+ # -*- coding: utf-8 -*-
+ # http://clang.llvm.org/docs/JSONCompilationDatabase.html
+
++from __future__ import print_function
+ import sys, json, re, os, shutil, fnmatch
+ from setuptools import setup, Extension
+
+@@ -71,7 +72,7 @@ def parseCommand(cmd):
+ w = l[i]
+ if w.startswith('-'):
+ if not paramOption(w) and not flagOption(w):
+- print 'Unknown option ' + w + " in \n" + cmd['command']
++ print ('Unknown option ' + w + " in \n" + cmd['command'])
+ sys.exit(1)
+
+ param = paramOption(w)
+@@ -106,7 +107,7 @@ def fileHasMain(fn):
+ if main_re .search(line):
+ rhs = line.split('main')[1]
+ if '(' in rhs and ')' in rhs:
+- #print fn,':', line
++ #print (fn,':', line)
+ return True
+ return False
+
+@@ -115,7 +116,7 @@ def package_source_files(files, package_
+ except: pass # assume it failed because it already exists
+ # shutil.rmtree(path[, ignore_errors[, onerror]])¶
+ for f in files:
+- print '',f
++ print ('',f)
+ shutil.copy(f, package_source_files_directory)
+
+
+@@ -129,13 +130,13 @@ cmds = compile_commands()
+ cpp_sources = [str(c['file']) for c in cmds]
+
+ python_sources = [str(os.path.join(CMAKE_BUILD_DIRECTORY, 'odb_api/src/python/odb/', f)) for f in os.listdir(os.path.join(CMAKE_BUILD_DIRECTORY,'odb_api/src/python/odb/')) if fnmatch.fnmatch(f, '*.py')]
+-print '\n'.join(python_sources)
++print ('\n'.join(python_sources))
+
+ package_source_files(python_sources, package_source_files_directory = 'odb')
+ package_source_files(cpp_sources, package_source_files_directory = 'odb_api')
+
+ packaged_cpp_sources = [os.path.join('odb_api',f) for f in os.listdir('odb_api')]
+-print packaged_cpp_sources
++print (packaged_cpp_sources)
+
+ with open('setup.py','w') as f:
+ f.write(SETUP_PY % locals())
diff --git a/debian/patches/series b/debian/patches/series
index 4d9efeb..c1dbcb3 100644
--- a/debian/patches/series
+++ b/debian/patches/series
@@ -1,2 +1,3 @@
#unique_ptr.patch
shared_lib.patch
+python3.patch
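
For reference only (not part of the commit), below is a minimal standalone sketch of the Python 2/3-compatible idioms this patch applies across the sources: print as a function via the __future__ import, the __next__ iterator protocol with a Python 2 "next" alias, dict.items() in place of iteritems(), and != in place of the removed <> operator. The Counter class and the params dictionary are hypothetical illustrations, not code from the patch.

from __future__ import print_function   # makes print a function on Python 2 as well

class Counter(object):
    """Iterator yielding 0..limit-1; __next__ is the Python 3 protocol method."""
    def __init__(self, limit):
        self.i, self.limit = 0, limit
    def __iter__(self):
        return self
    def __next__(self):
        if self.i >= self.limit:
            raise StopIteration()
        self.i += 1
        return self.i - 1
    next = __next__           # keeps Python 2 iteration working too

params = {'class': 'od', 'expver': '0001'}
for k, v in params.items():   # iteritems() no longer exists on Python 3
    if k != 'class':          # '<>' was removed; '!=' works on both
        print(k, '=', v)

print(','.join(str(n) for n in Counter(3)))   # -> 0,1,2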
--
Alioth's /usr/local/bin/git-commit-notice on /srv/git.debian.org/git/debian-science/packages/odb-api.git