[Greater-commits] r3766 - branches/3.0.0-all-models/greater-pre-processing
scm-commit@wald.intevation.org
Wed Aug 17 11:46:25 CEST 2011
Author: aheinecke
Date: 2011-08-17 11:46:24 +0200 (Wed, 17 Aug 2011)
New Revision: 3766
Added:
branches/3.0.0-all-models/greater-pre-processing/greater-pre-processing.py
Removed:
branches/3.0.0-all-models/greater-pre-processing/greater_pre_processing.py
Log:
Revert the rename; greater-pre-processing is going to be kept as
a standalone application.
Copied: branches/3.0.0-all-models/greater-pre-processing/greater-pre-processing.py (from rev 3765, branches/3.0.0-all-models/greater-pre-processing/greater_pre_processing.py)
Deleted: branches/3.0.0-all-models/greater-pre-processing/greater_pre_processing.py
===================================================================
--- branches/3.0.0-all-models/greater-pre-processing/greater_pre_processing.py 2011-08-12 16:46:37 UTC (rev 3765)
+++ branches/3.0.0-all-models/greater-pre-processing/greater_pre_processing.py 2011-08-17 09:46:24 UTC (rev 3766)
@@ -1,678 +0,0 @@
-#!/usr/bin/env python
-# Copyright (c) 2002,2003,2011 by Intevation GmbH
-# Authors:
-# Jan-Oliver Wagner <jan at intevation.de>
-# Andre Heinecke <aheinecke at intevation.de>
-#
-# This program is free software under the GPL (>=v2)
-# Read the file COPYING for details.
-
-__version__ = "$Revision: 1.28 $"
-# $Source: /home/bricks/source/greater/cvs/greaterrepository/greater-pre-processing/greater-pre-processing.py,v $
-# $Id: greater-pre-processing.py,v 1.28 2005-07-18 16:22:28 frank Exp $
-
-import os, sys
-import os.path
-import subprocess
-
-import support
-
-catchment = {}
-version_str = 'greater-pre-processing 1.2.0'
-usage_str = 'greater-pre-processing [check|help|run|run-without-cleanup|clean]'
-
-def cygwin_call(command, suppress_output=False, extra_env={}, inputdata=None,
- logfile=None, **kw):
- """Run command as a subprocess in a cygwin shell on windws
- and wait until it is finished.
-
- The command should be given as a list of strings. But a single
- string is also accepted
- """
- if os.name == 'nt':
- # On Windows we prepend sh.exe -c to the command line
- if isinstance(command, str):
- cygwin_command = ["sh.exe", "-c"]
- cygwin_command.append(command)
- command = cygwin_command
- else:
- command = ["sh.exe", "-c"] + command
-
- converted_command = []
- for string in command:
- converted_command.append(string.replace('\\', '/'))
- command = converted_command
- # add ./bin to path to allow easier packaging
- extra_env["PATH"] = os.path.join(sys.path[0], "bin") + \
- ";" + os.environ["PATH"]
- extra_env["CYGWIN"] = "nodosfilewarning"
-
- if inputdata is not None:
- kw["stdin"] = subprocess.PIPE
- if logfile:
- kw["stdout"] = kw["stderr"] = open(logfile, "w")
- elif suppress_output:
- kw["stdout"] = open(os.devnull, "w")
- kw["stderr"] = open(os.devnull, "w")
- env = kw.pop("env", None)
- if extra_env:
- if env is None:
- env = os.environ.copy()
- env.update(extra_env)
- try:
- process = subprocess.Popen(command, env=env, **kw)
- # feed inputdata (if any) to the child's stdin and wait for completion
- process.communicate(inputdata)
- except:
- import traceback
- traceback.print_exc()
- return -1
- # propagate the command's exit status so callers can detect failures
- return process.returncode
-
-def catchment_desc():
- """Check for file "catchment.desc" and if found
- check also for correct syntax and completeness.
- Else, print appropriate error messages.
- """
- try:
- catchment_desc = open('catchment.desc', 'r').readlines()
- except:
- print "Error: Could not find or open file 'catchment.desc'"
- print "This file is required for pre-processing the catchment."
- return 1
-
- print "Found 'catchment.desc':"
- for line in catchment_desc:
- line = line.rstrip()
- if len(line) == 0 or line[0] == '#': continue
- fields = line.split('=', 1)
- catchment[fields[0]] = fields[1]
-
- for k in [ 'ID', 'NAME', 'DESCRIPTION', 'IN-VERSION', 'OUT-VERSION' ]:
- if not catchment.has_key(k):
- print "Error in catchment.desc: missing", k
- return 2
-
- for c in catchment['ID']:
- if not((c >= 'a' and c <= 'z') or (c >= 'A' and c <= 'Z') or \
- (c >= '0' and c <= '9')):
- print ' Error: character "' + c + \
- '" not allowed in CATCHEMENTID (only a-zA-Z0-9)'
- return 3
-
- print ' ID =', catchment['ID']
- print ' Name =', catchment['NAME']
- print ' Description =', catchment['DESCRIPTION']
- print ' Format version src =', catchment['IN-VERSION']
- print ' Format version result =', catchment['OUT-VERSION']
-
- print "description complete."
-
-
-def do_check():
- # Check for the files .drn, .rna, .dsd, .cbp, .bgd, .lks and .pic and,
- # if found, check them for correct syntax and completeness.
- # Provide appropriate error messages.
- print "Running check ..."
-
- if catchment['IN-VERSION'] != '1.0':
- print ' Error: Format version ' + catchment['IN-VERSION'] + \
- ' can not be processed'
- print ' This check is for version 1.0'
- return 99
-
- # <catchmentid>.drn: digital river network
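- # Expected line format, inferred from the checks below: each non-comment
- # line is either a single integer StretchID or an "x,y" coordinate pair
- # of floats; '#' starts a comment line.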
- fname = catchment['ID']+'.drn'
- try:
- catchment_drn = open(fname, 'r').readlines()
- except:
- print " Error: Could not find or open file '" + fname + "'"
- print " This file is required for pre-processing the catchment."
- return 1
- else:
- print ' Found ' + fname + ' (digital river network):'
- # coarse syntax check
- drn_count = 0
- line_count = 0
- drn_stretch_ids = []
- for line in catchment_drn:
- line_count = line_count + 1
- line = line.rstrip()
- if len(line) == 0 or line[0] == '#': continue
- fields = line.split(',')
- if len(fields) == 1:
- try:
- id = int(fields[0])
- except:
- print ' Syntax error at line ' + str(line_count) + ':'
- print ' StretchID ' + fields[0] + ' not an integer.'
- return 2
- drn_count = drn_count + 1
- drn_stretch_ids.append(id)
- if len(fields) == 2:
- try:
- float(fields[0])
- float(fields[1])
- except:
- print ' Syntax error at line ' + str(line_count) + ':'
- print ' coordinates (' + fields[0] + ',' + fields[1] + ') not a float tuple.'
- return 3
- if len(fields) > 2:
- print ' Syntax error at line ' + str(line_count) + ':'
- print ' incorrect number of fields (is ' + str(len(fields)) + ', should be 1 (StretchID) or 2 (coords)).'
- return 4
-
- print ' Total number of stretches: ' + str(drn_count)
- print ' Syntax OK'
-
- # <catchmentid>.cbp: catchment boundary polygon
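- # Expected line format, inferred from the checks below: "x,y" coordinate
- # pairs of floats; the first and last pair must be identical so that the
- # polygon is closed.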
- fname = catchment['ID']+'.cbp'
- try:
- catchment_cbp = open(fname, 'r').readlines()
- except:
- print "Error: Could not find or open file '" + fname + "'"
- print "This file is required for pre-processing the catchment."
- return 1
- else:
- print ' Found ' + fname + ' (catchment boundary polygon):'
- # coarse syntax check
- cbp_count = 0
- line_count = 0
- cbp_first_coord = []
- cbp_last_coord = []
- for line in catchment_cbp:
- line_count = line_count + 1
- line = line.rstrip()
- if len(line) == 0 or line[0] == '#': continue
- fields = line.split(',')
- if len(fields) == 2:
- try:
- float(fields[0])
- float(fields[1])
- except:
- print ' Syntax error at line ' + str(line_count) + ':'
- print ' coordinates (' + fields[0] + ',' + fields[1] + ') not a float tuple.'
- return 3
- cbp_count = cbp_count + 1
- if cbp_count == 1:
- cbp_first_coord = fields
- cbp_last_coord = fields
- if len(fields) != 2:
- print ' Syntax error at line ' + str(line_count) + ':'
- print ' incorrect number of fields (is ' + str(len(fields)) + ', should be 2 (coordinate pair)).'
- return 4
-
- if cbp_first_coord != cbp_last_coord:
- print ' Syntax error at line ' + str(line_count) + ':'
- print ' first and last coordinate pair must be equal (polygon must be closed).'
- print ' ' + str(cbp_first_coord) + ' != ' + str(cbp_last_coord)
- return 5
-
- print ' Total number of boundary coords: ' + str(cbp_count)
- print ' Syntax OK'
-
- # <catchmentid>.rna: river network attributes
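- # Expected line format, inferred from the checks below: 9 comma-separated
- # fields per stretch (integer StretchID, then floats, some of which may
- # be empty, and a trailing string).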
- fname = catchment['ID']+'.rna'
- try:
- catchment_rna = open(fname, 'r').readlines()
- except:
- print "Error: Could not find or open file '" + fname + "'"
- print "This file is required for pre-processing the catchment."
- return 1
- else:
- print ' Found ' + fname + ' (river network attributes):'
- # coarse syntax check
- rna_count = 0
- line_count = 0
- rna_stretch_ids = []
- for line in catchment_rna:
- line_count = line_count + 1
- line = line.rstrip()
- if len(line) == 0 or line[0] == '#': continue
- fields = line.split(',')
- if len(fields) == 9:
- try:
- id = int(fields[0])
- float(fields[1])
- float(fields[2])
- if fields[3] != '': float(fields[3])
- if fields[4] != '': float(fields[4])
- float(fields[5])
- if fields[6] != '': float(fields[6])
- if fields[7] != '': float(fields[7])
- except:
- print ' Syntax error at line ' + str(line_count) + ':'
- print ' incorrect data type (should be int,float,float,float,float,float,float,float,string).'
- return 3
- rna_count = rna_count + 1
- rna_stretch_ids.append(id)
- if len(fields) != 9:
- print ' Syntax error at line ' + str(line_count) + ':'
- print ' incorrect number of fields (is ' + str(len(fields)) + ', should be 9).'
- return 4
-
- print ' Total number of stretches: ' + str(rna_count)
- print ' Syntax OK'
-
- # <catchmentid>.dsd: discharge site data
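- # Expected line format, inferred from the checks below: 9 comma-separated
- # fields (int,float,float,int,float,float,string,int,string); the 7th
- # field must be 'PS', 'AS', 'TF' or 'AS/TF'.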
- fname = catchment['ID']+'.dsd'
- try:
- catchment_dsd = open(fname, 'r').readlines()
- except:
- print "Error: Could not find or open file '" + fname + "'"
- print "This file is required for pre-processing the catchment."
- return 1
- else:
- print ' Found ' + fname + ' (discharge site data):'
- # coarse syntax check
- dsd_count = 0
- line_count = 0
- for line in catchment_dsd:
- line_count = line_count + 1
- line = line.rstrip()
- if len(line) == 0 or line[0] == '#': continue
- fields = line.split(',')
- if len(fields) == 9:
- try:
- int(fields[0])
- float(fields[1])
- float(fields[2])
- int(fields[3])
- float(fields[4])
- float(fields[5])
- int(fields[7])
- except:
- print ' Syntax error at line ' + str(line_count) + ':'
- print ' incorrect data type (should be int,float,float,int,float,float,string,int,string).'
- return 3
- dsd_count = dsd_count + 1
- if len(fields) != 9:
- print ' Syntax error at line ' + str(line_count) + ':'
- print ' incorrect number of fields (is ' + str(len(fields)) + ', should be 9).'
- return 4
- if fields[6] not in [ 'PS', 'AS', 'TF', 'AS/TF' ]:
- print ' Syntax error at line ' + str(line_count) + ':'
- print " Type is '" + fields[6] + "' but must be either 'PS', 'AS', 'TF' or 'AS/TF'."
- return 6
-
- print ' Total number of discharge sites: ' + str(dsd_count)
- print ' Syntax OK'
-
- # <catchmentid>.bgd: background data
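- # Expected line format, inferred from the checks below: 5 comma-separated
- # fields; the 3rd must be empty or one of point/polygon/line/shape/image/grid
- # and the 5th must be 'yes' or 'no'.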
- fname = catchment['ID']+'.bgd'
- try:
- catchment_bgd = open(fname, 'r').readlines()
- except:
- print "Error: Could not find or open file '" + fname + "'"
- print "This file is required for pre-processing the catchment."
- return 1
- else:
- print ' Found ' + fname + ' (background data):'
- # coarse syntax check
- bgd_count = 0
- line_count = 0
- for line in catchment_bgd:
- line_count = line_count + 1
- line = line.rstrip()
- if len(line) == 0 or line[0] == '#': continue
- fields = line.split(',')
- if len(fields) == 5:
- bgd_count = bgd_count + 1
- if len(fields) != 5:
- print ' Syntax error at line ' + str(line_count) + ':'
- print ' incorrect number of fields (is ' + str(len(fields)) + ', should be 5).'
- return 4
- if fields[2] not in [ '', 'point', 'polygon', 'line', 'shape', 'image', 'grid' ]:
- print ' Syntax error at line ' + str(line_count) + ':'
- print " Type is '" + fields[2] + "' but must be one of 'point', 'polygon', 'line', 'shape', 'image', 'grid' or left empty."
- return 6
- if fields[4] not in [ 'yes', 'no' ]:
- print ' Syntax error at line ' + str(line_count) + ':'
- print " Ctch_fl is '" + fields[4] + "' but must be either 'yes' or 'no'."
- return 6
-
- print ' Total number of background elements: ' + str(bgd_count)
- print ' Syntax OK'
-
- # <catchmentid>.lks: lakes
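- # Expected line format, inferred from the check below: one integer per
- # non-comment line.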
- fname = catchment['ID']+'.lks'
- try:
- catchment_lks = open(fname, 'r').readlines()
- except:
- print "Error: Could not find or open file '" + fname + "'"
- print "This file is required for pre-processing the catchment."
- return 1
- else:
- print ' Found ' + fname + ' (lakes):'
- # coarse syntax check
- lks_count = 0
- line_count = 0
- for line in catchment_lks:
- line_count = line_count + 1
- line = line.rstrip()
- if len(line) == 0 or line[0] == '#': continue
- try:
- int(line)
- except:
- print ' Syntax error at line ' + str(line_count) + ':'
- print ' entry (' + line + ') not an integer.'
- return 3
- lks_count = lks_count + 1
- print ' Total number of lakes: ' + str(lks_count)
- print ' Syntax OK'
-
- # <catchmentid>.pic: pictures
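- # Expected line format, inferred from the checks below: 5 comma-separated
- # fields (int,float,float,string,string).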
- fname = catchment['ID']+'.pic'
- try:
- catchment_pic = open(fname, 'r').readlines()
- except:
- print "Error: Could not find or open file '" + fname + "'"
- print "This file is required for pre-processing the catchment."
- return 1
- else:
- print ' Found ' + fname + ' (pictures):'
- # coarse syntax check
- pic_count = 0
- line_count = 0
- for line in catchment_pic:
- line_count = line_count + 1
- line = line.rstrip()
- if len(line) == 0 or line[0] == '#': continue
- fields = line.split(',')
- if len(fields) == 5:
- try:
- int(fields[0])
- float(fields[1])
- float(fields[2])
- except:
- print ' Syntax error at line ' + str(line_count) + ':'
- print ' incorrect data type (should be int,float,float,string,string).'
- return 3
- pic_count = pic_count + 1
- if len(fields) != 5:
- print ' Syntax error at line ' + str(line_count) + ':'
- print ' incorrect number of fields (is ' + str(len(fields)) + ', should be 5).'
- return 4
-
- print ' Total number of pictures: ' + str(pic_count)
- print ' Syntax OK'
-
- error_count = 0
- for id in rna_stretch_ids:
- if id in drn_stretch_ids:
- continue
- print 'Semantic Error: StretchID %d occurs in .rna file, but does not '\
- 'appear in .drn file' % id
- error_count += 1
-
- for id in drn_stretch_ids:
- if id in rna_stretch_ids:
- continue
- print 'Semantic Error: StretchID %d occurs in .drn file, but does not '\
- 'appear in .rna file' % id
- error_count += 1
-
- if error_count > 0:
- return 5
-
- print 'Syntax check completed successfully'
- return 0
-
-def do_run():
- if catchment['OUT-VERSION'] != '1.0' and catchment['OUT-VERSION'] != '2.0':
- print ' Error: Format version ' + catchment['OUT-VERSION'] + \
- ' can not be created'
- print ' Supported versions are 1.0 and 2.0'
- return 99
-
- # execute the pre-processing
- if do_check() != 0:
- print 'Not executing pre-processing due to error'
- return 1
-
- print 'Running pre-processing ...'
-
- print ' Removing tmp-files and old log-file (' + catchment['ID'] + '.log) ...'
- cmds = [ 'rm -f *.tmp ' + catchment['ID'] + '.log' ]
- for cmd in cmds:
- if cygwin_call(cmd) != 0:
- print ' Error executing ' + cmd
- return 2
- print ' done.'
-
- print ' Creating discharges.shp ...'
- cmds = [ 'gawk -f ' + os.path.join(greater_pre_proc_path,
- 'awk', 'dsd2gen.awk') + ' < ' + catchment['ID'] + \
- '.dsd > discharges.gen.tmp',\
- 'gen2shp discharges points < discharges.gen.tmp']
- for cmd in cmds:
- if cygwin_call(cmd) != 0:
- print ' Error executing ' + cmd
- return 2
- print ' done.'
-
- print ' Creating rivernet.shp ...'
- cmds = [ 'gawk -v LOG=' + catchment['ID'] + '.log -f ' + \
- os.path.join(greater_pre_proc_path, 'awk','drn2gen.awk') +' < ' + \
- catchment['ID'] + '.drn > rivernet.gen.tmp',\
- 'gen2shp rivernet lines < rivernet.gen.tmp']
- for cmd in cmds:
- if cygwin_call(cmd) != 0:
- print ' Error executing ' + cmd
- return 2
- print ' done.'
-
- print ' Creating disch_river.shp ...'
- cmds = [ 'gawk -f ' + os.path.join(greater_pre_proc_path,
- 'awk', 'drn_dsd2gen.awk') + " " + catchment['ID'] + '.drn ' + \
- catchment['ID'] + '.dsd > disch_river.gen.tmp',\
- 'gen2shp disch_river lines < disch_river.gen.tmp']
- for cmd in cmds:
- if cygwin_call(cmd) != 0:
- print ' Error executing ' + cmd
- return 2
- print ' done.'
-
- print ' Creating catchbound.shp ...'
- cmds = [ 'sed -e "s/D/E/g" < ' + catchment['ID'] + \
- '.cbp | gawk -f ' + os.path.join(greater_pre_proc_path,
- 'awk', 'cbp2gen.awk') + ' > catchbound.gen.tmp',\
- 'gen2shp catchbound polygons < catchbound.gen.tmp',\
- 'echo "#catchbound,Name" > catchbound.att.tmp',\
- 'echo "1,' + catchment['NAME'] + '" >> catchbound.att.tmp',\
- 'txt2dbf -d , -I11 -C100 catchbound.att.tmp catchbound.dbf',\
- ]
- for cmd in cmds:
- if cygwin_call(cmd) != 0:
- print ' Error executing ' + cmd
- return 2
- print ' done.'
-
- print ' Creating attribute files rivclass.dbf, wwtp.dbf, disch.dbf ' \
- 'and river.dbf ...'
- cmds = [ 'LC_ALL=C gawk -f ' + os.path.join(greater_pre_proc_path,
- 'awk', 'topology.awk') +" "+ catchment['ID'] + '.drn', \
- 'LC_COLLATE=C sort -n to_id.tmp > to_id2.tmp', \
- 'LC_COLLATE=C sort -n from_id.tmp > from_id2.tmp', \
- r'''join to_id2.tmp from_id2.tmp | cut -d " " -f 2,3 | sort -n | sed -e "s/ /,/" > f_t.tmp''',
- r'''gawk --source='BEGIN { FS="," } { printf("%s,%s\n", $2, $1) }' f_t.tmp | sort -n > t_f.tmp''',
- 'echo from-tos > f_ts.tmp',
- 'gawk -f ' + os.path.join(greater_pre_proc_path,
- 'awk', 'joinup.awk') + ' f_t.tmp >> f_ts.tmp',
- 'echo to-froms > t_fs.tmp',
- 'gawk -f ' + os.path.join(greater_pre_proc_path,
- 'awk', 'joinup.awk') + ' t_f.tmp >> t_fs.tmp',
- 'sed -e "s/$/,/" < ' + catchment['ID'] + '.dsd > ' + \
- catchment['ID'] + '.dsd.tmp',
- 'gawk -v LOG=' + catchment['ID'] + '.log -v OUTVERSION='+ \
- catchment['OUT-VERSION'] + ' -f ' + \
- os.path.join(greater_pre_proc_path,
- 'awk', 'generateAttTables.awk') + " " +\
- catchment['ID'] + '.rna ' + catchment['ID'] + '.dsd.tmp ' + \
- catchment['ID'] + '.lks f_ts.tmp t_fs.tmp'
- ]
-
- if catchment['OUT-VERSION'] == '1.0':
- cmds.append('txt2dbf -d , -I10 -I2 -I1 -I10 -I10 -I5 -R12.5 -R12.5 '
- '-R12.5 -R12.5 -R20.10 -R12.5 -R12.5 -C60 river.att '
- 'river.dbf 2>> ' + catchment['ID'] + '.log')
- elif catchment['OUT-VERSION'] == '2.0':
- # add down1,down2 to river.att
- cmds.append('"%s" %s > river2.att' % ( sys.executable,
- os.path.join(greater_pre_proc_path, 'add-downsegments.py'))
- )
- # two additional -I10 for down1, down2
- cmds.append('txt2dbf -d , -I10 -I2 -I1 -I10 -I10 -I10 -I10 -I5 -R12.5 '
- '-R12.5 -R12.5 -R12.5 -R20.10 -R12.5 -R12.5 -C60 '
- 'river2.att river.dbf 2>> ' + catchment['ID'] + '.log')
-
- cmds.append('txt2dbf -d , -I4 -I1 -I1 -R10.5 -R10.5 -R10.5 -R10.5 -R10.5 '
- '-R10.5 -R10.5 -R10.5 -R10.5 -R10.5 -R10.5 -R10.5 -R10.5 '
- '-R10.5 -R10.5 -R10.5 -R10.5 -R10.5 -v rivclass.att '
- 'rivclass.dbf 2>> ' + catchment['ID'] + '.log')
- cmds.append('txt2dbf -d , -I4 -I1 -I1 -I1 -R10.5 -R10.5 -R10.5 -R10.5 ' + \
- '-R10.5 -R10.5 -R10.5 -R10.5 -I10 -R10.5 -R10.5 -R10.5 ' + \
- '-R10.5 -R10.5 -R10.5 -R10.5 -I1 -R10.5 -R10.5 -R10.5 ' + \
- '-R10.5 -R10.5 -C40 wwtp.att wwtp.dbf 2>> ' + \
- catchment['ID'] + '.log')
- cmds.append('txt2dbf -d , -I10 -I10 -I10 -I10 -R20.10 -R20.10 -R20.10 ' + \
- '-R20.10 -R20.10 ' + \
- '-R10.5 -I10 -R10.5 -C40 disch.att ' + \
- 'disch.dbf 2>> ' + catchment['ID'] + '.log')
- for cmd in cmds:
- if cygwin_call(cmd) != 0:
- print ' Error executing ' + cmd
- return 2
- print ' done.'
-
- if catchment['OUT-VERSION'] == '2.0':
- print ' Checking for circles in the river network topology ...'
- river_att = open('river2.att', 'r').readlines()
- table = []
- for line in river_att:
- if len(line) == 0 or line[0] == '#': continue
- attributes = line.split(',')
- table.append([ int(attributes[0]), int(attributes[1]),
- int(attributes[3]), int(attributes[4]),
- int(attributes[5]), int(attributes[6]) ])
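- # Assumption based on the handling below: support.check_for_circle returns
- # a list of segment IDs when a cycle is found, None if the check itself
- # fails, and some other value for an acyclic network.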
- result = support.check_for_circle(table)
- if isinstance(result, list):
- print " Error: Circle detected:"
- print " ", result
- return 1
- elif result is None:
- print " Error: Circle detection routine failed"
- return 1
- else:
- print ' [ Check for circles only available for OUT-VERSION >= 2.0 ]'
-
- print ' Creating pics.shp ...'
- cmds = [ 'gawk -f ' + os.path.join(greater_pre_proc_path,
- 'awk', 'pic2gen.awk') + ' < ' + catchment['ID'] + '.pic > pic.gen.tmp',\
- 'gawk -f ' + os.path.join(greater_pre_proc_path,
- 'awk', 'pic2att.awk') +' < ' + catchment['ID'] + '.pic > pic.att.tmp', \
- 'gen2shp pics points < pic.gen.tmp',
- 'dbf2txt pics.dbf | sed -e "s/\t//g" > pics.txt.tmp',
- 'paste -d , pics.txt.tmp pic.att.tmp > pics.txt2.tmp',
- 'rm -f pics.dbf',
- 'txt2dbf -d , -I11 -I4 -C20 -C100 pics.txt2.tmp pics.dbf ' + \
- '2>> ' + catchment['ID'] + '.log']
- for cmd in cmds:
- if cygwin_call(cmd) != 0:
- print ' Error executing ' + cmd
- return 2
- print ' done.'
-
- if catchment['OUT-VERSION'] == '1.0':
- print ' Creating backgroundlst.dbf ...'
- cmds = [ 'txt2dbf -v -d , -C30 -C25 -C20 -C25 -C8 ' + \
- catchment['ID'] + '.bgd backgroundlst.dbf 2>> ' + \
- catchment['ID'] + '.log']
- for cmd in cmds:
- if cygwin_call(cmd) != 0:
- print ' Error executing ' + cmd
- return 2
- print ' done.'
-
- print ' Creating Thuban session file (' + catchment['ID'] + '.session) ...'
- cmds = [ 'cp ' + os.path.join(greater_pre_proc_path, "catchment.thuban") + ' .']
- for cmd in cmds:
- if cygwin_call(cmd) != 0:
- print ' Error executing ' + cmd
- return 2
- print ' done.'
-
- if sys.argv[1] == 'run':
- print ' Cleaning up ...'
- cmds = [ 'rm -f *.tmp' ]
- for cmd in cmds:
- if cygwin_call(cmd) != 0:
- print ' Error executing ' + cmd
- return 2
- print ' done.'
- else:
- print ' Not removing temporary files.'
-
- print 'Pre-processing successfully finished'
- return 0
-
-
-def do_clean():
- print 'Cleaning up ...'
-
- cmds = [ 'rm -f *.tmp ' + catchment['ID'] + '.log',
- 'rm -f catchbound.* discharges.* rivernet.* catchment.thuban',
- 'rm -f disch_river.* disch.att disch.dbf backgroundlst.dbf',
- 'rm -f rivclass.att rivclass.dbf',
- 'rm -f river.att river.dbf river2.att wwtp.att wwtp.dbf pics.*' ]
- for cmd in cmds:
- if cygwin_call(cmd) != 0:
- print ' Error executing ' + cmd
- return 2
- print 'done.'
-
-
-def usage():
- print usage_str
- print ' help print this text'
- print ' check test for completeness and correct syntax of source files'
- print ' run execute catchment pre-processing'
- print ' run-without-cleanup execute catchment pre-processing without removing'
- print ' temporary files'
- print ' clean remove all temporary and all result files'
-
-
-if __name__ == '__main__':
-
- print version_str
- print
-
- if catchment_desc():
- print '\nusage:', usage_str
- sys.exit(1)
-
- print
-
- if not os.environ.has_key('GREATER_PRE_PROC'):
- greater_pre_proc_path = os.path.abspath(os.path.dirname(sys.argv[0]))
- if not os.path.exists(os.path.join(
- greater_pre_proc_path,'awk','joinup.awk')):
- print 'Error: environment variable GREATER_PRE_PROC not set'
- sys.exit(2)
- else:
- greater_pre_proc_path = os.environ['GREATER_PRE_PROC']
-
-
- if len(sys.argv) == 2:
- if sys.argv[1] == 'check':
- do_check()
- elif sys.argv[1] == 'run':
- do_run()
- elif sys.argv[1] == 'run-without-cleanup':
- do_run()
- elif sys.argv[1] == 'clean':
- do_clean()
- elif sys.argv[1] == 'help':
- usage()
- else:
- print 'usage:', usage_str