[Mpuls-commits] r2218 - in wasko/branches/2.0: . mpulsweb/controllers

scm-commit at wald.intevation.org
Thu Mar 25 20:47:40 CET 2010


Author: bh
Date: 2010-03-25 20:47:39 +0100 (Thu, 25 Mar 2010)
New Revision: 2218

Modified:
   wasko/branches/2.0/ChangeLog
   wasko/branches/2.0/mpulsweb/controllers/evaluate.py
Log:
* mpulsweb/controllers/evaluate.py: Fix formatting.


Modified: wasko/branches/2.0/ChangeLog
===================================================================
--- wasko/branches/2.0/ChangeLog	2010-03-25 19:37:16 UTC (rev 2217)
+++ wasko/branches/2.0/ChangeLog	2010-03-25 19:47:39 UTC (rev 2218)
@@ -1,5 +1,9 @@
 2010-03-25  Bernhard Herzog  <bh at intevation.de>
 
+	* mpulsweb/controllers/evaluate.py: Fix formatting.
+
+2010-03-25  Bernhard Herzog  <bh at intevation.de>
+
 	* mpulsweb/controllers/error.py: Fix formatting.
 
 2010-03-25  Bernhard Herzog  <bh at intevation.de>

Modified: wasko/branches/2.0/mpulsweb/controllers/evaluate.py
===================================================================
--- wasko/branches/2.0/mpulsweb/controllers/evaluate.py	2010-03-25 19:37:16 UTC (rev 2217)
+++ wasko/branches/2.0/mpulsweb/controllers/evaluate.py	2010-03-25 19:47:39 UTC (rev 2218)
@@ -5,17 +5,22 @@
 import pylons
 
 from pylons import session, g, c
-from pylons.i18n import _ 
+from pylons.i18n import _
+
 from libmpuls.evaluation.config import EvaluationConfig
 from libmpuls.evaluation.evaluation import EvaluationSet
-from libmpuls.evaluation.export import EvaluationExportXML, EvaluationExportCSV, EvaluationExportHTML
+from libmpuls.evaluation.export import EvaluationExportXML, \
+     EvaluationExportCSV, EvaluationExportHTML
+
 from mpulsweb.lib.base import BaseController, render, request, response
 from mpulsweb.lib.db import db
-from mpulsweb.lib.helpers import format_date, get_phasesuccessors 
+from mpulsweb.lib.helpers import format_date, get_phasesuccessors
 from mpulsweb.lib.validators import EvaluationFormValidator
 
+
 log = logging.getLogger(__name__)
 
+
 def get_configfile(id):
     for enabled_eval in g.mpuls_config.get('evaluations', 'enabled'):
         if enabled_eval.get('id') == str(id):
@@ -32,10 +37,10 @@
     enddates will have a coalesce clause.'''
     sdate, edate = None, None
     log.debug('Phase: %s.' % id)
-    pair = g.mpuls_config.get('phases', 'pairs')[0].get(id) 
+    pair = g.mpuls_config.get('phases', 'pairs')[0].get(id)
     log.debug('Getting start and end date for phase %s.' % pair)
     for d in g.mpuls_config.get('phases', 'dates'):
-        for k,v in d.iteritems():
+        for k, v in d.iteritems():
             if k == pair[0]:
                 sdate = v
             elif k == pair[1]:
@@ -48,13 +53,12 @@
     options = {}
     options['id'] = id
     if soptions:
-
         options['start_date'] = soptions.get('start_date') or \
             g.mpuls_config.get('search', 'default-start-date')
         options['end_date'] = soptions.get('end_date') or \
             g.mpuls_config.get('search', 'default-end-date')
         phase = [int(p) for p in soptions.get('phase')]
-        
+
         if len(phase) == 1 and -1 in phase:
             options['start_date_field'] = None
             options['end_date_field'] = None
@@ -62,12 +66,13 @@
             options['start_date_field'] = get_phase_dates(min(phase))[0]
             options['end_date_field'] = get_phase_dates(max(phase))[1]
         options['phase'] = max(phase) #TODO: Why max?
-        options['sqlwhere'] = soptions.get('sqlwhere') or None 
-        options['sql'] =  soptions.get('sql') or None
+        options['sqlwhere'] = soptions.get('sqlwhere') or None
+        options['sql'] = soptions.get('sql') or None
     else:
         # set default evaluation options.
-        options['phase'] = g.mpuls_config.get('evaluations', 'default-phases') 
-        options['start_date'] = g.mpuls_config.get('search', 'default-start-date')
+        options['phase'] = g.mpuls_config.get('evaluations', 'default-phases')
+        options['start_date'] = g.mpuls_config.get('search',
+                                                   'default-start-date')
         options['end_date'] = g.mpuls_config.get('search', 'default-end-date')
     options['typelist'] = c.evalconfig.get_evaluations()
 
@@ -75,13 +80,14 @@
     #for p in g.mpuls_config.get('search', 'phases'):
     #    pt = [t for t in p.values() if int(t) >= 0]
     #    phases.extend(pt)
-    #options['phase'] =  phases
+    #options['phase'] = phases
 
     # convert dates to locale
-    options['start_date'] = format_date(options['start_date']) 
-    options['end_date']   = format_date(options['end_date']) 
+    options['start_date'] = format_date(options['start_date'])
+    options['end_date'] = format_date(options['end_date'])
     return options
 
+
 class EvaluateController(BaseController):
 
     def index(self):
@@ -93,35 +99,27 @@
             try:
                 config_file = get_configfile(id)
                 conn = db.getConnection()
-                c.evalconfig   = EvaluationConfig(
-                                config_file,
-                                conn,
-                                None,
-                                None,
-                                None,
-                                None,
-                                None,
-                                None,
-                                None)
+                c.evalconfig = EvaluationConfig(config_file, conn, None, None,
+                                                None, None, None, None, None)
             except:
                 print 'Error: Evaluation failed %s' % traceback.print_exc()
                 log.error(_('Error: Evaluation failed'))
         finally:
             db.recycleConnection(conn, cur)
 
-        c.evaloptions = get_search_options(session.get('evaluation.options'), id)
+        c.evaloptions = get_search_options(session.get('evaluation.options'),
+                                           id)
 
-        # If user selects adele-evaluation render page with disabled configuration elements. 
-        # Change default params
+        # If user selects adele-evaluation render page with disabled
+        # configuration elements.  Change default params
         if id == '0':
-            c.evaloptions['phase'] = g.mpuls_config.get('evaluations', 'adele-phases') 
+            c.evaloptions['phase'] = g.mpuls_config.get('evaluations',
+                                                        'adele-phases')
             form = render('/evaluation/evaluate_adele.mako')
         else:
             form = render('/evaluation/evaluate.mako')
-        return formencode.htmlfill.render(form, \
-                defaults=c.evaloptions, \
-                errors={}, \
-                auto_insert_errors=False)
+        return formencode.htmlfill.render(form, defaults=c.evaloptions,
+                                          errors={}, auto_insert_errors=False)
 
     def _get_evalparams(self, form_result):
         params = {}
@@ -129,8 +127,8 @@
         params['start_date'] = str(form_result['start_date'])
         params['end_date'] = str(form_result['end_date'])
         params['typelist'] = form_result['typelist']
-        params['phase']    = form_result['phase']
-        params['ending']    = form_result['type_ending']
+        params['phase'] = form_result['phase']
+        params['ending'] = form_result['type_ending']
 
         # Dates
         # Build timeframes bases on selected phases.
@@ -139,14 +137,20 @@
         tdates = []
         phase = params.get('phase')
         dates.append('( ')
-        if params.get('start_date') != 'None' and params.get('end_date') != 'None':
+        if (params.get('start_date') != 'None'
+            and params.get('end_date') != 'None'):
             if phase:
                 for p in phase:
-                    if int(p) >= 0: # phase is unknown -> has no start and endphase
+                    if int(p) >= 0:
+                        # phase is unknown -> has no start and endphase
                         sdf, edf = get_phase_dates(p)
                         suc = get_phasesuccessors(p)
                         all_phases.extend(p)
-                        tdates.append("('%s'::date <= %s AND '%s'::date >= %s AND phase IN (%s) )" % (params.get('start_date'), edf, params.get('end_date'), sdf, ",".join(["%s" % s for s in suc])))
+                        tdates.append("('%s'::date <= %s AND '%s'::date >= %s"
+                                      " AND phase IN (%s) )"
+                                      % (params.get('start_date'), edf,
+                                         params.get('end_date'), sdf,
+                                         ",".join(["%s" % s for s in suc])))
                     else:
                         tdates.append('phase IN (-1)')
                 dates.append(" OR ".join(tdates))
@@ -156,7 +160,8 @@
             dates.append("TRUE")
         dates.append(' )')
 
-        params['sql'] = "SELECT %%(fields)s from master_tbl_eval_total_view m WHERE %s " % ("".join(dates))
+        params['sql'] = ("SELECT %%(fields)s from master_tbl_eval_total_view m"
+                         " WHERE %s " % ("".join(dates)))
 
         #Datefields
         if all_phases:
@@ -164,93 +169,84 @@
             params['end_date_field'] = get_phase_dates(max(all_phases))[1]
         else:
             params['start_date_field'] = None
-            params['end_date_field'] =  None
+            params['end_date_field'] = None
         return params
 
 
     def evaluateAction(self):
-            '''Return an HTML file containing the result of one or more evaluations.'''
-            params = formencode.variabledecode.variable_decode(request.params)
-            #params = request.params
-            validator    = EvaluationFormValidator() 
-            form_result  = {}
-            form_errors  = {}
+        '''Return an HTML file containing the result of one or more evaluations.
+        '''
+        params = formencode.variabledecode.variable_decode(request.params)
+        #params = request.params
+        validator = EvaluationFormValidator()
+        form_result = {}
+        form_errors = {}
 
-            # Check values
-            conn, cur = None, None
-            conn = db.getConnection()
-            id = int(params['id'])
-            c.evalconfig   = EvaluationConfig(
-                            get_configfile(id),
-                            conn,
-                            None,
-                            None,
-                            None,
-                            None,
-                            None,
-                            None,
-                            None)
-            try:
-                form_result  = validator.to_python(params)
-            except formencode.Invalid, error:
-                form_result = error.value
-                form_errors = error.error_dict or {}
-                c.evaloptions = get_search_options(session.get('evaluation.options'), id)
-                form = render('/evaluation/evaluate.mako')
-                return formencode.htmlfill.render(form, \
-                        defaults=form_result, \
-                        errors=form_errors, \
-                        auto_insert_errors=False)
+        # Check values
+        conn, cur = None, None
+        conn = db.getConnection()
+        id = int(params['id'])
+        c.evalconfig = EvaluationConfig(get_configfile(id), conn, None, None,
+                                        None, None, None, None, None)
+        try:
+            form_result = validator.to_python(params)
+        except formencode.Invalid, error:
+            form_result = error.value
+            form_errors = error.error_dict or {}
+            c.evaloptions = get_search_options(session.get('evaluation.options'),
+                                               id)
+            form = render('/evaluation/evaluate.mako')
+            return formencode.htmlfill.render(form, defaults=form_result,
+                                              errors=form_errors,
+                                              auto_insert_errors=False)
 
-            # Build evaluation
-            eval_params = self._get_evalparams(form_result)
+        # Build evaluation
+        eval_params = self._get_evalparams(form_result)
+        try:
             try:
-                try:
-                    evalconfig   = EvaluationConfig(
-                                    get_configfile(form_result['id']),
-                                    conn,
-                                    eval_params['start_date'], 
-                                    eval_params['end_date'],
-                                    eval_params['start_date_field'], 
-                                    eval_params['end_date_field'], 
-                                    None,
-                                    eval_params['sql'], 
-                                    eval_params['typelist'])
-                    evalset      = EvaluationSet(evalconfig, True)
-                    evalset.evaluate()
-                    c.result = evalset.export(EvaluationExportHTML(show_percent=form_result['show_percent']))
-                    session['evaluation.params'] = eval_params
-                    session.save()
-                    form = render('/evaluation/result.mako')
-                    return formencode.htmlfill.render(form, \
-                        defaults=form_result, \
-                        errors=form_errors, \
-                        auto_insert_errors=False)
-                except:
-                    log.error(_('Error: Evaluation failed'))
-                    log.error(traceback.print_exc())
-            finally:
-                db.recycleConnection(conn, cur)
+                evalconfig = EvaluationConfig(get_configfile(form_result['id']),
+                                              conn, eval_params['start_date'],
+                                              eval_params['end_date'],
+                                              eval_params['start_date_field'],
+                                              eval_params['end_date_field'],
+                                              None,
+                                              eval_params['sql'],
+                                              eval_params['typelist'])
+                evalset = EvaluationSet(evalconfig, True)
+                evalset.evaluate()
+                c.result = evalset.export(EvaluationExportHTML(show_percent=form_result['show_percent']))
+                session['evaluation.params'] = eval_params
+                session.save()
+                form = render('/evaluation/result.mako')
+                return formencode.htmlfill.render(form, defaults=form_result,
+                                                  errors=form_errors,
+                                                  auto_insert_errors=False)
+            except:
+                log.error(_('Error: Evaluation failed'))
+                log.error(traceback.print_exc())
+        finally:
+            db.recycleConnection(conn, cur)
 
     def exportXML(self):
-        '''Return an XML file containing the result of one or more evaluations.'''
+        '''Return an XML file containing the result of one or more evaluations.
+        '''
         response.headers['Content-Type'] = 'application/xml; charset=utf8'
-        response.headers['Content-Disposition'] = 'attachment; filename=evaluation-export.xml'
+        response.headers['Content-Disposition'] = \
+                                  'attachment; filename=evaluation-export.xml'
         form_result = session.get('evaluation.params')
         try:
             try:
                 conn, cur = db.getConnection(), None
-                evalconfig   = EvaluationConfig(
-                                get_configfile(form_result['id']),
-                                conn,
-                                form_result['start_date'], 
-                                form_result['end_date'],
-                                form_result['start_date_field'], 
-                                form_result['end_date_field'], 
-                                None,
-                                form_result['sql'], 
-                                form_result['typelist'])
-                evalset      = EvaluationSet(evalconfig)
+                evalconfig = EvaluationConfig(get_configfile(form_result['id']),
+                                              conn,
+                                              form_result['start_date'],
+                                              form_result['end_date'],
+                                              form_result['start_date_field'],
+                                              form_result['end_date_field'],
+                                              None,
+                                              form_result['sql'],
+                                              form_result['typelist'])
+                evalset = EvaluationSet(evalconfig)
                 evalset.evaluate()
                 return evalset.export(EvaluationExportXML())
             except:
@@ -260,24 +256,25 @@
 
 
     def exportCSV(self):
-        '''Return an CSV file containing the result of one or more evaluations.'''
+        '''Return an CSV file containing the result of one or more evaluations.
+        '''
         response.headers['Content-Type'] = 'application/csv; charset=utf8'
-        response.headers['Content-Disposition'] = 'attachment; filename=evaluation-export.csv'
+        response.headers['Content-Disposition'] = \
+                                  'attachment; filename=evaluation-export.csv'
         form_result = session.get('evaluation.params')
         try:
             try:
                 conn, cur = db.getConnection(), None
-                evalconfig   = EvaluationConfig(
-                                get_configfile(form_result['id']),
-                                conn,
-                                form_result['start_date'], 
-                                form_result['end_date'],
-                                form_result['start_date_field'], 
-                                form_result['end_date_field'], 
-                                None,
-                                form_result['sql'], 
-                                form_result['typelist'])
-                evalset      = EvaluationSet(evalconfig)
+                evalconfig = EvaluationConfig(get_configfile(form_result['id']),
+                                              conn,
+                                              form_result['start_date'],
+                                              form_result['end_date'],
+                                              form_result['start_date_field'],
+                                              form_result['end_date_field'],
+                                              None,
+                                              form_result['sql'],
+                                              form_result['typelist'])
+                evalset = EvaluationSet(evalconfig)
                 evalset.evaluate()
                 return evalset.export(EvaluationExportCSV())
             except:

