2017-10-17 51 views
0

我尝试使用OpenMDAO框架优化Rosebrock函数,该函数与一个简单的隐式方程(参见apply_nonlinear部分)相关,如下所示。如何使用apply_nonlinear在OpenMDAO框架中创建优化?

from openmdao.api import Component, Problem, Group, ScipyOptimizer, IndepVarComp, Newton, ScipyGMRES 

class optimization(Component): 
    def __init__(self): 
     super(optimization, self).__init__() 

     # Parameter 
     self.add_param('x', 0.5) 
     #Design Variable 
     self.add_param('d1', 1.0) 
     self.add_param('d2', 1.0) 
     #Iteration Variable 
     self.add_state('y', 1.0) 
     self.add_state('z', 1.0) 
     self.add_output('f', shape=1) 

    def solve_nonlinear(self, params, unknowns, resids): 

     d1 = params['d1'] 
     d2 = params['d2'] 
     y = unknowns['y'] 
     z = unknowns['z'] 

     a = y/5.833333; 
     b = z/2.666667*100; 

     # print('a=%f'% a) 
     # print('b=%f'% b) 
     # Exact value for a and b is 1 and 100 

     # Rosenbrock function for Optimization 
     unknowns['f'] = (a-d1)**2 + b*(d2-d1**2)**2 

    def apply_nonlinear(self, params, unknowns, resids): 

     x = params['x'] 
     y = unknowns['y'] 
     z = unknowns['z'] 

     resids['y'] = x*z + z - 4 
     resids['z'] = x + 2*z - y 

if __name__ == "__main__": 

    top = Problem() 

    root = top.root = Group() 

    root.add('p1', IndepVarComp('x', 0.9)) 
    root.add('p2', IndepVarComp('y', 0.9)) 
    root.add('p', optimization()) 


    root.connect('p1.x', 'p.d1') 
    root.connect('p2.y', 'p.d2') 


    top.driver = ScipyOptimizer() 
    top.driver.options['optimizer'] = 'SLSQP' 

    top.driver.add_desvar('p1.x', lower=-50, upper=50) 
    top.driver.add_desvar('p2.y', lower=-50, upper=50) 
    top.driver.add_objective('p.f') 

    # Tell these components to finite difference 
    root.p.deriv_options['type'] = 'fd' 
    root.p.deriv_options['form'] = 'central' 
    root.p.deriv_options['step_size'] = 1.0e-4 

    top.root.nl_solver = Newton() 
    top.root.nl_solver.options['maxiter']=int(200) 
    top.root.nl_solver.options['atol']=float(1e-12) 
    top.root.nl_solver.options['rtol']=float(1e-10) 
    top.root.nl_solver.options['iprint']=int(2) 

    top.root.ln_solver = ScipyGMRES() 
    top.root.ln_solver.options['iprint']=int(-1) 
    top.setup() 
    top['p1.x'] = 0.9 
    top['p2.y'] = 0.9 
    top.run() 

    print('\n') 
    print('Minimum of %f found at (%f, %f)' % (top['p.f'], top['p.d1'], top['p.d2'])) 
    print('\nImplicit equation solution is') 
    print('x=%f, y=%f, z=%f' % (top['p.x'], top['p.y'], top['p.z'])) 

这使溶液如下所示,

Optimization terminated successfully. (Exit mode 0) 
      Current function value: [ 0.81999991] 
      Iterations: 1 
      Function evaluations: 1 
      Gradient evaluations: 1 
Optimization Complete 
----------------------------------- 
Minimum of 0.820000 found at (0.900000, 0.900000) 

Implicit equation solution is 
x=0.500000, y=5.833333, z=2.666667 

隐式方程的解是正确的。但是,优化的解决方案似乎并不完整。预期的解决方案就像设计变量(d1,d2)几乎等于1,最小函数值几乎为零。优化最终只需要一次迭代。为什么这样?

回答

0

在OpenMDAO 1.x中,混合的隐式/显式组件可能有点棘手。问题是您没有为f输出定义残差。即使你将它添加为显式变量,既然你定义了自己的apply_nonlinear,那么无论如何你都需要给它一个残差。在没有剩余方程的情况下,有限差分不能得到f变量的导数。 它只是将它定义为一个状态而已。

from openmdao.api import Component, Problem, Group, ScipyOptimizer, IndepVarComp, Newton, ScipyGMRES 

class optimization(Component): 
    def __init__(self): 
     super(optimization, self).__init__() 

     # Parameter 
     self.add_param('x', 0.5) 
     #Design Variable 
     self.add_param('d1', 1.0) 
     self.add_param('d2', 1.0) 
     #Iteration Variable 
     self.add_state('y', 1.0) 
     self.add_state('z', 1.0) 
     self.add_state('f', shape=1) 

    def solve_nonlinear(self, params, unknowns, resids): 

     d1 = params['d1'] 
     d2 = params['d2'] 
     y = unknowns['y'] 
     z = unknowns['z'] 

     a = y/5.833333; 
     b = z/2.666667*100; 

     # print('a=%f'% a) 
     # print('b=%f'% b) 
     # Exact value for a and b is 1 and 100 

     # Rosenbrock function for Optimization 
     unknowns['f'] = (a-d1)**2 + b*(d2-d1**2)**2 

    def apply_nonlinear(self, params, unknowns, resids): 

     d1 = params['d1'] 
     d2 = params['d2'] 
     x = params['x'] 
     y = unknowns['y'] 
     z = unknowns['z'] 

     a = y/5.833333; 
     b = z/2.666667*100; 

     resids['y'] = x*z + z - 4 
     resids['z'] = x + 2*z - y 
     resids['f'] = (a-d1)**2 + b*(d2-d1**2)**2 - unknowns['f'] 

if __name__ == "__main__": 

    top = Problem() 

    root = top.root = Group() 

    root.add('p1', IndepVarComp('x', 0.9)) 
    root.add('p2', IndepVarComp('y', 0.9)) 
    root.add('p', optimization()) 


    root.connect('p1.x', 'p.d1') 
    root.connect('p2.y', 'p.d2') 


    top.driver = ScipyOptimizer() 
    top.driver.options['optimizer'] = 'SLSQP' 

    top.driver.add_desvar('p1.x', lower=-50, upper=50) 
    top.driver.add_desvar('p2.y', lower=-50, upper=50) 
    top.driver.add_objective('p.f') 

    # Tell these components to finite difference 
    root.p.deriv_options['type'] = 'fd' 
    root.p.deriv_options['form'] = 'central' 
    root.p.deriv_options['step_size'] = 1.0e-4 

    top.root.nl_solver = Newton() 
    top.root.nl_solver.options['maxiter']=int(200) 
    top.root.nl_solver.options['atol']=float(1e-12) 
    top.root.nl_solver.options['rtol']=float(1e-10) 
    top.root.nl_solver.options['iprint']=int(2) 

    top.root.ln_solver = ScipyGMRES() 
    top.root.ln_solver.options['iprint']=int(-1) 
    top.setup() 
    top['p1.x'] = 0.9 
    top['p2.y'] = 0.9 
    top.run() 

    print('\n') 
    print('Minimum of %f found at (%f, %f)' % (top['p.f'], top['p.d1'], top['p.d2'])) 
    print('\nImplicit equation solution is') 
    print('x=%f, y=%f, z=%f' % (top['p.x'], top['p.y'], top['p.z'])) 
+0

谢谢@Justin Gray。这个对我有用。 –