
Appendix A

(mandatory)

Code listing for one-dimensional optimization

from math import sin, pi

# Read the search interval [a, b] and the number of the test function
print("Enter the lower bound of the interval")
a = int(input())
a1 = a
print("Enter the upper bound of the interval")
b = int(input())
b1 = b
print("Choose a function from 1 to 3")
z = int(input())

if z == 1:
    def function1(x):
        return 4 * (x - 7) * (x - 3) * (x - 1)
    f = function1
elif z == 2:
    def function2(x):
        return x / 4 + 7 * sin(3 * pi * x + 1)
    f = function2
else:
    def function3(x):
        return 4 - 7 * 2.71 ** (-(x - 3) ** 2)  # 2.71 approximates e
    f = function3

# Golden-section search
print(' i \t\tpogr \t\t min')
pogr = 100
i = 1
R = (5 ** 0.5 - 1) / 2
D = R * (b - a)
x1 = a + D
x2 = b - D
f1 = f(x1)
f2 = f(x2)
while pogr > 0.001:
    if f1 < f2:
        a = x2
        x2 = x1
        f2 = f1
        x1 = a + R * (b - a)
        f1 = f(x1)
    else:
        b = x1
        x1 = x2
        f1 = f2
        x2 = b - R * (b - a)
        f2 = f(x2)
    if f1 < f2:
        xopt = x1
    else:
        xopt = x2
    pogr = (1 - R) * abs((b - a) / xopt) * 100
    print(f"{i:>2} {pogr:>8.5f} {xopt:>10.7f}")
    i += 1

# Dichotomy method (two probe points around the interval midpoint)
a = a1
b = b1
seredina_otrezka = 0.001 / 2
iteracia = 0
x_min = 10000000
while abs(b - a) > 0.001:
    x1 = (a + b - seredina_otrezka) / 2
    x2 = (a + b + seredina_otrezka) / 2
    iteracia += 1
    if f(x1) < f(x2):
        b = x2
    else:
        a = x1
        x_min = a
    print(f"{iteracia:>2} {x_min:>10.7f}")

# Fibonacci method
n = 1000
k = 1
y1 = 10000
y2 = 0
count = 0
a = a1
b = b1
data = [1, 1]
for i in range(n):
    data.append(data[i] + data[i + 1])
while abs(a - b) > 0.001:
    count += 1
    y1 = a + (data[n - k - 1] / data[n - k + 1]) * (b - a)
    y2 = a + (data[n - k] / data[n - k + 1]) * (b - a)
    if f(y1) > f(y2):
        a = y1
        k += 1
        print(f"{count:>2} {y1:>10.7f}")
    else:
        b = y2
        k += 1
        print(f"{count:>2} {y1:>10.7f}")

42

Appendix B

(mandatory)

Code listing for multidimensional optimization


import numpy
import numpy as np   # the code below uses both the full name and the np alias
import math          # needed for math.sqrt in norm()
from pylab import *
from sympy import *
from scipy.optimize import minimize_scalar

# Choice of the two-variable test function
print("Choose a function from 1 to 2")
k = int(input())
if k == 1:
    def f(point):
        x, y = point
        return 4 * (x - 7) ** 2 + 3 * (y - 1) ** 2
else:
    def f(point):
        x, y = point
        return (x - 4) ** 2 + (y - 7) ** 2 + 30 * (x + y - 6) ** 2 + 7.1


# 2D vector with the arithmetic needed by the simplex method
class Vector(object):
    def __init__(self, x, y):
        self.x = x
        self.y = y

    def __repr__(self):
        return "({0}, {1})".format(self.x, self.y)

    def __add__(self, other):
        x = self.x + other.x
        y = self.y + other.y
        return Vector(x, y)

    def __sub__(self, other):
        x = self.x - other.x
        y = self.y - other.y
        return Vector(x, y)

    def __rmul__(self, other):
        x = self.x * other
        y = self.y * other
        return Vector(x, y)

    def __truediv__(self, other):
        x = self.x / other
        y = self.y / other
        return Vector(x, y)

    def c(self):
        return self.x, self.y


# Nelder-Mead simplex method
def nelder_mead(alpha=1, beta=-0.5, gamma=2, maxiter=35):
    v1 = Vector(1, -1)
    v2 = Vector(1.0, 0)
    v3 = Vector(-2, 1)
    count = 0
    for i in range(maxiter):
        adict = {v1: f(v1.c()), v2: f(v2.c()), v3: f(v3.c())}
        points = sorted(adict.items(), key=lambda x: x[1])
        count += 1
        b = points[0][0]   # best vertex
        g = points[1][0]   # good vertex
        w = points[2][0]   # worst vertex
        mid = (g + b) / 2
        xr = mid + alpha * (mid - w)          # reflection
        if f(xr.c()) < f(g.c()):
            w = xr
        else:
            if f(xr.c()) < f(w.c()):
                w = xr
            c = (w + mid) / 2                 # shrink towards the midpoint
            if f(c.c()) < f(w.c()):
                w = c
        if f(xr.c()) < f(b.c()):
            xe = mid + gamma * (xr - mid)     # expansion
            if f(xe.c()) < f(xr.c()):
                w = xe
            else:
                w = xr
        if f(xr.c()) > f(g.c()):
            xc = mid + beta * (w - mid)       # contraction
            if f(xc.c()) < f(w.c()):
                w = xc
        v1 = w
        v2 = g
        v3 = b
        print(count, b)
    return b


xk = nelder_mead()

# Steepest descent: the step length is chosen by a one-dimensional
# line search (minimize_scalar) along the antigradient
if k == 1:
    z_str = '4*(a[0]-7)**2+3*(a[1]-1)**2'
else:
    z_str = '(a[0]-4)**2+(a[1]-7)**2+30*(a[0]+a[1]-6)**2+7.1'
count = 0
exec('z = lambda a: ' + z_str)
z_str = z_str.replace('a[0]', 'x')
z_str = z_str.replace('a[1]', 'y')


def z_grad(a):
    # Symbolic gradient of z evaluated at the point a = (x, y)
    x = Symbol('x')
    y = Symbol('y')
    z_d = eval(z_str)
    yprime = z_d.diff(y)
    dif_y = str(yprime).replace('y', str(a[1]))
    dif_y = dif_y.replace('x', str(a[0]))
    yprime = z_d.diff(x)
    dif_x = str(yprime).replace('y', str(a[1]))
    dif_x = dif_x.replace('x', str(a[0]))
    return numpy.array([eval(dif_x), eval(dif_y)])  # components in (x, y) order


def mininize(a):
    l_min = minimize_scalar(lambda l: z(a - l * z_grad(a))).x
    return a - l_min * z_grad(a)


def norm(a):
    return math.sqrt(a[0] ** 2 + a[1] ** 2)


def grad_step(dot):
    return mininize(dot)


dot = [numpy.array([-100.0, 100.0])]
dot.append(grad_step(dot[0]))
while norm(dot[-2] - dot[-1]) > 0.001:
    dot.append(grad_step(dot[-1]))
    count += 1
    print(count, dot[len(dot) - 1])

# Radial scan from the centre followed by coordinate-wise descent
# on finite-difference derivatives
if k == 1:
    def differentiable_function(x, y):
        return 4 * (x - 7) ** 2 + 3 * (y - 1) ** 2
else:
    def differentiable_function(x, y):
        return (x - 4) ** 2 + (y - 7) ** 2 + 30 * (x + y - 6) ** 2 + 7.1


radius = 8
global_epsilon = 0.001
centre = (global_epsilon, global_epsilon)
arr_shape = 100
step = radius / arr_shape


def rotate_vector(length, a):
    return length * np.cos(a), length * np.sin(a)


def derivative_x(epsilon, arg):
    return (differentiable_function(global_epsilon + epsilon, arg)
            - differentiable_function(epsilon, arg)) / global_epsilon


def derivative_y(epsilon, arg):
    return (differentiable_function(arg, epsilon + global_epsilon)
            - differentiable_function(arg, epsilon)) / global_epsilon


def calculate_flip_points():
    # Points along each ray where the sum of partial derivatives changes sign
    flip_points = np.array([0, 0])
    points = np.zeros((360, arr_shape), dtype=bool)
    cx, cy = centre
    for i in range(arr_shape):
        for alpha in range(360):
            x, y = rotate_vector(step, alpha)
            x = x * i + cx
            y = y * i + cy
            points[alpha][i] = derivative_x(x, y) + derivative_y(y, x) > 0
            if not points[alpha][i - 1] and points[alpha][i]:
                flip_points = np.vstack((flip_points, np.array([alpha, i - 1])))
    return flip_points


def pick_estimates(positions):
    # Choose the best of the sign-change points as the initial estimate
    vx, vy = rotate_vector(step, positions[1][0])
    cx, cy = centre
    best_x, best_y = cx + vx * positions[1][1], cy + vy * positions[1][1]
    count = 0
    for index in range(2, len(positions)):
        vx, vy = rotate_vector(step, positions[index][0])
        x, y = cx + vx * positions[index][1], cy + vy * positions[index][1]
        if differentiable_function(best_x, best_y) > differentiable_function(x, y):
            best_x = x
            best_y = y
        count += 1
        print(count, best_x, best_y)
    for index in range(360):
        vx, vy = rotate_vector(step, index)
        x, y = cx + vx * (arr_shape - 1), cy + vy * (arr_shape - 1)
        if differentiable_function(best_x, best_y) > differentiable_function(x, y):
            best_x = x
            best_y = y
    return best_x, best_y


def gradient_descent(best_estimates, is_x):
    # Descend along one coordinate until its derivative is small enough
    derivative = derivative_x if is_x else derivative_y
    best_x, best_y = best_estimates
    descent_step = step
    value = derivative(best_y, best_x)
    while abs(value) > global_epsilon:
        descent_step *= 0.95
        best_y = best_y - descent_step \
            if derivative(best_y, best_x) > 0 else best_y + descent_step
        value = derivative(best_y, best_x)
    return best_y, best_x


def find_minimum():
    return gradient_descent(gradient_descent(pick_estimates(calculate_flip_points()), False), True)


def get_grid(grid_step):
    samples = np.arange(-radius, radius, grid_step)
    x, y = np.meshgrid(samples, samples)
    return x, y, differentiable_function(x, y)


find_minimum()
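As a sanity check, the minimum of the selected two-variable function can also be computed with SciPy's built-in Nelder-Mead routine. The lines below are a minimal verification sketch, not part of the original listing; they assume that f from the code above is still in scope, and the starting point [0.0, 0.0] is chosen arbitrarily here.

from scipy.optimize import minimize

# Reference solution with SciPy's Nelder-Mead (verification sketch)
res = minimize(f, x0=[0.0, 0.0], method='Nelder-Mead')
print("scipy Nelder-Mead:", res.x, res.fun)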
