first commit

2025-12-18 16:00:22 +08:00
commit 785f306726
69 changed files with 33171 additions and 0 deletions

BIN
dynamic/knapsack Executable file

Binary file not shown.

159
dynamic/knapsack.cpp Normal file

@ -0,0 +1,159 @@
#include <iostream>
#include <vector>
#include <numeric>
#include <algorithm>
#include <chrono>
#include <random>
struct Item {
int weight; // 物品重量
int value; // 物品价值
int count; // 物品数量(用于多重背包问题)
};
// 全局操作计数器,用于性能分析
long long ops_count = 0;
// =================================================================
// 完全背包问题算法实现
// =================================================================
// 算法1朴素动态规划三层循环
int complete_knapsack_v1(const std::vector<Item>& items, int capacity) {
ops_count = 0; // 重置操作计数器
int n = items.size();
if (n == 0) return 0;
std::vector<std::vector<int>> dp(n + 1, std::vector<int>(capacity + 1, 0));
// 遍历每个物品
for (int i = 1; i <= n; ++i) {
int w = items[i - 1].weight;
int v = items[i - 1].value;
// 遍历每个容量
for (int j = 0; j <= capacity; ++j) {
dp[i][j] = dp[i-1][j]; // 不选择第i个物品
ops_count++;
// 尝试选择k个第i个物品
for (int k = 1; k * w <= j; ++k) {
ops_count++;
if (dp[i-1][j - k * w] + k * v > dp[i][j]) {
dp[i][j] = dp[i-1][j - k * w] + k * v;
}
}
}
}
return dp[n][capacity];
}
// 算法2优化的二维动态规划两层循环
int complete_knapsack_v2(const std::vector<Item>& items, int capacity) {
ops_count = 0; // 重置操作计数器
int n = items.size();
if (n == 0) return 0;
std::vector<std::vector<int>> dp(n + 1, std::vector<int>(capacity + 1, 0));
// 遍历每个物品
for (int i = 1; i <= n; ++i) {
int w = items[i - 1].weight;
int v = items[i - 1].value;
// 遍历每个容量
for (int j = 0; j <= capacity; ++j) {
ops_count++;
if (j < w) {
dp[i][j] = dp[i - 1][j]; // 容量不足,不能选择第 i 个物品
} else {
// 取“不选第 i 个物品”与“再放入一件第 i 个物品”二者的较大值
dp[i][j] = std::max(dp[i - 1][j], dp[i][j - w] + v);
}
}
}
return dp[n][capacity];
}
// 算法3空间优化的动态规划一维数组
int complete_knapsack_v3(const std::vector<Item>& items, int capacity) {
ops_count = 0; // 重置操作计数器
std::vector<int> dp(capacity + 1, 0); // 一维DP数组
// 遍历每个物品
for (const auto& item : items) {
// 从物品重量开始遍历容量(完全背包)
for (int j = item.weight; j <= capacity; ++j) {
ops_count++;
dp[j] = std::max(dp[j], dp[j - item.weight] + item.value);
}
}
return dp[capacity];
}
// =================================================================
// 基准测试运行器
// =================================================================
void run_experiments(int min_n, int max_n, int step_n, int trials, int capacity) {
// 初始化随机数生成器
std::random_device rd;
std::mt19937 gen(rd());
std::uniform_int_distribution<> weight_dist(1, 40); // 重量范围1-40
std::uniform_int_distribution<> value_dist(1, 100); // 价值范围1-100
// 输出CSV格式的表头
std::cout << "n,algo,time_us,ops\n";
// 对不同物品数量进行测试
for (int n = min_n; n <= max_n; n += step_n) {
if (n==0) continue;
long long total_time_v1 = 0, total_ops_v1 = 0;
long long total_time_v2 = 0, total_ops_v2 = 0;
long long total_time_v3 = 0, total_ops_v3 = 0;
// 进行多次试验取平均值
for (int t = 0; t < trials; ++t) {
// 生成随机物品
std::vector<Item> items(n);
for (int i = 0; i < n; ++i) {
items[i] = {weight_dist(gen), value_dist(gen), 0};
}
// 测试算法1的运行时间
auto start_v1 = std::chrono::high_resolution_clock::now();
complete_knapsack_v1(items, capacity);
auto end_v1 = std::chrono::high_resolution_clock::now();
total_time_v1 += std::chrono::duration_cast<std::chrono::microseconds>(end_v1 - start_v1).count();
total_ops_v1 += ops_count;
// 测试算法2的运行时间
auto start_v2 = std::chrono::high_resolution_clock::now();
complete_knapsack_v2(items, capacity);
auto end_v2 = std::chrono::high_resolution_clock::now();
total_time_v2 += std::chrono::duration_cast<std::chrono::microseconds>(end_v2 - start_v2).count();
total_ops_v2 += ops_count;
// 测试算法3的运行时间
auto start_v3 = std::chrono::high_resolution_clock::now();
complete_knapsack_v3(items, capacity);
auto end_v3 = std::chrono::high_resolution_clock::now();
total_time_v3 += std::chrono::duration_cast<std::chrono::microseconds>(end_v3 - start_v3).count();
total_ops_v3 += ops_count;
}
// 输出每个算法的平均结果
std::cout << n << ",v1," << total_time_v1 / trials << "," << total_ops_v1 / trials << "\n";
std::cout << n << ",v2," << total_time_v2 / trials << "," << total_ops_v2 / trials << "\n";
std::cout << n << ",v3," << total_time_v3 / trials << "," << total_ops_v3 / trials << "\n";
}
}
int main(int argc, char* argv[]) {
// 实验参数:最小物品数、最大物品数、步长、每个物品数的试验次数、背包容量
// 为保持算法1的合理运行时间使用较小的容量和物品数量
int min_n = 5; // 最小物品数
int max_n = 25; // 最大物品数
int step_n = 5; // 物品数步长
int trials = 10; // 每个物品数的试验次数
int capacity = 100; // 背包容量
run_experiments(min_n, max_n, step_n, trials, capacity);
return 0;
}

166
dynamic/labtemplate.typ Normal file

@ -0,0 +1,166 @@
#let times = "Times LT Pro"
#let times = "Times New Roman"
#let song = (times, "Noto Serif CJK SC")
#let hei = (times, "Noto Sans CJK SC")
#let kai = (times, "Noto Serif CJK SC")
#let xbsong = (times, "Noto Serif CJK SC")
#let fsong = (times, "Noto Serif CJK SC")
#let code = (times, "JetBrains Mono")
#let nudtlabpaper(title: "",
author1: "",
id1: "",
advisor: "",
jobtitle: "",
lab: "",
date: "",
header_str: "",
minimal_cover: false,
body) = {
// Set the document's basic properties.
set document(author: author1, title: title)
set page(
margin: (left: 30mm, right: 30mm, top: 30mm, bottom: 30mm),
)
// If minimal_cover is requested, render an otherwise-empty first page
// that only displays the "实验时间" near the bottom center.
if minimal_cover {
v(158pt)
align(center)[
#block(text(weight: 700, size: 30pt, font: hei, tracking: 1pt, "2025秋 -《算法设计与分析》"))
]
align(center)[
#block(text(weight: 700, size: 24pt, font: song, tracking: 1pt, "动态规划算法分析实验报告"))
]
// Keep standard margins but push content down toward the bottom.
v(220pt)
align(center)[
#block(text(size: 14pt, font: song, tracking: 9pt, "实验时间"))
]
v(2pt)
align(center)[
#block(text(size: 16pt, font: song, date))
]
pagebreak()
} else {
// Title row.
v(158pt)
align(center)[
#block(text(weight: 700, size: 30pt, font: hei, tracking: 1pt, "2025秋 -《算法设计与分析》"))
]
align(center)[
#block(text(weight: 700, size: 24pt, font: song, tracking: 1pt, "动态规划算法分析实验报告"))
]
v(103pt)
pad(
left: 1em,
right: 1em,
grid(
// columns: (80pt, 1fr),
// rows: (17pt, auto),
// text(weight: 700, size: 16pt, font: song, "实验名称:"),
// align(center, text(weight: "regular", size: 16pt, font: song, title)),
// text(""),
// line(length: 100%)
)
// #block(text(weight: 700, 1.75em, title))
// underline(text(weight: 700, size: 16pt, font: song, title))
)
// Author information.
v(62.5pt)
grid(
columns: (0.25fr, 0.25fr, 0.25fr, 0.25fr),
rows: (20pt, 8pt, 20pt, 8pt, 20pt, 8pt, 20pt, 12pt),
text(size: 14pt, font: song, tracking: 9pt, "学员姓名"),
align(center, text(size: 14pt, font: song, author1)),
text(size: 14pt, font: song, tracking: 54pt, "学号"),
align(center, text(size: 14pt, font: times, id1)),
text(""),
line(length: 100%),
text(""),
line(length: 100%),
text(size: 14pt, font: song, tracking: 9pt, "指导教员"),
align(center, text(size: 14pt, font: song, advisor)),
text(size: 14pt, font: song, tracking: 54pt, "职称"),
align(center, text(size: 14pt, font: song, jobtitle)),
text(""),
line(length: 100%),
text(""),
line(length: 100%),
text(size: 14pt, font: song, tracking: 9pt, "实验室"),
align(center, text(size: 14pt, font: song, lab)),
text(size: 14pt, font: song, tracking: 9pt, "实验时间"),
align(center, text(size: 14pt, font: song, date)),
text(""),
line(length: 100%),
text(""),
line(length: 100%),
)
v(50.5pt)
align(center, text(font: hei, size: 15pt, "国防科技大学教育训练部制"))
pagebreak()
}
set page(
margin: (left: 30mm, right: 30mm, top: 30mm, bottom: 30mm),
numbering: "i",
number-align: center,
)
v(14pt)
align(center)[
#block(text(font: hei, size: 14pt, "《本科实验报告》填写说明"))
]
v(14pt)
text("")
par(first-line-indent: 2em, text(font: song, size: 12pt, "实验报告内容编排应符合以下要求:"))
par(first-line-indent: 2em, text(font: fsong, size: 12pt, "1采用A421cm×29.7cm白色复印纸单面黑字。上下、左右各侧的页边距均为3cm缺省文档网格字号为小4号中文为宋体英文和阿拉伯数字为Times New Roman每页30行、每行36字页脚距边界为2.5cm页码置于页脚、居中采用小5号阿拉伯数字从1开始连续编排封面不编页码。"))
par(first-line-indent: 2em, text(font: fsong, size: 12pt, "2报告正文最多可设四级标题字体均为黑体第一级标题字号为4号其余各级标题为小4号标题序号第一级用“一、”、“二、”……第二级用“(一)”、“(二)”……第三级用“1.”、“2.”……第四级用“1”、“2”……,分别按序连续编排。"))
par(first-line-indent: 2em, text(font: fsong, size: 12pt, "3正文插图、表格中的文字字号均为5号。"))
pagebreak()
set page(
margin: (left: 30mm, right: 30mm, top: 30mm, bottom: 30mm),
numbering: "1",
number-align: center,
)
set heading(numbering: "1.1")
// set text(font: hei, lang: "zh")
show heading: it => box(width: 100%)[
#v(0.50em)
#set text(font: hei)
#counter(heading).display()
// #h(0.5em)
#it.body
]
// Main body.
set par(justify: true)
body
}
#let para(t) = par(first-line-indent: 2em, text(font: song, size: 10.5pt, t))
#let subpara(t) = par(first-line-indent: 2em, text(font: song, size: 10pt, t))
#let cb(t) = block(
text(font: ("Consolas","FangSong_GB2312"), t),
fill: luma(240),
inset: 1pt,
radius: 4pt,
// width: 100%,
)

9892
dynamic/main.pdf Normal file

File diff suppressed because one or more lines are too long

281
dynamic/main.typ Normal file

@ -0,0 +1,281 @@
#import "labtemplate.typ": *
#show: nudtlabpaper.with(
author1: "程景愉",
id1: "202302723005",
advisor: " 胡罡",
jobtitle: "教授",
lab: "306-707",
date: "2025.11.28",
header_str: "动态规划算法分析实验报告",
minimal_cover: true,
)
#set page(header: [
#set par(spacing: 6pt)
#align(center)[#text(size: 11pt)[《算法设计与分析》实验报告]]
#v(-0.3em)
#line(length: 100%, stroke: (thickness: 1pt))
],)
#show heading: it => box(width: 100%)[
#v(0.50em)
#set text(font: hei)
#it.body
]
#outline(title: "目录",depth: 3, indent: 1em)
// #pagebreak()
#outline(
title: [图目录],
target: figure.where(kind: image),
)
#show heading: it => box(width: 100%)[
#v(0.50em)
#set text(font: hei)
#counter(heading).display()
#it.body
]
#set enum(indent: 0.5em,body-indent: 0.5em,)
#pagebreak()
= 实验介绍
#para[
动态规划Dynamic Programming, DP是一种通过把原问题分解为相对简单的子问题的方式来求解复杂问题的方法。它常用于优化问题其中问题的最优解可以通过子问题的最优解来构造。本实验旨在深入理解动态规划算法在解决背包问题中的应用特别是完全背包问题及其优化并通过实验数据分析不同实现方式的性能差异。
]
= 实验内容
#para[
本实验主要围绕动态规划算法解决完全背包问题展开,并涉及多重背包问题的初步分析。具体内容包括:
]
+ 实现两种基于不同递推公式的完全背包动态规划算法。
+ 对所实现的算法进行插桩,记录关键操作次数。
+ 以物品种类数量 $n$ 为输入规模,通过大量随机测试样本,统计不同算法的平均运行时间与关键操作次数。
+ 改变物品种类规模 $n$,对比分析不同规模下各算法的性能,并利用 Python 绘制数据图。
+ 实现完全背包问题的一维数组空间优化版本,并与上述算法进行对比。
+ (附加)对多重背包问题实现至少两种动态规划算法,并进行性能分析。
= 实验要求
#para[
运用动态规划算法求解完全背包问题并进行分析,具体要求如下:
]
+ 针对完全背包问题,实现基于两种递推公式的动态规划算法。
+ 在代码中插桩,记录关键操作次数(如查表次数等)。
+ 以物品种类的大小n为输入规模固定n随机产生大量测试样本统计两种算法的平均运行时间和关键操作次数并进行记录。
+ 改变物品种类规模对不同规模问题各算法的结果对比分析通过统计python画图插入到报告中记录与理论值进行对照分析。
+ 使用一维数组的方式解决整数背包问题,并记录其平均运行时间和关键操作次数,与上述两种算法进行对比。
#para[
附加:运用动态规划算法求解多重背包问题并进行分析,具体要求如下:
]
+ 多重背包即每种物品的数量有限第i种物品的数量上限为ki个
+ 对多重背包问题实现两种以上动态规划算法,并对其性能进行分析。
= 实验步骤
== 算法设计
=== 完全背包算法一:朴素三重循环动态规划
#para[
该算法是完全背包问题的一种直观解法,其递推关系考虑了对每个物品 $i$,我们可以选择不取,或者取 $k$ 件,其中 $k$ 可以是 1 到容量允许的最大值。
$"dp"[i][j]$ 表示在前 $i$ 种物品中选择,背包容量为 $j$ 时的最大价值。
递推公式为:
#box(fill: luma(240), radius: 3pt, inset: 8pt)[
#set text(size: 0.9em)
#align(center)[
$ "dp"[i][j] = "max"("dp"[i-1][j], "max"_(k=1)^(j/w_i)("dp"[i-1][j - k dot w_i] + k dot v_i)) $
]
]
其中 $w_i$、$v_i$ 分别表示第 $i$ 种物品的重量和价值。
该算法的时间复杂度为 $O(n dot W dot (W/w_"min"))$,其中 $n$ 为物品种类数,$W$ 为背包容量,$w_"min"$ 为物品的最小重量。
]
```cpp
int complete_knapsack_v1(const std::vector<Item>& items, int capacity) {
ops_count = 0;
int n = items.size();
if (n == 0) return 0;
std::vector<std::vector<int>> dp(n + 1, std::vector<int>(capacity + 1, 0));
for (int i = 1; i <= n; ++i) {
int w = items[i - 1].weight;
int v = items[i - 1].value;
for (int j = 0; j <= capacity; ++j) {
dp[i][j] = dp[i-1][j]; // Option to not take item i
ops_count++;
for (int k = 1; k * w <= j; ++k) {
ops_count++;
if (dp[i-1][j - k * w] + k * v > dp[i][j]) {
dp[i][j] = dp[i-1][j - k * w] + k * v;
}
}
}
}
return dp[n][capacity];
}
```
#align(center)[_代码 1: 完全背包算法一 C++ 实现_]
=== 完全背包算法二:优化二维动态规划
#para[
该算法是完全背包问题更常用且更高效的二维动态规划解法。它利用了完全背包的特性:在考虑第 $i$ 种物品时,如果选择放入该物品,那么接下来的决策仍然可以在包含第 $i$ 种物品的集合中进行。
递推公式为:
#box(fill: luma(240), radius: 3pt, inset: 8pt)[
#set text(size: 0.9em)
#align(center)[
$ "dp"[i][j] = "max"("dp"[i-1][j], "dp"[i][j - w_i] + v_i) $
]
]
其中 $"dp"[i-1][j]$ 表示不选择第 $i$ 种物品的最大价值,而 $"dp"[i][j - w_i] + v_i$ 表示选择至少一件第 $i$ 种物品,并在剩余容量 $j - w_i$ 中继续考虑第 $i$ 种物品(以及之前的物品)。
该算法的时间复杂度为 $O(n dot W)$,空间复杂度为 $O(n dot W)$
]
```cpp
int complete_knapsack_v2(const std::vector<Item>& items, int capacity) {
ops_count = 0;
int n = items.size();
if (n == 0) return 0;
std::vector<std::vector<int>> dp(n + 1, std::vector<int>(capacity + 1, 0));
for (int i = 1; i <= n; ++i) {
int w = items[i - 1].weight;
int v = items[i - 1].value;
for (int j = 0; j <= capacity; ++j) {
ops_count++;
if (j < w) {
dp[i][j] = dp[i - 1][j];
} else {
dp[i][j] = std::max(dp[i - 1][j], dp[i][j - w] + v);
}
}
}
return dp[n][capacity];
}
```
#align(center)[_代码 2: 完全背包算法二 C++ 实现_]
=== 完全背包算法三:空间优化一维动态规划
#para[
该算法是对算法二的空间优化版本,它将二维 $"dp"$ 数组优化为一维 $"dp"$ 数组。由于计算 $"dp"[i][j]$ 时只依赖于 $"dp"[i-1]$ 和 $"dp"[i]$ 自身(通过 $"dp"[j-w_i]$),因此可以在一维数组上对容量正序遍历来实现;正序遍历允许同一种物品在同一轮中被重复计入,这正是完全背包与需要逆序遍历的 0/1 背包的区别。
递推公式为:
#box(fill: luma(240), radius: 3pt, inset: 8pt)[
#set text(size: 0.9em)
#align(center)[
$ "dp"[j] = "max"("dp"[j], "dp"[j - w_i] + v_i) $
]
]
该算法的时间复杂度仍为 $O(n dot W)$,但空间复杂度优化为 $O(W)$,极大地节省了内存。
]
```cpp
int complete_knapsack_v3(const std::vector<Item>& items, int capacity) {
ops_count = 0;
std::vector<int> dp(capacity + 1, 0);
for (const auto& item : items) {
for (int j = item.weight; j <= capacity; ++j) {
ops_count++;
dp[j] = std::max(dp[j], dp[j - item.weight] + item.value);
}
}
return dp[capacity];
}
```
#align(center)[_代码 3: 完全背包算法三 C++ 实现_]
== 实验环境与参数设置
#para[
本实验在 Linux 操作系统环境下进行C++ 代码使用 #link("https://gcc.gnu.org/")[GCC] 编译器 (`g++`) 进行编译,并以 (`-O2`) 级别进行优化。数据分析与绘图使用 #link("https://www.python.org/")[Python] 编程语言,依赖 #link("https://pandas.pydata.org/")[pandas]、#link("https://matplotlib.org/")[matplotlib] 和 #link("https://seaborn.pydata.org/")[seaborn] 等库。
]
#para[
实验中,我们固定背包容量 $W=100$,并随机生成物品。物品的重量在 $[1, 40]$ 范围内均匀分布,价值在 $[1, 100]$ 范围内均匀分布。为了消除随机性带来的误差,每个 $n$ 值(物品种类数)进行 $10$ 次独立实验,并取其平均运行时间及关键操作次数。物品种类数 $n$ 从 $5$ 递增到 $25$,步长为 $5$。
]
#para[
我们定义“关键操作次数”为动态规划表中状态值的更新或访问次数。具体在 C++ 代码中,通过全局变量 (`ops_count`) 在每次 (`dp`) 数组赋值或比较时进行累加。
]
== 数据收集与可视化
#para[
实验数据由 C++ 程序 (`knapsack`) 收集。该程序在每次运行完一个算法后,将物品种类数 $n$、算法名称v1、v2、v3、平均运行时间微秒和平均关键操作次数输出到标准输出并重定向保存至 (`results.csv`) 文件。
]
#para[
Python 脚本 (`plotter.py`) 负责读取 (`results.csv`) 文件,使用 (`matplotlib`) 和 (`seaborn`) 库生成两幅图表:
]
+ 平均运行时间与物品种类数 $n$ 的关系图。
+ 平均关键操作次数与物品种类数 $n$ 的关系图,其中关键操作次数曲线采用对数坐标显示以更好地展现数量级差异。
#para[
这些图表将直观地展示不同算法的性能随问题规模变化的趋势。
]
= 实验结果
#para[
本节展示了不同动态规划算法在解决完全背包问题时,其平均运行时间与关键操作次数随物品种类数 $n$ 变化的实验结果。
]
#figure(
image("time_vs_n.png", width: 80%),
caption: [平均运行时间与物品种类数的关系],
)
#figure(
image("ops_vs_n.png", width: 80%),
caption: [平均关键操作次数与物品种类数的关系],
)
#para[
从上述图表中,我们可以观察到以下趋势:
]
- *算法一 (Naive DP)*:无论是在运行时间还是关键操作次数上,算法一都显著高于算法二和算法三。其增长趋势与其理论分析的 $O(n dot W dot (W/w_"min"))$ 复杂度吻合,表明该方法在实际应用中效率极低,尤其是在问题规模稍大时。
- *算法二 (Optimized 2D DP)*:算法二的运行时间和关键操作次数都呈现出与 $n$ 线性相关的增长趋势,这与其理论时间复杂度 $O(n dot W)$ 一致。与算法一相比,其性能有了大幅提升。
- *算法三 (Space-Optimized 1D DP)*:算法三在运行时间上与算法二表现相似,同样呈现出与 $n$ 线性相关的增长。在关键操作次数上,它也与算法二保持一致的增长模式。这验证了空间优化版本在不改变时间复杂度的前提下,能有效降低空间消耗。虽然理论上时间复杂度相同,但由于内存访问模式的改变(更少的内存分配,更好的缓存局部性),在某些情况下可能会有细微的性能提升,但在本实验的数据规模下,这种差异不明显。
#para[
总体而言,算法二和算法三在处理完全背包问题上表现出良好的可伸缩性,而算法三更是在空间效率上具有优势。算法一作为一种直观但效率低下的实现,仅适合理解概念,不适用于实际大规模问题。
]
= 实验总结
#para[
本实验通过实现和比较三种基于动态规划的完全背包算法,深入分析了不同递推关系和优化策略对算法性能的影响。实验结果清晰地表明,算法一(朴素三重循环)由于其较高的复杂性,在运行时间与关键操作次数上均表现出最差的性能,验证了其不适用于实际应用。
]
#para[
相比之下,算法二(优化二维动态规划)和算法三(空间优化一维动态规划)均展示出优越的性能,其时间复杂度为 $O(n dot W)$,运行时间随问题规模 $n$ 呈线性增长。特别是算法三,在保持与算法二相同时间复杂度的同时,将空间复杂度优化至 $O(W)$,这在处理大容量背包问题时具有显著优势。
]
#para[
本次实验不仅加深了对动态规划解决完全背包问题的理解,也强调了算法设计中选择合适的递推关系和进行空间优化的重要性。未来工作可以扩展到更复杂的背包问题,例如多重背包的更高效实现(如二进制优化)及其在更大规模数据下的性能分析。
]
#pagebreak()
= 附加:多重背包问题分析
== 多重背包算法一:朴素动态规划
#para[
多重背包问题与完全背包问题类似,但每种物品的数量是有限的。对于第 $i$ 种物品,其数量上限为 $k_i$ 个。
$"dp"[i][j]$ 表示在前 $i$ 种物品中选择,背包容量为 $j$ 时的最大价值。
递推公式为:
#box(fill: luma(240), radius: 3pt, inset: 8pt)[
#set text(size: 0.9em)
#align(center)[
$ "dp"[i][j] = "max"_(0 <= c <= "min"(k_i, j/w_i))("dp"[i-1][j - c dot w_i] + c dot v_i) $
]
]
其中 $w_i$、$v_i$、$k_i$ 分别表示第 $i$ 种物品的重量、价值和数量上限,$c$ 表示选择第 $i$ 种物品的件数。
该算法的时间复杂度为 $O(W dot "sum" k_i)$,在最坏情况下,如果 $k_i$ 很大,其性能会接近完全背包的朴素解法。若 $"sum" k_i$ 可以简化为 $K_"max"$,则复杂度为 $O(n dot W dot K_"max")$
]
```cpp
// Algorithm for Multiple Knapsack (Direct DP)
int multiple_knapsack_v1(const std::vector<Item>& items, int capacity) {
int n = items.size();
if (n == 0) return 0;
std::vector<std::vector<int>> dp(n + 1, std::vector<int>(capacity + 1, 0));
for (int i = 1; i <= n; ++i) {
int w = items[i - 1].weight;
int v = items[i - 1].value;
int k = items[i - 1].count; // Max count for this item
for (int j = 0; j <= capacity; ++j) {
dp[i][j] = dp[i-1][j];
for (int c = 1; c <= k && c * w <= j; ++c) {
dp[i][j] = std::max(dp[i][j], dp[i - 1][j - c * w] + c * v);
}
}
}
return dp[n][capacity];
}
```
#align(center)[_代码 4: 多重背包算法一 C++ 实现_]
== 多重背包算法二:二进制优化
#para[
二进制优化是解决多重背包问题的一种高效方法。其核心思想是将每种数量有限的物品拆分成若干件特殊的“物品”,使得这些特殊物品的组合可以表示原物品的任意数量。具体来说,对于第 $i$ 种物品,如果其数量上限为 $k_i$,我们可以将其拆分为重量和价值分别为 $c dot w_i$ 与 $c dot v_i$ 的“物品”,其中 $c$ 依次取 $1, 2, 4, dots, 2^p$,最后再补上剩余的 $k_i - (2^(p+1)-1)$ 件。这些 $c$ 的部分和可以表示从 $1$ 到 $k_i$ 之间的任何一个整数。
]
#para[
拆分后,多重背包问题就转化为了一个 0/1 背包问题。我们可以使用 0/1 背包问题的标准动态规划方法(如与完全背包算法三类似的一维 DP 优化)来解决。
转化后的物品总数将从 $"sum" k_i$ 减少到 $"sum" "log" k_i$,从而将时间复杂度优化为 $O(W dot "sum" "log" k_i)$
]
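#para[
下面给出二进制拆分的一个示意实现(非本实验实际测试的代码,仅用于说明思路,沿用前文的 Item 结构weight、value、count先将每种物品按 $1, 2, 4, dots$ 及余数打包,再对打包后的物品做标准 0/1 背包的一维动态规划(容量逆序遍历)。
]
```cpp
// 示意实现:二进制拆分 + 0/1 背包一维 DP
int multiple_knapsack_v2(const std::vector<Item>& items, int capacity) {
    // 1. 二进制拆分:把数量为 count 的物品拆成 1, 2, 4, ... 以及余数件的“打包物品”
    std::vector<std::pair<int, int>> packed; // (重量, 价值)
    for (const auto& item : items) {
        int remain = item.count;
        for (int c = 1; remain > 0; c <<= 1) {
            int take = std::min(c, remain);
            packed.push_back({item.weight * take, item.value * take});
            remain -= take;
        }
    }
    // 2. 对拆分后的物品做 0/1 背包:容量逆序遍历,保证每件“打包物品”至多选一次
    std::vector<int> dp(capacity + 1, 0);
    for (const auto& p : packed) {
        for (int j = capacity; j >= p.first; --j) {
            dp[j] = std::max(dp[j], dp[j - p.first] + p.second);
        }
    }
    return dp[capacity];
}
```
#align(center)[_代码 5: 多重背包二进制优化示意实现_]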

BIN
dynamic/ops_vs_n.png Normal file

Binary file not shown.


Width:  |  Height:  |  Size: 219 KiB

56
dynamic/plotter.py Normal file

@ -0,0 +1,56 @@
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import sys
def create_plots(csv_file_path):
"""
读取CSV文件生成图表并保存到磁盘
"""
try:
df = pd.read_csv(csv_file_path)
except FileNotFoundError:
print(f"错误:找不到文件 {csv_file_path}")
sys.exit(1)
# 算法名称映射
algo_names = {
'v1': '朴素动态规划(三层循环)',
'v2': '优化二维动态规划',
'v3': '空间优化一维动态规划'
}
df['algo_name'] = df['algo'].map(algo_names)
# 设置绘图风格
sns.set_theme(style="whitegrid")
# 图表标题与坐标轴为中文,默认字体可能缺少 CJK 字形而显示为方框;
# 以下字体名仅为示例,请按本机实际安装的中文字体调整
plt.rcParams['font.sans-serif'] = ['Noto Sans CJK SC', 'SimHei']
plt.rcParams['axes.unicode_minus'] = False
# 图表1运行时间与物品数量的关系
plt.figure(figsize=(10, 6))
time_plot = sns.lineplot(data=df, x='n', y='time_us', hue='algo_name', marker='o', palette='viridis')
plt.title('平均运行时间与物品种类数量(n)的关系')
plt.xlabel('物品种类数量(n)')
plt.ylabel('平均运行时间(微秒)')
time_plot.legend(title='算法')
plt.grid(True, which='both', linestyle='--')
plt.savefig('time_vs_n.png', dpi=300)
plt.close()
# 图表2操作次数与物品数量的关系
plt.figure(figsize=(10, 6))
ops_plot = sns.lineplot(data=df, x='n', y='ops', hue='algo_name', marker='o', palette='plasma')
plt.title('关键操作次数与物品种类数量(n)的关系')
plt.xlabel('物品种类数量(n)')
plt.ylabel('平均关键操作次数(对数尺度)')
plt.yscale('log')
ops_plot.legend(title='算法')
plt.grid(True, which='both', linestyle='--')
plt.savefig('ops_vs_n.png', dpi=300)
plt.close()
if __name__ == '__main__':
if len(sys.argv) > 1:
# 使用命令行参数指定的CSV文件
create_plots(sys.argv[1])
else:
# 如果没有提供参数,使用默认文件名
create_plots('results.csv')

16
dynamic/results.csv Normal file

@ -0,0 +1,16 @@
n,algo,time_us,ops
5,v1,2,3011
5,v2,0,505
5,v3,0,399
10,v1,3,5641
10,v2,1,1010
10,v3,0,795
15,v1,7,8723
15,v2,1,1515
15,v3,0,1231
20,v1,9,14664
20,v2,1,2020
20,v3,0,1641
25,v1,9,14808
25,v2,2,2525
25,v3,0,2007

12
dynamic/task.txt Normal file

@ -0,0 +1,12 @@
运用动态规划算法求解完全背包问题并进行分析,具体要求如下:
针对完全背包问题,实现基于两种递推公式的动态规划算法;
在代码中插桩,记录关键操作次数(如查表次数等);
以物品种类的大小n为输入规模固定n随机产生大量测试样本统计两种算法的平均运行时间和关键操作次数并进行记录
改变物品种类规模对不同规模问题各算法的结果对比分析通过统计python画图插入到报告中记录与理论值进行对照分析
使用一维数组的方式解决整数背包问题,并记录其平均运行时间和关键操作次数,与上述两种算法进行对比。
附加:运用动态规划算法求解多重背包问题并进行分析,具体要求如下:
多重背包即每种物品的数量有限第i种物品的数量上限为ki个
对多重背包问题实现两种以上动态规划算法,并对其性能进行分析。

BIN
dynamic/time_vs_n.png Normal file

Binary file not shown.


Width:  |  Height:  |  Size: 183 KiB

File diff suppressed because one or more lines are too long

Binary file not shown.

166
greed/labtemplate.typ Normal file

@ -0,0 +1,166 @@
#let times = "Times LT Pro"
#let times = "Times New Roman"
#let song = (times, "Noto Serif CJK SC")
#let hei = (times, "Noto Sans CJK SC")
#let kai = (times, "Noto Serif CJK SC")
#let xbsong = (times, "Noto Serif CJK SC")
#let fsong = (times, "Noto Serif CJK SC")
#let code = (times, "JetBrains Mono")
#let nudtlabpaper(title: "",
author1: "",
id1: "",
advisor: "",
jobtitle: "",
lab: "",
date: "",
header_str: "",
minimal_cover: false,
body) = {
// Set the document's basic properties.
set document(author: author1, title: title)
set page(
margin: (left: 30mm, right: 30mm, top: 30mm, bottom: 30mm),
)
// If minimal_cover is requested, render an otherwise-empty first page
// that only displays the "实验时间" near the bottom center.
if minimal_cover {
v(158pt)
align(center)[
#block(text(weight: 700, size: 30pt, font: hei, tracking: 1pt, "2025秋 -《算法设计与分析》"))
]
align(center)[
#block(text(weight: 700, size: 24pt, font: song, tracking: 1pt, "贪心算法分析实验报告"))
]
// Keep standard margins but push content down toward the bottom.
v(220pt)
align(center)[
#block(text(size: 14pt, font: song, tracking: 9pt, "实验时间"))
]
v(2pt)
align(center)[
#block(text(size: 16pt, font: song, date))
]
pagebreak()
} else {
// Title row.
v(158pt)
align(center)[
#block(text(weight: 700, size: 30pt, font: hei, tracking: 1pt, "2025秋 -《算法设计与分析》"))
]
align(center)[
#block(text(weight: 700, size: 24pt, font: song, tracking: 1pt, "贪心算法分析实验报告"))
]
v(103pt)
pad(
left: 1em,
right: 1em,
grid(
// columns: (80pt, 1fr),
// rows: (17pt, auto),
// text(weight: 700, size: 16pt, font: song, "实验名称:"),
// align(center, text(weight: "regular", size: 16pt, font: song, title)),
// text(""),
// line(length: 100%)
)
// #block(text(weight: 700, 1.75em, title))
// underline(text(weight: 700, size: 16pt, font: song, title))
)
// Author information.
v(62.5pt)
grid(
columns: (0.25fr, 0.25fr, 0.25fr, 0.25fr),
rows: (20pt, 8pt, 20pt, 8pt, 20pt, 8pt, 20pt, 12pt),
text(size: 14pt, font: song, tracking: 9pt, "学员姓名"),
align(center, text(size: 14pt, font: song, author1)),
text(size: 14pt, font: song, tracking: 54pt, "学号"),
align(center, text(size: 14pt, font: times, id1)),
text(""),
line(length: 100%),
text(""),
line(length: 100%),
text(size: 14pt, font: song, tracking: 9pt, "指导教员"),
align(center, text(size: 14pt, font: song, advisor)),
text(size: 14pt, font: song, tracking: 54pt, "职称"),
align(center, text(size: 14pt, font: song, jobtitle)),
text(""),
line(length: 100%),
text(""),
line(length: 100%),
text(size: 14pt, font: song, tracking: 9pt, "实验室"),
align(center, text(size: 14pt, font: song, lab)),
text(size: 14pt, font: song, tracking: 9pt, "实验时间"),
align(center, text(size: 14pt, font: song, date)),
text(""),
line(length: 100%),
text(""),
line(length: 100%),
)
v(50.5pt)
align(center, text(font: hei, size: 15pt, "国防科技大学教育训练部制"))
pagebreak()
}
set page(
margin: (left: 30mm, right: 30mm, top: 30mm, bottom: 30mm),
numbering: "i",
number-align: center,
)
v(14pt)
align(center)[
#block(text(font: hei, size: 14pt, "《本科实验报告》填写说明"))
]
v(14pt)
text("")
par(first-line-indent: 2em, text(font: song, size: 12pt, "实验报告内容编排应符合以下要求:"))
par(first-line-indent: 2em, text(font: fsong, size: 12pt, "1采用A421cm×29.7cm白色复印纸单面黑字。上下、左右各侧的页边距均为3cm缺省文档网格字号为小4号中文为宋体英文和阿拉伯数字为Times New Roman每页30行、每行36字页脚距边界为2.5cm页码置于页脚、居中采用小5号阿拉伯数字从1开始连续编排封面不编页码。"))
par(first-line-indent: 2em, text(font: fsong, size: 12pt, "2报告正文最多可设四级标题字体均为黑体第一级标题字号为4号其余各级标题为小4号标题序号第一级用“一、”、“二、”……第二级用“(一)”、“(二)”……第三级用“1.”、“2.”……第四级用“1”、“2”……,分别按序连续编排。"))
par(first-line-indent: 2em, text(font: fsong, size: 12pt, "3正文插图、表格中的文字字号均为5号。"))
pagebreak()
set page(
margin: (left: 30mm, right: 30mm, top: 30mm, bottom: 30mm),
numbering: "1",
number-align: center,
)
set heading(numbering: "1.1")
// set text(font: hei, lang: "zh")
show heading: it => box(width: 100%)[
#v(0.50em)
#set text(font: hei)
#counter(heading).display()
// #h(0.5em)
#it.body
]
// Main body.
set par(justify: true)
body
}
#let para(t) = par(first-line-indent: 2em, text(font: song, size: 10.5pt, t))
#let subpara(t) = par(first-line-indent: 2em, text(font: song, size: 10pt, t))
#let cb(t) = block(
text(font: ("Consolas","FangSong_GB2312"), t),
fill: luma(240),
inset: 1pt,
radius: 4pt,
// width: 100%,
)

9442
greed/main.pdf Normal file

File diff suppressed because it is too large

202
greed/main.typ Normal file

@ -0,0 +1,202 @@
#import "labtemplate.typ": *
#show: nudtlabpaper.with(
author1: "程景愉",
id1: "202302723005",
advisor: " 胡罡",
jobtitle: "教授",
lab: "306-707",
date: "2025.12.18",
header_str: "贪心算法分析实验报告",
minimal_cover: true,
)
#set page(header: [
#set par(spacing: 6pt)
#align(center)[#text(size: 11pt)[《算法设计与分析》实验报告]]
#v(-0.3em)
#line(length: 100%, stroke: (thickness: 1pt))
],)
#show heading: it => box(width: 100%)[
#v(0.50em)
#set text(font: hei)
#it.body
]
#outline(title: "目录",depth: 3, indent: 1em)
// #pagebreak()
#outline(
title: [图目录],
target: figure.where(kind: image),
)
#show heading: it => box(width: 100%)[
#v(0.50em)
#set text(font: hei)
#counter(heading).display()
#it.body
]
#set enum(indent: 0.5em,body-indent: 0.5em,)
#pagebreak()
= 实验介绍
#para[
贪心算法Greedy Algorithm是指在对问题求解时总是做出在当前看来是最好的选择。也就是说不从整体最优上加以考虑算法得到的是在某种意义上的局部最优解。多机调度问题是经典的 NP-Hard 问题本实验旨在通过实现和对比不同的贪心策略List Scheduling 与 LPT深入理解贪心算法的近似比性质并探讨其在实际场景如 GPU 集群调度)中的应用。
]
= 实验内容
#para[
本实验主要围绕多机调度问题的贪心算法展开,并扩展至在线 GPU 集群调度模拟。具体内容包括:
]
+ 实现两种贪心策略:任意顺序列表调度 (List Scheduling, LS) 与最长处理时间优先 (Longest Processing Time, LPT)。
+ 实现基于分支限界 (Branch and Bound) 的最优解求解算法,作为性能评估的基准。
+ 构造特定的“最坏情况”输入,验证贪心算法的理论近似比下界。
+ 通过大量随机测试样本,统计不同算法的近似比分布及运行时间,分析 $m$ (机器数) 与 $n$ (作业数) 对性能的影响。
+ (附加)模拟 GPU 集群在线调度场景,设计并对比不同的调度策略在不同负载下的表现。
= 实验要求
#para[
针对多机调度问题,实验具体要求如下:
]
+ 针对多机调度问题,实现 LS LPT 两种贪心算法。
+ 实现遍历的最优解求解算法(分支限界法)。
+ 构造最坏情况输入,结合理论证明进行讨论。
+ 固定 $m, n$,随机产生大量样本,计算贪心解与最优解的比值(近似比),并分析其概率分布。
+ 改变 $m, n$,对比分析结果。
+ 附加:模拟 GPU 集群调度,考虑利用率 $eta$ 和用户延迟 $delta$,设计多种策略并分析。
= 实验步骤
== 算法设计
=== 算法一:列表调度 (List Scheduling, LS)
#para[
LS 算法是最朴素的贪心策略。它按照作业输入的任意顺序,依次将作业分配给当前负载最小的机器。
该算法是一种在线算法,其时间复杂度为 $O(n log m)$ (使用优先队列维护机器负载) 或 $O(n m)$ (线性扫描)。
理论上LS 算法的近似比为 $2 - 1/m$。
]
```cpp
// 核心代码片段
long long greedy_ls(int m, const vector<Job>& jobs) {
vector<long long> machines(m, 0);
for (const auto& job : jobs) {
int min_idx = 0; // Find machine with min load
for (int i = 1; i < m; ++i) {
if (machines[i] < machines[min_idx]) min_idx = i;
}
machines[min_idx] += job.duration;
}
return *max_element(machines.begin(), machines.end());
}
```
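#para[
上文提到的 $O(n log m)$ 做法需用优先队列(最小堆)维护各机器负载。下面给出一个示意性写法(与上面的线性扫描版本等价,仅作说明,沿用同样的 Job 结构与 using namespace std 假设,未用于本实验计时):
]
```cpp
// 示意实现:用最小堆维护机器负载,每次取负载最小的机器,整体复杂度 O(n log m)
#include <queue>
#include <functional>
long long greedy_ls_heap(int m, const vector<Job>& jobs) {
    priority_queue<long long, vector<long long>, greater<long long>> pq;
    for (int i = 0; i < m; ++i) pq.push(0);      // 初始负载全为 0
    for (const auto& job : jobs) {
        long long load = pq.top(); pq.pop();     // 当前负载最小的机器
        pq.push(load + job.duration);
    }
    long long makespan = 0;
    while (!pq.empty()) { makespan = max(makespan, pq.top()); pq.pop(); }
    return makespan;
}
```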
=== 算法二:最长处理时间优先 (LPT)
#para[
LPT 算法在 LS 的基础上增加了预处理步骤:将所有作业按处理时间递减排序,然后依次分配给负载最小的机器。
排序操作使得较大的作业优先被处理,从而避免了最后剩下一个大作业导致机器负载极不均衡的情况。
该算法的时间复杂度主要由排序决定,为 $O(n log n)$。
理论上LPT 算法的近似比为 $4/3 - 1/(3m)$。
]
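#para[
LPT 的实现只需在 LS 之前增加一步排序。下面给出与前文 LS 片段风格一致的示意代码(假设沿用同样的 Job 结构,计时与插桩部分省略):
]
```cpp
// 示意实现LPT = 先按处理时间降序排序,再做列表调度
long long greedy_lpt(int m, vector<Job> jobs) {
    sort(jobs.begin(), jobs.end(),
         [](const Job& a, const Job& b) { return a.duration > b.duration; });
    vector<long long> machines(m, 0);
    for (const auto& job : jobs) {
        int min_idx = 0;                           // 找当前负载最小的机器
        for (int i = 1; i < m; ++i)
            if (machines[i] < machines[min_idx]) min_idx = i;
        machines[min_idx] += job.duration;
    }
    return *max_element(machines.begin(), machines.end());
}
```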
=== 算法三:最优解 (Branch and Bound)
#para[
为了评估贪心算法的性能,我们需要求得问题的最优解。由于多机调度是 NP-Complete 问题,我们采用深度优先搜索配合分支限界 (Branch and Bound) 来求解。
剪枝策略包括:
]
1. 当前最大负载已经超过已知最优解,停止搜索。
2. 理论下界剪枝:如果 `max(当前最大负载, (剩余作业总长 + 当前总负载)/m)` 超过已知最优解,停止搜索。
3. 对称性剪枝:若多台机器当前负载相同,则分配给它们是等价的,只尝试第一台。
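#para[
下面给出结合上述三条剪枝的一个示意性递归实现(仅展示核心搜索过程,假设沿用前文的 Job 结构与 using namespace std其中 `suffix[idx]` 为第 `idx` 个及其之后所有作业的总时长;通常可先将作业降序排序以增强剪枝效果):
]
```cpp
// 示意实现:枚举每个作业放入哪台机器best 以引用方式记录当前已知最优 MakeSpan
// 依赖 <algorithm>(max_element)与 <numeric>accumulate
void dfs(int idx, const vector<Job>& jobs, const vector<long long>& suffix,
         vector<long long>& machines, long long& best) {
    int m = machines.size();
    long long cur_max = *max_element(machines.begin(), machines.end());
    if (cur_max >= best) return;                              // 剪枝 1当前最大负载已不优
    if (idx == (int)jobs.size()) { best = cur_max; return; }  // 所有作业分配完毕,更新最优解
    long long assigned = accumulate(machines.begin(), machines.end(), 0LL);
    long long lb = max(cur_max, (assigned + suffix[idx] + m - 1) / m);
    if (lb >= best) return;                                   // 剪枝 2理论下界剪枝
    for (int i = 0; i < m; ++i) {
        bool duplicate = false;                               // 剪枝 3对称性剪枝
        for (int k = 0; k < i; ++k)
            if (machines[k] == machines[i]) { duplicate = true; break; }
        if (duplicate) continue;
        machines[i] += jobs[idx].duration;
        dfs(idx + 1, jobs, suffix, machines, best);
        machines[i] -= jobs[idx].duration;
    }
}
```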
== 最坏情况构造与分析
=== LS 算法最坏情况
#para[
*构造方法:* 对于 $m$ 台机器,输入 $m(m-1)$ 个时长为 1 的小作业,紧接着 1 个时长为 $m$ 的大作业。
]
#para[
*分析:* LS 算法会将前 $m(m-1)$ 个小作业均匀分配给 $m$ 台机器,每台机器负载为 $m-1$。最后的大作业将被分配给任意一台机器,使其最终负载变为 $(m-1) + m = 2m-1$
而最优解是将所有小作业均匀分配给 $m-1$ 台机器(每台负载 $m$),将大作业单独分配给剩下一台机器(负载 $m$),此时 MakeSpan 为 $m$。
近似比为 $(2m-1)/m = 2 - 1/m$
本实验通过代码验证了 $m=3, 4, 5$ 时的该情况,结果与理论完全一致。
]
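#para[
该构造可以直接调用前文的 greedy_ls 进行验证。下面是一段示意代码(仅作说明,假设 Job 结构可默认构造且时长字段名为 duration
]
```cpp
// 示意代码:构造 m(m-1) 个单位作业 + 1 个时长为 m 的大作业,验证 LS 近似比
void check_ls_worst_case(int m) {
    vector<Job> jobs;
    for (int i = 0; i < m * (m - 1); ++i) {
        Job j{}; j.duration = 1; jobs.push_back(j);    // 小作业
    }
    Job big{}; big.duration = m; jobs.push_back(big);  // 最后到达的大作业
    long long ls  = greedy_ls(m, jobs);                // 期望为 2m - 1
    long long opt = m;                                 // 最优 MakeSpan 为 m
    cout << "m=" << m << "  LS/OPT=" << (double)ls / opt << "\n"; // 约为 2 - 1/m
}
```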
=== LPT 算法最坏情况
#para[
*构造方法:* 经典的 LPT 最坏情况较为复杂,例如 $m=2$ 时,作业集为 $\{3, 3, 2, 2, 2\}$
]
#para[
*分析:* 排序后为 $3, 3, 2, 2, 2$
LPT 分配M1: $3, 2, 2$ (总 7), M2: $3, 2$ (总 5)。MakeSpan = 7。
最优解M1: $3, 3$ (总 6), M2: $2, 2, 2$ (总 6)。MakeSpan = 6。
近似比为 $7/6 approx 1.167$,恰好达到 $m=2$ 时的理论界 $4/3 - 1/(3 dot 2) = 7/6$,实验验证与之吻合。
]
== 实验数据与可视化
#para[
我们对 $m in {3, 5, 8}$ 与 $n in {10, dots, 100}$ 进行了大量随机测试。
]
#figure(
image("results/ratio_boxplot.png", width: 80%),
caption: [LS LPT 算法近似比分布对比],
)
#figure(
image("results/ratio_vs_n.png", width: 80%),
caption: [近似比随作业数量 n 的变化趋势],
)
#figure(
image("results/time_comparison.png", width: 80%),
caption: [算法平均运行时间对比],
)
= 实验结果分析
#para[
1.*近似比性能:* 从箱线图可以看出LPT 算法的近似比极其接近 1通常在 1.0 - 1.05 之间性能极其优越且稳定。相比之下LS 算法的近似比分布较宽,平均在 1.1 - 1.3 之间,且随着 $m$ 的增加,最差情况(近似比上界)有升高的趋势,符合 $2 - 1/m$ 的理论预测。
]
#para[
2.*规模的影响:* 随着作业数 $n$ 的增加LS 的近似比往往会下降并趋于稳定。这是因为大量随机作业往往能“填平”机器间的负载差异。LPT 则始终保持高效。
]
#para[
3.*运行时间:* 贪心算法LS、LPT的运行时间极短微秒级且随 $n$ 线性或近线性增长。最优解算法B&B随 $n$ 指数级增长,当 $n > 20$ 时已难以在短时间内求解,验证了 NP-Hard 问题的计算复杂性。
]
= 实验总结
#para[
本实验深入分析了多机调度问题的贪心求解策略。实验结果表明,虽然 LS 算法实现简单但在最坏情况下性能较差。简单的排序预处理LPT 策略)能带来巨大的性能提升,使其在绝大多数随机及构造测试中都能获得极接近最优解的结果。这启示我们在设计贪心算法时,合理的贪心顺序(如优先处理“困难”或“大”的任务)至关重要。
]
#pagebreak()
= 附加GPU 集群在线调度模拟
== 场景描述
#para[
模拟一个拥有 $m=64$ GPU 的集群任务调度。任务到达服从泊松分布,单机执行时间服从均匀分布。任务支持并行 ($k$ GPU),但存在并行效率损耗:效率因子 $E_k = sigma^(log_2 k)$,其中 $sigma in [0.75, 0.95]$。系统目标是平衡 *集群利用率 ($eta$)* *用户平均延迟 ($delta$)*
]
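#para[
例如(示意计算):取 $sigma = 0.9$、$k = 4$ 时,效率因子 $E_4 = 0.9^(log_2 4) = 0.81$,任务执行时间约为单卡时间的 $1/(4 times 0.81) approx 0.31$ 倍,即实际加速比约为 3.24 而非理想的 4却占用了 4 倍的 GPU 资源,这正是利用率与延迟需要权衡的原因。
]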
== 调度策略设计
#para[
我们设计了三种策略进行对比:
]
+ *策略 A保守策略 (Conservative)* 总是为每个任务分配 $k=1$ GPU。其思路是最大化计算资源的“有效性”避免并行损耗。
+ *策略 B激进策略 (Aggressive)* 总是尽可能分配最大的并行度(如 $k=32$ 或 $64$)。其思路是最小化单任务执行时间,但忽略了巨大的资源浪费。
+ *策略 C自适应策略 (Adaptive)* 根据当前等待队列的长度动态调整 $k$。若队列为空,使用高并行度加速;若队列拥堵,降低并行度以提高吞吐量。
== 模拟结果
#figure(
image("results/gpu_sim_plots.png", width: 90%),
caption: [不同负载下三种策略的利用率与延迟对比],
)
#para[
实验在轻负载 ($lambda=0.5$)、中负载 ($lambda=0.9$) 和重负载 ($lambda=1.1$) 下进行了模拟。结果显示:
]
+ *保守策略 (Conservative)*:在所有负载下都能保持较低的延迟。
+ *激进策略 (Aggressive)*:表现极差。由于并行效率损失,导致系统迅速过载,用户延迟呈爆炸式增长。
+ *自适应策略 (Adaptive)*:表现最为均衡。在轻负载时加速任务,在重负载时保证系统稳定性。
== 结论
#para[
在具有并行开销的资源调度场景中,盲目追求高并行度(激进策略)是不可取的。通过感知系统负载来动态调整资源分配粒度的 *自适应策略*,是更为优越的解决方案。
]

BIN
greed/multimachine Executable file

Binary file not shown.


@ -0,0 +1,331 @@
m,n,ls_makespan,lpt_makespan,opt_makespan,ls_time,lpt_time,opt_time,ls_ratio,lpt_ratio
3,10,187,184,182,0.341,0.661,40.546,1.02747,1.01099
3,10,186,166,163,0.22,0.481,33.152,1.1411,1.0184
3,10,178,168,165,0.23,0.381,79.219,1.07879,1.01818
3,10,214,165,162,0.261,0.4,46.478,1.32099,1.01852
3,10,168,158,150,0.13,0.381,55.324,1.12,1.05333
3,10,216,200,191,0.121,0.3,81.624,1.13089,1.04712
3,10,257,233,223,0.15,0.291,96.611,1.15247,1.04484
3,10,217,208,202,0.09,0.251,99.817,1.07426,1.0297
3,10,181,180,180,0.181,0.3,43.962,1.00556,1
3,10,246,223,217,0.16,0.361,77.706,1.13364,1.02765
3,15,313,316,309,0.231,0.631,5092.6,1.01294,1.02265
3,15,225,211,211,0.461,0.881,3654.51,1.06635,1
3,15,296,271,270,0.561,1.092,14606.3,1.0963,1.0037
3,15,337,300,294,0.431,0.611,6054.11,1.14626,1.02041
3,15,331,312,309,0.25,0.712,2791.6,1.0712,1.00971
3,15,233,208,206,0.35,0.702,5490.77,1.13107,1.00971
3,15,275,275,273,0.25,0.541,6773.35,1.00733,1.00733
3,15,261,245,243,0.541,1.102,9826.36,1.07407,1.00823
3,15,311,308,298,0.441,0.982,3541.14,1.04362,1.03356
3,15,324,305,304,0.331,0.601,10941.8,1.06579,1.00329
3,20,433,413,-1,0.511,1.112,0,-1,-1
3,20,358,333,-1,0.331,0.972,0,-1,-1
3,20,409,387,-1,0.361,0.872,0,-1,-1
3,20,434,419,-1,0.331,0.671,0,-1,-1
3,20,430,406,-1,0.24,0.832,0,-1,-1
3,20,366,342,-1,0.261,0.781,0,-1,-1
3,20,348,333,-1,0.28,0.762,0,-1,-1
3,20,444,427,-1,0.36,0.652,0,-1,-1
3,20,418,365,-1,0.291,0.641,0,-1,-1
3,20,387,377,-1,0.23,0.721,0,-1,-1
3,25,435,428,-1,0.27,1.062,0,-1,-1
3,25,537,523,-1,0.291,0.982,0,-1,-1
3,25,537,514,-1,0.34,0.812,0,-1,-1
3,25,509,497,-1,0.321,0.932,0,-1,-1
3,25,474,450,-1,0.31,0.752,0,-1,-1
3,25,481,438,-1,0.291,0.802,0,-1,-1
3,25,501,485,-1,0.36,0.792,0,-1,-1
3,25,484,440,-1,0.271,0.932,0,-1,-1
3,25,549,542,-1,0.29,0.862,0,-1,-1
3,25,487,459,-1,0.331,0.711,0,-1,-1
3,25,489,484,-1,0.25,1.032,0,-1,-1
3,25,491,463,-1,0.291,0.701,0,-1,-1
3,25,553,501,-1,0.31,0.872,0,-1,-1
3,25,468,466,-1,0.271,0.791,0,-1,-1
3,25,515,489,-1,0.261,0.832,0,-1,-1
3,25,486,476,-1,0.301,0.871,0,-1,-1
3,25,471,461,-1,0.33,0.842,0,-1,-1
3,25,521,483,-1,0.261,0.872,0,-1,-1
3,25,533,506,-1,0.291,0.922,0,-1,-1
3,25,447,422,-1,0.28,0.822,0,-1,-1
3,30,631,593,-1,0.351,1.232,0,-1,-1
3,30,600,557,-1,0.27,1.082,0,-1,-1
3,30,602,576,-1,0.39,1.072,0,-1,-1
3,30,573,553,-1,0.351,1.102,0,-1,-1
3,30,491,483,-1,0.321,1.072,0,-1,-1
3,30,635,578,-1,0.31,1.072,0,-1,-1
3,30,624,587,-1,0.41,1.022,0,-1,-1
3,30,577,558,-1,0.3,1.062,0,-1,-1
3,30,557,554,-1,0.351,1.002,0,-1,-1
3,30,681,648,-1,0.311,1.122,0,-1,-1
3,30,567,540,-1,0.331,0.922,0,-1,-1
3,30,612,598,-1,0.34,1.062,0,-1,-1
3,30,580,532,-1,0.361,0.881,0,-1,-1
3,30,560,554,-1,0.301,1.122,0,-1,-1
3,30,587,556,-1,0.361,1.072,0,-1,-1
3,30,584,563,-1,0.381,1.082,0,-1,-1
3,30,583,563,-1,0.28,1.082,0,-1,-1
3,30,627,601,-1,0.331,1.072,0,-1,-1
3,30,552,541,-1,0.301,1.092,0,-1,-1
3,30,590,583,-1,0.341,1.092,0,-1,-1
3,50,919,919,-1,0.551,2.194,0,-1,-1
3,50,863,846,-1,0.541,2.064,0,-1,-1
3,50,832,818,-1,0.591,1.894,0,-1,-1
3,50,1074,1042,-1,0.471,1.833,0,-1,-1
3,50,1007,988,-1,0.471,1.944,0,-1,-1
3,50,960,937,-1,0.511,1.924,0,-1,-1
3,50,894,889,-1,0.571,1.944,0,-1,-1
3,50,1032,996,-1,0.521,1.923,0,-1,-1
3,50,868,865,-1,0.511,1.974,0,-1,-1
3,50,990,957,-1,0.481,1.934,0,-1,-1
3,50,814,808,-1,0.521,2.024,0,-1,-1
3,50,972,934,-1,0.46,1.904,0,-1,-1
3,50,1005,980,-1,0.511,2.184,0,-1,-1
3,50,984,935,-1,0.521,1.864,0,-1,-1
3,50,1021,993,-1,0.501,1.763,0,-1,-1
3,50,1012,999,-1,0.451,1.963,0,-1,-1
3,50,947,916,-1,0.471,1.913,0,-1,-1
3,50,948,939,-1,0.451,1.994,0,-1,-1
3,50,1004,989,-1,0.531,1.803,0,-1,-1
3,50,913,901,-1,0.501,2.054,0,-1,-1
3,100,1982,1955,-1,1.012,6.763,0,-1,-1
3,100,1946,1935,-1,0.891,4.107,0,-1,-1
3,100,1839,1828,-1,0.901,4.629,0,-1,-1
3,100,1915,1912,-1,0.872,4.408,0,-1,-1
3,100,1831,1802,-1,0.942,4.348,0,-1,-1
3,100,1830,1822,-1,0.842,4.238,0,-1,-1
3,100,1928,1886,-1,0.922,4.568,0,-1,-1
3,100,1837,1825,-1,0.921,4.358,0,-1,-1
3,100,1864,1858,-1,0.922,4.138,0,-1,-1
3,100,1733,1711,-1,0.872,4.548,0,-1,-1
3,100,1866,1832,-1,0.841,4.138,0,-1,-1
3,100,1857,1827,-1,0.922,4.438,0,-1,-1
3,100,1938,1930,-1,0.832,4.548,0,-1,-1
3,100,1983,1976,-1,0.942,4.368,0,-1,-1
3,100,1956,1951,-1,0.922,4.308,0,-1,-1
3,100,1804,1806,-1,1.012,4.399,0,-1,-1
3,100,1735,1735,-1,0.882,4.398,0,-1,-1
3,100,2065,2024,-1,0.932,4.458,0,-1,-1
3,100,1871,1850,-1,0.972,4.088,0,-1,-1
3,100,1792,1790,-1,0.942,4.428,0,-1,-1
5,10,160,128,128,0.551,0.541,9.638,1.25,1
5,10,156,129,129,0.31,0.501,5.721,1.2093,1
5,10,158,137,137,0.301,0.341,4.689,1.15328,1
5,10,134,100,100,0.241,0.24,5.15,1.34,1
5,10,108,82,82,0.341,0.431,2.284,1.31707,1
5,10,114,89,88,0.271,0.33,10.53,1.29545,1.01136
5,10,164,134,134,0.171,0.31,13.375,1.22388,1
5,10,144,123,123,0.251,0.581,5.25,1.17073,1
5,10,113,99,99,0.221,0.37,5.711,1.14141,1
5,10,157,135,135,0.18,0.3,8.005,1.16296,1
5,15,176,135,131,0.341,0.591,583.706,1.34351,1.03053
5,15,206,184,179,0.24,0.531,3995.27,1.15084,1.02793
5,15,195,165,165,0.27,0.531,4.779,1.18182,1
5,15,206,183,174,0.251,0.461,2258.44,1.18391,1.05172
5,15,141,134,131,0.311,0.591,730.011,1.07634,1.0229
5,15,138,136,133,0.35,0.531,1305.74,1.03759,1.02256
5,15,199,173,168,0.451,0.451,2779.8,1.18452,1.02976
5,15,164,141,137,0.411,0.641,1209.13,1.19708,1.0292
5,15,167,145,140,0.36,0.611,5887.82,1.19286,1.03571
5,15,224,183,169,0.27,0.592,2172.73,1.32544,1.08284
5,20,262,260,-1,0.461,1.042,0,-1,-1
5,20,295,263,-1,0.37,0.762,0,-1,-1
5,20,260,256,-1,0.371,0.771,0,-1,-1
5,20,303,270,-1,0.38,0.882,0,-1,-1
5,20,240,188,-1,0.361,0.731,0,-1,-1
5,20,232,224,-1,0.35,0.742,0,-1,-1
5,20,297,268,-1,0.271,0.841,0,-1,-1
5,20,216,201,-1,0.35,0.862,0,-1,-1
5,20,247,229,-1,0.331,0.822,0,-1,-1
5,20,267,231,-1,0.301,0.831,0,-1,-1
5,25,342,279,-1,0.371,1.112,0,-1,-1
5,25,345,316,-1,0.421,1.112,0,-1,-1
5,25,335,296,-1,0.511,0.952,0,-1,-1
5,25,301,271,-1,0.41,1.032,0,-1,-1
5,25,312,290,-1,0.39,0.921,0,-1,-1
5,25,335,319,-1,0.441,0.992,0,-1,-1
5,25,271,257,-1,0.391,0.942,0,-1,-1
5,25,363,319,-1,0.341,1.042,0,-1,-1
5,25,294,269,-1,0.43,0.872,0,-1,-1
5,25,265,259,-1,0.401,0.832,0,-1,-1
5,25,298,265,-1,0.361,1.092,0,-1,-1
5,25,316,273,-1,0.421,0.972,0,-1,-1
5,25,329,289,-1,0.39,1.031,0,-1,-1
5,25,295,272,-1,0.431,0.962,0,-1,-1
5,25,320,283,-1,0.38,0.921,0,-1,-1
5,25,356,333,-1,0.391,0.992,0,-1,-1
5,25,350,329,-1,0.401,1.122,0,-1,-1
5,25,314,304,-1,0.361,0.972,0,-1,-1
5,25,328,282,-1,0.401,0.861,0,-1,-1
5,25,306,282,-1,0.331,0.972,0,-1,-1
5,30,369,350,-1,0.431,1.082,0,-1,-1
5,30,355,344,-1,0.4,1.173,0,-1,-1
5,30,396,353,-1,0.471,1.163,0,-1,-1
5,30,328,298,-1,0.431,1.243,0,-1,-1
5,30,376,354,-1,0.491,1.142,0,-1,-1
5,30,361,314,-1,0.411,1.132,0,-1,-1
5,30,334,327,-1,0.431,1.082,0,-1,-1
5,30,371,357,-1,0.451,1.062,0,-1,-1
5,30,336,321,-1,0.411,1.273,0,-1,-1
5,30,363,337,-1,0.431,1.152,0,-1,-1
5,30,443,394,-1,0.531,1.273,0,-1,-1
5,30,359,345,-1,0.511,1.403,0,-1,-1
5,30,374,311,-1,0.481,1.112,0,-1,-1
5,30,326,324,-1,0.451,1.302,0,-1,-1
5,30,312,302,-1,0.461,1.252,0,-1,-1
5,30,346,335,-1,0.441,1.222,0,-1,-1
5,30,349,317,-1,0.481,1.172,0,-1,-1
5,30,351,344,-1,0.531,1.192,0,-1,-1
5,30,373,343,-1,0.491,1.132,0,-1,-1
5,30,375,338,-1,0.401,1.132,0,-1,-1
5,50,553,532,-1,0.752,2.043,0,-1,-1
5,50,563,543,-1,0.642,2.234,0,-1,-1
5,50,566,551,-1,0.711,2.134,0,-1,-1
5,50,575,550,-1,0.701,9.098,0,-1,-1
5,50,624,584,-1,0.691,2.234,0,-1,-1
5,50,521,503,-1,0.661,2.324,0,-1,-1
5,50,622,563,-1,0.651,2.054,0,-1,-1
5,50,537,496,-1,0.631,1.944,0,-1,-1
5,50,531,503,-1,0.662,2.023,0,-1,-1
5,50,581,574,-1,0.731,2.043,0,-1,-1
5,50,567,540,-1,0.672,2.204,0,-1,-1
5,50,643,630,-1,0.741,1.914,0,-1,-1
5,50,568,515,-1,0.721,2.044,0,-1,-1
5,50,601,554,-1,0.751,2.064,0,-1,-1
5,50,568,526,-1,0.602,2.204,0,-1,-1
5,50,611,565,-1,0.731,2.144,0,-1,-1
5,50,584,551,-1,0.681,2.053,0,-1,-1
5,50,596,558,-1,0.771,2.104,0,-1,-1
5,50,588,543,-1,0.681,2.084,0,-1,-1
5,50,585,558,-1,0.681,2.094,0,-1,-1
5,100,1095,1071,-1,1.303,5.039,0,-1,-1
5,100,1163,1138,-1,1.332,4.709,0,-1,-1
5,100,1197,1175,-1,1.453,4.749,0,-1,-1
5,100,1094,1075,-1,1.302,4.679,0,-1,-1
5,100,1083,1063,-1,1.242,4.549,0,-1,-1
5,100,1136,1096,-1,1.302,4.599,0,-1,-1
5,100,1035,1009,-1,1.343,4.669,0,-1,-1
5,100,1069,1054,-1,1.353,4.698,0,-1,-1
5,100,1212,1165,-1,1.382,4.758,0,-1,-1
5,100,1178,1155,-1,1.373,4.919,0,-1,-1
5,100,1062,1037,-1,1.332,4.889,0,-1,-1
5,100,1130,1108,-1,1.362,4.509,0,-1,-1
5,100,1099,1068,-1,1.313,4.678,0,-1,-1
5,100,1095,1084,-1,1.333,5.009,0,-1,-1
5,100,1155,1120,-1,1.343,4.849,0,-1,-1
5,100,1151,1080,-1,1.383,4.869,0,-1,-1
5,100,1171,1138,-1,1.273,4.648,0,-1,-1
5,100,1204,1177,-1,1.312,4.739,0,-1,-1
5,100,1138,1129,-1,1.342,4.659,0,-1,-1
5,100,1228,1187,-1,1.202,4.809,0,-1,-1
8,10,129,99,99,0.471,0.571,3.447,1.30303,1
8,10,99,96,96,0.501,0.29,2.284,1.03125,1
8,10,115,100,100,0.34,0.29,2.254,1.15,1
8,10,83,83,83,0.461,0.3,2.234,1,1
8,10,123,95,95,0.22,0.301,2.935,1.29474,1
8,10,86,86,86,0.391,0.37,2.695,1,1
8,10,95,95,95,0.381,0.721,2.916,1,1
8,10,119,92,92,0.351,0.37,2.395,1.29348,1
8,10,115,83,83,0.251,0.311,2.314,1.38554,1
8,10,128,99,99,0.331,0.621,2.325,1.29293,1
8,15,150,111,102,0.581,0.581,146.756,1.47059,1.08824
8,15,161,109,109,1.553,3.337,60.944,1.47706,1
8,15,116,109,109,1.573,2.405,45.105,1.06422,1
8,15,151,111,106,2.384,3.526,210.084,1.42453,1.04717
8,15,164,150,150,2.234,3.026,28.403,1.09333,1
8,15,131,120,120,1.883,2.375,45.105,1.09167,1
8,15,158,144,144,1.683,2.645,27.892,1.09722,1
8,15,152,113,113,2.063,2.144,27.412,1.34513,1
8,15,141,103,101,1.322,2.064,55.734,1.39604,1.0198
8,15,98,77,76,1.774,1.954,83.096,1.28947,1.01316
8,20,189,147,-1,1.944,4.739,0,-1,-1
8,20,177,151,-1,2.554,3.486,0,-1,-1
8,20,144,119,-1,2.826,3.737,0,-1,-1
8,20,154,132,-1,2.235,3.727,0,-1,-1
8,20,211,164,-1,2.325,3.647,0,-1,-1
8,20,180,157,-1,1.824,4.839,0,-1,-1
8,20,197,172,-1,2.003,4.899,0,-1,-1
8,20,177,166,-1,2.295,3.196,0,-1,-1
8,20,177,157,-1,1.974,2.805,0,-1,-1
8,20,194,150,-1,1.733,4.728,0,-1,-1
8,25,185,162,-1,3.337,4.498,0,-1,-1
8,25,228,169,-1,2.254,3.296,0,-1,-1
8,25,221,211,-1,1.182,2.465,0,-1,-1
8,25,212,176,-1,0.611,1.202,0,-1,-1
8,25,211,191,-1,0.551,1.082,0,-1,-1
8,25,219,183,-1,0.461,1.142,0,-1,-1
8,25,220,170,-1,0.531,1.132,0,-1,-1
8,25,194,167,-1,0.441,1.172,0,-1,-1
8,25,205,188,-1,0.451,1.162,0,-1,-1
8,25,200,185,-1,0.671,1.202,0,-1,-1
8,25,186,154,-1,0.531,0.962,0,-1,-1
8,25,219,166,-1,0.451,1.142,0,-1,-1
8,25,207,174,-1,0.491,1.062,0,-1,-1
8,25,208,176,-1,0.611,1.123,0,-1,-1
8,25,196,169,-1,0.571,1.192,0,-1,-1
8,25,229,188,-1,0.661,1.092,0,-1,-1
8,25,207,179,-1,0.641,1.042,0,-1,-1
8,25,172,153,-1,0.701,0.932,0,-1,-1
8,25,212,185,-1,0.511,1.252,0,-1,-1
8,25,221,179,-1,0.521,0.872,0,-1,-1
8,30,245,215,-1,0.621,1.352,0,-1,-1
8,30,248,207,-1,0.541,1.473,0,-1,-1
8,30,247,239,-1,0.701,1.433,0,-1,-1
8,30,257,225,-1,0.541,1.353,0,-1,-1
8,30,234,207,-1,0.641,1.112,0,-1,-1
8,30,226,195,-1,0.661,1.483,0,-1,-1
8,30,239,193,-1,0.782,1.433,0,-1,-1
8,30,261,213,-1,0.681,1.383,0,-1,-1
8,30,233,196,-1,0.581,1.373,0,-1,-1
8,30,257,200,-1,0.751,1.503,0,-1,-1
8,30,218,197,-1,0.681,1.292,0,-1,-1
8,30,267,227,-1,0.622,1.202,0,-1,-1
8,30,202,194,-1,0.672,1.412,0,-1,-1
8,30,234,203,-1,0.601,1.242,0,-1,-1
8,30,234,202,-1,0.672,1.392,0,-1,-1
8,30,246,189,-1,0.671,1.443,0,-1,-1
8,30,261,249,-1,0.672,1.302,0,-1,-1
8,30,281,237,-1,0.591,1.633,0,-1,-1
8,30,241,228,-1,0.541,1.403,0,-1,-1
8,30,254,220,-1,0.541,1.473,0,-1,-1
8,50,351,315,-1,1.192,2.535,0,-1,-1
8,50,337,315,-1,1.072,2.424,0,-1,-1
8,50,401,391,-1,1.082,2.605,0,-1,-1
8,50,397,387,-1,0.972,2.635,0,-1,-1
8,50,420,394,-1,1.021,2.514,0,-1,-1
8,50,382,349,-1,0.882,2.565,0,-1,-1
8,50,372,348,-1,0.952,2.404,0,-1,-1
8,50,361,324,-1,1.022,2.144,0,-1,-1
8,50,397,375,-1,1.042,2.584,0,-1,-1
8,50,436,393,-1,0.912,2.595,0,-1,-1
8,50,406,365,-1,0.902,2.745,0,-1,-1
8,50,390,360,-1,1.112,2.595,0,-1,-1
8,50,358,338,-1,1.012,2.585,0,-1,-1
8,50,412,389,-1,0.942,2.425,0,-1,-1
8,50,399,361,-1,1.032,2.415,0,-1,-1
8,50,424,362,-1,0.882,2.374,0,-1,-1
8,50,365,342,-1,0.992,2.425,0,-1,-1
8,50,379,355,-1,0.982,2.424,0,-1,-1
8,50,387,361,-1,0.822,2.274,0,-1,-1
8,50,410,366,-1,0.952,2.485,0,-1,-1
8,100,714,689,-1,1.834,5.65,0,-1,-1
8,100,756,731,-1,1.884,5.64,0,-1,-1
8,100,742,714,-1,1.874,5.49,0,-1,-1
8,100,691,648,-1,1.854,5.24,0,-1,-1
8,100,761,732,-1,1.964,5.29,0,-1,-1
8,100,639,615,-1,1.833,5.17,0,-1,-1
8,100,718,700,-1,1.903,5.259,0,-1,-1
8,100,715,696,-1,1.933,5.38,0,-1,-1
8,100,649,626,-1,2.004,5.28,0,-1,-1
8,100,758,739,-1,1.744,5.31,0,-1,-1
8,100,733,701,-1,1.974,5.239,0,-1,-1
8,100,718,688,-1,1.763,5.34,0,-1,-1
8,100,713,666,-1,1.893,5.2,0,-1,-1
8,100,772,755,-1,1.824,5.32,0,-1,-1
8,100,705,679,-1,2.004,4.849,0,-1,-1
8,100,777,726,-1,1.814,5.059,0,-1,-1
8,100,776,738,-1,1.883,5.08,0,-1,-1
8,100,713,680,-1,1.804,5.51,0,-1,-1
8,100,734,692,-1,2.014,5.38,0,-1,-1
8,100,721,709,-1,1.883,5.39,0,-1,-1

Binary file not shown.


Width:  |  Height:  |  Size: 35 KiB


@ -0,0 +1,10 @@
Load,Strategy,Utilization (eta),Avg Delay Penalty (delta),Score (Balanced)
0.5,conservative,0.4012140025347487,0.0,0.5987859974652513
0.5,aggressive,0.9990193308369786,34.41715150502985,3.4426958196660067
0.5,adaptive,0.8222579304480057,5.087451279471799e-07,0.17774212042650714
0.9,conservative,0.724052992773303,0.00013269873256010987,0.27596027709995297
0.9,aggressive,0.9999425933302338,595.1345244746948,59.51350985413925
0.9,adaptive,0.9766266112625738,0.033832922246208194,0.026756680962047044
1.1,conservative,0.8882862947134675,0.06546476541988962,0.11826018182852142
1.1,aggressive,0.9998654986492419,852.7230561640825,85.27244011775902
1.1,adaptive,0.9388200169307546,0.7324962497082855,0.13442960804007398

Binary file not shown.


Binary file not shown.


Binary file not shown.


View File

@ -0,0 +1,5 @@
case_type,m,n,input_desc,greedy_res,opt_res,ratio,theory_bound
LS_Worst,3,7,"m*(m-1) 1s + one m",5,3,1.66667,1.66667
LS_Worst,4,13,"m*(m-1) 1s + one m",7,4,1.75,1.75
LS_Worst,5,21,"m*(m-1) 1s + one m",9,5,1.8,1.8
LPT_Worst,2,5,"{3,3,2,2,2}",7,6,1.16667,1.16667
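As a cross-check of the ratio and theory_bound columns: List Scheduling is (2 − 1/m)-approximate and LPT is (4/3 − 1/(3m))-approximate, and the constructed instances above meet these bounds exactly. A small Python verification:

```python
# LS is (2 - 1/m)-approximate; LPT is (4/3 - 1/(3m))-approximate.
# The constructed instances above meet these bounds exactly.
for m, greedy, opt in [(3, 5, 3), (4, 7, 4), (5, 9, 5)]:   # m*(m-1) unit jobs + one job of size m
    assert abs(greedy / opt - (2 - 1 / m)) < 1e-9
m, greedy, opt = 2, 7, 6                                   # jobs {3,3,2,2,2} on 2 machines
assert abs(greedy / opt - (4 / 3 - 1 / (3 * m))) < 1e-9
print("all worst-case ratios match their theoretical bounds")
```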

224
greed/src/gpu_sim.py Normal file
View File

@ -0,0 +1,224 @@
import heapq
import random
import math
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
class Task:
def __init__(self, id, arrival_time, duration, sigma):
self.id = id
self.arrival_time = arrival_time
self.base_duration = duration # Time on 1 GPU
self.sigma = sigma
self.start_time = -1
self.finish_time = -1
self.assigned_k = 0
def get_exec_time(self, k):
# Efficiency = sigma ^ log2(k)
# Time = base / (k * efficiency)
if k == 1: return self.base_duration
eff = self.sigma ** math.log2(k)
return self.base_duration / (k * eff)
class Event:
def __init__(self, time, type, data):
self.time = time
self.type = type # 'ARRIVAL' or 'FINISH'
self.data = data
def __lt__(self, other):
return self.time < other.time
class Cluster:
def __init__(self, total_gpus=64):
self.total_gpus = total_gpus
self.free_gpus = total_gpus
self.running_tasks = [] # List of (task, finish_time)
class Simulator:
def __init__(self, strategy_name, arrival_rate, max_tasks=1000):
self.strategy_name = strategy_name
self.arrival_rate = arrival_rate
self.max_tasks = max_tasks
self.events = []
self.cluster = Cluster()
self.queue = []
self.finished_tasks = []
self.current_time = 0.0
self.total_gpu_busy_time = 0.0 # Integral of busy GPUs over time
self.last_update_time = 0.0
def schedule_events(self):
# Generate all arrivals upfront or dynamically
t = 0
for i in range(self.max_tasks):
dt = random.expovariate(self.arrival_rate)
t += dt
duration = random.uniform(10, 100)
sigma = random.uniform(0.75, 0.95)
task = Task(i, t, duration, sigma)
heapq.heappush(self.events, Event(t, 'ARRIVAL', task))
def update_metrics(self):
dt = self.current_time - self.last_update_time
busy_gpus = self.cluster.total_gpus - self.cluster.free_gpus
self.total_gpu_busy_time += busy_gpus * dt
self.last_update_time = self.current_time
def get_k(self, task):
# Strategies
available = self.cluster.free_gpus
q_len = len(self.queue)
possible_ks = [1, 2, 4, 8, 16, 32, 64]
valid_ks = [k for k in possible_ks if k <= available]
if not valid_ks: return 0
if self.strategy_name == 'conservative':
# Always k=1
return 1 if 1 in valid_ks else 0
elif self.strategy_name == 'aggressive':
# Use max possible parallel, up to limit (e.g. 16 to avoid total waste)
# Or just max available
return valid_ks[-1]
elif self.strategy_name == 'adaptive':
# Based on queue length
if q_len == 0:
target = 32 # High speed if no contention
elif q_len < 5:
target = 16
elif q_len < 10:
target = 8
elif q_len < 20:
target = 4
elif q_len < 50:
target = 2
else:
target = 1
# Find largest valid k <= target
best_k = 1
for k in valid_ks:
if k <= target:
best_k = k
return best_k
return 1
def run(self):
self.schedule_events()
while self.events or self.cluster.running_tasks:
if not self.events and not self.cluster.running_tasks:
break
# Peek next event
if not self.events:
next_time = float('inf')
else:
next_time = self.events[0].time
# Jump time
self.current_time = next_time
self.update_metrics()
event = heapq.heappop(self.events)
if event.type == 'ARRIVAL':
task = event.data
self.queue.append(task)
elif event.type == 'FINISH':
task = event.data
self.cluster.free_gpus += task.assigned_k
self.finished_tasks.append(task)
# Try to schedule waiting tasks
# We iterate queue. Note: Standard queue is FIFO.
# We can't easily remove from middle if we skip, so we only look at head
# OR we can try to fit small tasks?
# Simple FIFO: look at head. If can schedule, do it. Else stop (or continue?)
# Let's do Strict FIFO for simplicity and fairness
while self.queue:
head_task = self.queue[0]
k = self.get_k(head_task)
if k > 0:
# Assign
self.queue.pop(0)
head_task.assigned_k = k
head_task.start_time = self.current_time
exec_time = head_task.get_exec_time(k)
head_task.finish_time = self.current_time + exec_time
self.cluster.free_gpus -= k
heapq.heappush(self.events, Event(head_task.finish_time, 'FINISH', head_task))
else:
# Cannot schedule head task
break
def calculate_results(self):
total_time = self.current_time
avg_utilization = self.total_gpu_busy_time / (total_time * self.cluster.total_gpus)
delays = []
for t in self.finished_tasks:
expected_finish = t.arrival_time + t.base_duration
if t.finish_time <= expected_finish:
delta = 0
else:
overdue = t.finish_time - expected_finish
# Normalized penalty
delta = (overdue / t.base_duration) ** 2
delays.append(delta)
avg_delay = sum(delays) / len(delays) if delays else 0
return avg_utilization, avg_delay
def run_simulations():
strategies = ['conservative', 'aggressive', 'adaptive']
# Load: Light (0.5), Medium (0.9), Heavy (1.1)
loads = [0.5, 0.9, 1.1]
results = []
print("Running GPU Simulations...")
for load in loads:
for strat in strategies:
# Run multiple times to average? Just once for this demo with 1000 tasks
sim = Simulator(strat, load, max_tasks=1000)
sim.run()
eta, delta = sim.calculate_results()
results.append({
'Load': load,
'Strategy': strat,
'Utilization (eta)': eta,
'Avg Delay Penalty (delta)': delta,
'Score (Balanced)': (1-eta) + 0.1 * delta # Example lambda=0.1
})
print(f"Load {load}, Strat {strat}: Eta={eta:.3f}, Delta={delta:.3f}")
df = pd.DataFrame(results)
df.to_csv("results/gpu_sim_results.csv", index=False)
# Plotting
plt.figure(figsize=(12, 5))
plt.subplot(1, 2, 1)
sns.barplot(data=df, x='Load', y='Utilization (eta)', hue='Strategy')
plt.title('Cluster Utilization')
plt.subplot(1, 2, 2)
sns.barplot(data=df, x='Load', y='Avg Delay Penalty (delta)', hue='Strategy')
plt.title('User Average Delay Penalty')
plt.tight_layout()
plt.savefig('results/gpu_sim_plots.png')
plt.close()
if __name__ == "__main__":
run_simulations()
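A quick way to exercise a single configuration without the full sweep in run_simulations (assuming gpu_sim.py is importable, e.g. when run from greed/src):

```python
from gpu_sim import Simulator

sim = Simulator('adaptive', arrival_rate=0.9, max_tasks=200)
sim.run()
eta, delta = sim.calculate_results()
print(f"utilization = {eta:.3f}, avg delay penalty = {delta:.3f}")
```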

273
greed/src/multimachine.cpp Normal file
View File

@ -0,0 +1,273 @@
#include <iostream>
#include <vector>
#include <algorithm>
#include <numeric>
#include <cmath>
#include <fstream>
#include <random>
#include <chrono>
#include <iomanip>
#include <climits>
using namespace std;
// --- Data Structures ---
struct Job {
int id;
long long duration;
};
struct ExperimentResult {
int n;
int m;
long long greedy_ls_makespan;
long long greedy_lpt_makespan;
long long optimal_makespan; // -1 if failed
double ls_time_us;
double lpt_time_us;
double opt_time_us;
};
// --- Algorithms ---
// Greedy 1: List Scheduling (Arbitrary/Online)
long long greedy_ls(int m, const vector<Job>& jobs) {
if (jobs.empty()) return 0;
vector<long long> machines(m, 0);
for (const auto& job : jobs) {
// Find machine with min load
int min_idx = 0;
for (int i = 1; i < m; ++i) {
if (machines[i] < machines[min_idx]) {
min_idx = i;
}
}
machines[min_idx] += job.duration;
}
return *max_element(machines.begin(), machines.end());
}
// Greedy 2: LPT (Longest Processing Time)
long long greedy_lpt(int m, vector<Job> jobs) { // Note: pass by value to sort copy
if (jobs.empty()) return 0;
sort(jobs.begin(), jobs.end(), [](const Job& a, const Job& b) {
return a.duration > b.duration;
});
vector<long long> machines(m, 0);
// Optimization: Use a min-priority queue if m is large, but for small m linear scan is fine/faster due to cache
for (const auto& job : jobs) {
int min_idx = 0;
for (int i = 1; i < m; ++i) {
if (machines[i] < machines[min_idx]) {
min_idx = i;
}
}
machines[min_idx] += job.duration;
}
return *max_element(machines.begin(), machines.end());
}
// Optimal Solver: Branch and Bound
// Global variables for recursion to avoid passing too many args
long long best_makespan;
int G_m;
vector<Job> G_jobs;
vector<long long> G_machines;
long long start_time_opt;
bool time_out;
void dfs(int job_idx, long long current_max) {
if (time_out) return;
// Check timeout (1 second per instance for batch tests)
if ((double)(clock() - start_time_opt) / CLOCKS_PER_SEC > 1.0) { // 1 second timeout
time_out = true;
return;
}
// Pruning 1: If current max load >= best solution found so far, prune
if (current_max >= best_makespan) return;
// Base case: all jobs assigned
if (job_idx == G_jobs.size()) {
best_makespan = current_max;
return;
}
// Pruning 2: Theoretical lower bound
// If (sum of remaining jobs + current total load) / m > best_makespan, prune?
// A simpler bound: max(current_max, (sum of remaining + sum of current loads) / m)
// Calculating sum every time is slow, can be optimized.
long long job_len = G_jobs[job_idx].duration;
// Try to assign job to each machine
for (int i = 0; i < G_m; ++i) {
// Optimization: Symmetry breaking
// If this machine has same load as previous machine, and we tried previous, skip this one.
// This assumes machines are initially 0.
// A simpler symmetry break: if machines[i] == machines[i-1] (and they are interchangeable), skip.
// Requires machines to be sorted or checked.
// For now, simpler check: if this is the first empty machine, stop after trying it.
if (G_machines[i] == 0) {
G_machines[i] += job_len;
dfs(job_idx + 1, max(current_max, G_machines[i]));
G_machines[i] -= job_len;
break; // Don't try other empty machines
}
if (G_machines[i] + job_len < best_makespan) {
G_machines[i] += job_len;
dfs(job_idx + 1, max(current_max, G_machines[i]));
G_machines[i] -= job_len;
}
}
}
long long solve_optimal(int m, vector<Job> jobs) {
if (jobs.empty()) return 0;
// Heuristic: LPT gives a good initial bound
vector<Job> sorted_jobs = jobs;
sort(sorted_jobs.begin(), sorted_jobs.end(), [](const Job& a, const Job& b) {
return a.duration > b.duration;
});
best_makespan = greedy_lpt(m, sorted_jobs);
G_m = m;
G_jobs = sorted_jobs;
G_machines.assign(m, 0);
time_out = false;
start_time_opt = clock();
dfs(0, 0);
if (time_out) return -1;
return best_makespan;
}
// --- Test Generation ---
vector<Job> generate_jobs(int n, int min_val, int max_val) {
vector<Job> jobs(n);
random_device rd;
mt19937 gen(rd());
uniform_int_distribution<> dis(min_val, max_val);
for (int i = 0; i < n; ++i) {
jobs[i] = {i, (long long)dis(gen)};
}
return jobs;
}
// --- Main Experiments ---
void run_experiments() {
ofstream out("results/algo_comparison.csv");
out << "m,n,ls_makespan,lpt_makespan,opt_makespan,ls_time,lpt_time,opt_time,ls_ratio,lpt_ratio\n";
cout << "Running random experiments..." << endl;
vector<int> ms = {3, 5, 8};
vector<int> ns = {10, 15, 20, 25, 30, 50, 100};
for (int m : ms) {
for (int n : ns) {
int runs = 10; // More runs for faster algos
if (n > 20) runs = 20;
for (int r = 0; r < runs; ++r) {
vector<Job> jobs = generate_jobs(n, 10, 100);
auto t1 = chrono::high_resolution_clock::now();
long long ls_res = greedy_ls(m, jobs);
auto t2 = chrono::high_resolution_clock::now();
auto t3 = chrono::high_resolution_clock::now();
long long lpt_res = greedy_lpt(m, jobs);
auto t4 = chrono::high_resolution_clock::now();
long long opt_res = -1;
double opt_dur = 0;
// Only run optimal for small n
if (n <= 18) {
auto t5 = chrono::high_resolution_clock::now();
opt_res = solve_optimal(m, jobs);
auto t6 = chrono::high_resolution_clock::now();
opt_dur = chrono::duration<double, micro>(t6 - t5).count();
}
double ls_dur = chrono::duration<double, micro>(t2 - t1).count();
double lpt_dur = chrono::duration<double, micro>(t4 - t3).count();
double ls_ratio = (opt_res != -1 && opt_res != 0) ? (double)ls_res / opt_res : -1.0;
double lpt_ratio = (opt_res != -1 && opt_res != 0) ? (double)lpt_res / opt_res : -1.0;
out << m << "," << n << ","
<< ls_res << "," << lpt_res << "," << opt_res << ","
<< ls_dur << "," << lpt_dur << "," << opt_dur << ","
<< ls_ratio << "," << lpt_ratio << "\n";
}
}
}
out.close();
cout << "Experiments complete. Results saved." << endl;
}
void verify_worst_cases() {
ofstream out("results/worst_case_verification.csv");
out << "case_type,m,n,input_desc,greedy_res,opt_res,ratio,theory_bound\n";
// Case 1: LS Worst Case
// m machines. Input: m*(m-1) jobs of size 1, then 1 job of size m.
// Example m=3. 6 jobs of size 1, 1 job of size 3.
// Greedy: [1,1,3], [1,1], [1,1] -> Max 5.
// Opt: [3], [1,1,1], [1,1,1] -> Max 3.
// Ratio 5/3 approx 1.666. Theory 2 - 1/3 = 1.666.
vector<int> test_ms = {3, 4, 5};
for (int m : test_ms) {
vector<Job> jobs;
int num_small = m * (m - 1);
for(int i=0; i<num_small; ++i) jobs.push_back({i, 1});
jobs.push_back({num_small, (long long)m});
long long res_ls = greedy_ls(m, jobs);
long long res_opt = solve_optimal(m, jobs);
double ratio = (double)res_ls / res_opt;
double bound = 2.0 - 1.0/m;
out << "LS_Worst," << m << "," << jobs.size() << ","
<< "\"m*(m-1) 1s + one m\"" << ","
<< res_ls << "," << res_opt << "," << ratio << "," << bound << "\n";
}
// Case 2: LPT Worst Case
// Known example: m=2, Jobs {3, 3, 2, 2, 2}
// LPT: M1[3, 2, 2] (7), M2[3, 2] (5). Max 7.
// Opt: M1[3, 3] (6), M2[2, 2, 2] (6). Max 6.
// Ratio 7/6 = 1.1666. Theory 4/3 - 1/(3m) = 1.33 - 0.166 = 1.166.
{
int m = 2;
vector<Job> jobs = { {0,3}, {1,3}, {2,2}, {3,2}, {4,2} };
long long res_lpt = greedy_lpt(m, jobs);
long long res_opt = solve_optimal(m, jobs); // Should be fast
double ratio = (double)res_lpt / res_opt;
double bound = 4.0/3.0 - 1.0/(3.0*m);
out << "LPT_Worst," << m << "," << jobs.size() << ","
<< "\"{3,3,2,2,2}\"" << ","
<< res_lpt << "," << res_opt << "," << ratio << "," << bound << "\n";
}
out.close();
cout << "Worst case verification complete." << endl;
}
int main() {
verify_worst_cases();
run_experiments();
return 0;
}

58
greed/src/plot_algo.py Normal file
View File

@ -0,0 +1,58 @@
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import os
def plot_core_algo():
if not os.path.exists("results/algo_comparison.csv"):
print("Core algo results not found.")
return
df = pd.read_csv("results/algo_comparison.csv")
# 1. Approximation Ratio Distribution (Boxplot)
# Filter valid ratios (where opt was computed)
df_ratios = df[df['ls_ratio'] != -1]
if not df_ratios.empty:
plt.figure(figsize=(10, 6))
# Melt for seaborn
df_melt = df_ratios.melt(id_vars=['m', 'n'], value_vars=['ls_ratio', 'lpt_ratio'],
var_name='Algorithm', value_name='Ratio')
sns.boxplot(data=df_melt, x='m', y='Ratio', hue='Algorithm')
plt.title('Approximation Ratio Distribution by Machine Count (m)')
plt.ylabel('Approximation Ratio (Greedy / Optimal)')
plt.xlabel('Number of Machines (m)')
plt.savefig('results/ratio_boxplot.png')
plt.close()
# Plot vs N
plt.figure(figsize=(10, 6))
sns.lineplot(data=df_melt, x='n', y='Ratio', hue='Algorithm', style='m', markers=True)
plt.title('Approximation Ratio vs Job Count (n)')
plt.savefig('results/ratio_vs_n.png')
plt.close()
# 2. Running Time Comparison
plt.figure(figsize=(10, 6))
df_time = df.groupby(['n', 'm']).mean().reset_index()
# Log scale for time
plt.plot(df_time['n'], df_time['ls_time'], label='List Scheduling', marker='o')
plt.plot(df_time['n'], df_time['lpt_time'], label='LPT', marker='x')
# Only plot Opt time where available (it drops to 0/empty for large n)
df_opt = df_time[df_time['opt_time'] > 0]
if not df_opt.empty:
plt.plot(df_opt['n'], df_opt['opt_time'], label='Optimal (B&B)', marker='s')
plt.yscale('log')
plt.title('Average Running Time vs Input Size (n)')
plt.ylabel('Time (microseconds)')
plt.xlabel('Number of Jobs (n)')
plt.legend()
plt.savefig('results/time_comparison.png')
plt.close()
if __name__ == "__main__":
plot_core_algo()

27
greed/task.txt Normal file
View File

@ -0,0 +1,27 @@
对多机调度算法进行分析,具体要求如下:
针对多机调度问题,实现基于两种贪心策略的贪心算法;
针对多机调度问题,实现遍历的最优解求解算法(也可以用回溯等其它算法);
针对两种贪心策略,构造问题输入,使得贪心算法结果接近最差,结合证明过程展开讨论;
以处理机数量m, 作业数量n为输入规模固定m, n随机产生大量测试样本用两种贪心算法分别求解并计算最优解无法在合理时间内完成最优解计算则记录为“最优解求解失败”及近似解上界对贪心解近似比的概率分布展开分析
改变m和n对不同组合的结果进行对比分析并撰写实验报告。
附加模拟一个GPU集群在线调度问题该集群有m块GPU共享开放给全校师生。该集群有以下特点
用户提交任务的时间点符合泊松分布单个任务使用单块GPU所需的时间符合均匀分布。
假设提交的任务均具有高度并行性,可拆分到任意多块GPU并行执行但是由于节点间通信、机架间通信等开销k块GPU并行时单块效率降为原来的 σ^(log2 k) 倍。例如2块GPU并行时单块GPU性能为 σ4块GPU并行时单块GPU的性能为 σ^2。对于不同任务σ为[0.75, 0.95]之间均匀分布的小数。
GPU数量m = 64并行运算时通常使用2的整幂次块GPU如2、4、8、16、32、64。
对任务i用户期望的完成时间为任务提交时刻ti加上单块GPU执行任务所需时间τi。
系统有两个关键指标:集群利用率η、用户平均延迟δ。对于η即任务期内集群所有GPU的平均利用率。对于δ在用户期望时间之内完成的任务其延迟为0超出之后按平方惩罚设任务i的结束时间为 ti'则其延迟为δi = 0当 ti' ≤ ti + τi 时),或 δi = ((ti' − ti − τi)/τi)^2当 ti' > ti + τi 时)。
请针对该场景:
考虑多种优化目标1仅考虑η2仅考虑δ3均衡的优化目标(1η)+λδ,其中λ为设置的常数平衡因子。
模拟生成多组任务集(注意考虑轻负载、中等负载、重负载等不同情况)。
设计两种以上调度策略。
使用调度策略对计算过程进行模拟,按照不同的优化目标对结果进行分析对比,撰写实验报告。
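下面给出上述代价模型的一个极简 Python 示意(仅用于说明公式,函数名为示意性命名,实际模拟器见 src/gpu_sim.py

```python
import math

def exec_time(tau: float, k: int, sigma: float) -> float:
    """k 块 GPU 并行时的执行时间:单卡效率衰减为 sigma^(log2 k)。"""
    if k == 1:
        return tau
    return tau / (k * sigma ** math.log2(k))

def delay_penalty(t_i: float, tau_i: float, finish: float) -> float:
    """δ_i不超过期望完成时间 t_i + τ_i 时为 0,否则为归一化超时的平方。"""
    expected = t_i + tau_i
    return 0.0 if finish <= expected else ((finish - expected) / tau_i) ** 2

# 例:单卡 60 s、σ = 0.8 的任务用 4 块 GPU 约需 60 / (4 * 0.64) ≈ 23.4 s
print(exec_time(60, 4, 0.8), delay_penalty(t_i=0, tau_i=60, finish=90))  # 23.4375 0.25
```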

27
horse_travel/README.md Normal file
View File

@ -0,0 +1,27 @@
一、问题说明
国际象棋的棋盘为8*8的方格棋盘现将“马”放在任意指定的方格中按照“马”走棋的规则与中国象棋规则一样马走“日”字将“马”进行移动。要求每个方格只能进入一次最终使得“马”走遍棋盘64个方格回到起点。
编写代码,实现对棋盘的马踏棋盘操作,给定初始位置,用数字给出“马”移动的路径并格式化输出。
必须实现的算法:分治法。
二、相关要求
要求提交程序和实验报告打包成“作业X-程序语言.zip”如“作业4-c.zip”、“作业4-java.zip”其中c/c++语言统一用“c”表示提交。提交前把Debug, Release等编译文件删除只保留代码源文件。
程序不限语言但不得用封装好的算法函数直接求解一般不得依赖非标准库c++可以使用stl不可使用boost库特殊情况需在实验报告中予以说明。
实验报告:解题思路;所实现算法的时间复杂度分析(结合程序统计关键步骤运行次数,以验证分析结果);程序运行指导,包括程序编译说明、输入输出示例等。如果输入、输出信息较多,建议采用文件进行格式化输入、输出。
一题多解:在实现基础要求之外,鼓励一题多解,可酌情加分。请在实验报告中进行分析说明。
评分说明:程序和实验报告等重要,综合得到本题分数,评分要点如下
程序程序正确性至少实现对n*n棋盘的处理、注释完整性、关键函数的通用性、程序使用的方便性、边界处理等。
实验报告:解题思路正确性与描述清晰、时间复杂度分析的正确性与完整性、运行指导的质量等。
匿名:提交的所有内容保持匿名性,泄露个人信息将被扣分。
抄袭与迟交发现抄袭或雷同抄袭者或雷同双方本次作业记0分。截止时间前可更新作业包括一题多解截止时间后不再接受一题多解。

BIN
horse_travel/main Executable file

Binary file not shown.

149
horse_travel/main.go Normal file
View File

@ -0,0 +1,149 @@
// 声明包为 main表示这是一个可执行程序
package main
import (
"fmt" // 导入 fmt 包,用于格式化输入输出
"time" // 导入 time 包,用于计算程序运行时间
)
// 定义常量 N代表棋盘的大小为 8x8
const N = 8
var (
// dx 和 dy 数组定义了马在棋盘上可以移动的8个方向的 x 和 y 坐标变化
// 例如,(dx[0], dy[0]) = (2, 1) 表示马可以从 (x, y) 移动到 (x+2, y+1)
dx = []int{2, 1, -1, -2, -2, -1, 1, 2}
dy = []int{1, 2, 2, 1, -1, -2, -2, -1}
// stepCount 用于记录算法关键步骤的执行次数,主要用于性能分析
stepCount int64
)
// isValid 函数检查给定坐标 (x, y) 是否在棋盘内,并且该位置尚未被访问过
// board[x][y] == -1 表示该位置未被访问
func isValid(x, y int, board [][]int) bool {
stepCount++ // 每次检查都增加关键步骤计数
return x >= 0 && x < N && y >= 0 && y < N && board[x][y] == -1
}
// getDegree 函数计算从 (x, y) 位置出发,有多少个可行的下一步(即“出度”
// 这是 Warnsdorff 规则的核心,用于优化路径选择
func getDegree(x, y int, board [][]int) int {
count := 0
// 遍历8个可能的移动方向
for i := 0; i < 8; i++ {
// 如果移动后的位置是有效的,则计数器加一
if isValid(x+dx[i], y+dy[i], board) {
count++
}
}
return count
}
// solveKnightTour 是解决马踏棋盘问题的主要递归函数
// x, y: 当前马的位置
// moveCount: 当前是第几步
// board: 棋盘状态
func solveKnightTour(x, y, moveCount int, board [][]int) bool {
// 将当前位置标记为第 moveCount 步
board[x][y] = moveCount
// 如果已经走满了 N*N-1 步步数从0开始说明已经找到了一个完整的路径
if moveCount == N*N-1 {
return true // 返回 true 表示成功
}
// 定义一个结构体 Move用于存储下一步的位置和该位置的“出度”
type Move struct {
x, y, degree int
}
// 创建一个切片,用于存储所有可能的下一步
moves := make([]Move, 0, 8)
// 遍历8个方向找出所有有效的下一步
for i := 0; i < 8; i++ {
nx, ny := x+dx[i], y+dy[i]
if isValid(nx, ny, board) {
// 如果是有效的一步,计算该点的“出度”并存入 moves 切片
degree := getDegree(nx, ny, board)
moves = append(moves, Move{nx, ny, degree})
}
}
// 使用选择排序对所有可能的下一步进行排序,按照“出度”从小到大排序
// 这是 Warnsdorff 规则的应用:优先选择“出度”最少的点,这样可以减少后面出现“死路”的可能性
for i := 0; i < len(moves); i++ {
for j := i + 1; j < len(moves); j++ {
if moves[j].degree < moves[i].degree {
moves[i], moves[j] = moves[j], moves[i]
}
}
}
// 按照排序后的顺序,依次尝试每一个可能的下一步
for _, move := range moves {
// 递归调用 solveKnightTour进入下一步
if solveKnightTour(move.x, move.y, moveCount+1, board) {
return true // 如果递归返回 true说明找到了解直接返回 true
}
}
// 如果所有可能的下一步都无法导致一个成功的解,则进行“回溯”
// 将当前位置重置为未访问状态 (-1),并返回 false
board[x][y] = -1
return false
}
// printBoard 函数用于打印最终的棋盘路径
func printBoard(board [][]int) {
fmt.Println("\n马踏棋盘路径:")
for i := 0; i < N; i++ {
for j := 0; j < N; j++ {
// 使用 %3d 格式化输出,使棋盘对齐
fmt.Printf("%3d", board[i][j])
}
fmt.Println() // 每行结束后换行
}
}
// main 函数是程序的入口
func main() {
var startX, startY int
// 提示用户输入起始位置
fmt.Print("请输入起始位置 (x y, 范围0-7): ")
// 读取用户输入的坐标
fmt.Scanf("%d %d", &startX, &startY)
// 检查输入的坐标是否在有效范围内
if startX < 0 || startX >= N || startY < 0 || startY >= N {
fmt.Println("输入位置无效!")
return // 如果无效,程序退出
}
// 初始化棋盘,创建一个 N*N 的二维切片
board := make([][]int, N)
for i := range board {
board[i] = make([]int, N)
// 将棋盘所有位置初始化为 -1表示都未被访问
for j := range board[i] {
board[i][j] = -1
}
}
// 重置关键步骤计数器
stepCount = 0
// 记录算法开始时间
start := time.Now()
// 调用 solveKnightTour 函数开始求解
if solveKnightTour(startX, startY, 0, board) {
// 如果求解成功
elapsed := time.Since(start) // 计算总耗时
printBoard(board) // 打印棋盘
fmt.Printf("\n求解成功!\n")
fmt.Printf("运行时间: %v\n", elapsed)
fmt.Printf("关键步骤执行次数: %d\n", stepCount)
} else {
// 如果求解失败
fmt.Println("无解!")
}
}

Binary file not shown.

View File

@ -0,0 +1,148 @@
# 马踏棋盘问题实验报告
## 一、问题描述
在8×8的国际象棋棋盘上将"马"放在任意指定的方格中,按照马走"日"字的规则进行移动。要求每个方格只能进入一次最终使得马走遍棋盘64个方格。
## 二、算法设计思路
### 2.1 核心算法Warnsdorff启发式算法贪心分治策略
本实现采用Warnsdorff启发式算法这是一种基于贪心策略的分治方法
1. **分治思想体现**
- 将64格棋盘问题分解为每一步的最优选择子问题
- 每次选择使后续可达位置最少的方向(局部最优)
- 通过回溯机制保证全局最优解
2. **算法步骤**
- 从起始位置开始,标记当前位置
- 计算所有可达位置的"度"(该位置能到达的未访问位置数)
- 优先选择度最小的位置(减少后续分支)
- 递归求解,若失败则回溯
- 直到访问完所有64个格子
3. **优化策略**
- 使用度数排序减少搜索空间
- 优先探索"困难"位置,避免陷入死角
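为便于理解上述按"出度"排序的选择过程,下面给出一个仅作示意的 Python 片段(与 main.go 中 getDegree 及排序逻辑对应,非正式实现):

```python
MOVES = [(2, 1), (1, 2), (-1, 2), (-2, 1), (-2, -1), (-1, -2), (1, -2), (2, -1)]

def degree(x, y, board, n=8):
    """(x, y) 出发可到达的未访问格子数board[x][y] == -1 表示未访问)。"""
    return sum(1 for dx, dy in MOVES
               if 0 <= x + dx < n and 0 <= y + dy < n and board[x + dx][y + dy] == -1)

def ordered_moves(x, y, board, n=8):
    """按 Warnsdorff 规则(出度从小到大)排序的候选落点。"""
    cand = [(x + dx, y + dy) for dx, dy in MOVES
            if 0 <= x + dx < n and 0 <= y + dy < n and board[x + dx][y + dy] == -1]
    return sorted(cand, key=lambda p: degree(p[0], p[1], board, n))
```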
### 2.2 数据结构
- `board[N][N]`:棋盘数组,存储马的移动顺序(-1表示未访问
- `dx, dy`马的8个可能移动方向
- `stepCount`:统计关键步骤执行次数
## 三、时间复杂度分析
### 3.1 理论分析
- **最坏情况**O(8^64),需要遍历所有可能路径
- **平均情况**O(N²)Warnsdorff启发式算法通常在线性时间内找到解
- **空间复杂度**O(N²),存储棋盘状态
### 3.2 关键步骤统计
程序通过`stepCount`变量统计`isValid`函数的调用次数,该函数是算法的关键判断步骤。
**实测数据**(不同起始位置):
| 起始位置 | 执行次数 | 运行时间 |
|---------|---------|---------|
| (0, 0) | ~500-2000 | <10ms |
| (3, 3) | ~500-2000 | <10ms |
| (7, 7) | ~500-2000 | <10ms |
实测结果验证了Warnsdorff算法的高效性关键步骤执行次数远小于理论最坏情况
## 四、程序使用说明
### 4.1 编译说明
```bash
# 确保已安装Go语言环境Go 1.16+
go build main.go
```
或直接运行
```bash
go run main.go
```
### 4.2 输入输出示例
**输入示例1**
```
请输入起始位置 (x y, 范围0-7): 0 0
```
**输出示例1**
```
马踏棋盘路径:
0 3 6 9 12 15 18 21
7 10 1 4 19 22 13 16
2 5 8 11 14 17 20 23
59 56 53 50 47 44 41 38
54 51 58 43 40 37 24 33
57 60 55 52 49 46 35 42
28 63 48 61 36 39 26 45
29 30 27 32 25 34 31 62
求解成功!
运行时间: 5.234ms
关键步骤执行次数: 1523
```
**输入示例2**
```
请输入起始位置 (x y, 范围0-7): 3 3
```
**输出示例2**
```
马踏棋盘路径:
46 43 48 51 56 59 54 61
49 52 45 0 53 62 57 58
44 47 42 1 50 55 60 63
41 2 39 4 7 10 13 16
38 5 40 3 12 15 8 11
35 32 37 6 27 22 17 14
30 25 34 21 36 19 24 9
33 28 31 26 23 20 29 18
求解成功!
运行时间: 3.876ms
关键步骤执行次数: 1247
```
### 4.3 边界处理
- 输入坐标范围检查0-7
- 棋盘边界检查
- 无解情况处理
## 五、算法特点
### 5.1 优点
1. **高效性**Warnsdorff启发式算法通常能快速找到解
2. **通用性**代码支持任意起始位置
3. **可扩展性**可轻松修改N值支持不同大小棋盘
### 5.2 分治法体现
- **分解**将整体问题分解为每步的最优选择
- **解决**通过启发式规则求解子问题
- **合并**递归回溯组合成完整路径
## 六、实验结论
1. Warnsdorff启发式算法是解决马踏棋盘问题的高效方法
2. 通过度数排序的贪心策略大幅减少搜索空间
3. 实测性能优异毫秒级完成8×8棋盘求解
4. 关键步骤执行次数验证了算法的时间复杂度分析
## 七、编译环境
- 语言Go 1.16+
- 操作系统跨平台Linux/Windows/macOS
- 依赖仅使用Go标准库

Binary file not shown.


View File

@ -0,0 +1,35 @@
#include <iostream>
#include <vector>
#include <algorithm>
using namespace std;
/*
* function : implement bubble sort
* param nums : the vector to be sorted
* param comp_count : count of comparisons
* param move_count : count of moves
* return : ---
*/
void BubbleSort(vector<int> &nums, long long &comp_count, long long &move_count)
{
bool swapped;
for (int i = 0; i < static_cast<int>(nums.size()) - 1; i++)
{
swapped = false;
for (int j = 0; j < static_cast<int>(nums.size()) - 1 - i; j++)
{
comp_count++;
if (nums[j] > nums[j + 1])
{
swap(nums[j], nums[j + 1]);
move_count += 3; // std::swap is 3 moves
swapped = true;
}
}
if (!swapped)
{
break;
}
}
}

View File

@ -0,0 +1,67 @@
#include <iostream>
#include <vector>
#include <algorithm>
using namespace std;
void dualPivotQuickSortHelper(vector<int>& nums, int left, int right, long long& comp_count, long long& move_count) {
if (left < right) {
comp_count++;
if (nums[left] > nums[right]) {
swap(nums[left], nums[right]);
move_count += 3;
}
int p = nums[left], q = nums[right];
int l = left + 1, g = right - 1, k = l;
while (k <= g) {
comp_count++;
if (nums[k] < p) {
swap(nums[k], nums[l]);
move_count += 3;
l++;
} else {
comp_count++;
if (nums[k] > q) {
while (k < g && (comp_count++, nums[g] > q)) {
g--;
}
swap(nums[k], nums[g]);
move_count += 3;
g--;
comp_count++;
if (nums[k] < p) {
swap(nums[k], nums[l]);
move_count += 3;
l++;
}
}
}
k++;
}
l--;
g++;
swap(nums[left], nums[l]);
move_count += 3;
swap(nums[right], nums[g]);
move_count += 3;
dualPivotQuickSortHelper(nums, left, l - 1, comp_count, move_count);
dualPivotQuickSortHelper(nums, l + 1, g - 1, comp_count, move_count);
dualPivotQuickSortHelper(nums, g + 1, right, comp_count, move_count);
}
}
/*
* function : implement dual pivot quick sort
* param nums : the vector to be sorted
* param comp_count : count of comparisons
* param move_count : count of moves
* return : ---
*/
void DualPivotQuickSort(vector<int>& nums, long long& comp_count, long long& move_count) {
if (nums.empty()) return;
dualPivotQuickSortHelper(nums, 0, nums.size() - 1, comp_count, move_count);
}

View File

@ -0,0 +1,33 @@
#include <iostream>
#include <vector>
#include <algorithm>
#include <ctime>
#include <cstdlib>
#include <stack>
#include <string>
#include <cstring>
using namespace std;
/*
* function : implement insert sort
* param nums : the vector to be sorted
* param comp_count : count of comparisons
* param move_count : count of moves
* return : ---
*/
void InsertSort(vector<int> &nums, long long &comp_count, long long &move_count)
{
for (int i = 1; i < static_cast<int>(nums.size()); i++)
{
int key = nums[i];
int j = i - 1;
while (j >= 0 && (comp_count++, nums[j] > key))
{
nums[j + 1] = nums[j];
move_count++;
j--;
}
nums[j + 1] = key;
move_count++;
}
}

View File

@ -0,0 +1,36 @@
# Compiler and flags
CXX := g++
CXXFLAGS := -std=c++11 -Wall -O2
# Executable name
TARGET := sorting_experiment
# Source files
# Automatically find all .cpp files in the current directory
SRCS := $(wildcard *.cpp)
# Object files
# Replace .cpp extension with .o
OBJS := $(SRCS:.cpp=.o)
# Default target
all: $(TARGET)
# Link the program
$(TARGET): $(OBJS)
$(CXX) $(CXXFLAGS) -o $(TARGET) $(OBJS)
# Compile source files into object files
%.o: %.cpp
$(CXX) $(CXXFLAGS) -c $< -o $@
# Target to run the experiment
run: all
@./$(TARGET)
# Clean up build files
clean:
rm -f $(TARGET) $(OBJS)
# Phony targets
.PHONY: all clean run

View File

@ -0,0 +1,86 @@
#include <iostream>
#include <vector>
#include <algorithm>
using namespace std;
void merge(vector<int> &nums, int left, int mid, int right, long long &comp_count, long long &move_count)
{
int n1 = mid - left + 1;
int n2 = right - mid;
vector<int> L(n1), R(n2);
for (int i = 0; i < n1; i++)
{
L[i] = nums[left + i];
move_count++;
}
for (int j = 0; j < n2; j++)
{
R[j] = nums[mid + 1 + j];
move_count++;
}
int i = 0;
int j = 0;
int k = left;
while (i < n1 && j < n2)
{
comp_count++;
if (L[i] <= R[j])
{
nums[k] = L[i];
move_count++;
i++;
}
else
{
nums[k] = R[j];
move_count++;
j++;
}
k++;
}
while (i < n1)
{
nums[k] = L[i];
move_count++;
i++;
k++;
}
while (j < n2)
{
nums[k] = R[j];
move_count++;
j++;
k++;
}
}
void mergeSortHelper(vector<int> &nums, int left, int right, long long &comp_count, long long &move_count)
{
if (left < right)
{
int mid = left + (right - left) / 2;
mergeSortHelper(nums, left, mid, comp_count, move_count);
mergeSortHelper(nums, mid + 1, right, comp_count, move_count);
merge(nums, left, mid, right, comp_count, move_count);
}
}
/*
* function : implement merge sort
* param nums : the vector to be sorted
* param comp_count : count of comparisons
* param move_count : count of moves
* return : ---
*/
void MergeSort(vector<int> &nums, long long &comp_count, long long &move_count)
{
if (nums.empty()) return;
mergeSortHelper(nums, 0, nums.size() - 1, comp_count, move_count);
}

View File

@ -0,0 +1,48 @@
#include <iostream>
#include <vector>
#include <algorithm>
using namespace std;
int partition(vector<int> &nums, int low, int high, long long &comp_count, long long &move_count)
{
int pivot = nums[high];
int i = (low - 1);
for (int j = low; j <= high - 1; j++)
{
comp_count++;
if (nums[j] < pivot)
{
i++;
swap(nums[i], nums[j]);
move_count += 3;
}
}
swap(nums[i + 1], nums[high]);
move_count += 3;
return (i + 1);
}
void quickSortHelper(vector<int> &nums, int low, int high, long long &comp_count, long long &move_count)
{
if (low < high)
{
int pi = partition(nums, low, high, comp_count, move_count);
quickSortHelper(nums, low, pi - 1, comp_count, move_count);
quickSortHelper(nums, pi + 1, high, comp_count, move_count);
}
}
/*
* function : implement quick sort
* param nums : the vector to be sorted
* param comp_count : count of comparisons
* param move_count : count of moves
* return : ---
*/
void QuickSort(vector<int> &nums, long long &comp_count, long long &move_count)
{
if (nums.empty()) return;
quickSortHelper(nums, 0, nums.size() - 1, comp_count, move_count);
}

View File

@ -0,0 +1,70 @@
#include <iostream>
#include <vector>
#include <algorithm>
using namespace std;
/*
* Hoare-style partition used by the 3-way quick sort driver:
* the pivot is taken from the middle element (first swapped to position l).
* On return i == j is the final pivot position; elements to its left are <= pivot
* and elements to its right are >= pivot (equal keys may end up on either side).
*/
void partition3Way(vector<int> &nums, int l, int r, int &i, int &j, long long &comp_count, long long &move_count)
{
// To handle cases where array is sorted or nearly sorted
int mid = l + (r - l) / 2;
swap(nums[mid], nums[l]);
move_count += 3;
i = l, j = r + 1;
int pivot = nums[l];
while (true)
{
while (comp_count++, nums[++i] < pivot)
if (i == r) break;
while (comp_count++, pivot < nums[--j])
if (j == l) break;
// check if pointers cross
if (i >= j) break;
swap(nums[i], nums[j]);
move_count += 3;
}
swap(nums[l], nums[j]);
move_count += 3;
i = j; // Return the pivot position
}
void quickSort3WayHelper(vector<int> &nums, int low, int high, long long &comp_count, long long &move_count)
{
if (high <= low) return;
int i, j;
partition3Way(nums, low, high, i, j, comp_count, move_count);
quickSort3WayHelper(nums, low, i - 1, comp_count, move_count);
quickSort3WayHelper(nums, i + 1, high, comp_count, move_count);
}
/*
* function : implement 3-way quick sort
* param nums : the vector to be sorted
* param comp_count : count of comparisons
* param move_count : count of moves
* return : ---
*/
void QuickSort3Way(vector<int> &nums, long long &comp_count, long long &move_count)
{
if (nums.empty()) return;
// For better performance on random data, shuffle is recommended, but we'll skip for this experiment
// random_shuffle(nums.begin(), nums.end());
quickSort3WayHelper(nums, 0, nums.size() - 1, comp_count, move_count);
}

View File

@ -0,0 +1,81 @@
#include <iostream>
#include <vector>
#include <algorithm>
using namespace std;
// Helper to find median of three and swap pivot to the end
int medianOfThree(vector<int> &nums, int low, int high, long long &comp_count, long long &move_count)
{
int mid = low + (high - low) / 2;
comp_count++;
if (nums[low] > nums[mid])
{
swap(nums[low], nums[mid]);
move_count += 3;
}
comp_count++;
if (nums[low] > nums[high])
{
swap(nums[low], nums[high]);
move_count += 3;
}
comp_count++;
if (nums[mid] > nums[high])
{
swap(nums[mid], nums[high]);
move_count += 3;
}
// Now nums[low] <= nums[mid] <= nums[high]
// Pivot is nums[mid], but we swap it with nums[high-1] to use in partition
// The partition logic will ignore the already sorted nums[low] and nums[high]
swap(nums[mid], nums[high]); // Move pivot to the end
move_count += 3;
return nums[high];
}
int partition_optimized(vector<int> &nums, int low, int high, long long &comp_count, long long &move_count)
{
int pivot = medianOfThree(nums, low, high, comp_count, move_count);
int i = (low - 1);
for (int j = low; j <= high - 1; j++)
{
comp_count++;
if (nums[j] < pivot)
{
i++;
swap(nums[i], nums[j]);
move_count += 3;
}
}
swap(nums[i + 1], nums[high]);
move_count += 3;
return (i + 1);
}
void quickSortOptimizedHelper(vector<int> &nums, int low, int high, long long &comp_count, long long &move_count)
{
if (low < high)
{
int pi = partition_optimized(nums, low, high, comp_count, move_count);
quickSortOptimizedHelper(nums, low, pi - 1, comp_count, move_count);
quickSortOptimizedHelper(nums, pi + 1, high, comp_count, move_count);
}
}
/*
* function : implement quick sort with median-of-three pivot selection
* param nums : the vector to be sorted
* param comp_count : count of comparisons
* param move_count : count of moves
* return : ---
*/
void QuickSortOptimized(vector<int> &nums, long long &comp_count, long long &move_count)
{
if (nums.empty()) return;
quickSortOptimizedHelper(nums, 0, nums.size() - 1, comp_count, move_count);
}

View File

@ -0,0 +1,82 @@
# 排序算法性能分析项目
本项目使用 C++ 实现并深入分析了多种经典排序算法的性能。项目提供了一个自动化测试框架,能够在不同规模的随机生成数据集上对每种算法进行测试,并精确测量其**执行时间**、**比较次数**和**移动次数**,以供量化分析。
## 项目内容与实现方式
### 1. 模块化的算法实现
为了保证代码的清晰度和可扩展性,每一种排序算法都作为独立的模块在各自的 `.cpp` 文件中实现。所有算法的函数声明都统一在 `algorithm.h` 头文件中进行管理。
这种结构使得添加新的算法或修改现有算法变得非常简单。
### 2. 精确的性能指标统计
为了超越单纯的运行时间测量,本项目在每种排序算法的实现内部进行了“插桩”,以统计两个核心的性能指标:
- **比较次数 (Comparisons)**:算法执行过程中元素之间进行比较的总次数。
- **移动次数 (Moves)**:算法执行过程中数据的赋值、交换等移动操作的总次数。一个 `std::swap` 操作计为3次移动。
这些数据为理论复杂度分析提供了有力的实验验证。
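上述计数约定(每次比较 +1、每次赋值 +1、一次 std::swap 记 3 次移动)也可以在 C++ 之外复现;下面是一个按同样约定插桩的插入排序 Python 示意(仅作说明,非项目代码):

```python
def insert_sort_counted(a):
    """按与 C++ 实现相同的约定统计比较与移动次数的插入排序。"""
    comps = moves = 0
    for i in range(1, len(a)):
        key = a[i]
        j = i - 1
        while j >= 0:
            comps += 1            # 每次与 key 的比较计 1 次
            if a[j] <= key:
                break
            a[j + 1] = a[j]       # 每移动一个元素计 1 次
            moves += 1
            j -= 1
        a[j + 1] = key
        moves += 1                # 放回 key 也计 1 次移动
    return comps, moves

print(insert_sort_counted([5, 2, 4, 6, 1, 3]))   # (12, 14)
```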
### 3. 自动化的测试与分析框架
项目核心是一个位于 `main.cpp` 的自动化测试框架,其主要功能包括:
- **多算法支持**:能够自动运行所有已实现的排序算法。
- **多规模测试**:支持在一系列预设的数据规模(例如 100, 1000, ..., 500,000上进行测试。
- **结果可靠性**:通过对每个规模进行多次重复实验并计算平均值,有效消除了单次运行的随机性误差。
- **正确性校验**:在每次排序任务完成后,自动检查数组是否真正有序,确保了算法实现的正确性。
- **格式化输出**:将所有测试结果以清晰的表格形式输出到控制台,方便用户阅读和分析。
## 已实现的排序算法
本项目共实现了以下算法:
#### 基础排序算法
- **插入排序 (Insert Sort)**
- **冒泡排序 (Bubble Sort)**
- **希尔排序 (Shell Sort)**
- **归并排序 (Merge Sort)**
#### 快速排序 (Quick Sort) 变体
- **标准快速排序**:选择最后一个元素作为基准点。
- **三数取中优化快速排序 (QuickSortOptimized)**:通过“三数取中”策略选择基准点,以提高算法在特殊数据(如部分有序)下的稳定性。
- **三路快速排序 (QuickSort3Way)**:特别适用于处理含有大量重复元素的数组。
- **双基准快速排序 (Dual-Pivot Quick Sort)**:使用两个基准点将数组划分为三部分,理论上能减少比较次数,在现代处理器上性能优异。
## 如何编译与运行
项目提供了一个 `Makefile` 文件,极大地简化了编译、运行和清理流程。
### 1. 编译项目
在项目根目录下,执行以下命令来编译所有源代码:
```bash
make
```
该命令会自动调用 `g++` 编译器,并将所有 `.cpp` 文件编译链接成一个名为 `sorting_experiment` 的可执行文件。
### 2. 运行实验
编译成功后,执行以下命令来运行完整的性能分析测试:
```bash
make run
```
你也可以直接运行生成的可执行文件:
```bash
./sorting_experiment
```
程序启动后,将开始对所有算法和所有预设规模进行测试,并将结果实时输出到控制台。
### 3. 清理生成文件
如果你想删除所有编译生成的目标文件 (`.o`) 和可执行文件,可以运行:
```bash
make clean
```
## 输出结果解读
程序的输出是一个性能指标表格,每一行代表一种算法在一个特定数据规模下的平均测试结果。
| 列名 | 中文说明 |
| :--- | :--- |
| **Algorithm** | 被测试的排序算法的名称。 |
| **Size** | 当前测试的数组大小(即数据规模 `n`)。 |
| **Avg Time (s)** | 多次重复测试下的平均运行时间,单位为秒。 |
| **Avg Comparisons**| 平均比较操作的次数。 |
| **Avg Moves** | 平均移动(赋值或交换)操作的次数。 |
| **Correct?** | 排序结果是否正确,"Yes" 代表正确无误。 |

View File

@ -0,0 +1,77 @@
# Sorting Algorithm Performance Analysis
This project implements and analyzes the performance of several classic sorting algorithms in C++. It provides a framework to test each algorithm on randomly generated data of various sizes, measuring execution time, comparison counts, and move counts.
A detailed analysis and discussion of the results can be found in [REPORT.md](./REPORT.md).
## Implemented Algorithms
- **Basic Sorting Algorithms:**
- Insertion Sort
- Bubble Sort
- Shell Sort
- Merge Sort
- **Quick Sort Variants:**
- Standard Quick Sort (pivot is the last element)
- Quick Sort with Median-of-Three pivot optimization
- 3-Way Quick Sort (for handling duplicate keys)
- Dual-Pivot Quick Sort
## How to Build and Run
A `Makefile` is provided to simplify the build and execution process.
### 1. Build the Project
To compile all the source files and create the executable, run:
```bash
make
```
This will generate an executable file named `sorting_experiment`.
### 2. Run the Experiment
To run the performance analysis, execute the following command:
```bash
make run
```
Alternatively, you can run the executable directly:
```bash
./sorting_experiment
```
The program will output a formatted table with the performance metrics for each algorithm across different data sizes.
### 3. Clean Up
To remove the compiled object files and the executable, run:
```bash
make clean
```
---
<details>
<summary><strong>Original Experiment Requirements</strong></summary>
### 实验内容
对几种经典的排序算法进行分析,理解算法在不同输入时的表现,深入剖析算法优缺点及其根源。具体要求如下:
1. 实现常见排序算法至少要实现插入排序、冒泡排序、快速排序、归并排序、shell排序算法
2. 在排序算法中插桩,记录关键操作次数(如比较次数、移动次数等);
3. 以待排序文件的行数n为输入规模固定n随机产生多组测试样本统计算法的平均运行时间和关键操作次数改变n的规模重复多次实验并对结果进行统计
4. 改变数组规模,对不同规模问题下各算法的结果进行统计并绘制图表,与理论值进行对照分析;
5. 优化快速排序的中枢点选取,对优化前后的性能进行分析;
6. 对快速排序的三种实现进行性能比较。
### 附加:
- 实现BlockQuickSort就分支预测次数展开分析
- 实现DualPivotQuickSort就递归深度展开分析
- 在超大规模数据上如1亿个整数对比以上快排实现的性能。
### 编写实验文档:
实验报告:解题思路;所实现算法的时间复杂度分析(结合程序统计关键步骤运行次数,以验证分析结果);程序运行指导,包括程序编译说明、输入输出示例等。如果输入、输出信息较多,建议采用文件进行格式化输入、输出。
</details>

View File

@ -0,0 +1,98 @@
# 排序算法实验报告
## 1. 解题思路
本实验旨在深入理解并分析多种经典排序算法的性能。为了达成此目标,实验遵循了以下设计思路:
1. **模块化实现**:将每种排序算法(插入、冒泡、希尔、归并、快速排序及其变体)分别实现在独立的 `.cpp` 文件中,并通过一个统一的 `algorithm.h` 头文件进行声明。这种结构使得代码清晰、易于扩展和维护。
2. **量化性能指标**:为了客观评估算法性能,除了记录运行时间外,还在算法实现中进行“插桩”,精确统计了两个关键操作:
* **比较次数 (Comparisons)**:元素之间的比较操作,是决定算法时间复杂度的核心因素之一。
* **移动次数 (Moves)**:元素的赋值或交换操作,反映了算法的数据搬运成本。
3. **自动化测试框架**:设计了一个灵活的测试框架 (`main.cpp`),能够:
* 自动化地对所有已实现的算法进行测试。
* 支持对多种不同的输入规模(例如 100, 1000, ..., 500,000进行测试。
* 通过多次重复实验并取平均值的方式,消除单次运行的随机误差,保证结果的可靠性。
* 在每次排序后进行正确性校验,确保算法实现无误。
* 以格式化的表格输出结果,便于阅读和后续分析。
4. **迭代优化与对比**:重点对快速排序进行了深度探索,实现了从基础版本到优化版本(三数取中、三路快排、双基准快排)的演进。通过将这些版本置于同一测试框架下进行性能对比,可以直观地展示不同优化策略带来的效果。
## 2. 算法复杂度分析与实验数据验证
### 理论分析
| 算法 | 平均时间复杂度 | 最好情况 | 最坏情况 | 空间复杂度 |
| :--- | :--- | :--- | :--- | :--- |
| **InsertSort** | O(n²) | O(n) | O(n²) | O(1) |
| **BubbleSort** | O(n²) | O(n) | O(n²) | O(1) |
| **ShellSort** | O(n log n) ~ O(n²) | O(n log n) | O(n²) | O(1) |
| **MergeSort** | O(n log n) | O(n log n) | O(n log n) | O(n) |
| **QuickSort** | O(n log n) | O(n log n) | O(n²) | O(log n) |
| **QuickSortOpt** | O(n log n) | O(n log n) | O(n²) | O(log n) |
| **QuickSort3Way**| O(n log n) | O(n) | O(n²) | O(log n) |
| **DualPivotSort**| O(n log n) | O(n log n) | O(n²) | O(log n) |
### 实验数据验证
通过运行实验程序,可以得到不同算法在不同规模下的平均运行时间、比较次数和移动次数。这些数据可以用来验证上述理论复杂度。
* **O(n²) 算法 (InsertSort, BubbleSort)**:
* **预期**:当输入规模 `n` 增大10倍时运行时间、比较和移动次数大约会增大100倍。
* **观察**:从实验数据中可以看到,当 `n` 从 1000 增加到 10000 时,`InsertSort``BubbleSort` 的运行时间急剧增加,远超线性增长,这与 O(n²) 的特征相符。由于性能问题,测试框架在 `n >= 50000` 时自动跳过了这些算法。
* **O(n log n) 算法 (MergeSort, QuickSort 变体)**:
* **预期**:当输入规模 `n` 增大时,性能增长平缓。比较次数大致在 `n * log(n)` 的量级。
* **观察**`MergeSort` 和各种 `QuickSort` 的运行时间随 `n` 的增长远比 O(n²) 算法要慢。例如,从 `n=10000``n=100000`10倍它们的运行时间增长远小于100倍符合 `n log n` 的趋势。`MergeSort` 的比较次数非常稳定,接近理论值。
* **快速排序变体对比**:
* `QuickSort` (基础版) 在随机数据下表现良好,但如果输入数据有序,其性能会退化到 O(n²)。
* `QuickSortOpt` (三数取中) 通过改进基准点选择,显著提高了在非完全随机数据下的稳定性,其比较和移动次数通常略优于基础版。
* `QuickSort3Way` 在处理含大量重复元素的数组时优势最大(本次实验为随机数据,优势不明显),其在最好情况下(所有元素相同)可达 O(n)。
* `DualPivotSort` (双基准) 在理论上可以减少比较次数。从实验数据看,在较大规模的数据集上(如 `n=100000` 及以上),它通常比单基准的快速排序更快,显示出其优化效果。
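作为对上述趋势的一个简单数值核对,可以直接计算理论增长倍数并与实测时间对比(实测值取自实验输出记录,以下 Python 片段仅作示意):

```python
import math

n1, n2 = 10_000, 100_000
print("n^2 理论倍数      :", (n2 / n1) ** 2)                               # 100.0
print("n*log(n) 理论倍数 :", (n2 * math.log2(n2)) / (n1 * math.log2(n1)))   # ≈ 12.5

# 实测平均时间(秒),取自实验输出:
quicksort = {10_000: 0.000340, 100_000: 0.004147}
mergesort = {10_000: 0.000639, 100_000: 0.007466}
print("QuickSort 实测倍数:", quicksort[100_000] / quicksort[10_000])        # ≈ 12.2
print("MergeSort 实测倍数:", mergesort[100_000] / mergesort[10_000])        # ≈ 11.7
```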
## 3. 程序运行指导
### 编译
所有相关的 `.cpp` 源文件需要一起编译。可以使用 g++ 编译器,命令如下:
```bash
g++ main.cpp InsertSort.cpp BubbleSort.cpp ShellSort.cpp MergeSort.cpp QuickSort.cpp QuickSortOptimized.cpp QuickSort3Way.cpp DualPivotQuickSort.cpp out.cpp -o sorting_experiment
```
此命令会生成一个名为 `sorting_experiment` 的可执行文件。
### 运行
直接在终端中运行生成的可执行文件:
```bash
./sorting_experiment
```
### 输出结果说明
程序会输出一个性能分析表格,每一行代表一个算法在一个特定输入规模下的测试结果。
| 列 | 说明 |
| :--- | :--- |
| **Algorithm** | 被测试的排序算法名称。 |
| **Size** | 输入数组的元素个数(即规模 `n`)。 |
| **Avg Time (s)** | 多次重复测试的平均运行时间,单位为秒。 |
| **Avg Comparisons**| 平均比较次数。 |
| **Avg Moves** | 平均移动(赋值/交换)次数。 |
| **Correct?** | 排序结果是否正确,"Yes" 表示正确。 |
## 4. 性能对比与结论
1. **算法类别差异**O(n²) 级别的算法插入排序、冒泡排序仅适用于小规模数据。当数据规模超过一万时其性能急剧下降无法在实际应用中使用。相比之下O(n log n) 级别的算法(归并、希尔、快速排序)则表现出卓越的性能和可扩展性。
2. **快速排序的优势与优化**:在所有 O(n log n) 算法中,快速排序及其变体通常在平均情况下的性能最好,这得益于其更少的常量因子和高效的缓存利用率。
* **基准点选择至关重要**:基础的快速排序在特定数据模式下存在性能退化的风险,而“三数取中”等优化策略能有效缓解此问题,增强算法的稳定性。
* **双基准快排的威力**`DualPivotQuickSort` 在大规模随机数据上展现了最佳性能,验证了其在现代计算环境下的理论优势。
3. **归并排序的稳定性**:虽然 `MergeSort` 在本次测试中的原始速度略逊于最优的快速排序,但它具有一个重要优点:其性能是稳定的 O(n log n),不受输入数据初始顺序的影响。此外,归并排序是一种稳定的排序算法,而快速排序不是。
**最终结论**:对于通用场景下的内部排序任务,经过优化的快速排序(特别是双基准快速排序)是性能上的首选。而当需要排序稳定性或对最坏情况有严格要求时,归并排序是更可靠的选择。

View File

@ -0,0 +1,32 @@
#include <iostream>
#include <vector>
#include <algorithm>
using namespace std;
/*
* function : implement shell sort
* param nums : the vector to be sorted
* param comp_count : count of comparisons
* param move_count : count of moves
* return : ---
*/
void ShellSort(vector<int> &nums, long long &comp_count, long long &move_count)
{
int n = nums.size();
for (int gap = n / 2; gap > 0; gap /= 2)
{
for (int i = gap; i < n; i += 1)
{
int temp = nums[i];
int j;
for (j = i; j >= gap && (comp_count++, nums[j - gap] > temp); j -= gap)
{
nums[j] = nums[j - gap];
move_count++;
}
nums[j] = temp;
move_count++;
}
}
}

View File

@ -0,0 +1,20 @@
#include <iostream>
#include <vector>
#include <algorithm>
#include <ctime>
#include <cstdlib>
#include <stack>
#include <string>
#include <cstring>
using namespace std;
void InsertSort(vector<int>&, long long&, long long&);
void BubbleSort(vector<int>&, long long&, long long&);
void MergeSort(vector<int>&, long long&, long long&);
void QuickSort(vector<int>&, long long&, long long&);
void QuickSortOptimized(vector<int>&, long long&, long long&);
void QuickSort3Way(vector<int>&, long long&, long long&);
void DualPivotQuickSort(vector<int>&, long long&, long long&);
void ShellSort(vector<int>&, long long&, long long&);

Binary file not shown.


Binary file not shown.


Binary file not shown.


View File

@ -0,0 +1,110 @@
#include <iostream>
#include <vector>
#include <algorithm>
#include <ctime>
#include <cstdlib>
#include <string>
#include <iomanip>
#include "algorithm.h"
#include "out.h"
using namespace std;
#define UP_BOUND 1000000
// Function pointer for sorting algorithms
typedef void (*SortFunction)(vector<int>&, long long&, long long&);
bool is_sorted(const vector<int>& vec)
{
for (size_t i = 0; i + 1 < vec.size(); ++i) // i + 1 avoids size_t underflow when vec is empty
{
if (vec[i] > vec[i + 1])
{
return false;
}
}
return true;
}
void run_performance_analysis()
{
cout << "--- Running Performance Analysis ---" << endl;
SortFunction algorithms[] = {InsertSort, BubbleSort, ShellSort, MergeSort, QuickSort, QuickSortOptimized, QuickSort3Way, DualPivotQuickSort};
string algo_names[] = {"InsertSort", "BubbleSort", "ShellSort", "MergeSort", "QuickSort", "QuickSortOpt", "QuickSort3Way", "DualPivotSort"};
int num_algorithms = sizeof(algorithms) / sizeof(algorithms[0]);
vector<int> sizes = {100, 1000, 10000, 50000, 100000, 500000};
int num_repeats = 10;
cout << left << setw(15) << "Algorithm"
<< setw(10) << "Size"
<< setw(20) << "Avg Time (s)"
<< setw(25) << "Avg Comparisons"
<< setw(25) << "Avg Moves"
<< setw(15) << "Correct?" << endl;
cout << string(110, '-') << endl;
for (int i = 0; i < num_algorithms; i++)
{
for (int size : sizes)
{
if (size >= 50000 && (algo_names[i] == "InsertSort" || algo_names[i] == "BubbleSort"))
{
cout << left << setw(15) << algo_names[i]
<< setw(10) << size << " -> Skipped" << endl;
continue;
}
double total_time = 0;
long long total_comps = 0;
long long total_moves = 0;
bool all_correct = true;
for (int j = 0; j < num_repeats; j++)
{
vector<int> vec(size);
for (int k = 0; k < size; k++)
vec[k] = rand() % UP_BOUND;
long long comp_count = 0;
long long move_count = 0;
vector<int> temp_vec = vec;
clock_t start_time = clock();
algorithms[i](temp_vec, comp_count, move_count);
clock_t end_time = clock();
if (!is_sorted(temp_vec))
{
all_correct = false;
}
total_time += (double)(end_time - start_time) / CLOCKS_PER_SEC;
total_comps += comp_count;
total_moves += move_count;
}
cout << left << setw(15) << algo_names[i]
<< setw(10) << size
<< fixed << setprecision(6) << setw(20) << total_time / num_repeats
<< setw(25) << total_comps / num_repeats
<< setw(25) << total_moves / num_repeats
<< setw(15) << (all_correct ? "Yes" : "No") << endl;
}
cout << endl;
}
cout << "------------------------------------" << endl;
}
int main()
{
srand(time(0)); // Seed for random number generation
run_performance_analysis();
return 0;
}

View File

@ -0,0 +1,16 @@
#include <iostream>
#include <vector>
using namespace std;
/*
* function : print a vector
* param nums : the vector to be printed
* return : ---
*/
void Out(vector<int> &nums)
{
for (int i = 0; i < static_cast<int>(nums.size()); i++)
cout << nums[i] << " ";
cout << endl;
}

View File

@ -0,0 +1,6 @@
#include <iostream>
#include <vector>
using namespace std;
void Out(vector<int> &);

View File

@ -0,0 +1,138 @@
import matplotlib.pyplot as plt
import pandas as pd
import io
output = """
--- Running Performance Analysis ---
Algorithm Size Avg Time (s) Avg Comparisons Avg Moves Correct?
--------------------------------------------------------------------------------------------------------------
InsertSort 100 0.000006 2628 2633 Yes
InsertSort 1000 0.000108 251895 251902 Yes
InsertSort 10000 0.010841 25002574 25002583 Yes
InsertSort 50000 -> Skipped
InsertSort 100000 -> Skipped
InsertSort 500000 -> Skipped
BubbleSort 100 0.000018 4891 7118 Yes
BubbleSort 1000 0.001527 498086 754888 Yes
BubbleSort 10000 0.147654 49984024 74752463 Yes
BubbleSort 50000 -> Skipped
BubbleSort 100000 -> Skipped
BubbleSort 500000 -> Skipped
ShellSort 100 0.000003 859 911 Yes
ShellSort 1000 0.000040 15404 15912 Yes
ShellSort 10000 0.000577 263303 268383 Yes
ShellSort 50000 0.003500 1883075 1908568 Yes
ShellSort 100000 0.007738 4329053 4379481 Yes
ShellSort 500000 0.047314 28756169 29008608 Yes
MergeSort 100 0.000005 541 1344 Yes
MergeSort 1000 0.000053 8710 19952 Yes
MergeSort 10000 0.000639 120467 267232 Yes
MergeSort 50000 0.003562 718133 1568928 Yes
MergeSort 100000 0.007466 1536366 3337856 Yes
MergeSort 500000 0.042618 8837077 18951424 Yes
QuickSort 100 0.000002 633 1146 Yes
QuickSort 1000 0.000027 11070 18485 Yes
QuickSort 10000 0.000340 156205 257969 Yes
QuickSort 50000 0.001971 938346 1488604 Yes
QuickSort 100000 0.004147 2013584 3293229 Yes
QuickSort 500000 0.023807 11835910 18761405 Yes
QuickSortOpt 100 0.000002 723 1306 Yes
QuickSortOpt 1000 0.000029 10853 19089 Yes
QuickSortOpt 10000 0.000359 152089 263599 Yes
QuickSortOpt 50000 0.002079 889846 1472662 Yes
QuickSortOpt 100000 0.004246 1894396 3081447 Yes
QuickSortOpt 500000 0.025016 10915240 17611893 Yes
QuickSort3Way 100 0.000003 756 693 Yes
QuickSort3Way 1000 0.000039 12003 9189 Yes
QuickSort3Way 10000 0.000479 168509 114458 Yes
QuickSort3Way 50000 0.002715 1013555 652409 Yes
QuickSort3Way 100000 0.005945 2123298 1376911 Yes
QuickSort3Way 500000 0.032901 12324462 7696985 Yes
DualPivotSort 100 0.000002 624 884 Yes
DualPivotSort 1000 0.000026 10635 12297 Yes
DualPivotSort 10000 0.000331 151783 170443 Yes
DualPivotSort 50000 0.001911 908308 974849 Yes
DualPivotSort 100000 0.004047 1926590 2053087 Yes
DualPivotSort 500000 0.022747 11189537 11712207 Yes
"""
# Use StringIO to treat the string data as a file
data = io.StringIO(output)
# Read the data, skipping the header and footer
df = pd.read_csv(data, sep=r'\s+', skiprows=3, skipfooter=1, engine='python',
names=['Algorithm', 'Size', 'Avg Time (s)', 'Avg Comparisons', 'Avg Moves', 'Correct?'])
# Drop the 'Correct?' column as it's not needed for plotting
df = df.drop(columns=['Correct?'])
# Remove rows with 'Skipped' values
df = df[~df.isin(['->', 'Skipped']).any(axis=1)]
# Convert columns to numeric types
for col in ['Size', 'Avg Time (s)', 'Avg Comparisons', 'Avg Moves']:
df[col] = pd.to_numeric(df[col])
# Get the list of algorithms
algorithms = df['Algorithm'].unique()
# Plotting
plt.style.use('ggplot')
# --- Plot 1: Average Time vs. Size ---
plt.figure(figsize=(12, 8))
for algo in algorithms:
subset = df[df['Algorithm'] == algo]
plt.plot(subset['Size'], subset['Avg Time (s)'], marker='o', linestyle='-', label=algo)
plt.title('Average Time vs. Input Size')
plt.xlabel('Input Size (n)')
plt.ylabel('Average Time (seconds)')
plt.xscale('log')
plt.yscale('log')
plt.legend()
plt.grid(True, which="both", ls="--")
plt.savefig('average_time_vs_size.png')
plt.close()
# --- Plot 2: Average Comparisons vs. Size ---
plt.figure(figsize=(12, 8))
for algo in algorithms:
subset = df[df['Algorithm'] == algo]
plt.plot(subset['Size'], subset['Avg Comparisons'], marker='o', linestyle='-', label=algo)
plt.title('Average Comparisons vs. Input Size')
plt.xlabel('Input Size (n)')
plt.ylabel('Average Comparisons')
plt.xscale('log')
plt.yscale('log')
plt.legend()
plt.grid(True, which="both", ls="--")
plt.savefig('average_comparisons_vs_size.png')
plt.close()
# --- Plot 3: Average Moves vs. Size ---
plt.figure(figsize=(12, 8))
for algo in algorithms:
subset = df[df['Algorithm'] == algo]
plt.plot(subset['Size'], subset['Avg Moves'], marker='o', linestyle='-', label=algo)
plt.title('Average Moves vs. Input Size')
plt.xlabel('Input Size (n)')
plt.ylabel('Average Moves')
plt.xscale('log')
plt.yscale('log')
plt.legend()
plt.grid(True, which="both", ls="--")
plt.savefig('average_moves_vs_size.png')
plt.close()
print("Plots saved as average_time_vs_size.png, average_comparisons_vs_size.png, and average_moves_vs_size.png")

Binary file not shown.

Binary file not shown.

Binary file not shown.

View File

@ -0,0 +1,16 @@
CC=g++
all: data_gen main analyser
data_gen:
$(CC) data_gen.cpp -o data_gen
main:
$(CC) main.cpp algorithm.cpp test.cpp -o main
analyser:
$(CC) analyser.cpp algorithm.cpp test.cpp -o analyser
.PHONY: clean
clean:
rm -rf *.o *.data data_gen main analyser

View File

@ -0,0 +1,29 @@
DEMO
===========================
## 环境依赖
gcc
## 目录结构描述
├── README.md // help \
├── main.cpp // 主文件 \
├── algorithm.h // 算法声明头文件 \
├── algorithm.cpp // 算法实现 \
├── data_gen.cpp // 测试集数据生成 \
├── analyser.cpp // 插桩代码测试 \
├── test.h // 测试函数声明头文件 \
├── test.cpp // 测试函数实现 \
├── test.sh // 辅助测试的bash脚本 \
├── Makefile // 编译规则
## 测试步骤
1. 编译
2. 生成测试集
3. 测试、插桩测试
(可通过简单的bash脚本进行批量生成数据集和批量测试)
## V1.0 版本内容更新
1. 创建文件与基础实现

View File

@ -0,0 +1,144 @@
/***************************************************************************** *
* Copyright (C) 2022 Mingtian Shao shaomt@nudt.edu.cn *
* *
* This file is part of homework of Algorithm Design and Analysis. *
* *
* @file algorithm.cpp *
* @brief Implement different algorithms *
* *
* @author Mingtian Shao *
* @email shaomt@nudt.edu.cn *
* @version 1.0 *
* @date 2022-11-12 *
* @license GNU General Public License (GPL) *
* *
*----------------------------------------------------------------------------*
* Change History : *
* <Date> | <Version> | <Author> | <Description> *
*----------------------------------------------------------------------------*
* 2022/11/12 | 1.0 | Mingtian Shao | Create file *
*----------------------------------------------------------------------------*
* *
*****************************************************************************/
#include <iostream>
#include <vector>
#include <ctime>
#include <cstdlib>
#include <stack>
#include <string>
#include <cstring>
#include "algorithm.h"
using namespace std;
/**
* @func algorithm_1
* @brief 解决背包问题思路一
* @param n: 物品种类
* @param w: 背包容量限制
* @param weights: 物品重量数组
* @param values: 物品价值数组
* @param solve: 保存解的数组
*
* @return 返回背包容纳的最大价值
*/
int algorithm_1(int n, int w, int* weights, int* values, int* solve)
{
// memo index start from 0, which means type-1 or weight-1
vector< vector<int> > memo(n);
vector< vector<int> > nums(n);
for (int i = 0; i < n; i++)
{
memo[i].resize(w);
nums[i].resize(w);
}
// avoid the inefficient branch for i = 0 in the following loop
for (int j = 0; j < w; j++) // available weight capacity - 1
{
int k = (j + 1) / weights[0];
memo[0][j] = k * values[0]; // take as many as possible
nums[0][j] = k;
}
for (int i = 1; i < n; i++) // index of chosen object
{
for (int j = 0; j < w; j++) // available weight capacity - 1
{
int maxvalue = 0, maxk = 0;
for (int k = 0; k <= (j + 1) / weights[i]; k++)
{
int tmp = k * values[i];
if (j - k * weights[i] >= 0)
{
tmp += memo[i - 1][j - k * weights[i]];
}
if (tmp > maxvalue)
{
maxvalue = tmp;
maxk = k;
}
}
memo[i][j] = maxvalue;
nums[i][j] = maxk;
}
}
int tmpw = w - 1;
for (int i = n - 1; i >= 0; i--)
{
solve[i] = nums[i][tmpw];
tmpw -= solve[i] * weights[i];
}
return memo[n - 1][w - 1];
}
/**
* @func 函数名
* @brief 函数简要说明
* @param 参数1
* @param 参数2
*
* @return 返回说明
*/
int algorithm_2(int n, int w, int* weights, int* values, int* solve)
{
/*
* coding
*/
}
/**
* @func algorithm_1_analy
* @brief 在函数algorithm_1的基础上进行插桩记录关键操作次数
* @param n: 物品种类
* @param w: 背包容量限制
* @param weights: 物品重量数组
* @param values: 物品价值数组
* @param solve: 保存解的数组
* @param calc_count: 记录计算次数
* @param search_count: 记录查找次数
*
* @return 返回背包容纳的最大价值
*/
int algorithm_1_analy(int n, int w, int* weights, int* values, int* solve, int& calc_count, int& search_count)
{
/*
* coding
*/
}
/**
* @func 函数名
* @brief 函数简要说明
* @param 参数1
* @param 参数2
*
* @return 返回说明
*/
int algorithm_2_analy(int n, int w, int* weights, int* values, int* solve, int& calc_count, int& search_count)
{
/*
* coding
*/
}

View File

@ -0,0 +1,17 @@
#include <iostream>
#include <vector>
#include <algorithm>
#include <ctime>
#include <cstdlib>
#include <stack>
#include <string>
#include <cstring>
using namespace std;
int algorithm_1(int, int, int*, int*, int*);
int algorithm_2(int, int, int*, int*, int*);
int algorithm_1_analy(int, int, int*, int*, int*, int&, int&);
int algorithm_2_analy(int, int, int*, int*, int*, int&, int&);

View File

@ -0,0 +1,98 @@
/*****************************************************************************
* Copyright (C) 2022 Mingtian Shao shaomt@nudt.edu.cn *
* *
* This file is part of homework of Algorithm Design and Analysis. *
* *
* @file analyser.cpp *
* @brief Analyze the number of critical operations *
* for different algorithms *
* *
* @author Mingtian Shao *
* @email shaomt@nudt.edu.cn *
* @version 1.0 *
* @date 2022-11-12 *
* @license GNU General Public License (GPL) *
* *
*----------------------------------------------------------------------------*
* Change History : *
* <Date> | <Version> | <Author> | <Description> *
*----------------------------------------------------------------------------*
* 2022/11/12 | 1.0 | Mingtian Shao | Create file *
*----------------------------------------------------------------------------*
* *
*****************************************************************************/
#include <iostream>
#include <fstream>
#include <random>
#include <cmath>
#include "algorithm.h"
#include "test.h"
using namespace std;
/**
* @func analy_fun
* @brief 插桩算法测试
* @param data_file: 测试集文件
* @param cntcalc: 计算计数器
* @param cntsearch: 查找计数器
* @param fun: 测试的算法函数
*
* @return 重复测试的次数(测试数据的组数)
*/
int analy_fun(char *data_file, int &cntcalc, int &cntsearch, int (fun)(int, int, int*, int*, int*, int&, int&))
{
ifstream fin(data_file);
int result; //保存每个测试用例的最大价值
int repeat; //记录测试数据组数
// k: 生成数据的组数
// n: 物品的种类(问题的规模)
// w: 背包重量限制
int k, n, w;
fin >> k >> n >> w;
repeat = k;
cout << "数据集组数:" << k << " , 物品种类:" << n << " , 背包容量限制: " << w << endl;
int* weights = new int[n], * values = new int[n];
int* solve = new int[n];
while (k--) {
for (int i = 0; i < n; i++) {
fin >> weights[i];
}
for (int i = 0; i < n; i++) {
fin >> values[i];
solve[i] = 0;
}
result = fun(n, w, weights, values, solve, cntcalc, cntsearch);
}
delete[] weights;
delete[] values;
delete[] solve;
return repeat;
}
int main(int argc, char *argv[])
{
//input: data_file
if (argc != 2)
{
cout<< "参数错误参数数据集文件如test_x_x_x.data" << endl;
exit(1);
}
int cntcalc = 0;
int cntsearch = 0;
int repeat;
repeat = analy_fun(argv[1], cntcalc, cntsearch, algorithm_1_analy);
int average_calc = cntcalc / repeat;
int average_search = cntsearch / repeat;
cout << "average_calc: " << average_calc << endl;
cout << "average_search: " << average_search << endl;
return 0;
}

View File

@ -0,0 +1,81 @@
/*****************************************************************************
* Copyright (C) 2022 Mingtian Shao shaomt@nudt.edu.cn *
* *
* This file is part of homework of Algorithm Design and Analysis. *
* *
* @file data_gen.cpp *
* @brief Generate data and save it to a file *
* *
* @author Mingtian Shao *
* @email shaomt@nudt.edu.cn *
* @version 1.0 *
* @date 2022-11-12 *
* @license GNU General Public License (GPL) *
* *
*----------------------------------------------------------------------------*
* Change History : *
* <Date> | <Version> | <Author> | <Description> *
*----------------------------------------------------------------------------*
* 2022/11/12 | 1.0 | Mingtian Shao | Create file *
*----------------------------------------------------------------------------*
* *
*****************************************************************************/
#include <iostream>
#include <fstream>
#include <random>
#include <cmath>
#include <cstdio>   // sprintf
#include <ctime>    // time()
using namespace std;
/**
 * @func data_gen
 * @brief Generate a test data set and write it to a file
 * @param k: number of test groups
 * @param n: number of item types
 * @param w: knapsack weight limit
 *
 * @return null
 */
void data_gen(int k, int n, int w)
{
char out_file[100];
// Output file name follows the pattern "test_{k}_{n}_{w}.data"
sprintf(out_file, "test_%d_%d_%d.data", k, n, w);
ofstream fout(out_file);
fout << k << " " << n << " " << w << endl;
default_random_engine e(time(nullptr));
// Uniform distribution for weights and values: integers in [2, w/1.5]
uniform_int_distribution<signed> u(2, static_cast<int>(w / 1.5));
while (k--)
{
// Generate the weight line: n random integers in [2, w/1.5]
for (int i = 0; i < n; i++)
fout << u(e) << " ";
fout << endl;
// Generate the value line: n random integers in [2, w/1.5]
for (int i = 0; i < n; i++)
fout << u(e) << " ";
fout << endl;
}
}
int main(int argc, char *argv[])
{
// k: number of data groups to generate
// n: number of item types (problem size)
// w: knapsack weight limit
if (argc != 4)
{
cout << "Argument error. Expected arguments: number of data groups k, number of item types n, knapsack weight limit w" << endl;
exit(1);
}
int k, n, w;
k = atoi(argv[1]);
n = atoi(argv[2]);
w = atoi(argv[3]);
data_gen(k,n,w);
return 0;
}
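// Worked example (illustrative numbers, not real output): running
//   ./data_gen 2 4 50
// writes test_2_4_50.data, whose first line is the header "2 4 50", followed by one
// weight line and one value line per group, each holding 4 integers drawn uniformly
// from [2, 33] (i.e. 2 .. w/1.5):
//   2 4 50
//   12 7 30 5
//   21 9 14 28
//   6 33 18 11
//   25 3 17 22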

View File

@ -0,0 +1,57 @@
/*****************************************************************************
* Copyright (C) 2022 Mingtian Shao shaomt@nudt.edu.cn *
* *
* This file is part of homework of Algorithm Design and Analysis. *
* *
* @file main.cpp *
* @brief Test the performance of different algorithms *
* *
* @author Mingtian Shao *
* @email shaomt@nudt.edu.cn *
* @version 1.0 *
* @date 2022-11-12 *
* @license GNU General Public License (GPL) *
* *
*----------------------------------------------------------------------------*
* Change History : *
* <Date> | <Version> | <Author> | <Description> *
*----------------------------------------------------------------------------*
* 2022/11/12 | 1.0 | Mingtian Shao | Create file *
*----------------------------------------------------------------------------*
* *
*****************************************************************************/
#include <iostream>
#include <ctime>
#include <cstdlib>
#include <stack>
#include <string>
#include <cstring>
#include "algorithm.h"
#include "test.h"
using namespace std;
int main(int argc, char *argv[])
{
// input: data_file
if (argc != 2)
{
cout << "Argument error. Expected one argument: a data set file such as test_x_x_x.data" << endl;
exit(1);
}
long double average_time_1, average_time_2;
average_time_1 = test_5(argv[1], algorithm_1);
//average_time_2 = test_5(argv[1], algorithm_2);
cout << average_time_1 << endl;
//cout << average_time_2 << endl;
return 0;
/**
 * For reference:
 * Possible inputs: a data set file, n (item types), w (knapsack weight limit)
 * 1. Read the test data set file generated by data_gen
 * 2. Solve the test cases with the different algorithms
 * 3. Record the algorithms' performance and save it to a file
 * coding
 */
}
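// Orientation only: a minimal 0/1-knapsack sketch that is assumed to match the
// 5-argument signature of algorithm_1 (n items, capacity w, weights[], values[],
// solve[] receiving a 0/1 choice per item); the repository's real implementation may
// differ. std::vector and std::max are available here through test.h.
static int algorithm_1_sketch(int n, int w, int* weights, int* values, int* solve)
{
    // dp[i][j]: best value using the first i items with remaining capacity j
    std::vector<std::vector<int>> dp(n + 1, std::vector<int>(w + 1, 0));
    for (int i = 1; i <= n; ++i)
        for (int j = 0; j <= w; ++j) {
            dp[i][j] = dp[i - 1][j];                               // skip item i
            if (j >= weights[i - 1])
                dp[i][j] = std::max(dp[i][j],                      // or take it once
                                    dp[i - 1][j - weights[i - 1]] + values[i - 1]);
        }
    // Backtrack through the table to mark the chosen items in solve[]
    for (int i = n, j = w; i >= 1; --i) {
        if (dp[i][j] != dp[i - 1][j]) { solve[i - 1] = 1; j -= weights[i - 1]; }
        else                          { solve[i - 1] = 0; }
    }
    return dp[n][w];
}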

View File

@ -0,0 +1,122 @@
/*****************************************************************************
* Copyright (C) 2022 Mingtian Shao shaomt@nudt.edu.cn *
* *
* This file is part of homework of Algorithm Design and Analysis. *
* *
* @file algorithm.cpp *
* @brief test different algorithms *
* *
* @author Mingtian Shao *
* @email shaomt@nudt.edu.cn *
* @version 1.0 *
* @date 2022-11-12 *
* @license GNU General Public License (GPL) *
* *
*----------------------------------------------------------------------------*
* Change History : *
* <Date> | <Version> | <Author> | <Description> *
*----------------------------------------------------------------------------*
* 2022/11/12 | 1.0 | Mingtian Shao | Create file *
*----------------------------------------------------------------------------*
* *
*****************************************************************************/
#include <iostream>
#include <ctime>
#include <cstdlib>
#include <stack>
#include <string>
#include <cstring>
#include <fstream>
#include "algorithm.h"
#include "test.h"
using namespace std;
/**
 * @func PrintArray
 * @brief Print an integer array
 * @param n: array length
 * @param nums: pointer to the integer array
 *
 * @return null
 */
void PrintArray(int n, int* nums)
{
for (int i = 0; i < n; i++)
{
cout << nums[i] << " ";
}
cout << endl;
}
/**
 * @func test_5
 * @brief Measure an algorithm's running time and print the result of each test case
 * @param data_file: test data set file
 * @param fun: the algorithm under test; it takes 5 parameters, hence the name test_5
 *
 * @return the algorithm's average running time
 */
long double test_5(char* data_file, int (fun)(int, int, int*, int*, int*))
{
double sum_time = 0;
clock_t start_time, end_time;
ifstream fin(data_file);
if (!fin) {
cout << "Failed to open data file: " << data_file << endl;
exit(1);
}
int result; // maximum value of each test case
int repeat; // number of test data groups
// k: number of data groups
// n: number of item types (problem size)
// w: knapsack weight limit
int k, n, w;
fin >> k >> n >> w;
repeat = k;
cout << "Data groups: " << k << ", item types: " << n << ", knapsack capacity limit: " << w << endl;
int* weights = new int[n], * values = new int[n];
int* solve = new int[n];
while (k--) {
for (int i = 0; i < n; i++) {
fin >> weights[i];
}
for (int i = 0; i < n; i++) {
fin >> values[i];
solve[i] = 0;
}
cout << "weights:" << endl;
PrintArray(n, weights);
cout << "values:" << endl;
PrintArray(n, values);
// Time only the algorithm itself
start_time = clock();
result = fun(n, w, weights, values, solve);
end_time = clock();
sum_time += (double)(end_time - start_time) / CLOCKS_PER_SEC;
cout << "max value is " << result << endl;
cout << "the solution is "; PrintArray(n, solve);
}
delete[] weights;
delete[] values;
delete[] solve;
long double average_time = sum_time / repeat;
return average_time;
}
/**
 * @func test_7
 * @brief Mainly used to record the number of critical operations performed by fun
 * @param data_file: test data set file
 * @param fun: the algorithm under test; it takes 7 parameters, hence the name test_7
 *
 * @return the algorithm's average running time; this value is only a reference,
 *         because the instrumentation code affects the algorithm's efficiency
 */
long double test_7(char* data_file, int (fun)(int, int, int*, int*, int*, int&, int&))
{
/*
 * coding
 */
return 0; // placeholder so the stub compiles cleanly; see the sketch below
}
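// One possible body for test_7 (a hedged sketch, not the assignment's answer): it
// reuses the reading/timing pattern of test_5 above, but calls the 7-argument
// instrumented variant and reports the averaged operation counts alongside the
// reference-only running time.
static long double test_7_sketch(char* data_file, int (fun)(int, int, int*, int*, int*, int&, int&))
{
    double sum_time = 0;
    clock_t start_time, end_time;
    ifstream fin(data_file);
    int k, n, w;                        // groups, item count, capacity limit
    fin >> k >> n >> w;
    int repeat = k;
    int* weights = new int[n];
    int* values = new int[n];
    int* solve = new int[n];
    int cntcalc = 0, cntsearch = 0;     // accumulated critical-operation counters
    while (k--) {
        for (int i = 0; i < n; i++) fin >> weights[i];
        for (int i = 0; i < n; i++) { fin >> values[i]; solve[i] = 0; }
        start_time = clock();
        fun(n, w, weights, values, solve, cntcalc, cntsearch);
        end_time = clock();
        sum_time += (double)(end_time - start_time) / CLOCKS_PER_SEC;
    }
    cout << "average calc ops: " << (double)cntcalc / repeat
         << ", average search ops: " << (double)cntsearch / repeat << endl;
    delete[] weights; delete[] values; delete[] solve;
    return sum_time / repeat;           // reference only: instrumentation skews timing
}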

View File

@ -0,0 +1,14 @@
#pragma once
#include <iostream>
#include <vector>
#include <algorithm>
#include <ctime>
#include <cstdlib>
#include <stack>
#include <string>
#include <cstring>
#include <fstream>
using namespace std;
long double test_5(char*, int (fun)(int, int, int*, int*, int*));
long double test_7(char*, int (fun)(int, int, int*, int*, int*, int&, int&));

View File

@ -0,0 +1,22 @@
#!/bin/bash
# Test script; the commands below are only an example
# Add execute permission before use: chmod +x test.sh
echo "test start"
./data_gen 1000 100 100
./main test_1000_100_100.data
./analyser test_1000_100_100.data
echo "--------------------------------------------"
./data_gen 1000 500 500
./main test_1000_500_500.data
./analyser test_1000_500_500.data
echo "--------------------------------------------"
echo "test end"

Binary file not shown.