Occupy the GPUs by running a ResNet-18 model that computes intensively. Name the script train.py so that it looks like a normal training script:
import subprocess
import time

import torch
from torch import nn
from torch.nn import functional as F


class BasicBlock(nn.Module):
    expansion = 1

    def __init__(self, in_planes, planes, stride=1):
        super(BasicBlock, self).__init__()
        self.conv1 = nn.Conv2d(
            in_planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3,
                               stride=1, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        self.shortcut = nn.Sequential()
        if stride != 1 or in_planes != self.expansion * planes:
            self.shortcut = nn.Sequential(
                nn.Conv2d(in_planes, self.expansion * planes,
                          kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(self.expansion * planes)
            )

    def forward(self, x):
        out = F.relu(self.bn1(self.conv1(x)))
        out = self.bn2(self.conv2(out))
        out += self.shortcut(x)
        out = F.relu(out)
        return out
class ResNet(nn.Module):
    def __init__(self, block, num_blocks, num_classes=10):
        super(ResNet, self).__init__()
        self.in_planes = 64
        self.conv1 = nn.Conv2d(3, 64, kernel_size=3,
                               stride=1, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        self.layer1 = self._make_layer(block, 64, num_blocks[0], stride=1)
        self.layer2 = self._make_layer(block, 128, num_blocks[1], stride=2)
        self.layer3 = self._make_layer(block, 256, num_blocks[2], stride=2)
        self.layer4 = self._make_layer(block, 512, num_blocks[3], stride=2)
        self.linear = nn.Linear(512 * block.expansion, num_classes)

    def _make_layer(self, block, planes, num_blocks, stride):
        strides = [stride] + [1] * (num_blocks - 1)
        layers = []
        for stride in strides:
            layers.append(block(self.in_planes, planes, stride))
            self.in_planes = planes * block.expansion
        return nn.Sequential(*layers)

    def forward(self, x):
        out = F.relu(self.bn1(self.conv1(x)))
        out = self.layer1(out)
        out = self.layer2(out)
        out = self.layer3(out)
        out = self.layer4(out)
        out = F.avg_pool2d(out, 4)
        out = out.view(out.size(0), -1)
        out = self.linear(out)
        return out


def ResNet18():
    return ResNet(BasicBlock, [2, 2, 2, 2])
def check_gpu_usage():
    # Query the current utilization of every visible GPU via nvidia-smi.
    out_str = subprocess.run(['nvidia-smi', '--query-gpu=utilization.gpu', '--format=csv,noheader,nounits'],
                             capture_output=True, text=True).stdout
    out_list = out_str.split('\n')
    gpu_load_lines = [int(x.strip()) for x in out_list if x]
    gpu_load = max(gpu_load_lines)  # utilization of the busiest GPU, in percent
    return gpu_load
def gpu_task():
    # Define the network
    net = ResNet18()
    net = nn.DataParallel(net).cuda()
    # Define the loss function and optimizer
    criterion = nn.CrossEntropyLoss()
    optimizer = torch.optim.SGD(net.parameters(), lr=0.001, momentum=0.9)
    for _ in range(100):  # repeat the computation 100 times
        # Generate fake inputs with the same shape as CIFAR-10 images
        batch = 20480
        fake_input = torch.randn(batch, 3, 32, 32).cuda()
        # Generate fake labels
        fake_labels = torch.randint(0, 10, (batch,)).cuda()
        # Zero the gradients
        optimizer.zero_grad()
        # Forward pass, backward pass, optimizer step
        outputs = net(fake_input)
        loss = criterion(outputs, fake_labels)
        loss.backward()
        optimizer.step()
    del net
    torch.cuda.empty_cache()
# Keep the GPUs busy forever, pausing for varying intervals between rounds
# so the utilization pattern looks like real training.
while True:
    for pause in [10, 5, 15, 5, 20, 4, 9, 12, 1]:
        gpu_task()
        time.sleep(pause)
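Note that the check_gpu_usage() helper is never called by the main loop. If you would rather occupy the GPUs only when nobody else is using them, a minimal sketch that could replace the loop above looks like this (the 10% idle threshold and the 60-second polling interval are assumptions for illustration, not part of the script above):

# Hypothetical idle-aware variant; threshold and interval are assumptions.
# Note: nvidia-smi reports all GPUs on the node, not only those selected
# via CUDA_VISIBLE_DEVICES, so the check covers the whole machine.
while True:
    if check_gpu_usage() < 10:  # all GPUs are (nearly) idle
        gpu_task()
    time.sleep(60)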
You can then launch it with:
CUDA_VISIBLE_DEVICES=2,3,4,5,6,7 python train.py
or a similar command, adjusting the device list to the GPUs you want to occupy.
Here is the effect:
(base) aiscuser@node-0:/cosmos/mengzchen$ nvidia-smi
Tue Apr 15 05:06:10 2025
+-----------------------------------------------------------------------------+
| NVIDIA-SMI 525.105.17 Driver Version: 525.105.17 CUDA Version: 12.4 |
|-------------------------------+----------------------+----------------------+
| GPU Name Persistence-M| Bus-Id Disp.A | Volatile Uncorr. ECC |
| Fan Temp Perf Pwr:Usage/Cap| Memory-Usage | GPU-Util Compute M. |
| | | MIG M. |
|===============================+======================+======================|
| 0 Tesla V100-SXM2... On | 00000001:00:00.0 Off | 0 |
| N/A 32C P0 57W / 300W | 14995MiB / 32768MiB | 0% Default |
| | | N/A |
+-------------------------------+----------------------+----------------------+
| 1 Tesla V100-SXM2... On | 00000002:00:00.0 Off | 0 |
| N/A 32C P0 42W / 300W | 3MiB / 32768MiB | 0% Default |
| | | N/A |
+-------------------------------+----------------------+----------------------+
| 2 Tesla V100-SXM2... On | 00000003:00:00.0 Off | 0 |
| N/A 54C P0 250W / 300W | 29147MiB / 32768MiB | 100% Default |
| | | N/A |
+-------------------------------+----------------------+----------------------+
| 3 Tesla V100-SXM2... On | 00000004:00:00.0 Off | 0 |
| N/A 61C P0 290W / 300W | 29265MiB / 32768MiB | 100% Default |
| | | N/A |
+-------------------------------+----------------------+----------------------+
| 4 Tesla V100-SXM2... On | 00000005:00:00.0 Off | 0 |
| N/A 54C P0 302W / 300W | 29385MiB / 32768MiB | 100% Default |
| | | N/A |
+-------------------------------+----------------------+----------------------+
| 5 Tesla V100-SXM2... On | 00000006:00:00.0 Off | 0 |
| N/A 61C P0 250W / 300W | 29385MiB / 32768MiB | 100% Default |
| | | N/A |
+-------------------------------+----------------------+----------------------+
| 6 Tesla V100-SXM2... On | 00000007:00:00.0 Off | 0 |
| N/A 58C P0 268W / 300W | 29265MiB / 32768MiB | 100% Default |
| | | N/A |
+-------------------------------+----------------------+----------------------+
| 7 Tesla V100-SXM2... On | 00000008:00:00.0 Off | 0 |
| N/A 62C P0 279W / 300W | 29253MiB / 32768MiB | 99% Default |
| | | N/A |
+-------------------------------+----------------------+----------------------+
+-----------------------------------------------------------------------------+
| Processes: |
| GPU GI CI PID Type Process name GPU Memory |
| ID ID Usage |
|=============================================================================|
+-----------------------------------------------------------------------------+