Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Add CPU Pod Creation Support in Python SDK #403

Open
wants to merge 1 commit into
base: main
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
5 changes: 4 additions & 1 deletion README.md
Original file line number Diff line number Diff line change
Expand Up @@ -137,9 +137,12 @@ pods = runpod.get_pods()
# Get a specific pod
pod = runpod.get_pod(pod.id)

# Create a pod
# Create a pod with GPU
pod = runpod.create_pod("test", "runpod/stack", "NVIDIA GeForce RTX 3070")

# Create a pod with CPU
pod = runpod.create_pod("test", "runpod/stack", instance_id="cpu3c-2-4")

# Stop the pod
runpod.stop_pod(pod.id)

Expand Down
26 changes: 20 additions & 6 deletions runpod/api/ctl_commands.py
Original file line number Diff line number Diff line change
Expand Up @@ -89,7 +89,7 @@ def get_pod(pod_id: str):
def create_pod(
name: str,
image_name: str,
gpu_type_id: str,
gpu_type_id: Optional[str] = None,
cloud_type: str = "ALL",
support_public_ip: bool = True,
start_ssh: bool = True,
Expand All @@ -109,17 +109,18 @@ def create_pod(
allowed_cuda_versions: Optional[list] = None,
min_download = None,
min_upload = None,
instance_id: Optional[str] = None,
) -> dict:
"""
Create a pod

:param name: the name of the pod
:param image_name: the name of the docker image to be used by the pod
:param gpu_type_id: the gpu type wanted by the pod (retrievable by get_gpus)
:param gpu_type_id: the gpu type wanted by the pod (retrievable by get_gpus). If None, creates a CPU-only pod
:param cloud_type: if secure cloud, community cloud or all is wanted
:param data_center_id: the id of the data center
:param country_code: the code for country to start the pod in
:param gpu_count: how many gpus should be attached to the pod
:param gpu_count: how many gpus should be attached to the pod (ignored for CPU-only pods)
:param volume_in_gb: how big should the pod volume be
:param ports: the ports to open in the pod, example format - "8888/http,666/tcp"
:param volume_mount_path: the path inside the container where the volume should be mounted
Expand All @@ -129,12 +130,19 @@ def create_pod(
:param template_id: the id of the template to use for the pod
:param min_download: minimum download speed in Mbps
:param min_upload: minimum upload speed in Mbps
:param instance_id: the id of a specific instance to deploy to (for CPU pods)
:example:

>>> # Create GPU pod
>>> pod_id = runpod.create_pod("test", "runpod/stack", "NVIDIA GeForce RTX 3070")
>>> # Create CPU pod
>>> pod_id = runpod.create_pod("test", "runpod/stack")
>>> # Create CPU pod on specific instance
>>> pod_id = runpod.create_pod("test", "runpod/stack", instance_id="cpu3c-2-4")
"""
# Input Validation
get_gpu(gpu_type_id) # Check if GPU exists, will raise ValueError if not.
if gpu_type_id is not None:
get_gpu(gpu_type_id) # Check if GPU exists, will raise ValueError if not.
if cloud_type not in ["ALL", "COMMUNITY", "SECURE"]:
raise ValueError("cloud_type must be one of ALL, COMMUNITY or SECURE")

Expand All @@ -158,7 +166,7 @@ def create_pod(
start_ssh,
data_center_id,
country_code,
gpu_count,
gpu_count if gpu_type_id is not None else None,
volume_in_gb,
container_disk_in_gb,
min_vcpu_count,
Expand All @@ -172,10 +180,16 @@ def create_pod(
allowed_cuda_versions,
min_download,
min_upload,
instance_id,
)
)

cleaned_response = raw_response["data"]["podFindAndDeployOnDemand"]
print(f"raw_response: {raw_response}")

if gpu_type_id is not None:
cleaned_response = raw_response["data"]["podFindAndDeployOnDemand"]
else:
cleaned_response = raw_response["data"]["deployCpuPod"]
return cleaned_response


Expand Down
141 changes: 85 additions & 56 deletions runpod/api/mutations/pods.py
Original file line number Diff line number Diff line change
Expand Up @@ -10,104 +10,133 @@
def generate_pod_deployment_mutation(
name: str,
image_name: str,
gpu_type_id: str,
gpu_type_id: Optional[str] = None,
cloud_type: str = "ALL",
support_public_ip: bool = True,
start_ssh: bool = True,
data_center_id=None,
country_code=None,
gpu_count=None,
volume_in_gb=None,
container_disk_in_gb=None,
min_vcpu_count=None,
min_memory_in_gb=None,
docker_args=None,
ports=None,
volume_mount_path=None,
env: dict = None,
template_id=None,
network_volume_id=None,
data_center_id: Optional[str] = None,
country_code: Optional[str] = None,
gpu_count: Optional[int] = None,
volume_in_gb: Optional[int] = None,
container_disk_in_gb: Optional[int] = None,
min_vcpu_count: Optional[int] = None,
min_memory_in_gb: Optional[int] = None,
docker_args: Optional[str] = None,
ports: Optional[str] = None,
volume_mount_path: Optional[str] = None,
env: Optional[dict] = None,
template_id: Optional[str] = None,
network_volume_id: Optional[str] = None,
allowed_cuda_versions: Optional[List[str]] = None,
min_download=None,
min_upload=None,
):
min_download: Optional[int] = None,
min_upload: Optional[int] = None,
instance_id: Optional[str] = None,
) -> str:
"""
Generates a mutation to deploy a pod on demand.

Args:
name: Name of the pod
image_name: Docker image name
gpu_type_id: GPU type ID for GPU pods, None for CPU pods
cloud_type: Cloud type (ALL, COMMUNITY, or SECURE)
support_public_ip: Whether to support public IP
start_ssh: Whether to start SSH service
data_center_id: Data center ID
country_code: Country code for pod location
gpu_count: Number of GPUs (for GPU pods)
volume_in_gb: Volume size in GB
container_disk_in_gb: Container disk size in GB
min_vcpu_count: Minimum vCPU count
min_memory_in_gb: Minimum memory in GB
docker_args: Docker arguments
ports: Port mappings (e.g. "8080/tcp,22/tcp")
volume_mount_path: Volume mount path
env: Environment variables dict
template_id: Template ID
network_volume_id: Network volume ID
allowed_cuda_versions: List of allowed CUDA versions
min_download: Minimum download speed in Mbps
min_upload: Minimum upload speed in Mbps
instance_id: Instance ID for CPU pods

Returns:
str: GraphQL mutation string
"""
input_fields = []

# ------------------------------ Required Fields ----------------------------- #
input_fields.append(f'name: "{name}"')
input_fields.append(f'imageName: "{image_name}"')
input_fields.append(f'gpuTypeId: "{gpu_type_id}"')

# ------------------------------ Default Fields ------------------------------ #
input_fields.append(f"cloudType: {cloud_type}")
# Required Fields
input_fields.extend([
f'name: "{name}"',
f'imageName: "{image_name}"',
f"cloudType: {cloud_type}"
])

if start_ssh:
input_fields.append("startSsh: true")

if support_public_ip:
input_fields.append("supportPublicIp: true")
# GPU Pod Fields
if gpu_type_id is not None:
input_fields.append(f'gpuTypeId: "{gpu_type_id}"')
input_fields.append(f"supportPublicIp: {str(support_public_ip).lower()}")

if gpu_count is not None:
input_fields.append(f"gpuCount: {gpu_count}")
if volume_in_gb is not None:
input_fields.append(f"volumeInGb: {volume_in_gb}")
if min_vcpu_count is not None:
input_fields.append(f"minVcpuCount: {min_vcpu_count}")
if min_memory_in_gb is not None:
input_fields.append(f"minMemoryInGb: {min_memory_in_gb}")
if docker_args is not None:
input_fields.append(f'dockerArgs: "{docker_args}"')
if allowed_cuda_versions is not None:
cuda_versions = ", ".join(f'"{v}"' for v in allowed_cuda_versions)
input_fields.append(f"allowedCudaVersions: [{cuda_versions}]")

# CPU Pod Fields
else:
input_fields.append("supportPublicIp: false")
if instance_id is not None:
input_fields.append(f'instanceId: "{instance_id}"')
template_id = template_id or "runpod-ubuntu"

# ------------------------------ Optional Fields ----------------------------- #
# Optional Fields
if data_center_id is not None:
input_fields.append(f'dataCenterId: "{data_center_id}"')
else:
input_fields.append("dataCenterId: null")

if country_code is not None:
input_fields.append(f'countryCode: "{country_code}"')
if gpu_count is not None:
input_fields.append(f"gpuCount: {gpu_count}")
if volume_in_gb is not None:
input_fields.append(f"volumeInGb: {volume_in_gb}")
if container_disk_in_gb is not None:
input_fields.append(f"containerDiskInGb: {container_disk_in_gb}")
if min_vcpu_count is not None:
input_fields.append(f"minVcpuCount: {min_vcpu_count}")
if min_memory_in_gb is not None:
input_fields.append(f"minMemoryInGb: {min_memory_in_gb}")
if docker_args is not None:
input_fields.append(f'dockerArgs: "{docker_args}"')
if ports is not None:
ports = ports.replace(" ", "")
input_fields.append(f'ports: "{ports}"')
input_fields.append(f'ports: "{ports.replace(" ", "")}"')
if volume_mount_path is not None:
input_fields.append(f'volumeMountPath: "{volume_mount_path}"')
if env is not None:
env_string = ", ".join(
[f'{{ key: "{key}", value: "{value}" }}' for key, value in env.items()]
)
input_fields.append(f"env: [{env_string}]")
env_items = [f'{{ key: "{k}", value: "{v}" }}' for k, v in env.items()]
input_fields.append(f"env: [{', '.join(env_items)}]")
if template_id is not None:
input_fields.append(f'templateId: "{template_id}"')

if network_volume_id is not None:
input_fields.append(f'networkVolumeId: "{network_volume_id}"')

if allowed_cuda_versions is not None:
allowed_cuda_versions_string = ", ".join(
[f'"{version}"' for version in allowed_cuda_versions]
)
input_fields.append(f"allowedCudaVersions: [{allowed_cuda_versions_string}]")

if min_download is not None:
input_fields.append(f'minDownload: {min_download}')

if min_upload is not None:
input_fields.append(f'minUpload: {min_upload}')

# Format input fields
mutation_type = "podFindAndDeployOnDemand" if gpu_type_id else "deployCpuPod"
input_string = ", ".join(input_fields)

return f"""
mutation {{
podFindAndDeployOnDemand(
{mutation_type}(
input: {{
{input_string}
}}
) {{
id
desiredStatus
imageName
env
machineId
Expand Down
34 changes: 29 additions & 5 deletions tests/test_api/test_mutations_pods.py
Original file line number Diff line number Diff line change
Expand Up @@ -10,11 +10,12 @@ class TestPodMutations(unittest.TestCase):

def test_generate_pod_deployment_mutation(self):
"""
Test generate_pod_deployment_mutation
Test generate_pod_deployment_mutation for both GPU and CPU pods
"""
result = pods.generate_pod_deployment_mutation(
# Test GPU pod deployment
gpu_result = pods.generate_pod_deployment_mutation(
name="test",
image_name="test_image",
image_name="test_image",
gpu_type_id="1",
cloud_type="cloud",
data_center_id="1",
Expand All @@ -33,8 +34,31 @@ def test_generate_pod_deployment_mutation(self):
allowed_cuda_versions=["11.8", "12.0"],
)

# Here you should check the correct structure of the result
self.assertIn("mutation", result)
# Test CPU pod deployment
cpu_result = pods.generate_pod_deployment_mutation(
name="test-cpu",
image_name="test_image",
cloud_type="cloud",
data_center_id="1",
country_code="US",
volume_in_gb=100,
container_disk_in_gb=10,
min_vcpu_count=2,
min_memory_in_gb=4,
docker_args="args",
ports="8080",
volume_mount_path="/path",
env={"ENV": "test"},
instance_id="cpu3c-2-4"
)

# Check GPU pod mutation structure
self.assertIn("mutation", gpu_result)
self.assertIn("podFindAndDeployOnDemand", gpu_result)

# Check CPU pod mutation structure
self.assertIn("mutation", cpu_result)
self.assertIn("deployCpuPod", cpu_result)

def test_generate_pod_stop_mutation(self):
"""
Expand Down