10 Proven AWS Cost Optimization Strategies

AWS Pathway Team · AWS Learning Platform · 5 min read

Cloud costs can quickly spiral out of control without proper management. Here are 10 proven strategies to optimize your AWS costs while maintaining performance and reliability.

1. Right-Size Your EC2 Instances

Monitor Resource Utilization

# Use AWS CLI to get instance metrics
aws cloudwatch get-metric-statistics \
--namespace AWS/EC2 \
--metric-name CPUUtilization \
--dimensions Name=InstanceId,Value=i-1234567890abcdef0 \
--start-time 2024-01-01T00:00:00Z \
--end-time 2024-01-08T00:00:00Z \
--period 3600 \
--statistics Average

Automated Right-Sizing with Lambda

import boto3
import json
from datetime import datetime, timedelta

def lambda_handler(event, context):
    ec2 = boto3.client('ec2')
    cloudwatch = boto3.client('cloudwatch')

    # Get all running instances
    instances = ec2.describe_instances(
        Filters=[{'Name': 'instance-state-name', 'Values': ['running']}]
    )

    recommendations = []

    for reservation in instances['Reservations']:
        for instance in reservation['Instances']:
            instance_id = instance['InstanceId']
            instance_type = instance['InstanceType']

            # Get CPU utilization for the last 7 days
            cpu_metrics = cloudwatch.get_metric_statistics(
                Namespace='AWS/EC2',
                MetricName='CPUUtilization',
                Dimensions=[{'Name': 'InstanceId', 'Value': instance_id}],
                StartTime=datetime.now() - timedelta(days=7),
                EndTime=datetime.now(),
                Period=3600,
                Statistics=['Average']
            )

            if cpu_metrics['Datapoints']:
                avg_cpu = sum(dp['Average'] for dp in cpu_metrics['Datapoints']) / len(cpu_metrics['Datapoints'])

                # Flag instances averaging under 10% CPU as right-sizing candidates
                if avg_cpu < 10:
                    recommendations.append({
                        'InstanceId': instance_id,
                        'CurrentType': instance_type,
                        'Recommendation': 'Consider downsizing or using Spot instances',
                        'AvgCPU': avg_cpu
                    })

    return {
        'statusCode': 200,
        'body': json.dumps(recommendations)
    }
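To make the check truly automated, the function can be invoked on a schedule. A minimal sketch using an EventBridge rule, assuming the function is already deployed (the rule name and statement ID here are illustrative):

import boto3

def schedule_rightsizing_lambda(function_arn):
    events = boto3.client('events')
    lambda_client = boto3.client('lambda')

    # Run the right-sizing check once a day
    rule = events.put_rule(
        Name='daily-rightsizing-check',
        ScheduleExpression='rate(1 day)',
        State='ENABLED'
    )

    # Allow EventBridge to invoke the function
    lambda_client.add_permission(
        FunctionName=function_arn,
        StatementId='AllowEventBridgeInvoke',
        Action='lambda:InvokeFunction',
        Principal='events.amazonaws.com',
        SourceArn=rule['RuleArn']
    )

    events.put_targets(
        Rule='daily-rightsizing-check',
        Targets=[{'Id': 'rightsizing-lambda', 'Arn': function_arn}]
    )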

2. Leverage Reserved Instances and Savings Plans

Reserved Instance Strategy

import boto3

def analyze_ri_opportunities():
    ec2 = boto3.client('ec2')

    # Analyze usage patterns across running instances
    instances = ec2.describe_instances(
        Filters=[{'Name': 'instance-state-name', 'Values': ['running']}]
    )

    usage_patterns = {}
    for reservation in instances['Reservations']:
        for instance in reservation['Instances']:
            instance_type = instance['InstanceType']
            availability_zone = instance['Placement']['AvailabilityZone']

            # Use a tuple key; joining and re-splitting on '-' would break
            # because Availability Zone names contain hyphens
            key = (instance_type, availability_zone)
            usage_patterns[key] = usage_patterns.get(key, 0) + 1

    # Recommend RIs for consistent usage
    ri_recommendations = []
    for (instance_type, availability_zone), count in usage_patterns.items():
        if count >= 3:  # 3 or more instances of the same type in the same AZ
            ri_recommendations.append({
                'InstanceType': instance_type,
                'AvailabilityZone': availability_zone,
                'RecommendedQuantity': count,
                'EstimatedSavings': 'Roughly 30-40% vs On-Demand, depending on term and payment option'
            })

    return ri_recommendations
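For the Savings Plans side, Cost Explorer can generate purchase recommendations directly from your usage history. A minimal sketch; the term, payment option, and lookback window below are illustrative choices:

import boto3

def get_savings_plans_recommendations():
    ce = boto3.client('ce')

    # Ask Cost Explorer for Compute Savings Plans recommendations
    # based on the last 30 days of usage
    response = ce.get_savings_plans_purchase_recommendation(
        SavingsPlansType='COMPUTE_SP',
        TermInYears='ONE_YEAR',
        PaymentOption='NO_UPFRONT',
        LookbackPeriodInDays='THIRTY_DAYS'
    )

    return response.get('SavingsPlansPurchaseRecommendation', {})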

3. Implement Auto Scaling

EC2 Auto Scaling Configuration

{
  "AutoScalingGroupName": "web-servers-asg",
  "MinSize": 2,
  "MaxSize": 10,
  "DesiredCapacity": 3,
  "DefaultCooldown": 300,
  "HealthCheckType": "ELB",
  "HealthCheckGracePeriod": 300,
  "LaunchTemplate": {
    "LaunchTemplateName": "web-server-template",
    "Version": "$Latest"
  },
  "TargetGroupARNs": [
    "arn:aws:elasticloadbalancing:region:account:targetgroup/web-servers/1234567890123456"
  ]
}
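Because the top-level JSON keys above match boto3's create_auto_scaling_group parameters, the configuration can be applied programmatically. A sketch, assuming the config is saved to a file (the name below is hypothetical) and extended with the VPCZoneIdentifier or AvailabilityZones that the API also requires:

import boto3
import json

def create_asg_from_config(config_path='asg-config.json'):
    autoscaling = boto3.client('autoscaling')

    with open(config_path) as f:
        config = json.load(f)

    # JSON keys map directly onto the API's keyword arguments;
    # the call also needs VPCZoneIdentifier or AvailabilityZones set
    autoscaling.create_auto_scaling_group(**config)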

Predictive Scaling Policy

{
  "PolicyName": "predictive-scaling-policy",
  "PolicyType": "PredictiveScaling",
  "PredictiveScalingConfiguration": {
    "MetricSpecifications": [
      {
        "TargetValue": 70.0,
        "PredefinedMetricPairSpecification": {
          "PredefinedMetricType": "ASGCPUUtilization"
        }
      }
    ],
    "Mode": "ForecastAndScale",
    "SchedulingBufferTime": 300
  }
}
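The policy is attached to the group with put_scaling_policy, whose parameters mirror the top-level keys above. A sketch, with a hypothetical file name:

import boto3
import json

autoscaling = boto3.client('autoscaling')

# Load the policy document above and attach it to the ASG
with open('predictive-scaling-policy.json') as f:
    policy = json.load(f)

autoscaling.put_scaling_policy(
    AutoScalingGroupName='web-servers-asg',
    **policy
)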

4. Optimize Storage Costs

S3 Intelligent Tiering

import boto3

def setup_intelligent_tiering():
    s3 = boto3.client('s3')

    # Create an intelligent tiering configuration covering the whole bucket
    s3.put_bucket_intelligent_tiering_configuration(
        Bucket='my-bucket',
        Id='EntireBucket',
        IntelligentTieringConfiguration={
            'Id': 'EntireBucket',
            'Status': 'Enabled',
            'Filter': {
                'Prefix': ''
            },
            'Tierings': [
                {
                    'Days': 90,
                    'AccessTier': 'ARCHIVE_ACCESS'
                },
                {
                    'Days': 180,
                    'AccessTier': 'DEEP_ARCHIVE_ACCESS'
                }
            ]
        }
    )

EBS Volume Optimization

import boto3

def optimize_ebs_volumes():
    ec2 = boto3.client('ec2')

    # Find unattached volumes
    volumes = ec2.describe_volumes(
        Filters=[
            {'Name': 'status', 'Values': ['available']}
        ]
    )

    cost_savings = []
    for volume in volumes['Volumes']:
        volume_id = volume['VolumeId']
        size = volume['Size']
        volume_type = volume['VolumeType']

        # Calculate monthly cost (example rates)
        monthly_cost = calculate_ebs_cost(size, volume_type)

        cost_savings.append({
            'VolumeId': volume_id,
            'Size': size,
            'Type': volume_type,
            'MonthlyCost': monthly_cost,
            'Recommendation': 'Delete unused volume'
        })

    return cost_savings

def calculate_ebs_cost(size_gb, volume_type):
    # Simplified cost calculation (check current AWS pricing for your region)
    rates = {
        'gp3': 0.08,   # per GB per month
        'gp2': 0.10,
        'io1': 0.125,
        'io2': 0.125
    }
    return size_gb * rates.get(volume_type, 0.10)
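Acting on those recommendations is worth doing carefully. One cautious pattern, sketched below, is to snapshot each volume before deleting it so the data can be restored if it turns out to still be needed:

import boto3

def cleanup_unused_volumes(volume_ids):
    ec2 = boto3.client('ec2')
    waiter = ec2.get_waiter('snapshot_completed')

    for volume_id in volume_ids:
        # Take a backup snapshot first
        snapshot = ec2.create_snapshot(
            VolumeId=volume_id,
            Description=f'Pre-deletion backup of {volume_id}'
        )
        # Wait until the snapshot finishes before removing the volume
        waiter.wait(SnapshotIds=[snapshot['SnapshotId']])
        ec2.delete_volume(VolumeId=volume_id)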

5. Use Spot Instances for Fault-Tolerant Workloads

Spot Fleet Configuration

{
  "SpotFleetRequestConfig": {
    "IamFleetRole": "arn:aws:iam::account:role/aws-ec2-spot-fleet-role",
    "AllocationStrategy": "diversified",
    "TargetCapacity": 10,
    "SpotPrice": "0.05",
    "LaunchSpecifications": [
      {
        "ImageId": "ami-12345678",
        "InstanceType": "m5.large",
        "KeyName": "my-key-pair",
        "SecurityGroups": [
          {"GroupId": "sg-12345678"}
        ],
        "SubnetId": "subnet-12345678"
      }
    ]
  }
}
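Submitting the request from Python is straightforward, since the JSON above mirrors the request_spot_fleet API. A sketch, assuming the config is saved under the (hypothetical) file name below:

import boto3
import json

def submit_spot_fleet(config_path='spot-fleet-config.json'):
    ec2 = boto3.client('ec2')

    with open(config_path) as f:
        config = json.load(f)

    # Submit the fleet request and return its ID for later tracking
    response = ec2.request_spot_fleet(
        SpotFleetRequestConfig=config['SpotFleetRequestConfig']
    )
    return response['SpotFleetRequestId']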

6. Optimize Data Transfer Costs

CloudFront for Content Delivery

import boto3
import uuid

def create_cloudfront_distribution():
    cloudfront = boto3.client('cloudfront')

    distribution_config = {
        'CallerReference': str(uuid.uuid4()),
        'Comment': 'Cost optimization distribution',
        'DefaultCacheBehavior': {
            'TargetOriginId': 'S3-my-bucket',
            'ViewerProtocolPolicy': 'redirect-to-https',
            'TrustedSigners': {
                'Enabled': False,
                'Quantity': 0
            },
            'ForwardedValues': {
                'QueryString': False,
                'Cookies': {'Forward': 'none'}
            },
            'MinTTL': 0,
            'DefaultTTL': 86400,
            'MaxTTL': 31536000
        },
        'Origins': {
            'Quantity': 1,
            'Items': [
                {
                    'Id': 'S3-my-bucket',
                    'DomainName': 'my-bucket.s3.amazonaws.com',
                    'S3OriginConfig': {
                        'OriginAccessIdentity': ''
                    }
                }
            ]
        },
        'Enabled': True
    }

    return cloudfront.create_distribution(DistributionConfig=distribution_config)

7. Database Cost Optimization

RDS Instance Right-Sizing

import boto3
from datetime import datetime, timedelta

def analyze_rds_utilization():
    rds = boto3.client('rds')
    cloudwatch = boto3.client('cloudwatch')

    instances = rds.describe_db_instances()

    recommendations = []
    for instance in instances['DBInstances']:
        db_instance_id = instance['DBInstanceIdentifier']
        db_instance_class = instance['DBInstanceClass']

        # Get CPU utilization for the last 7 days
        cpu_metrics = cloudwatch.get_metric_statistics(
            Namespace='AWS/RDS',
            MetricName='CPUUtilization',
            Dimensions=[
                {'Name': 'DBInstanceIdentifier', 'Value': db_instance_id}
            ],
            StartTime=datetime.now() - timedelta(days=7),
            EndTime=datetime.now(),
            Period=3600,
            Statistics=['Average']
        )

        if cpu_metrics['Datapoints']:
            avg_cpu = sum(dp['Average'] for dp in cpu_metrics['Datapoints']) / len(cpu_metrics['Datapoints'])

            # Flag instances averaging under 20% CPU
            if avg_cpu < 20:
                recommendations.append({
                    'DBInstanceId': db_instance_id,
                    'CurrentClass': db_instance_class,
                    'AvgCPU': avg_cpu,
                    'Recommendation': 'Consider downsizing'
                })

    return recommendations
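Applying a downsizing recommendation is a single API call. A sketch; setting ApplyImmediately=False defers the change to the next maintenance window, which avoids an unplanned restart:

import boto3

def downsize_db_instance(db_instance_id, target_class):
    rds = boto3.client('rds')

    # Deferring to the maintenance window avoids an immediate restart
    rds.modify_db_instance(
        DBInstanceIdentifier=db_instance_id,
        DBInstanceClass=target_class,
        ApplyImmediately=False
    )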

8. Monitor and Set Up Billing Alerts

Cost Anomaly Detection

import boto3

def setup_cost_anomaly_detection():
    ce = boto3.client('ce')

    # Create an anomaly monitor that tracks spend per AWS service
    response = ce.create_anomaly_monitor(
        AnomalyMonitor={
            'MonitorName': 'ServiceCostAnomalyMonitor',
            'MonitorType': 'DIMENSIONAL',
            'MonitorDimension': 'SERVICE'
        }
    )

    return response
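For the billing-alert half of this step, a common approach is a CloudWatch alarm on the account's EstimatedCharges metric. A sketch, assuming an existing SNS topic for notifications; note that billing metrics are published only in us-east-1 and require billing alerts to be enabled in the account's billing preferences:

import boto3

def create_billing_alarm(threshold_usd, sns_topic_arn):
    # Billing metrics live in us-east-1 regardless of workload region
    cloudwatch = boto3.client('cloudwatch', region_name='us-east-1')

    cloudwatch.put_metric_alarm(
        AlarmName='monthly-spend-alarm',
        Namespace='AWS/Billing',
        MetricName='EstimatedCharges',
        Dimensions=[{'Name': 'Currency', 'Value': 'USD'}],
        Statistic='Maximum',
        Period=21600,  # 6 hours; billing data updates a few times per day
        EvaluationPeriods=1,
        Threshold=threshold_usd,
        ComparisonOperator='GreaterThanThreshold',
        AlarmActions=[sns_topic_arn]  # notify the (assumed) SNS topic
    )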

9. Implement Lifecycle Policies

S3 Lifecycle Policy

{
  "Rules": [
    {
      "ID": "CostOptimizationRule",
      "Status": "Enabled",
      "Filter": {
        "Prefix": "logs/"
      },
      "Transitions": [
        {
          "Days": 30,
          "StorageClass": "STANDARD_IA"
        },
        {
          "Days": 90,
          "StorageClass": "GLACIER"
        },
        {
          "Days": 365,
          "StorageClass": "DEEP_ARCHIVE"
        }
      ],
      "Expiration": {
        "Days": 2555
      }
    }
  ]
}
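Applying the policy with boto3 takes one call, and the JSON document above can be passed through as-is. A sketch with a hypothetical file name:

import boto3
import json

def apply_lifecycle_policy(bucket_name, policy_path='lifecycle-policy.json'):
    s3 = boto3.client('s3')

    with open(policy_path) as f:
        lifecycle = json.load(f)

    s3.put_bucket_lifecycle_configuration(
        Bucket=bucket_name,
        LifecycleConfiguration=lifecycle
    )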

10. Regular Cost Reviews and Optimization

Automated Cost Reporting

import boto3

def generate_cost_report():
    ce = boto3.client('ce')

    # Get cost and usage data, grouped by service
    response = ce.get_cost_and_usage(
        TimePeriod={
            'Start': '2024-01-01',
            'End': '2024-01-31'
        },
        Granularity='MONTHLY',
        Metrics=['BlendedCost'],
        GroupBy=[
            {
                'Type': 'DIMENSION',
                'Key': 'SERVICE'
            }
        ]
    )

    # Process and format the report
    cost_breakdown = {}
    for result in response['ResultsByTime']:
        for group in result['Groups']:
            service = group['Keys'][0]
            cost = float(group['Metrics']['BlendedCost']['Amount'])
            cost_breakdown[service] = cost

    return cost_breakdown
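A quick way to use the report is to print services sorted by spend, so the biggest cost drivers surface first:

if __name__ == '__main__':
    breakdown = generate_cost_report()

    # Largest cost drivers first
    for service, cost in sorted(breakdown.items(), key=lambda item: item[1], reverse=True):
        print(f'{service}: ${cost:,.2f}')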

Conclusion

Cost optimization is an ongoing process that requires regular monitoring and adjustment. Start with the biggest cost drivers in your environment and gradually implement these strategies. Remember to balance cost savings with performance and reliability requirements.

Key takeaways:

  • Monitor resource utilization continuously
  • Automate scaling and optimization where possible
  • Use the right pricing models for your workloads
  • Implement proper governance and alerting
  • Review and adjust regularly as workloads evolve

By implementing these strategies, you can achieve significant cost savings while maintaining or even improving your application performance.