* Refactor carbon-server service in docker-compose.yml to use pre-built image and remove unnecessary build context and volume mounts

* Enhance performance and security features:
- Update compiler flags for better optimization and security.
- Implement MIME type caching for improved response handling.
- Introduce worker thread pool for efficient connection management.
- Optimize socket settings for low latency and enhanced performance.
- Add support for CPU affinity in worker threads.
- Implement graceful shutdown for worker threads during cleanup.
This commit is contained in:
2025-11-24 18:10:38 +00:00
committed by GitHub
parent 91e300afbc
commit 9535e0d2c8
5 changed files with 222 additions and 84 deletions

View File

@@ -9,8 +9,10 @@ NC := \033[0m
# Compiler and flags
CC = gcc
CFLAGS = -Wall -Wextra -O2 -D_GNU_SOURCE
LDFLAGS = -pthread
CFLAGS = -Wall -Wextra -Werror -O3 -march=native -mtune=native -flto -D_GNU_SOURCE -fstack-protector-strong
CFLAGS += -fPIE -fno-strict-overflow -Wformat -Wformat-security -Werror=format-security
CFLAGS += -D_FORTIFY_SOURCE=2 -fvisibility=hidden
LDFLAGS = -pthread -Wl,-z,relro,-z,now -pie
LIBS = -lssl -lcrypto -lmagic -lnghttp2
# Source files and object files
@@ -72,11 +74,20 @@ install-deps:
@echo "$(GREEN)Dependencies installed ✓$(NC)"
# Debug build
debug: CFLAGS += -g -DDEBUG
debug: CFLAGS = -Wall -Wextra -g -DDEBUG -D_GNU_SOURCE -fstack-protector-strong -O0
debug: clean all
# Release build
release: CFLAGS += -O3 -march=native -flto
# Release build with maximum optimizations
release: CFLAGS = -Wall -Wextra -O3 -march=native -mtune=native -flto -D_GNU_SOURCE
release: CFLAGS += -fPIE -fstack-protector-strong -D_FORTIFY_SOURCE=2 -fomit-frame-pointer
release: CFLAGS += -funroll-loops -finline-functions -ffast-math
release: clean all
# Profile-guided optimization build
pgo-generate: CFLAGS += -fprofile-generate
pgo-generate: clean all
pgo-use: CFLAGS += -fprofile-use -fprofile-correction
pgo-use: clean all
.PHONY: all clean install-deps debug release

View File

@@ -2,24 +2,14 @@ version: '3.8'
services:
carbon-server:
build:
context: .
dockerfile: Dockerfile
image: azreyo/carbon:latest
container_name: carbon-http-server
ports:
- "8080:8080" # HTTP port
- "8443:8443" # HTTPS port
volumes:
# Mount www directory for easy content updates
- ./www:/app/www:ro
# Mount log directory to persist logs
- ./log:/app/log
# Mount SSL certificates if using HTTPS
- ./ssl:/app/ssl:ro
# Mount config file
- ./server.conf:/app/server.conf:ro
environment:
- TZ=UTC
- SERVER_NAME=yourdomain.com # Change this to your domain or IP
restart: unless-stopped
networks:
- carbon-net
@@ -42,6 +32,7 @@ services:
read_only: true
tmpfs:
- /tmp
- /app/log
networks:
carbon-net:

View File

@@ -25,6 +25,43 @@ const char *response_404_header = "HTTP/1.1 404 Not Found\r\n\r\nFile Not Found"
const char *response_403_header = "HTTP/1.1 403 Forbidden\r\n\r\nAccess Denied";
const char *response_429_header = "HTTP/1.1 429 Too Many Requests\r\n\r\nRate limit exceeded";
const char *response_500_header = "HTTP/1.1 500 Internal Server Error\r\n\r\nInternal Server Error";
const char *response_503_header = "HTTP/1.1 503 Service Unavailable\r\n\r\nServer overloaded";
// Common MIME types cache — a small static lookup table for the extensions
// served most often, avoiding a (comparatively expensive) libmagic query on
// the hot path. Unknown extensions fall through to the generic binary type.
typedef struct
{
    const char *ext;  // file extension, including the leading dot
    const char *mime; // corresponding MIME type string
} mime_cache_t;
static const mime_cache_t mime_cache[] = {
    {".html", "text/html"},
    {".css", "text/css"},
    {".js", "application/javascript"},
    {".json", "application/json"},
    {".png", "image/png"},
    {".jpg", "image/jpeg"},
    {".jpeg", "image/jpeg"},
    {".gif", "image/gif"},
    {".svg", "image/svg+xml"},
    {".ico", "image/x-icon"},
    {".webp", "image/webp"},
    {".woff", "font/woff"},
    {".woff2", "font/woff2"},
    {".ttf", "font/ttf"},
    {".pdf", "application/pdf"},
    {".xml", "application/xml"},
    {".txt", "text/plain"},
    {NULL, NULL} // sentinel terminates the linear scan
};
/*
 * Look up the MIME type for a file extension (case-insensitive).
 * ext may be NULL (e.g. a path with no dot); returns the generic
 * "application/octet-stream" for NULL or unrecognized extensions.
 * The returned string is a static literal and must not be freed.
 */
const char *get_mime_from_cache(const char *ext)
{
    if (ext != NULL) // guard: strcasecmp on NULL is undefined behavior
    {
        for (int i = 0; mime_cache[i].ext != NULL; i++)
        {
            if (strcasecmp(ext, mime_cache[i].ext) == 0)
            {
                return mime_cache[i].mime;
            }
        }
    }
    return "application/octet-stream";
}
// Task queue implementation
void init_task_queue(task_queue_t *queue)
@@ -38,7 +75,7 @@ void init_task_queue(task_queue_t *queue)
void enqueue_task(task_queue_t *queue, int socket_fd, SSL *ssl, bool is_https)
{
if (queue->count >= INT_MAX - 1)
if (queue->count >= WORKER_QUEUE_SIZE - 1)
{
return;
}
@@ -270,20 +307,37 @@ void init_buffer_pool(void)
char *get_buffer_from_pool(size_t min_size)
{
if (min_size > DEFAULT_BUFFER_SIZE * 4)
{
// For very large requests, allocate directly
return malloc(min_size);
}
pthread_mutex_lock(&buffer_pool_mutex);
buffer_pool_t *current = buffer_pool;
buffer_pool_t *best_fit = NULL;
// Find best fit buffer (smallest that fits)
while (current)
{
if (!current->in_use && current->size >= min_size)
{
current->in_use = true;
pthread_mutex_unlock(&buffer_pool_mutex);
return current->buffer;
if (!best_fit || current->size < best_fit->size)
{
best_fit = current;
}
}
current = current->next;
}
if (best_fit)
{
best_fit->in_use = true;
pthread_mutex_unlock(&buffer_pool_mutex);
return best_fit->buffer;
}
pthread_mutex_unlock(&buffer_pool_mutex);
return malloc(min_size);

View File

@@ -6,6 +6,9 @@
#include <time.h>
#include <sys/mman.h>
#include <openssl/ssl.h>
#include <pthread.h>
#define WORKER_QUEUE_SIZE 2048
// Connection pool structures
typedef struct connection_task_t
@@ -68,5 +71,9 @@ extern const char *response_404_header;
extern const char *response_403_header;
extern const char *response_429_header;
extern const char *response_500_header;
extern const char *response_503_header;
// MIME type cache
const char *get_mime_from_cache(const char *ext);
#endif

View File

@@ -20,6 +20,8 @@
#include <time.h>
#include <sys/sendfile.h>
#include <sys/time.h>
#include <sched.h>
#include <sys/resource.h>
#include "server_config.h"
#include "websocket.h"
@@ -66,7 +68,8 @@
#define SOCKET_BACKLOG 128 // Increased from 50
#define EPOLL_TIMEOUT 100 // 100ms timeout
#define MAX_THREAD_POOL_SIZE 32
#define MAX_THREAD_POOL_SIZE 64
#define WORKER_QUEUE_SIZE 2048
#define MAX_CACHE_SIZE 100
#define MAX_CACHE_FILE_SIZE (1024 * 1024) // 1MB
@@ -76,6 +79,7 @@ typedef struct
{
pthread_t thread;
int busy;
int cpu_core;
} ThreadInfo;
ThreadInfo *thread_pool;
@@ -83,6 +87,12 @@ int thread_pool_size = 0;
pthread_mutex_t thread_pool_mutex = PTHREAD_MUTEX_INITIALIZER;
pthread_cond_t thread_pool_cond = PTHREAD_COND_INITIALIZER;
// Worker thread queue
task_queue_t worker_queue;
pthread_t *worker_threads = NULL;
int num_worker_threads = 0;
volatile int workers_running = 1;
typedef struct
{
char ip[INET_ADDRSTRLEN];
@@ -122,6 +132,9 @@ pthread_mutex_t rate_limit_mutex = PTHREAD_MUTEX_INITIALIZER;
void cleanup_thread_pool(void);
void *handle_http_client(void *arg);
void *handle_https_client(void *arg);
void *worker_thread(void *arg);
void set_cpu_affinity(int thread_id);
void optimize_socket_for_send(int socket_fd);
void log_event(const char *message);
void initialize_openssl();
void cleanup_openssl();
@@ -183,6 +196,13 @@ void configure_ssl_context(SSL_CTX *ctx)
exit(EXIT_FAILURE);
}
// Security hardening
SSL_CTX_set_min_proto_version(ctx, TLS1_2_VERSION);
SSL_CTX_set_options(ctx, SSL_OP_NO_SSLv2 | SSL_OP_NO_SSLv3 | SSL_OP_NO_TLSv1 | SSL_OP_NO_TLSv1_1);
SSL_CTX_set_options(ctx, SSL_OP_NO_COMPRESSION); // Disable compression (CRIME attack)
SSL_CTX_set_options(ctx, SSL_OP_CIPHER_SERVER_PREFERENCE);
// Use secure ciphers only - TLS 1.3 and strong TLS 1.2 ciphers
const char *cipher_list = "TLS_AES_256_GCM_SHA384:TLS_CHACHA20_POLY1305_SHA256:"
"TLS_AES_128_GCM_SHA256:" // TLS 1.3
"ECDHE-RSA-AES256-GCM-SHA384:ECDHE-RSA-AES128-GCM-SHA256:"
@@ -195,8 +215,6 @@ void configure_ssl_context(SSL_CTX *ctx)
exit(EXIT_FAILURE);
}
SSL_CTX_set_min_proto_version(ctx, TLS1_2_VERSION);
// Enable HTTP/2 ALPN if configured
if (config.enable_http2)
{
@@ -205,6 +223,18 @@ void configure_ssl_context(SSL_CTX *ctx)
}
}
/*
 * Tune a connected TCP socket for low-latency responses.
 * Best-effort: setsockopt() failures are deliberately ignored, since a
 * socket that rejects these options can still be served normally.
 */
void optimize_socket_for_send(int socket_fd)
{
    const int enable = 1;
    // Disable Nagle's algorithm so small writes are sent immediately
    (void)setsockopt(socket_fd, IPPROTO_TCP, TCP_NODELAY, &enable, sizeof(enable));
#ifdef TCP_QUICKACK
    // Linux-only: acknowledge incoming data promptly instead of delaying ACKs
    (void)setsockopt(socket_fd, IPPROTO_TCP, TCP_QUICKACK, &enable, sizeof(enable));
#endif
}
void set_socket_options(int socket_fd)
{
int flags = fcntl(socket_fd, F_GETFL, 0);
@@ -321,37 +351,18 @@ void *start_http_server(void *arg)
continue;
}
pthread_mutex_lock(&thread_count_mutex);
if (num_client_threads < config.max_connections)
// Enqueue task to worker thread pool instead of creating new thread
if (worker_queue.count < WORKER_QUEUE_SIZE)
{
pthread_t client_thread;
int *client_socket_ptr = malloc(sizeof(int));
if (!client_socket_ptr)
{
perror("Failed to allocate memory for client socket");
close(client_socket);
pthread_mutex_unlock(&thread_count_mutex);
continue;
}
*client_socket_ptr = client_socket;
if (pthread_create(&client_thread, NULL, handle_http_client, client_socket_ptr) == 0)
{
client_threads[num_client_threads++] = client_thread;
enqueue_task(&worker_queue, client_socket, NULL, false);
}
else
{
perror("Error creating HTTP client thread");
close(client_socket);
free(client_socket_ptr);
}
}
else
{
log_event("Max client threads reached, rejecting connection.");
log_event("Worker queue full, rejecting connection.");
const char *overload_response = "HTTP/1.1 503 Service Unavailable\r\n\r\nServer overloaded";
send(client_socket, overload_response, strlen(overload_response), 0);
close(client_socket);
}
pthread_mutex_unlock(&thread_count_mutex);
}
}
}
@@ -411,37 +422,18 @@ void *start_https_server(void *arg)
break;
}
pthread_mutex_lock(&thread_count_mutex);
if (num_client_threads < config.max_connections)
// Enqueue task to worker thread pool instead of creating new thread
if (worker_queue.count < WORKER_QUEUE_SIZE)
{
pthread_t client_thread;
int *client_socket_ptr = malloc(sizeof(int));
if (!client_socket_ptr)
{
perror("Failed to allocate memory for client socket");
close(client_socket);
pthread_mutex_unlock(&thread_count_mutex);
continue;
}
*client_socket_ptr = client_socket;
if (pthread_create(&client_thread, NULL, handle_https_client, client_socket_ptr) == 0)
{
client_threads[num_client_threads++] = client_thread;
enqueue_task(&worker_queue, client_socket, NULL, true);
}
else
{
perror("Error creating HTTPS client thread");
close(client_socket);
free(client_socket_ptr);
}
}
else
{
log_event("Max client threads reached, rejecting connection.");
log_event("Worker queue full (HTTPS), rejecting connection.");
const char *overload_response = "HTTP/1.1 503 Service Unavailable\r\n\r\nServer overloaded";
send(client_socket, overload_response, strlen(overload_response), 0);
close(client_socket);
}
pthread_mutex_unlock(&thread_count_mutex);
}
close(https_socket);
@@ -1271,6 +1263,75 @@ void signal_handler(int sig)
}
}
/*
 * Pin the calling thread to a CPU core chosen round-robin from thread_id.
 * Spreads worker threads across cores for better cache locality.
 * No-op on non-Linux platforms, or when the online core count cannot be
 * determined; a failed affinity call is logged but not fatal.
 */
void set_cpu_affinity(int thread_id)
{
#ifdef __linux__
    long online = sysconf(_SC_NPROCESSORS_ONLN);
    if (online <= 0)
    {
        return; // core count unknown; keep the default affinity mask
    }
    cpu_set_t mask;
    CPU_ZERO(&mask);
    CPU_SET((int)(thread_id % online), &mask);
    if (pthread_setaffinity_np(pthread_self(), sizeof(mask), &mask) != 0)
    {
        log_event("Warning: Failed to set CPU affinity");
    }
#else
    (void)thread_id; // unused off-Linux
#endif
}
/*
 * Worker thread main loop: pulls accepted connections off the shared
 * worker_queue and dispatches each to the HTTP or HTTPS handler.
 *
 * arg is a heap-allocated int carrying the worker's id (ownership is
 * transferred here and freed immediately). Runs until workers_running is
 * cleared and the queue wakes us, then returns NULL for pthread_join.
 */
void *worker_thread(void *arg)
{
    int thread_id = *((int *)arg);
    free(arg);
    // Pin this worker to a core (round-robin) for cache locality
    set_cpu_affinity(thread_id);
    char log_msg[256];
    long num_cpus = sysconf(_SC_NPROCESSORS_ONLN);
    if (num_cpus > 0)
    {
        // Guard: sysconf can return -1/0 — modulo by that would be UB
        snprintf(log_msg, sizeof(log_msg), "Worker thread %d started on CPU %d",
                 thread_id, (int)(thread_id % num_cpus));
    }
    else
    {
        snprintf(log_msg, sizeof(log_msg), "Worker thread %d started", thread_id);
    }
    log_event(log_msg);
    while (workers_running)
    {
        connection_task_t *task = dequeue_task(&worker_queue);
        if (!task)
        {
            break; // queue drained/destroyed during shutdown
        }
        if (!workers_running)
        {
            // Shutdown raced a dequeued task: release its resources
            // instead of leaking the fd and the task struct.
            close(task->socket_fd);
            free(task);
            break;
        }
        // Optimize socket before handling
        optimize_socket_for_send(task->socket_fd);
        // Both handlers take ownership of a heap-allocated fd pointer,
        // so allocate once and branch only on the protocol.
        int *socket_ptr = malloc(sizeof *socket_ptr);
        if (socket_ptr)
        {
            *socket_ptr = task->socket_fd;
            if (task->is_https)
            {
                handle_https_client(socket_ptr);
            }
            else
            {
                handle_http_client(socket_ptr);
            }
        }
        else
        {
            // Previously the accepted fd leaked on allocation failure
            close(task->socket_fd);
        }
        free(task);
    }
    return NULL;
}
void initialize_thread_pool()
{
thread_pool = calloc(MAX_THREAD_POOL_SIZE, sizeof(ThreadInfo));
@@ -1692,6 +1753,29 @@ int check_rate_limit(const char *ip)
void cleanup_thread_pool()
{
// Signal worker threads to stop
workers_running = 0;
// Wake up all waiting workers
pthread_mutex_lock(&worker_queue.mutex);
pthread_cond_broadcast(&worker_queue.cond);
pthread_mutex_unlock(&worker_queue.mutex);
// Join all worker threads
if (worker_threads)
{
for (int i = 0; i < num_worker_threads; i++)
{
pthread_join(worker_threads[i], NULL);
}
free(worker_threads);
worker_threads = NULL;
}
// Cleanup worker queue
destroy_task_queue(&worker_queue);
// Cleanup old thread pool structure if exists
if (!thread_pool)
{
return;
@@ -1699,15 +1783,6 @@ void cleanup_thread_pool()
pthread_mutex_lock(&thread_pool_mutex);
for (int i = 0; i < thread_pool_size; i++)
{
if (thread_pool[i].busy)
{
pthread_cancel(thread_pool[i].thread);
pthread_join(thread_pool[i].thread, NULL);
}
}
ThreadInfo *temp = thread_pool;
thread_pool = NULL;
thread_pool_size = 0;