Enhance server ssl configuration

- Added SSL certificate and key paths to ServerConfig structure.
- Updated init_config function to initialize new SSL paths.
- Formatted code for better readability.
This commit is contained in:
2025-10-05 17:35:00 +00:00
parent 46b653efe0
commit 72df6a73fc
13 changed files with 1420 additions and 1000 deletions

View File

@@ -7,7 +7,7 @@
#include <stdio.h>
#define MAX_MMAP_CACHE_SIZE 50
#define MAX_MMAP_FILE_SIZE (10 * 1024 * 1024) // 10MB
#define MAX_MMAP_FILE_SIZE (10 * 1024 * 1024) // 10MB
#define BUFFER_POOL_SIZE 32
#define DEFAULT_BUFFER_SIZE 16384
@@ -27,7 +27,8 @@ const char *response_429_header = "HTTP/1.1 429 Too Many Requests\r\n\r\nRate li
const char *response_500_header = "HTTP/1.1 500 Internal Server Error\r\n\r\nInternal Server Error";
// Task queue implementation
/* Initialize an empty connection task queue and its synchronization
 * primitives. Must be called once before any enqueue/dequeue. */
void init_task_queue(task_queue_t *queue)
{
    queue->head = NULL;
    queue->tail = NULL;
    queue->count = 0;
    /* NOTE(review): the mutex init line sat between diff hunks; destroy_task_queue
     * destroys both mutex and cond, so both must be initialized here — confirm. */
    pthread_mutex_init(&queue->mutex, NULL);
    pthread_cond_init(&queue->cond, NULL);
}
/* Append a connection task to the tail of the queue and wake one worker.
 * On allocation failure the connection is silently dropped (the caller
 * still owns socket_fd/ssl in that case — presumably they are closed by
 * the accept loop's timeout path; TODO confirm). */
void enqueue_task(task_queue_t *queue, int socket_fd, SSL *ssl, bool is_https)
{
    connection_task_t *task = malloc(sizeof *task);
    if (!task)
        return;
    task->socket_fd = socket_fd;
    task->ssl = ssl;
    task->is_https = is_https;
    task->next = NULL;
    pthread_mutex_lock(&queue->mutex);
    if (queue->tail)
    {
        queue->tail->next = task;
    }
    else
    {
        queue->head = task;
    }
    queue->tail = task;
    queue->count++;
    /* One task, one waiter: signal (not broadcast) is sufficient. */
    pthread_cond_signal(&queue->cond);
    pthread_mutex_unlock(&queue->mutex);
}
/* Block until a task is available, then remove and return the head task.
 * Caller takes ownership of the returned task and must free() it.
 * Never returns NULL: waits on the condition variable while empty. */
connection_task_t *dequeue_task(task_queue_t *queue)
{
    pthread_mutex_lock(&queue->mutex);
    /* Loop (not if): pthread_cond_wait permits spurious wakeups. */
    while (queue->head == NULL)
    {
        pthread_cond_wait(&queue->cond, &queue->mutex);
    }
    connection_task_t *task = queue->head;
    queue->head = task->next;
    if (queue->head == NULL)
    {
        queue->tail = NULL;
    }
    queue->count--;
    pthread_mutex_unlock(&queue->mutex);
    return task;
}
/* Free all pending tasks and destroy the queue's mutex and condvar.
 * Callers must ensure no worker is still blocked in dequeue_task:
 * destroying a mutex/condvar with waiters is undefined behavior. */
void destroy_task_queue(task_queue_t *queue)
{
    pthread_mutex_lock(&queue->mutex);
    connection_task_t *current = queue->head;
    while (current)
    {
        connection_task_t *next = current->next;
        free(current);
        current = next;
    }
    pthread_mutex_unlock(&queue->mutex);
    pthread_mutex_destroy(&queue->mutex);
    pthread_cond_destroy(&queue->cond);
}
// Memory-mapped file cache implementation
/* Allocate the zero-initialized mmap cache table.
 * NOTE(review): calloc result is not checked; the lookup paths tolerate an
 * empty cache only while mmap_cache_size stays 0 — verify callers never
 * grow the size when mmap_cache is NULL. */
void init_mmap_cache(void)
{
    mmap_cache = calloc(MAX_MMAP_CACHE_SIZE, sizeof(mmap_cache_entry_t));
}
mmap_cache_entry_t* get_cached_file(const char *path) {
mmap_cache_entry_t *get_cached_file(const char *path)
{
pthread_mutex_lock(&mmap_cache_mutex);
for (int i = 0; i < mmap_cache_size; i++) {
if (mmap_cache[i].path && strcmp(mmap_cache[i].path, path) == 0) {
for (int i = 0; i < mmap_cache_size; i++)
{
if (mmap_cache[i].path && strcmp(mmap_cache[i].path, path) == 0)
{
mmap_cache[i].last_access = time(NULL);
mmap_cache[i].ref_count++;
pthread_mutex_unlock(&mmap_cache_mutex);
return &mmap_cache[i];
}
}
pthread_mutex_unlock(&mmap_cache_mutex);
return NULL;
}
/* Map a file read-only and insert it into the cache, evicting the least
 * recently used unreferenced entry when the table is full.
 *
 * Fixes relative to the previous version:
 *  - The file is opened and mapped BEFORE any eviction, so open()/mmap()
 *    failure can no longer leave an evicted slot with dangling, freed
 *    path/mime_type pointers still counted in mmap_cache_size.
 *  - madvise() takes a single advice value, not a bitmask; OR-ing
 *    MADV_WILLNEED with MADV_SEQUENTIAL silently discarded one hint, so
 *    the two hints are now issued as separate calls.
 */
void cache_file_mmap(const char *path, size_t size, const char *mime_type)
{
    if (size > MAX_MMAP_FILE_SIZE)
        return;
    pthread_mutex_lock(&mmap_cache_mutex);
    /* Already cached: nothing to do. */
    for (int i = 0; i < mmap_cache_size; i++)
    {
        if (mmap_cache[i].path && strcmp(mmap_cache[i].path, path) == 0)
        {
            pthread_mutex_unlock(&mmap_cache_mutex);
            return;
        }
    }
    /* Map first so a failure cannot corrupt an existing entry. */
    int fd = open(path, O_RDONLY);
    if (fd < 0)
    {
        pthread_mutex_unlock(&mmap_cache_mutex);
        return;
    }
    void *mapped = mmap(NULL, size, PROT_READ, MAP_PRIVATE, fd, 0);
    close(fd);
    if (mapped == MAP_FAILED)
    {
        pthread_mutex_unlock(&mmap_cache_mutex);
        return;
    }
    /* Advise the kernel about the expected access pattern. */
    madvise(mapped, size, MADV_WILLNEED);
    madvise(mapped, size, MADV_SEQUENTIAL);
    /* Pick a slot: a fresh one, or evict the LRU unreferenced entry. */
    int slot = mmap_cache_size;
    if (mmap_cache_size >= MAX_MMAP_CACHE_SIZE)
    {
        time_t oldest = time(NULL);
        slot = -1;
        for (int i = 0; i < mmap_cache_size; i++)
        {
            if (mmap_cache[i].ref_count == 0 && mmap_cache[i].last_access < oldest)
            {
                oldest = mmap_cache[i].last_access;
                slot = i;
            }
        }
        if (slot < 0)
        {
            /* Every entry is referenced: drop the new mapping instead. */
            munmap(mapped, size);
            pthread_mutex_unlock(&mmap_cache_mutex);
            return;
        }
        if (mmap_cache[slot].mmap_data)
        {
            munmap(mmap_cache[slot].mmap_data, mmap_cache[slot].size);
        }
        free(mmap_cache[slot].path);
        free(mmap_cache[slot].mime_type);
    }
    else
    {
        mmap_cache_size++;
    }
    mmap_cache[slot].path = strdup(path);
    mmap_cache[slot].mmap_data = mapped;
    mmap_cache[slot].size = size;
    mmap_cache[slot].last_access = time(NULL);
    mmap_cache[slot].mime_type = strdup(mime_type);
    mmap_cache[slot].ref_count = 0;
    pthread_mutex_unlock(&mmap_cache_mutex);
}
/* Drop one reference taken by get_cached_file. Must be called exactly once
 * per successful lookup; an unbalanced call would drive ref_count negative
 * and make the entry permanently unevictable-looking or prematurely
 * evictable. */
void release_cached_file(mmap_cache_entry_t *entry)
{
    pthread_mutex_lock(&mmap_cache_mutex);
    entry->ref_count--;
    pthread_mutex_unlock(&mmap_cache_mutex);
}
/* Unmap and free every cache entry, then release the table itself.
 * Intended for shutdown: ignores ref_count, so no request handler may
 * still hold a cached entry when this runs. */
void cleanup_mmap_cache(void)
{
    pthread_mutex_lock(&mmap_cache_mutex);
    for (int i = 0; i < mmap_cache_size; i++)
    {
        if (mmap_cache[i].mmap_data)
        {
            munmap(mmap_cache[i].mmap_data, mmap_cache[i].size);
        }
        free(mmap_cache[i].path);
        free(mmap_cache[i].mime_type);
    }
    free(mmap_cache);
    mmap_cache = NULL;
    mmap_cache_size = 0;
    pthread_mutex_unlock(&mmap_cache_mutex);
}
// Buffer pool implementation
void init_buffer_pool(void) {
void init_buffer_pool(void)
{
pthread_mutex_lock(&buffer_pool_mutex);
for (int i = 0; i < BUFFER_POOL_SIZE; i++) {
for (int i = 0; i < BUFFER_POOL_SIZE; i++)
{
buffer_pool_t *buf = malloc(sizeof(buffer_pool_t));
if (buf) {
if (buf)
{
buf->buffer = malloc(DEFAULT_BUFFER_SIZE);
buf->size = DEFAULT_BUFFER_SIZE;
buf->in_use = false;
@@ -218,58 +253,66 @@ void init_buffer_pool(void) {
buffer_pool = buf;
}
}
pthread_mutex_unlock(&buffer_pool_mutex);
}
/* Hand out the first free pooled buffer of at least min_size bytes, or
 * fall back to a fresh malloc when the pool is exhausted or too small.
 * Either way the buffer must be returned via return_buffer_to_pool, which
 * distinguishes pooled from heap buffers. The malloc fallback may return
 * NULL; callers must check. */
char *get_buffer_from_pool(size_t min_size)
{
    pthread_mutex_lock(&buffer_pool_mutex);
    buffer_pool_t *current = buffer_pool;
    while (current)
    {
        if (!current->in_use && current->size >= min_size)
        {
            current->in_use = true;
            pthread_mutex_unlock(&buffer_pool_mutex);
            return current->buffer;
        }
        current = current->next;
    }
    pthread_mutex_unlock(&buffer_pool_mutex);
    return malloc(min_size);
}
/* Return a buffer obtained from get_buffer_from_pool. Pooled buffers are
 * marked free for reuse; buffers that came from the malloc fallback are
 * not in the list and are simply freed. */
void return_buffer_to_pool(char *buffer)
{
    pthread_mutex_lock(&buffer_pool_mutex);
    buffer_pool_t *current = buffer_pool;
    while (current)
    {
        if (current->buffer == buffer)
        {
            current->in_use = false;
            pthread_mutex_unlock(&buffer_pool_mutex);
            return;
        }
        current = current->next;
    }
    pthread_mutex_unlock(&buffer_pool_mutex);
    /* Not a pool buffer: it was malloc'd by the fallback path. */
    free(buffer);
}
/* Free every pool node and its data buffer, leaving the pool empty.
 * Intended for shutdown: does not check in_use, so no worker may still
 * hold a pooled buffer when this runs. */
void cleanup_buffer_pool(void)
{
    pthread_mutex_lock(&buffer_pool_mutex);
    buffer_pool_t *current = buffer_pool;
    while (current)
    {
        buffer_pool_t *next = current->next;
        free(current->buffer);
        free(current);
        current = next;
    }
    buffer_pool = NULL;
    pthread_mutex_unlock(&buffer_pool_mutex);
}