{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "#Libs\n",
    "import cv2\n",
    "import numpy as np\n",
    "import pandas as pd\n",
    "from tabulate import tabulate\n",
    "import matplotlib.pyplot as plt"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# Methods"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "def split_on_nan_row(recording):\n",
    "    # Split the DataFrame into subdatasets\n",
    "    subdatasets = []\n",
    "    current_subdataset = []\n",
    "\n",
    "    for index, row in recording.iterrows():\n",
    "        if pd.isna(row['x1']) and pd.isna(row['y1']) and pd.isna(row['x2']) and pd.isna(row['y2']):\n",
    "            if current_subdataset:\n",
    "                subdatasets.append(pd.DataFrame(current_subdataset))\n",
    "                current_subdataset = []\n",
    "        elif pd.isna(row['x1']) and pd.isna(row['y1']) and row['x2'] and row['y2']:\n",
    "            if current_subdataset:\n",
    "                subdatasets.append(pd.DataFrame(current_subdataset))\n",
    "                current_subdataset = []\n",
    "        elif row['x1'] and row['y1'] and pd.isna(row['x2']) and pd.isna(row['y2']):\n",
    "            if current_subdataset:\n",
    "                subdatasets.append(pd.DataFrame(current_subdataset))\n",
    "                current_subdataset = []\n",
    "        else:\n",
    "            if index > 0:\n",
    "                prev_row = recording.iloc[index - 1]\n",
    "                if not (pd.isna(prev_row['x1']) and pd.isna(prev_row['y1']) and pd.isna(prev_row['x2']) and pd.isna(prev_row['y2'])):\n",
    "                    current_subdataset.append(row)\n",
    "\n",
    "        # Append the last subdataset if it exists\n",
    "    if current_subdataset:\n",
    "        subdatasets.append(pd.DataFrame(current_subdataset))\n",
    "\n",
    "    for i in range(len(subdatasets)):\n",
    "        if len(subdatasets[i]) < 10:\n",
    "            subdatasets.pop(i)\n",
    "            break\n",
    "    return subdatasets"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "def get_average_points(subdatasets):\n",
    "    average_points = []\n",
    "\n",
    "    for i in subdatasets:\n",
    "        x1 = i['x1'].mean() \n",
    "        y1 = i['y1'].mean()\n",
    "        x2 = i['x2'].mean()\n",
    "        y2 = i['y2'].mean()\n",
    "        average_points.append((x1, y1, x2, y2))\n",
    "\n",
    "    return average_points"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "def correct_points_orientation(list_of_points, label):\n",
    "    average_points_corrected = []\n",
    "    for i in range(len(list_of_points)):\n",
    "        x1, y1, x2, y2 = list_of_points[i]\n",
    "        if x1 < x2:\n",
    "            x1, x2 = x2, x1\n",
    "            y1, y2 = y2, y1\n",
    "        average_points_corrected.append((x1, y1, x2, y2, label[i]))\n",
    "    return average_points_corrected"
   ]
  },
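  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "A minimal sanity check of the three helpers on synthetic data (illustrative only: the coordinates and labels below are made up, not taken from a real recording)."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Synthetic recording: two fixations of 12 valid frames each,\n",
    "# separated by fully missing (black) frames\n",
    "_gap = [{'x1': np.nan, 'y1': np.nan, 'x2': np.nan, 'y2': np.nan}] * 3\n",
    "_fix_a = [{'x1': 10.0, 'y1': 5.0, 'x2': 20.0, 'y2': 5.0}] * 12\n",
    "_fix_b = [{'x1': 30.0, 'y1': 6.0, 'x2': 12.0, 'y2': 6.0}] * 12\n",
    "_demo = pd.DataFrame(_gap + _fix_a + _gap + _fix_b + _gap)\n",
    "\n",
    "_subs = split_on_nan_row(_demo)\n",
    "_avg = get_average_points(_subs)\n",
    "# Expect two tuples, each with x1 >= x2 after orientation correction\n",
    "print(correct_points_orientation(_avg, ['fixA', 'fixB']))"
   ]
  },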
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# Variables"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Angular error\n",
    "angle_error = np.radians(0.25)  # 0.25°\n",
    "\n",
    "# Define the positions of the eyes  (world coordinates)\n",
    "coord_left_eye = np.array([-3.25, 0, 0])\n",
    "coord_right_eye = np.array([3.25, 0, 0])"
   ]
  },
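  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "A quick worked example of the scale these variables set (a sketch only, assuming the eye coordinates are in centimetres, i.e. a 6.5 cm interpupillary distance, and a target on the midline): the vergence angle at distance $d$ is $2\\arctan(3.25/d)$, which puts the 0.25° error budget in perspective for the fixation distances used below."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Illustrative only: vergence angle for a midline target at distance d,\n",
    "# assuming centimetre units for the eye coordinates above\n",
    "for d in [25.0, 50.0, 75.0, 200.0]:\n",
    "    vergence = 2 * np.arctan(coord_right_eye[0] / d)\n",
    "    print(f'd = {d:5.0f} cm -> vergence = {np.degrees(vergence):5.2f} deg')\n",
    "print(f'angular error budget: {np.degrees(angle_error):.2f} deg')"
   ]
  },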
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# Recordings"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Recording 1"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "label1 = ['infini', '25c', '25up', '25r', '50c', '50up', '50r', '75c', '75up', '75r', '200c', '200up', '200r' ]\n",
    "\n",
    "recording1_left = pd.read_csv('./recording1/centroids_left.csv')\n",
    "recording1_right = pd.read_csv('./recording1/centroids_right.csv')\n",
    "\n",
    "# Retenir seulement les images non noirs\n",
    "subdatasets1_left = split_on_nan_row(recording1_left)\n",
    "subdatasets1_right = split_on_nan_row(recording1_right)\n",
    "\n",
    "# Recupérer les points moyens pour les douze points de fixations\n",
    "average_points_left1 = get_average_points(subdatasets1_left)\n",
    "average_points_right1 = get_average_points(subdatasets1_right)\n",
    "\n",
    "# Verifier qu'il n'y a pas d'échanges entre les centroides 1 et 2\n",
    "average_points_left_corrected1, average_points_right_corrected1 = correct_points_orientation(average_points_left1, label1), correct_points_orientation(average_points_right1, label1)\n",
    "\n",
    "xy_eye_left1 = np.column_stack(([(average_points_left_corrected1[i][0] + average_points_left_corrected1[i][2])/2 for i in range(len(average_points_left_corrected1))], [(average_points_left_corrected1[i][1] + average_points_left_corrected1[i][3])/2 for i in range(len(average_points_left_corrected1))]))\n",
    "xy_eye_right1 = np.column_stack(([(average_points_right_corrected1[i][0] + average_points_right_corrected1[i][2])/2 for i in range(len(average_points_right_corrected1))], [(average_points_right_corrected1[i][1] + average_points_right_corrected1[i][3])/2 for i in range(len(average_points_right_corrected1))]))"
   ]
  },
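  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "An optional check for recording 1 (illustrative; it assumes the cell above ran successfully and that the centroid coordinates are in image pixels): tabulate the corrected fixation centroids and plot the mean pupil position per fixation for both eyes."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Tabulate the corrected average points of the left eye\n",
    "print(tabulate(average_points_left_corrected1,\n",
    "               headers=['x1', 'y1', 'x2', 'y2', 'label'], floatfmt='.1f'))\n",
    "\n",
    "# Scatter the mean pupil position per fixation for both eyes\n",
    "fig, ax = plt.subplots(figsize=(6, 4))\n",
    "ax.scatter(xy_eye_left1[:, 0], xy_eye_left1[:, 1], label='left eye')\n",
    "ax.scatter(xy_eye_right1[:, 0], xy_eye_right1[:, 1], label='right eye')\n",
    "for (x, y), lab in zip(xy_eye_left1, label1):\n",
    "    ax.annotate(lab, (x, y))\n",
    "ax.set_xlabel('x (px, assumed)')\n",
    "ax.set_ylabel('y (px, assumed)')\n",
    "ax.legend()\n",
    "plt.show()"
   ]
  },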
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Recording 2"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "label2 = ['infini', '25c', '50c', '75c', '200c', '25up', '50up', '75up', '200up', '25r', '50r',  '75r', '200r' ]\n",
    "\n",
    "recording2_left = pd.read_csv('./recording2/centroids_left.csv')\n",
    "recording2_left = recording2_left.drop([11474])\n",
    "recording2_left = recording2_left.reset_index(drop=True)\n",
    "\n",
    "recording2_right = pd.read_csv('./.recording2/centroids_right.csv')\n",
    "recording2_right = recording2_right.drop([319, 5926, 7611, 10201, 11500, 12791])\n",
    "recording2_right = recording2_right.reset_index(drop=True)\n",
    "\n",
    "subdatasets2_left = split_on_nan_row(recording2_left)\n",
    "subdatasets2_right = split_on_nan_row(recording2_right)\n",
    "\n",
    "average_points_left2 = get_average_points(subdatasets2_left)\n",
    "average_points_right2 = get_average_points(subdatasets2_right)\n",
    "\n",
    "average_points_left_corrected2, average_points_right_corrected2 = correct_points_orientation(average_points_left2, label2), correct_points_orientation(average_points_right2, label2)\n",
    "\n",
    "xy_eye_left2 = np.column_stack(([(average_points_left_corrected2[i][0] + average_points_left_corrected2[i][2])/2 for i in range(len(average_points_left_corrected2))], [(average_points_left_corrected2[i][1] + average_points_left_corrected2[i][3])/2 for i in range(len(average_points_left_corrected2))]))\n",
    "xy_eye_right2 = np.column_stack(([(average_points_right_corrected2[i][0] + average_points_right_corrected2[i][2])/2 for i in range(len(average_points_right_corrected2))], [(average_points_right_corrected2[i][1] + average_points_right_corrected2[i][3])/2 for i in range(len(average_points_right_corrected2))]))"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Recording 3"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "label3 = ['infini', '75c', '75up', '50up', '75r', '50r', '200c', '25c', '25r', '50c', '200r', '25up', '200up']\n",
    "\n",
    "recording3_left = pd.read_csv('./recording3/centroids_left.csv')\n",
    "recording3_right = pd.read_csv('./recording3/centroids_right.csv')\n",
    "recording3_right = recording3_right.drop([9268, 10181, 11001, 12687])\n",
    "recording3_right = recording3_right.reset_index(drop=True)\n",
    "\n",
    "subdatasets3_left = split_on_nan_row(recording3_left)\n",
    "subdatasets3_right = split_on_nan_row(recording3_right)\n",
    "\n",
    "average_points_left3 = get_average_points(subdatasets3_left)\n",
    "average_points_right3 = get_average_points(subdatasets3_right)\n",
    "\n",
    "average_points_left_corrected3, average_points_right_corrected3 = correct_points_orientation(average_points_left3, label3), correct_points_orientation(average_points_right3, label3)\n",
    "\n",
    "xy_eye_left3 = np.column_stack(([(average_points_left_corrected3[i][0] + average_points_left_corrected3[i][2])/2 for i in range(len(average_points_left_corrected3))], [(average_points_left_corrected3[i][1] + average_points_left_corrected3[i][3])/2 for i in range(len(average_points_left_corrected3))]))\n",
    "xy_eye_right3 = np.column_stack(([(average_points_right_corrected3[i][0] + average_points_right_corrected3[i][2])/2 for i in range(len(average_points_right_corrected3))], [(average_points_right_corrected3[i][1] + average_points_right_corrected3[i][3])/2 for i in range(len(average_points_right_corrected3))]))"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Recording 4"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "label4 = ['infini', '25up', '200c', '200up', '75c', '25c', '50c', '200r', '25r', '75r', '75up', '50up', '50r' ]\n",
    "\n",
    "recording4_left = pd.read_csv('./recording4/centroids_left.csv')\n",
    "recording4_left = recording4_left.drop([1971,1972, 4402, 6891,7982,9011, 10229, 10434, 12390, 13182])\n",
    "recording4_left = recording4_left.reset_index(drop=True)\n",
    "\n",
    "recording4_right = pd.read_csv('./recording4/centroids_right.csv')\n",
    "recording4_right = recording4_right.drop([1960, 4394, 8170, 10423, 13177])\n",
    "recording4_right = recording4_right.reset_index(drop=True)\n",
    "\n",
    "subdatasets4_left = split_on_nan_row(recording4_left)\n",
    "subdatasets4_right = split_on_nan_row(recording4_right)\n",
    "\n",
    "average_points_left4 = get_average_points(subdatasets4_left)\n",
    "average_points_right4 = get_average_points(subdatasets4_right)\n",
    "\n",
    "average_points_left_corrected4, average_points_right_corrected4 = correct_points_orientation(average_points_left4, label4), correct_points_orientation(average_points_right4, label4)\n",
    "\n",
    "xy_eye_left4 = np.column_stack(([(average_points_left_corrected4[i][0] + average_points_left_corrected4[i][2])/2 for i in range(len(average_points_left_corrected4))], [(average_points_left_corrected4[i][1] + average_points_left_corrected4[i][3])/2 for i in range(len(average_points_left_corrected4))]))\n",
    "xy_eye_right4 = np.column_stack(([(average_points_right_corrected4[i][0] + average_points_right_corrected4[i][2])/2 for i in range(len(average_points_right_corrected4))], [(average_points_right_corrected4[i][1] + average_points_right_corrected4[i][3])/2 for i in range(len(average_points_right_corrected4))]))"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "img_pro",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.12.0"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}