Librairie pour réseaux de neurones :)
=== Agent — Instanciation des réseaux de neurones
Paramètres : -brainDim (list) : Dimensions du réseau de neurones -agentPerGen (int) : Agents par génération -savName (str) : Nom du fichier de sauvegarde (explication plus bas) -NSavBrain (int) : Nombre d’agents à sauvegarder -actFunc (str) : Fonction d’activation. Options : sigmoid, softmax, tanh, ReLU, ou fonction personnalisée (par défaut, softmax) -p_m (float) : Probabilité de mutation de chaque bit lors du croisement (par défaut, 0.125) -newGen (dict) : Proportions de la nouvelle génération : élites conservées (‘E’), parents sélectionnés (‘P’) et enfants croisés (‘C’) ; chaque proportion est multipliée par agentPerGen -valType (int) : Type de valeurs du réseau. Pour optimiser le stockage des réseaux, les valeurs du réseau sont stockées sous forme d’int8 ou int16. Options : 0 pour int8, 1 pour int16 (meilleure précision dans la valeur du réseau) (par défaut, int8) -dilution (int) : Facteur de dilution pour augmenter les valeurs possibles d’un nombre. Chaque dilution rendra les valeurs du réseau moins précises, mais les valeurs pourront être beaucoup plus éloignées (par défaut, 1 (càd aucune modification))
=== forward_propagation(ind, inp) Propagation avant dans le réseau. Retourne la couche de sortie du réseau (les activations des neurones de sortie)
Paramètres : -ind (int) : Indice de l’agent dans la génération dont on veut faire la propagation -inp (list) : Couche d’entrée du réseau
=== evolve(marks) Crée une nouvelle génération d’individus en utilisant les proportions de newGen. À chaque génération, les meilleurs agents sont conservés, et des agents mutés et aléatoires sont ajoutés
Paramètre : -marks (list) : Liste des notes des individus
=== save/load save encode les cerveaux et sauvegarde le nombre total de générations effectuées ; load récupère les informations stockées. Les sauvegardes se font sous le nom savName (l’extension est ‘.ia’)
Si vous avez des questions/modifications, contactez-moi sur jametarmand@gmail.com !
# Neural-network library for the NumWorks calculator.
# Originally by Armand JAMET, 12/10/23, v2.3 — see the "stimorol" programs
# on the NumWorks workshop for documentation.                          # <3
from random import randint, random

try:
    from ulab import numpy as np  # on-device (NumWorks / MicroPython)
except ImportError:
    import numpy as np  # desktop fallback: ulab mirrors this API subset

try:
    from kandinsky import draw_line as line, fill_circle as circle
except ImportError:
    line = circle = None  # screen drawing only exists on the calculator


def binary_repr(val, width=8):
    """Two's-complement binary string of *val* on *width* bits.

    Stand-in for np.binary_repr, which ulab does not provide.
    BUG FIX: the original lambda rejected the `width` keyword that its only
    caller (value_crossover) passes — a guaranteed TypeError — and it also
    applied a +125 offset that the caller already applies.
    """
    return format(int(val) % (1 << width), '0%db' % width)


def custom_choice(parentsIndex, P):
    """Roulette-wheel draw: return an index i with probability P[i].

    Stand-in for np.random.choice, which ulab does not provide.
    *parentsIndex* is kept for signature compatibility but is unused.
    """
    rnd = random()
    acc = 0.0
    for i, p in enumerate(P):
        acc += p
        if rnd < acc:
            return i
    # BUG FIX: float rounding can leave sum(P) slightly below 1, in which
    # case the original returned None and crashed the caller.
    return len(P) - 1


class Agent:
    """Population of small integer-quantized neural networks evolved by a
    genetic algorithm (selection, bitwise crossover, bit-flip mutation)."""

    def __init__(ai, brainDim, savName=None, path=None, agentPerGen=400,
                 newGen={'E': 8, 'P': 146, 'C': 146}, actFunc='softMax',
                 p_m=0.125, NSavBrain=3, valType=0, dilution=1):
        """See the module header for the meaning of every parameter.

        newGen maps 'E' (elites kept), 'P' (parents selected) and
        'C' (children) to proportions of agentPerGen.
        """
        ai.dim = brainDim
        ai.genLen = agentPerGen
        ai.savName = savName
        ai.path = path
        ai.NSavBrain = NSavBrain
        ai.actFunc = actFunc
        ai.valType = valType
        ai.dilution = dilution
        ai.p_m = p_m
        # BUG FIX: the original multiplied the caller's dict IN PLACE, so the
        # shared mutable default compounded the multiplication on every
        # instantiation.  Also converts to int so the counts can feed
        # range() in evolve() (the demo passes float proportions).
        ai.newGen = {key: int(round(newGen[key] * agentPerGen)) for key in newGen}
        # Random int8 values.  BUG FIX: the upper bound was 128, which is
        # outside the int8 range (-128..127).
        ai.gauss = lambda size: np.array(
            [randint(-127, 127) for _ in range(size)], dtype=np.int8)
        ai.isDrawn = 0
        ai.brain = []

    def info(ai):
        """Return a human-readable summary of this agent's configuration."""
        return ('Save name: %s\nAgents saved: %s/%s\nData stored in int%s\n'
                'Brain dimension: %s\nActivation function: %s\n'
                'GEN: %s elit, %s parents, %s children' % (
                    ai.savName + '.ia', ai.NSavBrain, ai.genLen,
                    [8, 16][ai.valType],
                    ', '.join([str(d) for d in ai.dim]), ai.actFunc,
                    ai.newGen['E'], ai.newGen['P'], ai.newGen['C']))

    def act_func(ai, arr, hidden):
        """Apply the activation: ReLU on hidden layers, ai.actFunc on output.

        BUG FIX: the default actFunc string is 'softMax' but the comparisons
        were case-sensitive lowercase ('softmax'), so the default silently
        returned None.  Comparisons are now case-insensitive, a callable
        actFunc is supported (the docs promise custom functions), and an
        unknown name raises instead of returning None.
        """
        if hidden:
            return np.maximum(0, arr)  # ReLU for hidden layers
        if callable(ai.actFunc):
            return ai.actFunc(arr)
        name = ai.actFunc.lower()
        if name == 'sigmoid':
            return np.array([1, ]) / (np.exp(-arr) + 1)
        if name == 'softmax':
            return np.exp(arr) / np.sum(np.exp(arr), axis=0)
        if name == 'tanh':
            return np.tanh(arr)
        if name == 'relu':
            return np.maximum(0, arr)
        raise ValueError('unknown activation: %s' % ai.actFunc)

    def draw(ai, ind=0, x=0, y=0, w=320, h=222):
        """Draw brain *ind* in the (x, y, w, h) rectangle: green/red lines
        for positive/negative weights, black dots for neurons.

        NOTE(review): the last layer's neurons are never dotted (the loop
        stops one layer early) — confirm whether that is intentional.
        """
        if line is None:  # no kandinsky available: not on the calculator
            return
        dim = ai.dim
        colW = w // len(dim)
        for L in range(len(dim) - 1):
            rowH = h // dim[L]
            for n in range(dim[L]):
                px = x + int((L + .5) * colW)
                py = y + int((n + .5) * rowH)
                for n1 in range(dim[L + 1]):
                    col = 'green' if ai.brain[ind]['W'][L][n1][n] > 0 else 'red'
                    line((px, py),
                         (x + int((L + 1.5) * colW),
                          y + int((n1 + .5) * (h // dim[L + 1]))),
                         color=col, width=2)
                circle((px, py), int(h // (3 * max(dim))), fill_color='black')

    def _sav_file(ai):
        """File name of the save ('savName.ia'), prefixed with *path* if set.

        BUG FIX: save() and load() built this differently, and both broke
        when path was left at its default of None.
        """
        name = ai.savName + '.ia'
        return '%s/%s' % (ai.path, name) if ai.path else name

    def setup(ai):
        """Create a fresh generation, or interactively reload a previous save."""
        from os import listdir
        # BUG FIX: `new` was referenced before assignment when no save file
        # existed; default to creating from scratch.
        new = 1
        existing = listdir(ai.path) if ai.path else listdir()
        if ai.savName + '.ia' in existing:
            if input('Load %s sav ? (y)\n>>> ' % (ai.savName)) in ['', 'y']:
                new = 0
                ai.load()
                # BUG FIX: the save only holds NSavBrain brains; pad the
                # population before evolving, otherwise the roulette wheel
                # indexes past the end of ai.brain.
                while len(ai.brain) < ai.genLen:
                    ai.brain.append(ai.create_brain())
                ai.evolve([1, ] * ai.genLen)
            else:
                # Original behavior: prompt before erasing, but start from
                # scratch whatever the answer (both branches set new = 1).
                input('! Sav on %s will be erased:\n! are you sure ? (y)\n>>> '
                      % (ai.savName))
        if new:  # create brains from scratch
            ai.gen, ai.brain = 0, []
            for _ in range(ai.genLen):
                ai.brain.append(ai.create_brain())

    def save(ai):
        """Write generation count and the first NSavBrain brains to disk.

        Plain-text format: gen number, newline, then for each brain
        'NEW_BRAIN' + repr(weight lists) + 'b' + repr(bias lists).
        """
        print('Saving...')
        sav = str(ai.gen) + '\n'
        for ind in range(ai.NSavBrain):
            sav += 'NEW_BRAIN'
            sav += repr([l.tolist() for l in ai.brain[ind]['W']])
            sav += 'b' + repr([l.tolist() for l in ai.brain[ind]['b']])
        with open(ai._sav_file(), 'w', encoding='UTF-8') as file:
            file.write(sav)

    def load(ai):
        """Restore the generation counter and NSavBrain brains from disk."""
        print('LOADING...')
        with open(ai._sav_file(), 'r', encoding='UTF-8') as f:
            ai.gen = int(f.readline())  # BUG FIX: was eval() on file data
            data = f.read()
        ai.brain = [{'W': [], 'b': []} for _ in range(ai.NSavBrain)]
        t = [np.int8, np.int16][ai.valType]
        brainData = data.split('NEW_BRAIN')[1:]
        for indData in range(ai.NSavBrain):
            # The 'b' separator cannot occur inside repr() of nested int
            # lists, so a single split is safe.
            # SECURITY NOTE: eval() of our own save file; never load an
            # untrusted .ia file.
            parts = brainData[indData].split('b')
            ai.brain[indData]['W'] = [np.array(l, dtype=t) for l in eval(parts[0])]
            ai.brain[indData]['b'] = [np.array(l, dtype=t) for l in eval(parts[1])]

    def weights(ai, layer_1, layer_2):
        """Random (layer_2 x layer_1) weight matrix in the configured dtype."""
        return np.array([ai.gauss(layer_1) for _ in range(layer_2)],
                        dtype=[np.int8, np.int16][ai.valType])

    def biases(ai, layer, randFunc):
        """Vector of *layer* biases drawn from *randFunc*."""
        return np.array([randFunc() for _ in range(layer)],
                        dtype=[np.int8, np.int16][ai.valType])

    def create_brain(ai):
        """Build one random brain.

        Layout: {'W': [m1, ..., mn], 'b': [v1, ..., vn]} where W[i] has
        shape (dim[i+1], dim[i]) — row r holds the weights feeding neuron r
        of layer i+1 — and every bias starts at zero.
        """
        brain = {'W': [ai.gauss(ai.dim[0] * ai.dim[1]).reshape((ai.dim[1], ai.dim[0]))],
                 'b': [np.zeros(ai.dim[1], dtype=np.int8)]}
        for i in range(len(ai.dim) - 2):  # remaining layers
            brain['W'].append(ai.gauss(ai.dim[i + 1] * ai.dim[i + 2])
                              .reshape((ai.dim[i + 2], ai.dim[i + 1])))
            brain['b'].append(np.zeros(ai.dim[i + 2], dtype=np.int8))
        return brain

    def evolve(ai, fitness):
        """Build the next generation from the *fitness* scores.

        Composition: 'E' copies of the best agent (elitism), then pairs of
        roulette-wheel parents, each pair producing two crossed-over (and
        possibly mutated) children; any shortfall is topped up with brand
        new random brains.

        NOTE(review): data is assumed int8 here (children are created with
        dtype=np.int8 regardless of valType); the rep-count formula can also
        produce more than genLen agents — confirm both before relying on
        valType=1 or exact population sizes.
        """
        newBrain = []
        # Elitism: duplicate the single best agent E times (argmax hoisted
        # out of the loop — the original recomputed it every iteration).
        best = max(range(ai.genLen), key=lambda index: fitness[index])
        for _ in range(ai.newGen['E']):
            newBrain.append(ai.brain[best])
        # Selection probabilities for the roulette wheel.
        total_fitness = sum(fitness)
        P = [fi / total_fitness for fi in fitness]
        P[-1] = 1 - sum(P[:-1])  # force an exact sum of 1
        parentsIndex = np.array(range(ai.genLen))
        for rep in range(ai.genLen - ai.newGen['P'] // 2
                         - ai.newGen['C'] // 2 - ai.newGen['E']):
            # Pick two parents; after each pick the winner's probability is
            # zeroed and redistributed so the wheel still sums to 1.
            index = {'Father': 0, 'Mother': 0}
            for parent in ['Father', 'Mother']:
                index[parent] = custom_choice(parentsIndex, P)
                increase = P[index[parent]] / (len(P) - 1)
                P = [pi + increase for pi in P]
                P[index[parent]] = 0
                newBrain.append(ai.brain[index[parent]])  # parents survive
            # Two children per pair, crossed over at a random bit position.
            parentIndex = len(newBrain) - 1
            for children in [0, 1]:
                sep = randint(2, 6)
                childBrain = {'W': [], 'b': []}
                for layer in range(len(ai.dim) - 1):
                    childBrain['W'].append(np.array(
                        [ai.layer_crossover(newBrain[parentIndex]['W'][layer][neuron],
                                            newBrain[parentIndex - 1]['W'][layer][neuron],
                                            sep)
                         for neuron in range(ai.dim[layer + 1])], dtype=np.int8))
                    childBrain['b'].append(np.array(
                        ai.layer_crossover(newBrain[parentIndex]['b'][layer],
                                           newBrain[parentIndex - 1]['b'][layer],
                                           sep), dtype=np.int8))
                newBrain.append(childBrain)
        ai.brain = newBrain
        # Top up with fresh random agents if the population is short.
        for i in range(ai.genLen - len(ai.brain)):
            ai.brain.append(ai.create_brain())

    def layer_crossover(ai, layer1, layer2, sep):
        """Cross every value of two parent vectors at bit position *sep*."""
        return [ai.value_crossover(layer1[n], layer2[n], sep)
                for n in range(len(layer1))]

    def value_crossover(ai, val1, val2, sep):
        """Cross two values bitwise at *sep*, flipping each bit with prob p_m.

        Values are offset by +125 before being rendered as 8-bit strings.
        NOTE(review): the offset is never reversed and the result spans
        0..255, outside int8 — preserved as-is, confirm original intent.
        """
        bits = (binary_repr(int(val1) + 125, width=8)[:sep]
                + binary_repr(int(val2) + 125, width=8)[sep:])
        mutated = ''.join(('1' if b == '0' else '0') if random() < ai.p_m else b
                          for b in bits)
        return int(mutated, 2)  # int(x, 2) instead of eval('0b'+x): same, safer

    def forward_propagation(ai, ind, inp):
        """Run input *inp* through brain *ind* and return the output layer.

        Weights/biases are scaled by (dilution + 1) then normalized by the
        dtype scale (100 for int8, 100_000 for int16).
        """
        out = np.array(inp)
        last = len(ai.dim) - 2  # index of the output layer's weight matrix
        scale = ai.dilution + 1
        for layer in range(len(ai.dim) - 1):
            out = ai.act_func(
                (np.dot(ai.brain[ind]['W'][layer] * scale, out)
                 + ai.brain[ind]['b'][layer] * scale) / [100, 100_000][ai.valType],
                layer != last)  # BUG FIX: was `layer != 0`, which applied
                # ReLU to the OUTPUT layer and actFunc to the first layer
        return out  # output neuron activations


if __name__ == '__main__':
    # Demo — guarded so importing this module no longer blocks on input().
    Aristote = Agent(
        brainDim=[16, 8, 4],
        agentPerGen=5,
        savName='Aristote',
        NSavBrain=1,
        actFunc='sigmoid',
        newGen={'E': 0.3, 'P': 0.5, 'C': 0.2},
        valType=0,
        dilution=0,)
    Aristote.setup()