From: Lukas Jiriste
Date: Thu, 4 Jul 2024 11:27:40 +0000 (+0200)
Subject: Implement categorize_tokens
X-Git-Url: https://git.ljiriste.work/?a=commitdiff_plain;h=9c0ce125af12eba3e54d2abfe939bb24e0a207e7;p=Libft.git

Implement categorize_tokens

This function fills the tokens t_vec with the language's tokens:
first the terminal tokens, then the end-of-file token, then the
nonterminal tokens.
---

diff --git a/ft_parse/ft_parsing_table_generate.c b/ft_parse/ft_parsing_table_generate.c
index 6f39851..5a61463 100644
--- a/ft_parse/ft_parsing_table_generate.c
+++ b/ft_parse/ft_parsing_table_generate.c
@@ -6,7 +6,7 @@
 /*   By: ljiriste                                   +#+  +:+       +#+        */
 /*                                                +#+#+#+#+#+   +#+           */
 /*   Created: 2024/06/27 11:16:53 by ljiriste          #+#    #+#             */
-/*   Updated: 2024/07/04 12:29:18 by ljiriste         ###   ########.fr       */
+/*   Updated: 2024/07/04 13:24:49 by ljiriste         ###   ########.fr       */
 /*                                                                            */
 /* ************************************************************************** */
@@ -450,9 +450,106 @@ t_ft_stat construct_first_kernel(__attribute__((unused))t_vec *kernel, __attribu
 	return (res);
 }
 
-t_ft_stat categorize_tokens(__attribute__((unused))t_vec *tokens, __attribute__((unused))const t_vec *rules)
+t_ft_stat append_token(t_vec *tokens, const t_token *token)
 {
-	ft_printf("categorize_tokens is not yet implemented\n");
+	t_ft_stat	res;
+	t_token		token_clone;
+
+	token_clone = ft_token_dup(token);
+	if (!token_clone.type)
+		return (alloc_fail);
+	res = ft_vec_append(tokens, &token_clone);
+	if (res != success)
+		ft_free_token(&token_clone);
+	return (res);
+}
+
+t_ft_stat prepend_token(t_vec *tokens, const t_token *token)
+{
+	t_ft_stat	res;
+	t_token		token_clone;
+
+	token_clone = ft_token_dup(token);
+	if (!token_clone.type)
+		return (alloc_fail);
+	res = ft_vec_insert(tokens, &token_clone, 0);
+	if (res != success)
+		ft_free_token(&token_clone);
+	return (res);
+}
+
+int token_in_results(const t_token *token, const t_vec *rules)
+{
+	size_t					i;
+	const t_grammar_rule	*rule;
+
+	i = 1;
+	while (i < rules->size)
+	{
+		rule = ft_vec_caccess(rules, i);
+		if (!ft_strcmp(token->type, rule->result.type))
+			return (1);
+		++i;
+	}
+	return (0);
+}
+
+t_ft_stat add_constituents(t_vec *tokens, const t_vec *constituents, const t_vec *rules)
+{
+	t_ft_stat		res;
+	size_t			i;
+	const t_token	*token;
+
+	i = 0;
+	while (i < constituents->size)
+	{
+		token = ft_vec_caccess(constituents, i);
+		if (ft_vec_contains(tokens, token, void_cmp_token_type))
+		{
+			++i;
+			continue ;
+		}
+		if (token_in_results(token, rules))
+			res = append_token(tokens, token);
+		else
+			res = prepend_token(tokens, token);
+		if (res != success)
+			return (res);
+		++i;
+	}
+	return (success);
+}
+
+t_ft_stat categorize_tokens(t_vec *tokens, const t_vec *rules)
+{
+	t_ft_stat				res;
+	size_t					i;
+	const t_grammar_rule	*rule;
+
+	res = append_token(tokens, &eof_token);
+	if (res != success)
+		return (res);
+	i = 1;
+	while (i < rules->size)
+	{
+		rule = ft_vec_caccess(rules, i);
+		if (!ft_vec_contains(tokens, &rule->result, void_cmp_token_type))
+		{
+			res = append_token(tokens, &rule->result);
+			if (res != success)
+			{
+				ft_vec_free(tokens, ft_free_token);
+				return (res);
+			}
+		}
+		res = add_constituents(tokens, &rule->constituents, rules);
+		if (res != success)
+		{
+			ft_vec_free(tokens, ft_free_token);
+			return (res);
+		}
+		++i;
+	}
 	return (success);
 }
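
---

For a quick sanity check of the resulting order, here is a standalone sketch
(not part of the patch) that mimics the categorization walk on the toy grammar
S -> a S b | c.  It deliberately uses plain C arrays and strings instead of the
Libft t_vec/t_token API, so the grammar and every name in it are illustrative
only; like the patch, it treats a token type as a nonterminal exactly when it
appears as some rule's result, appends the end-of-file marker and the
nonterminals, and prepends the terminals.

#include <stdio.h>
#include <string.h>

#define MAX_TOKENS 16

/* A token type counts as a nonterminal iff some rule produces it,
** mirroring token_in_results in the patch. */
static int	is_nonterminal(const char *type, const char **results, int nrules)
{
	int	i;

	i = 0;
	while (i < nrules)
	{
		if (!strcmp(type, results[i]))
			return (1);
		++i;
	}
	return (0);
}

static int	contains(char **tokens, int size, const char *type)
{
	int	i;

	i = 0;
	while (i < size)
	{
		if (!strcmp(tokens[i], type))
			return (1);
		++i;
	}
	return (0);
}

int	main(void)
{
	/* Toy grammar (hypothetical): S -> a S b | c */
	const char	*results[2] = {"S", "S"};
	const char	*constituents[2][3] = {{"a", "S", "b"}, {"c", NULL, NULL}};
	char		*tokens[MAX_TOKENS];
	int			size;
	int			i;
	int			j;
	int			k;

	size = 0;
	tokens[size++] = "$";	/* stands in for eof_token, appended first */
	i = 0;
	while (i < 2)
	{
		if (!contains(tokens, size, results[i]))
			tokens[size++] = (char *)results[i];	/* nonterminal: append */
		j = 0;
		while (j < 3 && constituents[i][j])
		{
			if (!contains(tokens, size, constituents[i][j]))
			{
				if (is_nonterminal(constituents[i][j], results, 2))
					tokens[size++] = (char *)constituents[i][j];
				else
				{
					k = size++;	/* terminal: prepend by shifting right */
					while (k > 0)
					{
						tokens[k] = tokens[k - 1];
						--k;
					}
					tokens[0] = (char *)constituents[i][j];
				}
			}
			++j;
		}
		++i;
	}
	i = 0;
	while (i < size)
		printf("%s ", tokens[i++]);	/* prints: c b a $ S */
	printf("\n");
	return (0);
}

Prepending terminals while appending the end-of-file marker and the
nonterminals keeps the three groups contiguous in the vector, which gives the
ordering described in the commit message without a separate sorting pass.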