| author | Dana Jansens <danakj@orodu.net> | 2003-04-05 20:27:03 +0000 |
|---|---|---|
| committer | Dana Jansens <danakj@orodu.net> | 2003-04-05 20:27:03 +0000 |
| commit | cbbf90a718ecc6836ef7a77b9040aebb9da348b8 (patch) | |
| tree | a53bcdc993f850bc0500daaebd5b1bd0b7b50ee1 /openbox/parse.y | |
| parent | 88f8ebada97c4c82252badeb57b7e71a2940600b (diff) | |
change how rc parsing will work. a=b will be parsed in any [section] and given to a separate parsing callback. no more general config infrastructure needed.
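As a concrete illustration of the scheme described above (a plain key = value pair inside any [section]), an rc file would look roughly like the snippet below; the section and key names are invented for the example and are not taken from Openbox's actual rc:

```
[focus]
focusNew = true
followMouse = false
```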
Diffstat (limited to 'openbox/parse.y')
| -rw-r--r-- | openbox/parse.y | 35 |
1 file changed, 18 insertions, 17 deletions
```diff
diff --git a/openbox/parse.y b/openbox/parse.y
index f8b0a044..125f1803 100644
--- a/openbox/parse.y
+++ b/openbox/parse.y
@@ -31,6 +31,7 @@ static ParseToken t;
 
 /* in parse.c */
 void parse_token(ParseToken *token);
+void parse_assign(char *name, ParseToken *token);
 void parse_set_section(char *section);
 
 %}
@@ -54,31 +55,32 @@ void parse_set_section(char *section);
 %%
 
 sections:
-  | sections '[' IDENTIFIER ']' { parse_set_section($3); } '\n' lines
+  | sections '[' IDENTIFIER ']' { parse_set_section($3); } '\n'
+    { ++yylineno; } lines
   ;
 
 lines:
-  | lines tokens '\n' { t.type = $3; t.data.character = $3; parse_token(&t); }
+  | lines tokens { t.type='\n'; t.data.character='\n'; parse_token(&t); } '\n'
+    { ++yylineno; }
+  | lines IDENTIFIER '=' listtoken { parse_assign($2, &t); } '\n'
+    { ++yylineno; }
   ;
 
 tokens:
-    tokens token
-  | token
+    tokens token { parse_token(&t); }
+  | token { parse_token(&t); }
  ;
 
 token:
-    REAL { t.type = TOKEN_REAL; t.data.real = $1; parse_token(&t); }
-  | INTEGER { t.type = TOKEN_INTEGER; t.data.integer = $1;
-              parse_token(&t); }
-  | STRING { t.type = TOKEN_STRING; t.data.string = $1; parse_token(&t); }
-  | IDENTIFIER { t.type = TOKEN_IDENTIFIER; t.data.identifier = $1;
-                 parse_token(&t);}
-  | BOOL { t.type = TOKEN_BOOL; t.data.bool = $1; parse_token(&t); }
-  | list { t.type = TOKEN_LIST; t.data.list = $1; parse_token(&t); }
-  | '{' { t.type = $1; t.data.character = $1; parse_token(&t); }
-  | '}' { t.type = $1; t.data.character = $1; parse_token(&t); }
-  | '=' { t.type = $1; t.data.character = $1; parse_token(&t); }
-  | ',' { t.type = $1; t.data.character = $1; parse_token(&t); }
+    REAL { t.type = TOKEN_REAL; t.data.real = $1; }
+  | INTEGER { t.type = TOKEN_INTEGER; t.data.integer = $1; }
+  | STRING { t.type = TOKEN_STRING; t.data.string = $1; }
+  | IDENTIFIER { t.type = TOKEN_IDENTIFIER; t.data.identifier = $1; }
+  | BOOL { t.type = TOKEN_BOOL; t.data.bool = $1; }
+  | list { t.type = TOKEN_LIST; t.data.list = $1; }
+  | '{' { t.type = $1; t.data.character = $1; }
+  | '}' { t.type = $1; t.data.character = $1; }
+  | ',' { t.type = $1; t.data.character = $1; }
   ;
 
 list:
@@ -107,7 +109,6 @@ listtoken:
   | list { t.type = TOKEN_LIST; t.data.list = $1; }
   | '{' { t.type = $1; t.data.character = $1; }
   | '}' { t.type = $1; t.data.character = $1; }
-  | '=' { t.type = $1; t.data.character = $1; }
   | ',' { t.type = $1; t.data.character = $1; }
   ;
 
```
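The new `lines IDENTIFIER '=' listtoken` production hands each assignment to `parse_assign()`, whose body lives in parse.c and is not part of this diff. The following is only a minimal sketch of what such a callback could look like, assuming a `parse.h` header that declares `ParseToken`, its `data` union, and the `TOKEN_*` type constants used in the grammar actions above; the field types and the behaviour are assumptions for illustration.

```c
#include <stdio.h>
#include "parse.h"   /* assumed header declaring ParseToken and the TOKEN_* constants */

/* Hypothetical sketch of the per-assignment callback: receive one
   "name = value" pair from the parser and dispatch on the value's type.
   The real parse_assign() in parse.c may differ. */
void parse_assign(char *name, ParseToken *token)
{
    switch (token->type) {
    case TOKEN_STRING:
        printf("%s = \"%s\"\n", name, token->data.string);
        break;
    case TOKEN_INTEGER:
        printf("%s = %d\n", name, token->data.integer);
        break;
    case TOKEN_BOOL:
        printf("%s = %s\n", name, token->data.bool ? "true" : "false");
        break;
    default:
        printf("%s = <unhandled value type>\n", name);
        break;
    }
}
```

Because the grammar only reduces this production after a complete `IDENTIFIER '=' listtoken` line, the callback sees one fully-parsed assignment at a time and does not need to track any parser state of its own.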
