def execute_query(sql_query):
    """
    function : Run a SQL query through the shared database helper
    input : sql_query - SQL text to execute
    output : first row of the result set
    """
    # NOTE: docstring fixed — it previously described collectAllowSystemTableMods.
    data = AllowSystemTableMods()
    data.db = []
    getDatabaseInfo(data, sql_query)
    return data.db[0]


#############################################################################


def check_connection_status():
    """
    function : Check connection-related settings/statistics and log warnings
    input : NA
    output : NA
    """
    max_connections = int(execute_query("""show max_connections;"""))
    super_connections = int(
        execute_query("""select setting from pg_settings where name = 'sysadmin_reserved_connections'; """))
    current_connections = int(execute_query("""select count(1) from pg_stat_activity;"""))
    current_connections_percent = current_connections * 100 / max_connections
    superuser_reserved_connections_ratio = super_connections * 100 / max_connections
    track_activities_status = execute_query("""show track_activities;""")
    connection_age_average = float(
        execute_query("""select extract(epoch from avg(now()-backend_start)) as age from pg_stat_activity;"""))
    pre_auth_delay = int(execute_query("""show pre_auth_delay;"""))
    post_auth_delay = int(execute_query("""show post_auth_delay;"""))
    if super_connections == 0:
        g_logger.log(
            " Warning reason: No connection slot is reserved for the superuser. In case of connection saturation you will not be able to connect to investigate or kill connections")
    if superuser_reserved_connections_ratio > 20:
        percentage_formatted = format_percent(superuser_reserved_connections_ratio)
        message = f" Warning reason: {percentage_formatted} of connections are reserved for super user. This is excessive and may limit connections for other users."
        g_logger.log(message)
    # BUG FIX: test the 90% threshold first. With the original order
    # (`if > 70: ... elif > 90: ...`) the 90% branch was unreachable.
    if current_connections_percent > 90:
        g_logger.log(
            " Warning reason:You are using more than 90% of the connection slots. Increase max_connections to avoid saturation of connection slots")
    elif current_connections_percent > 70:
        g_logger.log(
            " Warning reason: You are using more than 70% of the connections slots. Increase max_connections to avoid saturation of connection slots")
    if track_activities_status == 'off':
        g_logger.log(" Warning reason: average connection age not supported when track_activities is off")
    else:
        if connection_age_average < 1 * 60:
            g_logger.log(
                " Warning reason: The average connection age is less than 1 minute. Use a connection pooler to limit new connections/second")
    if pre_auth_delay > 0:
        message = f" Warning reason: pre_auth_delay={pre_auth_delay}: This is a developer feature for debugging and decreases connection delay of {pre_auth_delay} seconds."
        g_logger.log(message)
    if post_auth_delay > 0:
        message = f" Warning reason: post_auth_delay={post_auth_delay}: This is a developer feature for debugging and decreases connection delay by {post_auth_delay} seconds."
        g_logger.log(message)
def format_percent(value):
    """Render *value* as a percentage string with two decimals, e.g. '12.34%'."""
    return f"{value:.2f}%"
def convert_memory_str_to_num(mem_str):
    """Convert a memory size string such as '8kB'/'64MB'/'1GB' to kilobytes.

    BUG FIX: openGauss/PostgreSQL ``SHOW`` reports kilobytes as 'kB'
    (lower-case k), which the original upper-case-only table rejected with
    ValueError; the unit match is now case-insensitive.

    :param mem_str: value with a two-character unit suffix (kB/MB/GB/TB)
    :return: size in kilobytes as int
    :raises ValueError: when the suffix is not a recognised unit
    """
    units = {'KB': 1, 'MB': 1024, 'GB': 1024 ** 2, 'TB': 1024 ** 3}
    num, unit = mem_str[:-2], mem_str[-2:].upper()
    if unit not in units:
        raise ValueError(f"Invalid unit '{unit}' in memory string.")
    return int(float(num) * units[unit])
It may be storage-intensive (I/O and space)" + g_logger.log(message) + if log_hostname == 'on': + message = " Warning reason:log_hostname is on: this will decrease connection performance (because openGauss has to do DNS lookups)" + g_logger.log(message) + if log_statement == 'all' or log_statement == 'mod': + message = " Warning reason:log_statement=all is very storage-intensive and only useful for debugging" + g_logger.log(message) + + +def check_users(): + expiring_soon_users = execute_query("""select usename from pg_user where valuntil < now() + interval '7 days'""") + i_am_super = execute_query("""select usename from pg_shadow where passwd='md5'||md5(usename||usename)""") + if len(expiring_soon_users) > 0: + message = " Warning reason:Some user account will expire in less than 7 days" + g_logger.log(message) + if len(i_am_super) > 0: + message = " Warning reason:If there is a user with the same password and username, an error message will be reported stating 'There is an insecure user password'" + g_logger.log(message) + + def checkConnection(isSetting=False): """ function : Check Connection @@ -2537,6 +2694,7 @@ def checkConnection(isSetting=False): checkHostnossl() checkHostAddressno0() checkSSLConnection(isSetting) + check_connection_status() def checkMonitorIP(isSetting): @@ -4575,6 +4733,8 @@ def checkRuntimeEnvironmentConfiguration(isSetting=False): checkUmask(isSetting) checkHidepid() checkNtpd() + check_memory_usage_situation() + check_shared_buffers_hit_rate() def checkUmask(isSetting): @@ -4665,6 +4825,22 @@ def checkOtherConfigurations(isSetting=False): """ checkBackslashQuote(isSetting) checkAllowSystemTableMods(isSetting) + check_running_time() + check_users() + check_phase_commit() + check_autovacuum() + check_point() + check_storage() + check_wal() + check_planner() + check_indexes() + check_procedures() + check_overcommit() + check_archive() + check_bgwriter() + check_hugepages() + check_io_schedule(ssd=0) + check_dependencies() def 
def is_later_version(min_ver, cur_ver):
    """Return True when cur_ver's major.minor is at least min_ver's.

    Only the first two dotted components are compared; both must be ints.
    """
    required = tuple(int(part) for part in min_ver.split('.')[:2])
    actual = tuple(int(part) for part in cur_ver.split('.')[:2])
    return actual >= required
def check_point():
    """
    function : Check checkpoint-related settings and log warnings
    input : NA
    output : NA
    """
    checkpoint_completion_target = float(execute_query("""show checkpoint_completion_target;"""))
    checkpoint_timeout = trans(execute_query("""show checkpoint_timeout;"""))
    checkpoint_warning = trans(execute_query("""show checkpoint_warning;"""))
    checkpoint_dirty_writing_time_window = checkpoint_timeout * checkpoint_completion_target
    if checkpoint_warning == 0:
        message = " Warning reason:checkpoint_warning value is 0. This is rarely adequate"
        g_logger.log(message)
    if checkpoint_completion_target == 0:
        message = " Warning reason:checkpoint_completion_target value is 0. This is absurd"
        g_logger.log(message)
    else:
        if checkpoint_completion_target < 0.5:
            message = " Warning reason:Checkpoint_completion_target is lower than its default value (0.5)"
            g_logger.log(message)
        elif 0.5 <= checkpoint_completion_target <= 0.7:
            message = " Warning reason:checkpoint_completion_target is low"
            g_logger.log(message)
        elif checkpoint_completion_target >= 1:
            message = " Warning reason:checkpoint_completion_target too high"
            g_logger.log(message)
        elif checkpoint_completion_target > 0.9:
            message = " Warning reason:checkpoint_completion_target is too near to 1"
            g_logger.log(message)
        # BUG FIX: values in (0.7, 0.9] previously fell through to a bare
        # `else` and were wrongly reported as "too high"; that range is the
        # acceptable one, so no warning is logged for it.
    if checkpoint_dirty_writing_time_window < 10:
        message = " Warning reason:(checkpoint_timeout / checkpoint_completion_target) is probably too low"
        g_logger.log(message)


def trans(data):
    """Strip every non-digit character from *data* and return the int value."""
    return int(re.sub(r'\D', '', data))
c,pg_catalog.pg_namespace n,pg_catalog.pg_index i WHERE i.indisvalid = false AND i.indexrelid = c.oid AND c.relnamespace = n.oid;""") + if len(invalid_indexes) > 0: + message = " Warning reason:List of invalid indexes in the database Please check/reindex any invalid index" + g_logger.log(message) + + +def check_procedures(): + """ + function : check_procedures + input : Bool + output : NA + """ + default_cost_procs = execute_query( + """select n.nspname||'.'||p.proname from pg_catalog.pg_proc p left join pg_catalog.pg_namespace n on n.oid = p.pronamespace where pg_catalog.pg_function_is_visible(p.oid) and n.nspname not in ('pg_catalog','information_schema','sys') and p.prorows<>1000 and p.procost<>10 and p.proname not like 'uuid_%' and p.proname != 'pg_stat_statements_reset'""") + if len(default_cost_procs) > 0: + message = " Warning reason:user procedures do not have custom cost and rows settings'" + g_logger.log(message) + + +def check_overcommit(): + """ + function : check_overcommit + input : Bool + output : NA + """ + cmd_memory = "cat /proc/sys/vm/overcommit_memory" + cmd_ratio = "cat /proc/sys/vm/overcommit_ratio" + os_name = "uname -s" + overcommit_memory = get_cmd_res(cmd_memory) + overcommit_ratio = get_cmd_res(cmd_ratio) + if not (is_pure_digit(overcommit_memory) and is_pure_digit(overcommit_ratio)): + return + os_name = get_cmd_res(os_name) + if os_name != 'darwin' and int(overcommit_memory) != 2: + message = " Warning reason:Memory overcommitment is allowed on the system. This may lead the OOM Killer to kill at least one openGauss process, DANGER!" + g_logger.log(message) + if int(overcommit_ratio) <= 50: + message = ( + " Warning reason: vm.overcommit_ratio is too low. 
def is_pure_digit(s):
    """Return True when *s* consists solely of decimal digits."""
    return s.isdigit()


def get_cmd_res(command):
    """Run *command* in a shell and return its stdout with newlines removed.

    Returns an empty string when the command cannot be executed at all.
    """
    try:
        completed = subprocess.run(command, shell=True,
                                   stdout=subprocess.PIPE,
                                   stderr=subprocess.PIPE,
                                   universal_newlines=True)
        output = completed.stdout
        return output.replace('\n', '')
    except Exception:
        return ''
def get_os_name():
    """Return the kernel name reported by ``uname -s``."""
    return get_cmd_res("uname -s")


def is_supported_os(os_name):
    """Huge Pages checks only apply on Linux and FreeBSD kernels."""
    return os_name in ('linux', 'Linux', 'freebsd')


def get_nr_hugepages():
    """Read the configured Huge Pages count from /proc/sys/vm/nr_hugepages."""
    return get_cmd_res("cat /proc/sys/vm/nr_hugepages")


def is_hugepages_available(nr_hugepages):
    """Return True when a positive number of Huge Pages is configured."""
    if nr_hugepages is None:
        return False
    return int(nr_hugepages) > 0
def get_peak_memory(pg_pid):
    """Return the VmPeak value (kB, as a string) for process *pg_pid*.

    BUG FIX: the awk field was written as ``\\$2``; inside single quotes the
    backslash reaches awk verbatim and breaks the field reference. Plain
    ``$2`` is correct here (the shell does not expand it in single quotes).
    """
    return get_cmd_res(f"grep ^VmPeak /proc/{pg_pid}/status | awk '{{ print $2 }}'").strip()


def calculate_suggested_hugepages(peak, os_huge_info):
    """Return peak-memory / hugepage-size, or 0 when either value is unusable.

    :param peak: VmPeak in kB (string or int-like)
    :param os_huge_info: dict parsed from /proc/meminfo ('Hugepagesize' in kB)
    :return: suggested page count as float, 0 on missing/zero/invalid input
    """
    hugepagesize = os_huge_info.get('Hugepagesize')
    # Guard both a missing key and a zero size (avoids ZeroDivisionError).
    if not hugepagesize:
        return 0
    try:
        peak_value = int(peak)
        return peak_value / hugepagesize
    except ValueError:
        return 0
def should_skip_unit(unit):
    """Skip directory placeholders, empty entries and CD-ROM (srN) devices."""
    if unit in ('.', '..', ''):
        return True
    return unit.startswith('sr')
def match_hypervisor(line):
    """Identify a hypervisor from one line of dmesg output, or return None.

    BUG FIX: ``re.match`` only matches at the start of the string, but dmesg
    lines carry timestamp prefixes (e.g. "[0.000000] Hypervisor detected:
    KVM"), so detection could never fire; ``re.search`` scans the whole line.
    """
    hypervisor_dict = {
        'vmware': 'VMware',
        'kvm': 'KVM',
        'xen': 'XEN',
        'vbox': 'VirtualBox',
        'hyper-v': 'Hyper-V'
    }
    for keyword, name in hypervisor_dict.items():
        if re.search(keyword, line, re.IGNORECASE):
            return name
    return None
Use the --ssd arg if the VM only uses SSD storage") + + if hypervisor is not None and 'cfq' in active_schedulers: + g_logger.log( + " Warning reason: The CFQ scheduler is inadequate on a virtual machine (because the hypervisor and/or underlying kernel is already in charge of the I/O scheduling)") + + +def execute_sql_query(port, sql_query): + cmd = f"gsql -d postgres -p '{port}' -r -c \"{sql_query}\"" + result = subprocess.run( + ['gsql', '-d', 'postgres', '-p', str(port), '-c', sql_query, '-t', '-A'], + capture_output=True, + text=True + ) + if result.returncode != 0: + raise Exception((ErrorCode.GAUSS_505["GAUSS_50502"] % "ConnectionConfiguration") + + ("The cmd is : %s" % cmd)) + return json.loads(result.stdout.strip()) + + +def get_settings(): + sql_query = """ + SELECT json_agg(json_build_object( + 'name', name, + 'setting', setting, + 'unit', unit, + 'category', category, + 'short_desc', short_desc, + 'extra_desc', extra_desc, + 'context', context, + 'vartype', vartype, + 'source', source, + 'min_val', min_val, + 'max_val', max_val, + 'enumvals', enumvals, + 'boot_val', boot_val, + 'reset_val', reset_val, + 'sourcefile', sourcefile, + 'sourceline', sourceline + )) FROM pg_settings; + """ + port = int(getValueFromFile('port')) + settings_data = execute_sql_query(port, sql_query) + settings = {row['name']: row for row in settings_data} + return settings + + +def process_dependencies(rule_files): + current_path = os.path.dirname(os.path.realpath(__file__)) + m = MyLexer() + m.build() + y = MyYacc() + y.build() + dependency_parser = y.yacc + + for rule_file in rule_files: + process_rule_file(current_path, rule_file, dependency_parser) + + +def process_rule_file(current_path, rule_file, dependency_parser): + file_path = os.path.join(current_path, rule_file) + dependencies = read_dependencies(file_path) + parse_dependencies(dependencies, dependency_parser) + + +def read_dependencies(file_path): + with open(file_path, mode='r', newline='', encoding='utf-8') as 
def get_local_role_value(conf_path):
    """Query ``gs_ctl`` for the data directory *conf_path* and return the
    local_role value (e.g. 'Normal', 'Primary', 'Standby').

    :param conf_path: path of the instance data directory
    :raises RuntimeError: when the command fails or local_role is absent
    """
    # BUG FIX: the conf_path parameter was ignored and os.getenv('PGDATA')
    # was re-read instead (which may be None); use the passed-in path.
    cmd = "gs_ctl query -D %s" % conf_path
    try:
        # Execute commands and capture output
        result = subprocess.run(cmd, shell=True, stdout=subprocess.PIPE,
                                stderr=subprocess.PIPE, universal_newlines=True)
        # Extracting the value of local_role using regular expressions
        match = re.search(r'local_role\s+:\s+(\w+)', result.stdout)
        if match:
            return match.group(1)
        else:
            raise ValueError("local_role 未找到")
    except subprocess.CalledProcessError as e:
        raise RuntimeError(f"命令执行失败: {e.stderr.strip()}") from e
    except ValueError as e:
        raise RuntimeError(str(e)) from e


def determine_rule_files(local_role_value):
    """Pick the dependency-rule CSV set based on the node's local_role."""
    if local_role_value == 'Normal':
        return ['rules/rules_single_node.csv']
    else:
        return ['rules/rules_multi_node.csv']
def check_alert(level, s):
    """Record a 'Check alert' dependency finding."""
    add_dependency_info(level, 'Check alert', s)


def check_not_effect(level, s):
    """Record a 'Check NoEffect' dependency finding."""
    add_dependency_info(level, 'Check NoEffect', s)


def check_overwrite(level, s):
    """Record a 'Check Overwrite' dependency finding."""
    add_dependency_info(level, 'Check Overwrite', s)


def check_function(level, s):
    """Record a 'Check Function' dependency finding."""
    add_dependency_info(level, 'Check Function', s)


def check_performance(level, s):
    """Record a 'Check Performance' dependency finding."""
    add_dependency_info(level, 'Check Performance', s)


# Maps rule-file category keywords to their handler functions.
function_dict = {
    "alert": check_alert,
    "NotEffect": check_not_effect,
    "Overwrite": check_overwrite,
    "Function": check_function,
    "Performance": check_performance,
}


def get_function(name):
    """Return the handler registered for *name* (raises KeyError if absent)."""
    return function_dict[name]
class Logger(object):
    """Minimal logger that writes %-formatted messages to a stream.

    ``flexible`` is any object with a write() method (file, StringIO, ...).
    """

    def __init__(self, flexible):
        self.flexible = flexible

    def critical(self, maintain, *args, **kwargs):
        """Write the formatted message followed by a newline."""
        self.flexible.write('%s\n' % (maintain % args))

    def warning(self, maintain, *args, **kwargs):
        """Write the formatted message with a 'warn: ' prefix."""
        self.flexible.write('warn: %s\n' % (maintain % args))

    def error(self, maintain, *args, **kwargs):
        """Write the formatted message with an 'err: ' prefix."""
        self.flexible.write('err: %s\n' % (maintain % args))

    # info and debug are plain aliases for critical.
    info = critical
    debug = critical
self.lexstate = 'INITIAL' + self.analyzerstatestack = [] + self.analyzerstateinfo = None + self.analyzerstateignore = {} + self.analyzerstateerrorf = {} + self.analyzerstateeoff = {} + self.analyzerreflags = 0 + self.analyzerdata = None + self.analyzerpos = 0 + self.analyzerlen = 0 + self.analyzererrorf = None + self.analyzereoff = None + self.analyzertokens = None + self.analyzertignore = '' + self.lexliterals = '' + self.analyzermodule = None + self.lineno = 1 + + def _rebind_lexstatere(self, obj): + newtab = {} + for key, element in self.lexstatere.items(): + newre = [] + for creek, findex in element : + newfindex = self._rebind_findex(obj, findex) + newre.append((creek, newfindex)) + newtab[key] = newre + return newtab + + @staticmethod + def _rebind_findex(self, obj, findex): + nuclear = [] + for f in findex: + if not f or not f[0]: + nuclear.append(f) + continue + nuclear.append((getattr(obj, f[0].__name__), f[1])) + return nuclear + + def _rebind_analyzerstateerrorf(self, obj): + newtab = {} + for key, ef in self.analyzerstateerrorf.items(): + newtab[key] = getattr(obj, ef.__name__) + return newtab + + def input(self, s): + self.analyzerdata = s + self.analyzerpos = 0 + self.analyzerlen = len(s) + + def analysis_start(self, state): + if state not in self.lexstatere: + raise ValueError(f'Undefined state {state!r}') + self.lexpattern = self.lexstatere[state] + self.analyzertext = self.analyzerstateretext[state] + self.analyzertignore = self.analyzerstateignore.get(state, '') + self.analyzererrorf = self.analyzerstateerrorf.get(state, None) + self.analyzereoff = self.analyzerstateeoff.get(state, None) + self.lexstate = state + + def add_state(self, state): + self.analyzerstatestack.append(self.lexstate) + self.analysis_start(state) + + def revert_state(self): + self.analysis_start(self.analyzerstatestack.pop()) + + def state_now(self): + return self.lexstate + + def defer(self, n): + self.analyzerpos += n + + def token(self): + analyzerpos = self.analyzerpos + 
analyzerlen = self.analyzerlen + analyzertignore = self.analyzertignore + analyzerdata = self.analyzerdata + + while analyzerpos < analyzerlen: + if analyzerdata[analyzerpos] in analyzertignore: + analyzerpos += 1 + continue + + tok = self._process_regex_matches(analyzerpos) + if tok: + return tok + + tok = self.prohi_bit(analyzerpos) + if tok: + return tok + + tok = self._handle_error(analyzerpos) + if tok: + return tok + + self.analyzerpos = analyzerpos + raise SyntaxViolation(f"Illegal character {analyzerdata[analyzerpos]!r} at index {analyzerpos}", + analyzerdata[analyzerpos:]) + + if self.analyzereoff: + tok = self._process_eof() + return tok + + self.analyzerpos = analyzerpos + 1 + if self.analyzerdata is None: + raise RuntimeError('No input string given with input()') + return None + + def _process_regex_matches(self, analyzerpos): + for lexpattern, lexindexfunc in self.lexpattern : + marker = lexpattern .match(self.analyzerdata, analyzerpos) + if not marker: + continue + element = WorldWide() + element.value = marker.group() + element.lineno = self.lineno + element.analyzerpos = analyzerpos + + i = marker.lastindex + func, element.type = lexindexfunc[i] + + if not func: + if element.type: + self.analyzerpos = marker.end() + return element + else: + analyzerpos = marker.end() + break + + analyzerpos = marker.end() + + element.lexer = self + self.lexmatch = marker + self.analyzerpos = analyzerpos + newflag = func(element) + del element.lexer + del self.lexmatch + + if not newflag: + analyzerpos = self.analyzerpos # This is here in case user has updated analyzerpos. 
+ analyzertignore = self.analyzertignore # This is here in case there was a state change + break + return newflag + return None + + def prohi_bit(self, analyzerpos): + if self.analyzerdata[analyzerpos] in self.lexliterals: + tok = WorldWide() + tok.value = self.analyzerdata[analyzerpos] + tok.lineno = self.lineno + tok.type = tok.value + tok.analyzerpos = analyzerpos + self.analyzerpos = analyzerpos + 1 + return tok + return None + + def _handle_error(self, analyzerpos): + if self.analyzererrorf: + tok = WorldWide() + tok.value = self.analyzerdata[analyzerpos:] + tok.lineno = self.lineno + tok.type = 'error' + tok.lexer = self + tok.analyzerpos = analyzerpos + self.analyzerpos = analyzerpos + newresult = self.analyzererrorf(tok) + if analyzerpos == self.analyzerpos: + raise SyntaxViolation(f"Illegal character {self.analyzerdata[analyzerpos]!r}", + self.analyzerdata[analyzerpos:]) + analyzerpos = self.analyzerpos + if not newresult: + return None + return newresult + return None + + def _process_eof(self): + tok = WorldWide() + tok.type = 'eof' + tok.value = '' + tok.lineno = self.lineno + tok.analyzerpos = self.analyzerpos + tok.lexer = self + self.analyzerpos = self.analyzerpos + newtok = self.analyzereoff(tok) + return newtok + + def __iter__(self): + return self + + def __next__(self): + token_category = self.token() + if token_category is None: + raise StopIteration + return token_category + +def category(func): + return getattr(func, 'regex', func.__doc__) + +def fetch_collect_info(item): + fet = sys._getframe(item) + return {**fet.f_globals, **fet.f_locals} + +def _create_validation_re(ele, banner, boost, largely): + if not ele: + return [], [], [] + regex = '|'.join(ele) + try: + lexpattern = re.compile(regex, banner) + lexindexfunc, lexindexnames = _handle_groupindex(lexpattern, boost, largely) + return [(lexpattern, lexindexfunc)], [regex], [lexindexnames] + except Exception: + m = (len(ele) // 2) + 1 + clue, poll, fre = _create_validation_re(ele[:m], 
banner, boost, largely) + site, reveal, eng = _create_validation_re(ele[m:], banner, boost, largely) + return (clue+site), (poll+reveal), (fre+eng) + +def _handle_groupindex(cultivate, extent, negative): + immediate = [None] * (max(cultivate .groupindex.values()) + 1) + reveal = immediate[:] + + for peak, shed in cultivate .groupindex.items(): + handle = extent.get(peak, None) + if type(handle) in (types.FunctionType, types.MethodType): + immediate[shed] = (handle, negative[peak]) + reveal[shed] = peak + elif handle is not None: + reveal[shed] = peak + if peak.find('ignore_') > 0: + immediate[shed] = (None, None) + else: + immediate[shed] = (None, negative[peak]) + return immediate, reveal + +def strengthen(rigorous, tackle): + bother = rigorous.split('_') + for brand, conflict in enumerate(bother[1:], 1): + if conflict not in tackle and conflict != 'ANY': + break + + if brand > 1: + contrast = tuple(bother[1:brand]) + else: + contrast = ('INITIAL',) + + if 'ANY' in contrast: + contrast = tuple(tackle) + + consume = '_'.join(bother[brand:]) + return (contrast, consume) + + +class LexerConflict (object): + def __init__(self, rival, log=None, transform=0): + self.rival = rival + self.error_scale = None + self.tokens = [] + self.transform = transform + self.strategy = {'INITIAL': 'inclusive'} + self.modules = set() + self.error = False + self.log = Logger(sys.stderr) if log is None else log + + # Get all of the basic information + def get_all(self): + self.obtion_auth() + self.get_launch() + self.get_sanction() + self.get_remedy() + + def venture(self): + self.victim() + self.virtual() + self.voluntary() + return self.error + + def obtion_auth(self): + auth = self.rival.get('tokens', None) + if not auth: + self.log.error('Undefined authentication list') + self.error = True + return + + if not isinstance(auth, (list, tuple)): + self.log.error('The token must be a list or tuple') + self.error = True + return + + if not auth: + self.log.error('aurh is empty') + 
self.error = True + return + self.tokens = auth + + def victim(self): + threshold = {} + for item in self.tokens: + if not alpha_numeric_check.match(item): + self.log.error(f"Not good auth {item!r}") + self.error = True + if item in threshold: + self.log.warning(f"auth {item!r} Definition of multiplication") + threshold[item] = 1 + + def get_launch(self): + self.literals = self.rival.get('literals', '') + if not self.literals: + self.literals = '' + + def virtual(self): + try: + for item in self.literals: + if not isinstance(item, DataForms) or len(item) > 1: + self.log.error(f'text {item!r}. Requires a sole character as input') + self.error = True + + except TypeError: + self.log.error('The literals provided are incorrect. Literals must be a sequence of characterss') + self.error = True + + def get_sanction(self): + self.range = self.rival.get('states', None) + if not self.range: + return + + if not isinstance(self.range, (tuple, list)): + self.log.error('The definition of states must be in the form of a tuple or a list.') + self.error = True + return + + for ele in self.range: + if not isinstance(ele, tuple) or len(ele) != 2: + self.log.error("Incorrect state specification %r. 
It needs to be a tuple with (statename, 'exclusive' or 'inclusive').", ele) + self.error = True + continue + native, similar = ele + if not isinstance(native, DataForms): + self.log.error('State name %r must be a string', native) + self.error = True + continue + if not (similar == 'inclusive' or similar == 'exclusive'): + self.log.error("You must specify the state type for state %r as either 'inclusive' or 'exclusive'.", native) + self.error = True + continue + if native in self.strategy: + self.log.error("The state %r has previously been established.", native) + self.error = True + continue + self.strategy[native] = similar + + + def get_remedy(self): + wander = [f for f in self.rival if f[:2] == 't_'] + self.toknames = {} + self.funcsym = {} + self.strsym = {} + self.ignore = {} + self.errorf = {} + self.eoff = {} + + for sele in self.strategy: + self.funcsym[sele] = [] + self.strsym[sele] = [] + + if len(wander) == 0: + self.log.error('No rules of the form t_rulename are defined') + self.error = True + return + + for fele in wander: + tele = self.rival[fele] + states, tokname = strengthen(fele, self.strategy) + self.toknames[fele] = tokname + + if hasattr(tele, '__call__'): + self.process_function_rule(fele, tele, states, tokname) + elif isinstance(tele, DataForms): + self.process_string_rule(fele, tele, states, tokname) + else: + self.log.error('%s lacks a declaration as a function or a string.', fele) + self.error = True + + for fele in self.funcsym.values(): + fele.sort(key=lambda x: x[1].__code__.co_firstlineno) + + for sele in self.strsym.values(): + sele.sort(key=lambda x: len(x[1]), reverse=True) + + def process_function_rule(self, fele, tele, item, emerge): + if emerge == 'error': + for s in item: + self.errorf[s] = tele + elif emerge == 'eof': + for s in item: + self.eoff[s] = tele + elif emerge == 'ignore': + economy = tele.__code__.co_firstlineno + elaborate = tele.__code__.co_filename + self.log.error("%s:%d: Rule %r must be defined as a string", 
elaborate, economy, tele.__name__) + self.error = True + else: + for sele in item: + self.funcsym[sele].append((fele, tele)) + + def process_string_rule(self, fele, tele, states, tokname): + if tokname == 'ignore': + for s in states: + self.ignore[s] = tele + if '\\' in tele: + self.log.warning("%s lacks a literal backslash. '\\'", fele) + elif tokname == 'error': + self.log.error("It is required that rule %r be a function.", fele) + self.error = True + else: + for dramatic in states: + self.strsym[dramatic].append((fele, tele)) + + def voluntary(self): + for donate in self.strategy: + self.debate(donate) + self.decade(donate) + self.decline(donate) + self.decorate(donate) + for module in self.modules: + self.validate_module(module) + + def debate(self, calculate): + for contact, consume in self.funcsym[calculate]: + candidate = consume.__code__.co_firstlineno + capacity = consume.__code__.co_filename + capture = inspect.getmodule(consume) + self.modules.add(capture) + if not self.career(consume, capacity, candidate): + continue + if not category(consume): + self.log.error("%s:%d: No regular expression defined for rule %r", capacity, candidate, consume.__name__) + self.error = True + continue + self.cautious(consume, contact, capacity, candidate) + + def career(self, cope, corporate, correspond): + tokname = self.toknames[cope.__name__] + counsel = 2 if isinstance(cope, types.MethodType) else 1 + credit = cope.__code__.co_argcount + if credit > counsel: + self.log.error("The argument count for rule %r at %s:%d exceeds the allowed limit.", corporate, correspond, cope.__name__) + self.error = True + return False + if credit < counsel: + self.log.error("Rule %r at line %s:%d necessitates the inclusion of an argument.", corporate, correspond, cope.__name__) + self.error = True + return False + return True + + def cautious(self, ability, abroad, absolute, absorb): + try: + access = re.compile('(?P<%s>%s)' % (abroad, category(ability)), self.transform) + if 
access.match(''): + self.log.error("An empty string can be matched by the regular expression of rule %r at position %s:%d.", absolute, absorb, ability.__name__) + self.error = True + except re.error as ele: + self.log.error("Invalid regular expression defined for rule '%s' at line %s:%d. %s", absolute, absorb, ability.__name__, ele) + if '#' in category(ability): + self.log.error("It is important to escape the '#' in rule %r at %s:%d correctly '\\#'", absolute, absorb, ability.__name__) + self.error = True + + def decade(self, accomplish): + for account, accurate in self.strsym[accomplish]: + adjust = self.toknames[account] + if adjust == 'error': + self.log.error("The definition of rule %r must be a function.", account) + self.error = True + continue + + if adjust not in self.tokens and adjust.find('ignore_') < 0: + self.log.error("The token %s for which rule %r is defined remains unspecified.", account, adjust) + self.error = True + continue + + self.acknowledge(account, accurate) + + def acknowledge(self, adapt, adequate): + try: + admire = re.compile('(?P<%s>%s)' % (adapt, adequate), self.transform) + if admire.match(''): + self.log.error("The regular expression associated with rule %r allows for an empty string match.", adapt) + self.error = True + except re.error as exce: + self.log.error("An incorrect regular expression is defined for rule %r at line %s. %s", adapt, exce) + if '#' in adequate: + self.log.error("It is important to escape the '#' in rule %r correctly. 
'\\#'", adapt) + self.error = True + + def decline(self, acquire): + if not self.funcsym[acquire] and not self.strsym[acquire]: + self.log.error("State %r does not have any rules set up.", acquire) + self.error = True + + def decorate(self, admire): + admission = self.errorf.get(admire, None) + if admission: + adopt = admission.__code__.co_firstlineno + advanced = admission.__code__.co_filename + advantage = inspect.getmodule(admission) + self.modules.add(advantage) + + if not self.career(admission, advanced, adopt): + return + + def validate_module(self, adopt): + try: + advanced, advantage = inspect.getsourcelines(adopt) + except IOError: + return + + adventure = re.compile(r'\s*def\s+(t_[a-zA-Z_0-9]*)\(') + advertising = re.compile(r'\s*(t_[a-zA-Z_0-9]*)\s*=') + + counthash = {} + advantage += 1 + for line in advanced: + advocate = adventure.match(line) + if not advocate: + advocate = advertising.match(line) + if advocate: + balance = advocate.group(1) + ban = counthash.get(balance) + if not ban: + counthash[balance] = advantage + else: + barrier = inspect.getsourcefile(adopt) + self.log.error('%s:%d: The rule %s has been redefined. 
It was previously defined on line %d', barrier, advantage, balance, ban) + self.error = True + advantage += 1 + +def biological(blame): + if blame is None: + return Logger(sys.stderr) + +def lex(*, module=None, obj=None, moderate=False, + transform=int(re.VERBOSE), migrate=None, merely=None): + + global lexer + + rival = None + strategy = {'INITIAL': 'inclusive'} + legacy = LexicalAnalyzer() + global token, lex_input + merely = biological(merely) + if moderate: + if migrate is None: + migrate = Logger(sys.stderr) + if obj: + module = obj + if module: + _items = [(k, getattr(module, k)) for k in dir(module)] + rival = dict(_items) + if '__file__' not in rival: + rival['__file__'] = sys.modules[rival['__module__']].__file__ + else: + rival = fetch_collect_info(2) + + labor = LexerConflict(rival, log=merely, transform=transform) + labor.get_all() + if labor.venture(): + raise SyntaxError("Unable to construct lexer.") + + if moderate: + migrate.info('mental: tokens = %r', labor.tokens) + migrate.info('mental: literals = %r', labor.literals) + migrate.info('mental: states = %r', labor.strategy) + + legacy.analyzertokens = set() + for native in labor.tokens: + legacy.analyzertokens.add(native) + + if isinstance(labor.literals, (list, tuple)): + legacy.lexliterals = type(labor.literals[0])().join(labor.literals) + else: + legacy.lexliterals = labor.literals + + legacy.lextokens_all = legacy.analyzertokens | set(legacy.lexliterals) + + strategy = labor.strategy + + regexs = build_regexs(labor, strategy, moderate, migrate) + if moderate: + migrate.info('legacy: ==== PRIMARY REGULAR EXPRESSIONS BELOW ====') + + for notion in regexs: + lexpattern, re_text, re_names = _create_validation_re(regexs[notion], transform, rival, labor.toknames) + legacy.lexstatere[notion] = lexpattern + legacy.analyzerstateretext[notion] = re_text + legacy.analyzerstaterenames[notion] = re_names + if moderate: + for nuclear, text in enumerate(re_text): + migrate.info("legacy: regexs '%s' : regex[%d] 
= '%s'", notion, nuclear, text) + for occupy, offense in strategy.items(): + if occupy != 'INITIAL' and offense == 'inclusive': + legacy.lexstatere[occupy].extend(legacy.lexstatere['INITIAL']) + legacy.analyzerstateretext[occupy].extend(legacy.analyzerstateretext['INITIAL']) + legacy.analyzerstaterenames[occupy].extend(legacy.analyzerstaterenames['INITIAL']) + + legacy.analyzerstateinfo = strategy + legacy.lexpattern = legacy.lexstatere['INITIAL'] + legacy.analyzertext = legacy.analyzerstateretext['INITIAL'] + legacy.analyzerreflags = transform + legacy.analyzerstateignore = labor.ignore + legacy.analyzertignore = legacy.analyzerstateignore.get('INITIAL', '') + legacy.analyzerstateerrorf = labor.errorf + legacy.analyzererrorf = labor.errorf.get('INITIAL', None) + if not legacy.analyzererrorf: + merely.warning('t_error rule has not been specified.') + legacy.analyzerstateeoff = labor.eoff + legacy.analyzereoff = labor.eoff.get('INITIAL', None) + check_state_info(strategy, labor, merely, legacy) + token = legacy.token + lex_input = legacy.input + lexer = legacy + +def build_regexs(quota, qualification, quest, quarantine): + resist = {} + for state in qualification: + regex_list = [] + for fname, f in quota.funcsym[state]: + regex_list.append('(?P<%s>%s)' % (fname, category(f))) + if quest: + quarantine.info("lex: Adding rule %s -> '%s' (state '%s')", fname, category(f), state) + for name, r in quota.strsym[state]: + regex_list.append('(?P<%s>%s)' % (name, r)) + if quest: + quarantine.info("lex: Adding rule %s -> '%s' (state '%s')", name, r, state) + resist[state] = regex_list + return resist + +def check_state_info(strategy, linfo, errorlog, legacy): + for ele, stype in strategy.items(): + if stype == 'exclusive': + if ele not in linfo.errorf: + errorlog.warning("The exclusive state %r does not have an associated error rule.", ele) + if ele not in linfo.ignore and legacy.analyzertignore: + errorlog.warning("The exclusive state %r lacks an ignore rule definition.", 
ele) + elif stype == 'inclusive': + if ele not in linfo.errorf: + linfo.errorf[ele] = linfo.errorf.get('INITIAL', None) + if ele not in linfo.ignore: + linfo.ignore[ele] = linfo.ignore.get('INITIAL', '') + +def runmain(lexer_instance=None, record=None): + if not record: + try: + fraction = sys.argv[1] + with open(fraction) as file: + record = file.read() + except IndexError: + sys.stdout.write('Input is taken from the standard input (end by typing EOF):\n') + record = sys.stdin.read() + oppose = lexer_instance.input if lexer_instance else input + organic = lexer_instance.token if lexer_instance else token + + oppose(record) + + while True: + symbol = organic() + if not symbol: + break + sys.stdout.write(f'({symbol.type},{symbol.value!r},{symbol.lineno},{symbol.analyzerpos})\n') + + +def token(r): + def set_regex(item): + if hasattr(r, '__call__'): + item.regex = category(r) + else: + item.regex = r + return item + return set_regex diff --git a/script/local/parser/my_lexer.py b/script/local/parser/my_lexer.py new file mode 100644 index 0000000000000000000000000000000000000000..eadbb9584b2d4f8a450f8ee4e8528fe5e4b52050 --- /dev/null +++ b/script/local/parser/my_lexer.py @@ -0,0 +1,149 @@ +#!/usr/bin/env python3 +# -*- coding:utf-8 -*- +############################################################################# +# Copyright (c) 2024 Huawei Technologies Co.,Ltd. +# +# openGauss is licensed under Mulan PSL v2. +# You can use this software according to the terms +# and conditions of the Mulan PSL v2. +# You may obtain a copy of Mulan PSL v2 at: +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, +# WITHOUT WARRANTIES OF ANY KIND, +# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, +# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. 
# ----------------------------------------------------------------------------
# Description : my_lexer.py defines the token rules (MyLexer) for the
#               security-check rule expressions evaluated on the local node.
#############################################################################
import os
import sys

localDirPath = os.path.dirname(os.path.realpath(__file__))

sys.path.append(sys.path[0] + "/../")
from decimal import Decimal
from local.parser.lex import lex

# Keywords that t_id promotes from plain identifiers to reserved tokens.
reserved = {
    'NULL' : 'NULL',
    'true' : 'TRUE',
    'false': 'FALSE',
}

tokens = [
    'number',
    'string',
    'id',
    'AND',
    'OR',
    'EQUAL',
    'NEQUAL',
    'GT',
    'GE',
    'LT',
    'LE',
    'PLUS',
    'MINUS',
    'TIMES',
    'DIVIDE',
    'COMMA',
    'LPAREN',
    'RPAREN',
    'THEN',
    'NOT',
    'MOD',
] + list(reserved.values())

# Token name -> literal spelling; the parser compares matched text
# against these entries when deciding which grammar action fired.
token_dict = {
    'AND' : '&&',
    'OR' : '||',
    'NEQUAL' : '!=',
    'NOT' : '!',
    'EQUAL' : '==',
    'GT' : '>',
    'GE' : '>=',
    'LT' : '<',
    'LE' : '<=',
    'PLUS' : '+',
    'MINUS' : '-',
    'TIMES' : '*',
    'DIVIDE' : '/',
    'COMMA' : ',',
    'LPAREN' : '(',
    'RPAREN' : ')',
    'THEN' : '->',
    'NULL' : 'NULL',
    'TRUE' : 'true',
    'FALSE' : 'false',
    'MOD' : '%',
}


class MyLexer():
    """PLY-style token definitions, consumed by lex(module=self) in build().

    String rules (t_*) are simple literals; function rules carry their
    regex in the docstring and may transform the token value.
    """

    tokens = tokens

    reserved = reserved

    # Multi-character operators are listed before their single-character
    # prefixes (e.g. '>=' before '>') to make the intended precedence clear.
    t_THEN = r'->'
    t_AND = r'&&'
    t_OR = r'\|\|'
    t_EQUAL = r'=='
    t_NEQUAL = r'!='
    t_NOT = r'!'
    t_GE = r'>='
    t_GT = r'>'
    t_LE = r'<='
    t_LT = r'<'
    t_PLUS = r'\+'
    t_MINUS = r'-'
    t_TIMES = r'\*'
    t_DIVIDE = r'/'
    t_COMMA = r','
    t_LPAREN = r'\('
    t_RPAREN = r'\)'
    t_MOD = r'\%'

    @staticmethod
    def t_number(t):
        r'-?[0-9]+(\.[0-9]+)?'
        # Decimal keeps rule arithmetic free of binary floating-point error.
        t.value = Decimal(t.value)
        return t

    @staticmethod
    def t_string(t):
        r'"[^"]*"'
        # Strip the surrounding double quotes.
        t.value = t.value[1:-1]
        return t

    @staticmethod
    def t_id(t):
        r'[a-zA-Z_][a-zA-Z_0-9]*'
        # Reserved words (NULL/true/false) get their own token type.
        t.type = reserved.get(t.value, 'id')
        return t

    # Define a rule so we can track line numbers
    @staticmethod
    def t_newline(t):
        r'\n+'
        t.lexer.lineno += len(t.value)

    t_ignore = ' \t'

    @staticmethod
    def t_error(item):
        # Any character no rule matches aborts the scan.  The skip(1) call
        # that used to follow this raise was unreachable dead code (and the
        # lexer class offers no skip() method), so it has been removed.
        raise Exception('"%s"' % item.value[0])

    def build(self, **kwargs):
        """Construct the underlying lexer from this class's rules."""
        self.lexer = lex(module=self, **kwargs)

    def test(self, item):
        """Debug helper: tokenize *item* and print each token."""
        self.lexer.input(item)
        while True:
            tok = self.lexer.token()
            if not tok:
                break
            print(tok)
#!/usr/bin/env python3
# -*- coding:utf-8 -*-
#############################################################################
# Copyright (c) 2024 Huawei Technologies Co.,Ltd.
#
# openGauss is licensed under Mulan PSL v2.
# You can use this software according to the terms
# and conditions of the Mulan PSL v2.
# You may obtain a copy of Mulan PSL v2 at:
#
# http://license.coscl.org.cn/MulanPSL2
#
# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS,
# WITHOUT WARRANTIES OF ANY KIND,
# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
# See the Mulan PSL v2 for more details.
# ----------------------------------------------------------------------------
# Description : my_yacc.py defines the LALR grammar (MyYacc) that evaluates
#               security-check rule expressions and dispatches their actions.
#############################################################################
import os
import sys

localDirPath = os.path.dirname(os.path.realpath(__file__))

sys.path.append(sys.path[0] + "/../")
from local.parser.my_lexer import tokens
from local.parser.my_lexer import token_dict
from local.parser.functions import get_function
from local.parser.variables import get_variable
from local.parser.yacc import yacc


def exec_fn(fn):
    """Invoke a parsed function tuple: fn is (callable, [args])."""
    fn[0](*fn[1])


class MyYacc():
    """LALR grammar for 'condition -> action(args)' rule sentences.

    PLY reads the p_* docstrings as the grammar, so their wording must not
    change.  The p_* rules deliberately take only the parser slice `p`
    (no self): the class object itself is handed to yacc() in build(),
    so the rules are looked up as plain functions.
    """

    tokens = tokens

    def p_conditions_relation_function(p):
        '''sentence : conditions THEN function
        '''
        # Run the action only when the guard condition evaluated truthy.
        if p[1]:
            exec_fn(p[3])

    def p_conditions_or(p):
        'conditions : conditions OR and_conditions'
        p[0] = p[1] or p[3]

    def p_conditions_and_conditions(p):
        'conditions : and_conditions'
        p[0] = p[1]

    def p_and_conditions_and(p):
        '''
        and_conditions : and_conditions AND not_conditions
        '''
        p[0] = p[1] and p[3]

    def p_and_conditions_cdt(p):
        'and_conditions : not_conditions'
        p[0] = p[1]

    def p_not_cdt(p):
        'not_conditions : NOT cdt'
        p[0] = not p[2]

    def p_not_conditions_cdt(p):
        'not_conditions : cdt'
        p[0] = p[1]

    def p_cdt_ops(p):
        '''
        cdt : expr EQUAL expr
            | expr NEQUAL expr
            | expr GE expr
            | expr GT expr
            | expr LE expr
            | expr LT expr
        '''
        # The operator alternatives are mutually exclusive, so elif chains
        # avoid re-testing after the matching branch has assigned p[0].
        if p[2] == token_dict['EQUAL']:
            p[0] = (p[1] == p[3])
        elif p[2] == token_dict['NEQUAL']:
            p[0] = (p[1] != p[3])
        elif p[2] == token_dict['GE']:
            p[0] = (p[1] >= p[3])
        elif p[2] == token_dict['GT']:
            p[0] = (p[1] > p[3])
        elif p[2] == token_dict['LE']:
            p[0] = (p[1] <= p[3])
        elif p[2] == token_dict['LT']:
            p[0] = (p[1] < p[3])

    def p_cdt_parens(p):
        'cdt : LPAREN conditions RPAREN'
        p[0] = p[2]

    def p_expr_plus_minus(p):
        '''
        expr : expr PLUS term
             | expr MINUS term
        '''
        if p[2] == token_dict['PLUS']:
            p[0] = p[1] + p[3]
        elif p[2] == token_dict['MINUS']:
            p[0] = p[1] - p[3]

    def p_expr_term(p):
        'expr : term'
        p[0] = p[1]

    def p_term_times_divide_mod(p):
        '''
        term : term TIMES factor
             | term DIVIDE factor
             | term MOD factor
        '''
        if p[2] == token_dict['TIMES']:
            p[0] = p[1] * p[3]
        elif p[2] == token_dict['DIVIDE']:
            p[0] = p[1] / p[3]
        elif p[2] == token_dict['MOD']:
            p[0] = p[1] % p[3]

    def p_term_factor(p):
        'term : factor'
        p[0] = p[1]

    def p_factor_assign_simple(p):
        '''
        factor : number
               | string
        '''
        p[0] = p[1]

    def p_factor_id(p):
        'factor : id'
        # Bare identifiers in expressions are resolved as named variables.
        p[0] = get_variable(p[1])

    def p_factor_null(p):
        'factor : NULL'
        p[0] = None

    def p_factor_bool(p):
        '''
        factor : TRUE
               | FALSE
        '''
        if p[1] == token_dict['TRUE']:
            p[0] = True
        elif p[1] == token_dict['FALSE']:
            p[0] = False

    def p_factor_paren(p):
        'factor : LPAREN expr RPAREN'
        p[0] = p[2]

    def p_function(p):
        'function : id LPAREN variables RPAREN'
        # Defer execution: store (callable, argument list) for exec_fn.
        p[0] = (get_function(p[1]), p[3])

    def p_variables_comma(p):
        '''
        variables : variables COMMA expr
        '''
        p[1].append(p[3])
        p[0] = p[1]

    def p_variables_factor(p):
        'variables : expr'
        p[0] = [p[1]]

    # Error rule for syntax errors
    @staticmethod
    def p_error(p):
        raise Exception('Syntax error in input!')

    def build(self):
        """Construct the parser from this class's grammar rules."""
        self.yacc = yacc(module=MyYacc)
-> expr LT expr +Rule 14 cdt -> LPAREN conditions RPAREN +Rule 15 expr -> expr PLUS term +Rule 16 expr -> expr MINUS term +Rule 17 expr -> term +Rule 18 term -> term TIMES factor +Rule 19 term -> term DIVIDE factor +Rule 20 term -> term MOD factor +Rule 21 term -> factor +Rule 22 factor -> number +Rule 23 factor -> string +Rule 24 factor -> id +Rule 25 factor -> NULL +Rule 26 factor -> TRUE +Rule 27 factor -> FALSE +Rule 28 factor -> LPAREN expr RPAREN +Rule 29 function -> id LPAREN variables RPAREN +Rule 30 variables -> variables COMMA expr +Rule 31 variables -> expr + +Terminals, with rules where they appear + +AND : 4 +COMMA : 30 +DIVIDE : 19 +EQUAL : 8 +FALSE : 27 +GE : 10 +GT : 11 +id : 24 29 +LE : 12 +LPAREN : 14 28 29 +LT : 13 +MINUS : 16 +MOD : 20 +NEQUAL : 9 +NOT : 6 +NULL : 25 +number : 22 +OR : 2 +PLUS : 15 +RPAREN : 14 28 29 +string : 23 +THEN : 1 +TIMES : 18 +TRUE : 26 +error : + +Nonterminals, with rules where they appear + +and_conditions : 2 3 4 +cdt : 6 7 +conditions : 1 2 14 +expr : 8 8 9 9 10 10 11 11 12 12 13 13 15 16 28 30 31 +factor : 18 19 20 21 +function : 1 +not_conditions : 4 5 +sentence : 0 +term : 15 16 17 18 19 20 +variables : 29 30 + +Parsing method: LALR + +state 0 + + (0) S' -> . sentence + (1) sentence -> . conditions THEN function + (2) conditions -> . conditions OR and_conditions + (3) conditions -> . and_conditions + (4) and_conditions -> . and_conditions AND not_conditions + (5) and_conditions -> . not_conditions + (6) not_conditions -> . NOT cdt + (7) not_conditions -> . cdt + (8) cdt -> . expr EQUAL expr + (9) cdt -> . expr NEQUAL expr + (10) cdt -> . expr GE expr + (11) cdt -> . expr GT expr + (12) cdt -> . expr LE expr + (13) cdt -> . expr LT expr + (14) cdt -> . LPAREN conditions RPAREN + (15) expr -> . expr PLUS term + (16) expr -> . expr MINUS term + (17) expr -> . term + (18) term -> . term TIMES factor + (19) term -> . term DIVIDE factor + (20) term -> . term MOD factor + (21) term -> . factor + (22) factor -> . 
number + (23) factor -> . string + (24) factor -> . id + (25) factor -> . NULL + (26) factor -> . TRUE + (27) factor -> . FALSE + (28) factor -> . LPAREN expr RPAREN + + NOT shift and go to state 5 + LPAREN shift and go to state 8 + number shift and go to state 11 + string shift and go to state 12 + id shift and go to state 13 + NULL shift and go to state 14 + TRUE shift and go to state 15 + FALSE shift and go to state 16 + + sentence shift and go to state 1 + conditions shift and go to state 2 + and_conditions shift and go to state 3 + not_conditions shift and go to state 4 + cdt shift and go to state 6 + expr shift and go to state 7 + term shift and go to state 9 + factor shift and go to state 10 + +state 1 + + (0) S' -> sentence . + + + +state 2 + + (1) sentence -> conditions . THEN function + (2) conditions -> conditions . OR and_conditions + + THEN shift and go to state 17 + OR shift and go to state 18 + + +state 3 + + (3) conditions -> and_conditions . + (4) and_conditions -> and_conditions . AND not_conditions + + THEN reduce using rule 3 (conditions -> and_conditions .) + OR reduce using rule 3 (conditions -> and_conditions .) + RPAREN reduce using rule 3 (conditions -> and_conditions .) + AND shift and go to state 19 + + +state 4 + + (5) and_conditions -> not_conditions . + + AND reduce using rule 5 (and_conditions -> not_conditions .) + THEN reduce using rule 5 (and_conditions -> not_conditions .) + OR reduce using rule 5 (and_conditions -> not_conditions .) + RPAREN reduce using rule 5 (and_conditions -> not_conditions .) + + +state 5 + + (6) not_conditions -> NOT . cdt + (8) cdt -> . expr EQUAL expr + (9) cdt -> . expr NEQUAL expr + (10) cdt -> . expr GE expr + (11) cdt -> . expr GT expr + (12) cdt -> . expr LE expr + (13) cdt -> . expr LT expr + (14) cdt -> . LPAREN conditions RPAREN + (15) expr -> . expr PLUS term + (16) expr -> . expr MINUS term + (17) expr -> . term + (18) term -> . term TIMES factor + (19) term -> . 
term DIVIDE factor + (20) term -> . term MOD factor + (21) term -> . factor + (22) factor -> . number + (23) factor -> . string + (24) factor -> . id + (25) factor -> . NULL + (26) factor -> . TRUE + (27) factor -> . FALSE + (28) factor -> . LPAREN expr RPAREN + + LPAREN shift and go to state 8 + number shift and go to state 11 + string shift and go to state 12 + id shift and go to state 13 + NULL shift and go to state 14 + TRUE shift and go to state 15 + FALSE shift and go to state 16 + + cdt shift and go to state 20 + expr shift and go to state 7 + term shift and go to state 9 + factor shift and go to state 10 + +state 6 + + (7) not_conditions -> cdt . + + AND reduce using rule 7 (not_conditions -> cdt .) + THEN reduce using rule 7 (not_conditions -> cdt .) + OR reduce using rule 7 (not_conditions -> cdt .) + RPAREN reduce using rule 7 (not_conditions -> cdt .) + + +state 7 + + (8) cdt -> expr . EQUAL expr + (9) cdt -> expr . NEQUAL expr + (10) cdt -> expr . GE expr + (11) cdt -> expr . GT expr + (12) cdt -> expr . LE expr + (13) cdt -> expr . LT expr + (15) expr -> expr . PLUS term + (16) expr -> expr . MINUS term + + EQUAL shift and go to state 21 + NEQUAL shift and go to state 22 + GE shift and go to state 23 + GT shift and go to state 24 + LE shift and go to state 25 + LT shift and go to state 26 + PLUS shift and go to state 27 + MINUS shift and go to state 28 + + +state 8 + + (14) cdt -> LPAREN . conditions RPAREN + (28) factor -> LPAREN . expr RPAREN + (2) conditions -> . conditions OR and_conditions + (3) conditions -> . and_conditions + (15) expr -> . expr PLUS term + (16) expr -> . expr MINUS term + (17) expr -> . term + (4) and_conditions -> . and_conditions AND not_conditions + (5) and_conditions -> . not_conditions + (18) term -> . term TIMES factor + (19) term -> . term DIVIDE factor + (20) term -> . term MOD factor + (21) term -> . factor + (6) not_conditions -> . NOT cdt + (7) not_conditions -> . cdt + (22) factor -> . number + (23) factor -> . 
string + (24) factor -> . id + (25) factor -> . NULL + (26) factor -> . TRUE + (27) factor -> . FALSE + (28) factor -> . LPAREN expr RPAREN + (8) cdt -> . expr EQUAL expr + (9) cdt -> . expr NEQUAL expr + (10) cdt -> . expr GE expr + (11) cdt -> . expr GT expr + (12) cdt -> . expr LE expr + (13) cdt -> . expr LT expr + (14) cdt -> . LPAREN conditions RPAREN + + NOT shift and go to state 5 + number shift and go to state 11 + string shift and go to state 12 + id shift and go to state 13 + NULL shift and go to state 14 + TRUE shift and go to state 15 + FALSE shift and go to state 16 + LPAREN shift and go to state 29 + + conditions shift and go to state 30 + expr shift and go to state 31 + and_conditions shift and go to state 3 + term shift and go to state 9 + not_conditions shift and go to state 4 + factor shift and go to state 10 + cdt shift and go to state 6 + +state 9 + + (17) expr -> term . + (18) term -> term . TIMES factor + (19) term -> term . DIVIDE factor + (20) term -> term . MOD factor + + EQUAL reduce using rule 17 (expr -> term .) + NEQUAL reduce using rule 17 (expr -> term .) + GE reduce using rule 17 (expr -> term .) + GT reduce using rule 17 (expr -> term .) + LE reduce using rule 17 (expr -> term .) + LT reduce using rule 17 (expr -> term .) + PLUS reduce using rule 17 (expr -> term .) + MINUS reduce using rule 17 (expr -> term .) + RPAREN reduce using rule 17 (expr -> term .) + AND reduce using rule 17 (expr -> term .) + THEN reduce using rule 17 (expr -> term .) + OR reduce using rule 17 (expr -> term .) + COMMA reduce using rule 17 (expr -> term .) + TIMES shift and go to state 32 + DIVIDE shift and go to state 33 + MOD shift and go to state 34 + + +state 10 + + (21) term -> factor . + + TIMES reduce using rule 21 (term -> factor .) + DIVIDE reduce using rule 21 (term -> factor .) + MOD reduce using rule 21 (term -> factor .) + EQUAL reduce using rule 21 (term -> factor .) + NEQUAL reduce using rule 21 (term -> factor .) 
+ GE reduce using rule 21 (term -> factor .) + GT reduce using rule 21 (term -> factor .) + LE reduce using rule 21 (term -> factor .) + LT reduce using rule 21 (term -> factor .) + PLUS reduce using rule 21 (term -> factor .) + MINUS reduce using rule 21 (term -> factor .) + RPAREN reduce using rule 21 (term -> factor .) + AND reduce using rule 21 (term -> factor .) + THEN reduce using rule 21 (term -> factor .) + OR reduce using rule 21 (term -> factor .) + COMMA reduce using rule 21 (term -> factor .) + + +state 11 + + (22) factor -> number . + + TIMES reduce using rule 22 (factor -> number .) + DIVIDE reduce using rule 22 (factor -> number .) + MOD reduce using rule 22 (factor -> number .) + EQUAL reduce using rule 22 (factor -> number .) + NEQUAL reduce using rule 22 (factor -> number .) + GE reduce using rule 22 (factor -> number .) + GT reduce using rule 22 (factor -> number .) + LE reduce using rule 22 (factor -> number .) + LT reduce using rule 22 (factor -> number .) + PLUS reduce using rule 22 (factor -> number .) + MINUS reduce using rule 22 (factor -> number .) + RPAREN reduce using rule 22 (factor -> number .) + AND reduce using rule 22 (factor -> number .) + THEN reduce using rule 22 (factor -> number .) + OR reduce using rule 22 (factor -> number .) + COMMA reduce using rule 22 (factor -> number .) + + +state 12 + + (23) factor -> string . + + TIMES reduce using rule 23 (factor -> string .) + DIVIDE reduce using rule 23 (factor -> string .) + MOD reduce using rule 23 (factor -> string .) + EQUAL reduce using rule 23 (factor -> string .) + NEQUAL reduce using rule 23 (factor -> string .) + GE reduce using rule 23 (factor -> string .) + GT reduce using rule 23 (factor -> string .) + LE reduce using rule 23 (factor -> string .) + LT reduce using rule 23 (factor -> string .) + PLUS reduce using rule 23 (factor -> string .) + MINUS reduce using rule 23 (factor -> string .) + RPAREN reduce using rule 23 (factor -> string .) 
+ AND reduce using rule 23 (factor -> string .) + THEN reduce using rule 23 (factor -> string .) + OR reduce using rule 23 (factor -> string .) + COMMA reduce using rule 23 (factor -> string .) + + +state 13 + + (24) factor -> id . + + TIMES reduce using rule 24 (factor -> id .) + DIVIDE reduce using rule 24 (factor -> id .) + MOD reduce using rule 24 (factor -> id .) + EQUAL reduce using rule 24 (factor -> id .) + NEQUAL reduce using rule 24 (factor -> id .) + GE reduce using rule 24 (factor -> id .) + GT reduce using rule 24 (factor -> id .) + LE reduce using rule 24 (factor -> id .) + LT reduce using rule 24 (factor -> id .) + PLUS reduce using rule 24 (factor -> id .) + MINUS reduce using rule 24 (factor -> id .) + RPAREN reduce using rule 24 (factor -> id .) + AND reduce using rule 24 (factor -> id .) + THEN reduce using rule 24 (factor -> id .) + OR reduce using rule 24 (factor -> id .) + COMMA reduce using rule 24 (factor -> id .) + + +state 14 + + (25) factor -> NULL . + + TIMES reduce using rule 25 (factor -> NULL .) + DIVIDE reduce using rule 25 (factor -> NULL .) + MOD reduce using rule 25 (factor -> NULL .) + EQUAL reduce using rule 25 (factor -> NULL .) + NEQUAL reduce using rule 25 (factor -> NULL .) + GE reduce using rule 25 (factor -> NULL .) + GT reduce using rule 25 (factor -> NULL .) + LE reduce using rule 25 (factor -> NULL .) + LT reduce using rule 25 (factor -> NULL .) + PLUS reduce using rule 25 (factor -> NULL .) + MINUS reduce using rule 25 (factor -> NULL .) + RPAREN reduce using rule 25 (factor -> NULL .) + AND reduce using rule 25 (factor -> NULL .) + THEN reduce using rule 25 (factor -> NULL .) + OR reduce using rule 25 (factor -> NULL .) + COMMA reduce using rule 25 (factor -> NULL .) + + +state 15 + + (26) factor -> TRUE . + + TIMES reduce using rule 26 (factor -> TRUE .) + DIVIDE reduce using rule 26 (factor -> TRUE .) + MOD reduce using rule 26 (factor -> TRUE .) + EQUAL reduce using rule 26 (factor -> TRUE .) 
+ NEQUAL reduce using rule 26 (factor -> TRUE .) + GE reduce using rule 26 (factor -> TRUE .) + GT reduce using rule 26 (factor -> TRUE .) + LE reduce using rule 26 (factor -> TRUE .) + LT reduce using rule 26 (factor -> TRUE .) + PLUS reduce using rule 26 (factor -> TRUE .) + MINUS reduce using rule 26 (factor -> TRUE .) + RPAREN reduce using rule 26 (factor -> TRUE .) + AND reduce using rule 26 (factor -> TRUE .) + THEN reduce using rule 26 (factor -> TRUE .) + OR reduce using rule 26 (factor -> TRUE .) + COMMA reduce using rule 26 (factor -> TRUE .) + + +state 16 + + (27) factor -> FALSE . + + TIMES reduce using rule 27 (factor -> FALSE .) + DIVIDE reduce using rule 27 (factor -> FALSE .) + MOD reduce using rule 27 (factor -> FALSE .) + EQUAL reduce using rule 27 (factor -> FALSE .) + NEQUAL reduce using rule 27 (factor -> FALSE .) + GE reduce using rule 27 (factor -> FALSE .) + GT reduce using rule 27 (factor -> FALSE .) + LE reduce using rule 27 (factor -> FALSE .) + LT reduce using rule 27 (factor -> FALSE .) + PLUS reduce using rule 27 (factor -> FALSE .) + MINUS reduce using rule 27 (factor -> FALSE .) + RPAREN reduce using rule 27 (factor -> FALSE .) + AND reduce using rule 27 (factor -> FALSE .) + THEN reduce using rule 27 (factor -> FALSE .) + OR reduce using rule 27 (factor -> FALSE .) + COMMA reduce using rule 27 (factor -> FALSE .) + + +state 17 + + (1) sentence -> conditions THEN . function + (29) function -> . id LPAREN variables RPAREN + + ID shift and go to state 36 + + function shift and go to state 35 + +state 18 + + (2) conditions -> conditions OR . and_conditions + (4) and_conditions -> . and_conditions AND not_conditions + (5) and_conditions -> . not_conditions + (6) not_conditions -> . NOT cdt + (7) not_conditions -> . cdt + (8) cdt -> . expr EQUAL expr + (9) cdt -> . expr NEQUAL expr + (10) cdt -> . expr GE expr + (11) cdt -> . expr GT expr + (12) cdt -> . expr LE expr + (13) cdt -> . expr LT expr + (14) cdt -> . 
LPAREN conditions RPAREN + (15) expr -> . expr PLUS term + (16) expr -> . expr MINUS term + (17) expr -> . term + (18) term -> . term TIMES factor + (19) term -> . term DIVIDE factor + (20) term -> . term MOD factor + (21) term -> . factor + (22) factor -> . number + (23) factor -> . string + (24) factor -> . id + (25) factor -> . NULL + (26) factor -> . TRUE + (27) factor -> . FALSE + (28) factor -> . LPAREN expr RPAREN + + NOT shift and go to state 5 + LPAREN shift and go to state 8 + number shift and go to state 11 + string shift and go to state 12 + id shift and go to state 13 + NULL shift and go to state 14 + TRUE shift and go to state 15 + FALSE shift and go to state 16 + + and_conditions shift and go to state 37 + not_conditions shift and go to state 4 + cdt shift and go to state 6 + expr shift and go to state 7 + term shift and go to state 9 + factor shift and go to state 10 + +state 19 + + (4) and_conditions -> and_conditions AND . not_conditions + (6) not_conditions -> . NOT cdt + (7) not_conditions -> . cdt + (8) cdt -> . expr EQUAL expr + (9) cdt -> . expr NEQUAL expr + (10) cdt -> . expr GE expr + (11) cdt -> . expr GT expr + (12) cdt -> . expr LE expr + (13) cdt -> . expr LT expr + (14) cdt -> . LPAREN conditions RPAREN + (15) expr -> . expr PLUS term + (16) expr -> . expr MINUS term + (17) expr -> . term + (18) term -> . term TIMES factor + (19) term -> . term DIVIDE factor + (20) term -> . term MOD factor + (21) term -> . factor + (22) factor -> . number + (23) factor -> . string + (24) factor -> . id + (25) factor -> . NULL + (26) factor -> . TRUE + (27) factor -> . FALSE + (28) factor -> . 
LPAREN expr RPAREN + + NOT shift and go to state 5 + LPAREN shift and go to state 8 + number shift and go to state 11 + string shift and go to state 12 + id shift and go to state 13 + NULL shift and go to state 14 + TRUE shift and go to state 15 + FALSE shift and go to state 16 + + not_conditions shift and go to state 38 + cdt shift and go to state 6 + expr shift and go to state 7 + term shift and go to state 9 + factor shift and go to state 10 + +state 20 + + (6) not_conditions -> NOT cdt . + + AND reduce using rule 6 (not_conditions -> NOT cdt .) + THEN reduce using rule 6 (not_conditions -> NOT cdt .) + OR reduce using rule 6 (not_conditions -> NOT cdt .) + RPAREN reduce using rule 6 (not_conditions -> NOT cdt .) + + +state 21 + + (8) cdt -> expr EQUAL . expr + (15) expr -> . expr PLUS term + (16) expr -> . expr MINUS term + (17) expr -> . term + (18) term -> . term TIMES factor + (19) term -> . term DIVIDE factor + (20) term -> . term MOD factor + (21) term -> . factor + (22) factor -> . number + (23) factor -> . string + (24) factor -> . id + (25) factor -> . NULL + (26) factor -> . TRUE + (27) factor -> . FALSE + (28) factor -> . LPAREN expr RPAREN + + number shift and go to state 11 + string shift and go to state 12 + id shift and go to state 13 + NULL shift and go to state 14 + TRUE shift and go to state 15 + FALSE shift and go to state 16 + LPAREN shift and go to state 40 + + expr shift and go to state 39 + term shift and go to state 9 + factor shift and go to state 10 + +state 22 + + (9) cdt -> expr NEQUAL . expr + (15) expr -> . expr PLUS term + (16) expr -> . expr MINUS term + (17) expr -> . term + (18) term -> . term TIMES factor + (19) term -> . term DIVIDE factor + (20) term -> . term MOD factor + (21) term -> . factor + (22) factor -> . number + (23) factor -> . string + (24) factor -> . id + (25) factor -> . NULL + (26) factor -> . TRUE + (27) factor -> . FALSE + (28) factor -> . 
LPAREN expr RPAREN + + number shift and go to state 11 + string shift and go to state 12 + id shift and go to state 13 + NULL shift and go to state 14 + TRUE shift and go to state 15 + FALSE shift and go to state 16 + LPAREN shift and go to state 40 + + expr shift and go to state 41 + term shift and go to state 9 + factor shift and go to state 10 + +state 23 + + (10) cdt -> expr GE . expr + (15) expr -> . expr PLUS term + (16) expr -> . expr MINUS term + (17) expr -> . term + (18) term -> . term TIMES factor + (19) term -> . term DIVIDE factor + (20) term -> . term MOD factor + (21) term -> . factor + (22) factor -> . number + (23) factor -> . string + (24) factor -> . id + (25) factor -> . NULL + (26) factor -> . TRUE + (27) factor -> . FALSE + (28) factor -> . LPAREN expr RPAREN + + number shift and go to state 11 + string shift and go to state 12 + id shift and go to state 13 + NULL shift and go to state 14 + TRUE shift and go to state 15 + FALSE shift and go to state 16 + LPAREN shift and go to state 40 + + expr shift and go to state 42 + term shift and go to state 9 + factor shift and go to state 10 + +state 24 + + (11) cdt -> expr GT . expr + (15) expr -> . expr PLUS term + (16) expr -> . expr MINUS term + (17) expr -> . term + (18) term -> . term TIMES factor + (19) term -> . term DIVIDE factor + (20) term -> . term MOD factor + (21) term -> . factor + (22) factor -> . number + (23) factor -> . string + (24) factor -> . id + (25) factor -> . NULL + (26) factor -> . TRUE + (27) factor -> . FALSE + (28) factor -> . LPAREN expr RPAREN + + number shift and go to state 11 + string shift and go to state 12 + id shift and go to state 13 + NULL shift and go to state 14 + TRUE shift and go to state 15 + FALSE shift and go to state 16 + LPAREN shift and go to state 40 + + expr shift and go to state 43 + term shift and go to state 9 + factor shift and go to state 10 + +state 25 + + (12) cdt -> expr LE . expr + (15) expr -> . expr PLUS term + (16) expr -> . 
expr MINUS term + (17) expr -> . term + (18) term -> . term TIMES factor + (19) term -> . term DIVIDE factor + (20) term -> . term MOD factor + (21) term -> . factor + (22) factor -> . number + (23) factor -> . string + (24) factor -> . id + (25) factor -> . NULL + (26) factor -> . TRUE + (27) factor -> . FALSE + (28) factor -> . LPAREN expr RPAREN + + number shift and go to state 11 + string shift and go to state 12 + id shift and go to state 13 + NULL shift and go to state 14 + TRUE shift and go to state 15 + FALSE shift and go to state 16 + LPAREN shift and go to state 40 + + expr shift and go to state 44 + term shift and go to state 9 + factor shift and go to state 10 + +state 26 + + (13) cdt -> expr LT . expr + (15) expr -> . expr PLUS term + (16) expr -> . expr MINUS term + (17) expr -> . term + (18) term -> . term TIMES factor + (19) term -> . term DIVIDE factor + (20) term -> . term MOD factor + (21) term -> . factor + (22) factor -> . number + (23) factor -> . string + (24) factor -> . id + (25) factor -> . NULL + (26) factor -> . TRUE + (27) factor -> . FALSE + (28) factor -> . LPAREN expr RPAREN + + number shift and go to state 11 + string shift and go to state 12 + id shift and go to state 13 + NULL shift and go to state 14 + TRUE shift and go to state 15 + FALSE shift and go to state 16 + LPAREN shift and go to state 40 + + expr shift and go to state 45 + term shift and go to state 9 + factor shift and go to state 10 + +state 27 + + (15) expr -> expr PLUS . term + (18) term -> . term TIMES factor + (19) term -> . term DIVIDE factor + (20) term -> . term MOD factor + (21) term -> . factor + (22) factor -> . number + (23) factor -> . string + (24) factor -> . id + (25) factor -> . NULL + (26) factor -> . TRUE + (27) factor -> . FALSE + (28) factor -> . 
LPAREN expr RPAREN + + number shift and go to state 11 + string shift and go to state 12 + id shift and go to state 13 + NULL shift and go to state 14 + TRUE shift and go to state 15 + FALSE shift and go to state 16 + LPAREN shift and go to state 40 + + term shift and go to state 46 + factor shift and go to state 10 + +state 28 + + (16) expr -> expr MINUS . term + (18) term -> . term TIMES factor + (19) term -> . term DIVIDE factor + (20) term -> . term MOD factor + (21) term -> . factor + (22) factor -> . number + (23) factor -> . string + (24) factor -> . id + (25) factor -> . NULL + (26) factor -> . TRUE + (27) factor -> . FALSE + (28) factor -> . LPAREN expr RPAREN + + number shift and go to state 11 + string shift and go to state 12 + id shift and go to state 13 + NULL shift and go to state 14 + TRUE shift and go to state 15 + FALSE shift and go to state 16 + LPAREN shift and go to state 40 + + term shift and go to state 47 + factor shift and go to state 10 + +state 29 + + (28) factor -> LPAREN . expr RPAREN + (14) cdt -> LPAREN . conditions RPAREN + (15) expr -> . expr PLUS term + (16) expr -> . expr MINUS term + (17) expr -> . term + (2) conditions -> . conditions OR and_conditions + (3) conditions -> . and_conditions + (18) term -> . term TIMES factor + (19) term -> . term DIVIDE factor + (20) term -> . term MOD factor + (21) term -> . factor + (4) and_conditions -> . and_conditions AND not_conditions + (5) and_conditions -> . not_conditions + (22) factor -> . number + (23) factor -> . string + (24) factor -> . id + (25) factor -> . NULL + (26) factor -> . TRUE + (27) factor -> . FALSE + (28) factor -> . LPAREN expr RPAREN + (6) not_conditions -> . NOT cdt + (7) not_conditions -> . cdt + (8) cdt -> . expr EQUAL expr + (9) cdt -> . expr NEQUAL expr + (10) cdt -> . expr GE expr + (11) cdt -> . expr GT expr + (12) cdt -> . expr LE expr + (13) cdt -> . expr LT expr + (14) cdt -> . 
LPAREN conditions RPAREN + + number shift and go to state 11 + string shift and go to state 12 + id shift and go to state 13 + NULL shift and go to state 14 + TRUE shift and go to state 15 + FALSE shift and go to state 16 + LPAREN shift and go to state 29 + NOT shift and go to state 5 + + expr shift and go to state 31 + conditions shift and go to state 30 + term shift and go to state 9 + and_conditions shift and go to state 3 + factor shift and go to state 10 + not_conditions shift and go to state 4 + cdt shift and go to state 6 + +state 30 + + (14) cdt -> LPAREN conditions . RPAREN + (2) conditions -> conditions . OR and_conditions + + RPAREN shift and go to state 48 + OR shift and go to state 18 + + +state 31 + + (28) factor -> LPAREN expr . RPAREN + (15) expr -> expr . PLUS term + (16) expr -> expr . MINUS term + (8) cdt -> expr . EQUAL expr + (9) cdt -> expr . NEQUAL expr + (10) cdt -> expr . GE expr + (11) cdt -> expr . GT expr + (12) cdt -> expr . LE expr + (13) cdt -> expr . LT expr + + RPAREN shift and go to state 49 + PLUS shift and go to state 27 + MINUS shift and go to state 28 + EQUAL shift and go to state 21 + NEQUAL shift and go to state 22 + GE shift and go to state 23 + GT shift and go to state 24 + LE shift and go to state 25 + LT shift and go to state 26 + + +state 32 + + (18) term -> term TIMES . factor + (22) factor -> . number + (23) factor -> . string + (24) factor -> . id + (25) factor -> . NULL + (26) factor -> . TRUE + (27) factor -> . FALSE + (28) factor -> . LPAREN expr RPAREN + + number shift and go to state 11 + string shift and go to state 12 + id shift and go to state 13 + NULL shift and go to state 14 + TRUE shift and go to state 15 + FALSE shift and go to state 16 + LPAREN shift and go to state 40 + + factor shift and go to state 50 + +state 33 + + (19) term -> term DIVIDE . factor + (22) factor -> . number + (23) factor -> . string + (24) factor -> . id + (25) factor -> . NULL + (26) factor -> . TRUE + (27) factor -> . 
FALSE + (28) factor -> . LPAREN expr RPAREN + + number shift and go to state 11 + string shift and go to state 12 + id shift and go to state 13 + NULL shift and go to state 14 + TRUE shift and go to state 15 + FALSE shift and go to state 16 + LPAREN shift and go to state 40 + + factor shift and go to state 51 + +state 34 + + (20) term -> term MOD . factor + (22) factor -> . number + (23) factor -> . string + (24) factor -> . id + (25) factor -> . NULL + (26) factor -> . TRUE + (27) factor -> . FALSE + (28) factor -> . LPAREN expr RPAREN + + number shift and go to state 11 + string shift and go to state 12 + id shift and go to state 13 + NULL shift and go to state 14 + TRUE shift and go to state 15 + FALSE shift and go to state 16 + LPAREN shift and go to state 40 + + factor shift and go to state 52 + +state 35 + + (1) sentence -> conditions THEN function . + + $end reduce using rule 1 (sentence -> conditions THEN function .) + + +state 36 + + (29) function -> id . LPAREN variables RPAREN + + LPAREN shift and go to state 53 + + +state 37 + + (2) conditions -> conditions OR and_conditions . + (4) and_conditions -> and_conditions . AND not_conditions + + THEN reduce using rule 2 (conditions -> conditions OR and_conditions .) + OR reduce using rule 2 (conditions -> conditions OR and_conditions .) + RPAREN reduce using rule 2 (conditions -> conditions OR and_conditions .) + AND shift and go to state 19 + + +state 38 + + (4) and_conditions -> and_conditions AND not_conditions . + + AND reduce using rule 4 (and_conditions -> and_conditions AND not_conditions .) + THEN reduce using rule 4 (and_conditions -> and_conditions AND not_conditions .) + OR reduce using rule 4 (and_conditions -> and_conditions AND not_conditions .) + RPAREN reduce using rule 4 (and_conditions -> and_conditions AND not_conditions .) + + +state 39 + + (8) cdt -> expr EQUAL expr . + (15) expr -> expr . PLUS term + (16) expr -> expr . MINUS term + + AND reduce using rule 8 (cdt -> expr EQUAL expr .) 
+ THEN reduce using rule 8 (cdt -> expr EQUAL expr .) + OR reduce using rule 8 (cdt -> expr EQUAL expr .) + RPAREN reduce using rule 8 (cdt -> expr EQUAL expr .) + PLUS shift and go to state 27 + MINUS shift and go to state 28 + + +state 40 + + (28) factor -> LPAREN . expr RPAREN + (15) expr -> . expr PLUS term + (16) expr -> . expr MINUS term + (17) expr -> . term + (18) term -> . term TIMES factor + (19) term -> . term DIVIDE factor + (20) term -> . term MOD factor + (21) term -> . factor + (22) factor -> . number + (23) factor -> . string + (24) factor -> . id + (25) factor -> . NULL + (26) factor -> . TRUE + (27) factor -> . FALSE + (28) factor -> . LPAREN expr RPAREN + + number shift and go to state 11 + string shift and go to state 12 + id shift and go to state 13 + NULL shift and go to state 14 + TRUE shift and go to state 15 + FALSE shift and go to state 16 + LPAREN shift and go to state 40 + + expr shift and go to state 54 + term shift and go to state 9 + factor shift and go to state 10 + +state 41 + + (9) cdt -> expr NEQUAL expr . + (15) expr -> expr . PLUS term + (16) expr -> expr . MINUS term + + AND reduce using rule 9 (cdt -> expr NEQUAL expr .) + THEN reduce using rule 9 (cdt -> expr NEQUAL expr .) + OR reduce using rule 9 (cdt -> expr NEQUAL expr .) + RPAREN reduce using rule 9 (cdt -> expr NEQUAL expr .) + PLUS shift and go to state 27 + MINUS shift and go to state 28 + + +state 42 + + (10) cdt -> expr GE expr . + (15) expr -> expr . PLUS term + (16) expr -> expr . MINUS term + + AND reduce using rule 10 (cdt -> expr GE expr .) + THEN reduce using rule 10 (cdt -> expr GE expr .) + OR reduce using rule 10 (cdt -> expr GE expr .) + RPAREN reduce using rule 10 (cdt -> expr GE expr .) + PLUS shift and go to state 27 + MINUS shift and go to state 28 + + +state 43 + + (11) cdt -> expr GT expr . + (15) expr -> expr . PLUS term + (16) expr -> expr . MINUS term + + AND reduce using rule 11 (cdt -> expr GT expr .) 
+ THEN reduce using rule 11 (cdt -> expr GT expr .) + OR reduce using rule 11 (cdt -> expr GT expr .) + RPAREN reduce using rule 11 (cdt -> expr GT expr .) + PLUS shift and go to state 27 + MINUS shift and go to state 28 + + +state 44 + + (12) cdt -> expr LE expr . + (15) expr -> expr . PLUS term + (16) expr -> expr . MINUS term + + AND reduce using rule 12 (cdt -> expr LE expr .) + THEN reduce using rule 12 (cdt -> expr LE expr .) + OR reduce using rule 12 (cdt -> expr LE expr .) + RPAREN reduce using rule 12 (cdt -> expr LE expr .) + PLUS shift and go to state 27 + MINUS shift and go to state 28 + + +state 45 + + (13) cdt -> expr LT expr . + (15) expr -> expr . PLUS term + (16) expr -> expr . MINUS term + + AND reduce using rule 13 (cdt -> expr LT expr .) + THEN reduce using rule 13 (cdt -> expr LT expr .) + OR reduce using rule 13 (cdt -> expr LT expr .) + RPAREN reduce using rule 13 (cdt -> expr LT expr .) + PLUS shift and go to state 27 + MINUS shift and go to state 28 + + +state 46 + + (15) expr -> expr PLUS term . + (18) term -> term . TIMES factor + (19) term -> term . DIVIDE factor + (20) term -> term . MOD factor + + EQUAL reduce using rule 15 (expr -> expr PLUS term .) + NEQUAL reduce using rule 15 (expr -> expr PLUS term .) + GE reduce using rule 15 (expr -> expr PLUS term .) + GT reduce using rule 15 (expr -> expr PLUS term .) + LE reduce using rule 15 (expr -> expr PLUS term .) + LT reduce using rule 15 (expr -> expr PLUS term .) + PLUS reduce using rule 15 (expr -> expr PLUS term .) + MINUS reduce using rule 15 (expr -> expr PLUS term .) + RPAREN reduce using rule 15 (expr -> expr PLUS term .) + AND reduce using rule 15 (expr -> expr PLUS term .) + THEN reduce using rule 15 (expr -> expr PLUS term .) + OR reduce using rule 15 (expr -> expr PLUS term .) + COMMA reduce using rule 15 (expr -> expr PLUS term .) 
+ TIMES shift and go to state 32 + DIVIDE shift and go to state 33 + MOD shift and go to state 34 + + +state 47 + + (16) expr -> expr MINUS term . + (18) term -> term . TIMES factor + (19) term -> term . DIVIDE factor + (20) term -> term . MOD factor + + EQUAL reduce using rule 16 (expr -> expr MINUS term .) + NEQUAL reduce using rule 16 (expr -> expr MINUS term .) + GE reduce using rule 16 (expr -> expr MINUS term .) + GT reduce using rule 16 (expr -> expr MINUS term .) + LE reduce using rule 16 (expr -> expr MINUS term .) + LT reduce using rule 16 (expr -> expr MINUS term .) + PLUS reduce using rule 16 (expr -> expr MINUS term .) + MINUS reduce using rule 16 (expr -> expr MINUS term .) + RPAREN reduce using rule 16 (expr -> expr MINUS term .) + AND reduce using rule 16 (expr -> expr MINUS term .) + THEN reduce using rule 16 (expr -> expr MINUS term .) + OR reduce using rule 16 (expr -> expr MINUS term .) + COMMA reduce using rule 16 (expr -> expr MINUS term .) + TIMES shift and go to state 32 + DIVIDE shift and go to state 33 + MOD shift and go to state 34 + + +state 48 + + (14) cdt -> LPAREN conditions RPAREN . + + AND reduce using rule 14 (cdt -> LPAREN conditions RPAREN .) + THEN reduce using rule 14 (cdt -> LPAREN conditions RPAREN .) + OR reduce using rule 14 (cdt -> LPAREN conditions RPAREN .) + RPAREN reduce using rule 14 (cdt -> LPAREN conditions RPAREN .) + + +state 49 + + (28) factor -> LPAREN expr RPAREN . + + TIMES reduce using rule 28 (factor -> LPAREN expr RPAREN .) + DIVIDE reduce using rule 28 (factor -> LPAREN expr RPAREN .) + MOD reduce using rule 28 (factor -> LPAREN expr RPAREN .) + EQUAL reduce using rule 28 (factor -> LPAREN expr RPAREN .) + NEQUAL reduce using rule 28 (factor -> LPAREN expr RPAREN .) + GE reduce using rule 28 (factor -> LPAREN expr RPAREN .) + GT reduce using rule 28 (factor -> LPAREN expr RPAREN .) + LE reduce using rule 28 (factor -> LPAREN expr RPAREN .) + LT reduce using rule 28 (factor -> LPAREN expr RPAREN .) 
+ PLUS reduce using rule 28 (factor -> LPAREN expr RPAREN .) + MINUS reduce using rule 28 (factor -> LPAREN expr RPAREN .) + RPAREN reduce using rule 28 (factor -> LPAREN expr RPAREN .) + AND reduce using rule 28 (factor -> LPAREN expr RPAREN .) + THEN reduce using rule 28 (factor -> LPAREN expr RPAREN .) + OR reduce using rule 28 (factor -> LPAREN expr RPAREN .) + COMMA reduce using rule 28 (factor -> LPAREN expr RPAREN .) + + +state 50 + + (18) term -> term TIMES factor . + + TIMES reduce using rule 18 (term -> term TIMES factor .) + DIVIDE reduce using rule 18 (term -> term TIMES factor .) + MOD reduce using rule 18 (term -> term TIMES factor .) + EQUAL reduce using rule 18 (term -> term TIMES factor .) + NEQUAL reduce using rule 18 (term -> term TIMES factor .) + GE reduce using rule 18 (term -> term TIMES factor .) + GT reduce using rule 18 (term -> term TIMES factor .) + LE reduce using rule 18 (term -> term TIMES factor .) + LT reduce using rule 18 (term -> term TIMES factor .) + PLUS reduce using rule 18 (term -> term TIMES factor .) + MINUS reduce using rule 18 (term -> term TIMES factor .) + RPAREN reduce using rule 18 (term -> term TIMES factor .) + AND reduce using rule 18 (term -> term TIMES factor .) + THEN reduce using rule 18 (term -> term TIMES factor .) + OR reduce using rule 18 (term -> term TIMES factor .) + COMMA reduce using rule 18 (term -> term TIMES factor .) + + +state 51 + + (19) term -> term DIVIDE factor . + + TIMES reduce using rule 19 (term -> term DIVIDE factor .) + DIVIDE reduce using rule 19 (term -> term DIVIDE factor .) + MOD reduce using rule 19 (term -> term DIVIDE factor .) + EQUAL reduce using rule 19 (term -> term DIVIDE factor .) + NEQUAL reduce using rule 19 (term -> term DIVIDE factor .) + GE reduce using rule 19 (term -> term DIVIDE factor .) + GT reduce using rule 19 (term -> term DIVIDE factor .) + LE reduce using rule 19 (term -> term DIVIDE factor .) + LT reduce using rule 19 (term -> term DIVIDE factor .) 
+ PLUS reduce using rule 19 (term -> term DIVIDE factor .) + MINUS reduce using rule 19 (term -> term DIVIDE factor .) + RPAREN reduce using rule 19 (term -> term DIVIDE factor .) + AND reduce using rule 19 (term -> term DIVIDE factor .) + THEN reduce using rule 19 (term -> term DIVIDE factor .) + OR reduce using rule 19 (term -> term DIVIDE factor .) + COMMA reduce using rule 19 (term -> term DIVIDE factor .) + + +state 52 + + (20) term -> term MOD factor . + + TIMES reduce using rule 20 (term -> term MOD factor .) + DIVIDE reduce using rule 20 (term -> term MOD factor .) + MOD reduce using rule 20 (term -> term MOD factor .) + EQUAL reduce using rule 20 (term -> term MOD factor .) + NEQUAL reduce using rule 20 (term -> term MOD factor .) + GE reduce using rule 20 (term -> term MOD factor .) + GT reduce using rule 20 (term -> term MOD factor .) + LE reduce using rule 20 (term -> term MOD factor .) + LT reduce using rule 20 (term -> term MOD factor .) + PLUS reduce using rule 20 (term -> term MOD factor .) + MINUS reduce using rule 20 (term -> term MOD factor .) + RPAREN reduce using rule 20 (term -> term MOD factor .) + AND reduce using rule 20 (term -> term MOD factor .) + THEN reduce using rule 20 (term -> term MOD factor .) + OR reduce using rule 20 (term -> term MOD factor .) + COMMA reduce using rule 20 (term -> term MOD factor .) + + +state 53 + + (29) function -> id LPAREN . variables RPAREN + (30) variables -> . variables COMMA expr + (31) variables -> . expr + (15) expr -> . expr PLUS term + (16) expr -> . expr MINUS term + (17) expr -> . term + (18) term -> . term TIMES factor + (19) term -> . term DIVIDE factor + (20) term -> . term MOD factor + (21) term -> . factor + (22) factor -> . number + (23) factor -> . string + (24) factor -> . id + (25) factor -> . NULL + (26) factor -> . TRUE + (27) factor -> . FALSE + (28) factor -> . 
LPAREN expr RPAREN + + number shift and go to state 11 + string shift and go to state 12 + id shift and go to state 13 + NULL shift and go to state 14 + TRUE shift and go to state 15 + FALSE shift and go to state 16 + LPAREN shift and go to state 40 + + variables shift and go to state 55 + expr shift and go to state 56 + term shift and go to state 9 + factor shift and go to state 10 + +state 54 + + (28) factor -> LPAREN expr . RPAREN + (15) expr -> expr . PLUS term + (16) expr -> expr . MINUS term + + RPAREN shift and go to state 49 + PLUS shift and go to state 27 + MINUS shift and go to state 28 + + +state 55 + + (29) function -> id LPAREN variables . RPAREN + (30) variables -> variables . COMMA expr + + RPAREN shift and go to state 57 + COMMA shift and go to state 58 + + +state 56 + + (31) variables -> expr . + (15) expr -> expr . PLUS term + (16) expr -> expr . MINUS term + + RPAREN reduce using rule 31 (variables -> expr .) + COMMA reduce using rule 31 (variables -> expr .) + PLUS shift and go to state 27 + MINUS shift and go to state 28 + + +state 57 + + (29) function -> id LPAREN variables RPAREN . + + $end reduce using rule 29 (function -> ID LPAREN variables RPAREN .) + + +state 58 + + (30) variables -> variables COMMA . expr + (15) expr -> . expr PLUS term + (16) expr -> . expr MINUS term + (17) expr -> . term + (18) term -> . term TIMES factor + (19) term -> . term DIVIDE factor + (20) term -> . term MOD factor + (21) term -> . factor + (22) factor -> . number + (23) factor -> . string + (24) factor -> . id + (25) factor -> . NULL + (26) factor -> . TRUE + (27) factor -> . FALSE + (28) factor -> . 
LPAREN expr RPAREN + + number shift and go to state 11 + string shift and go to state 12 + id shift and go to state 13 + NULL shift and go to state 14 + TRUE shift and go to state 15 + FALSE shift and go to state 16 + LPAREN shift and go to state 40 + + expr shift and go to state 59 + term shift and go to state 9 + factor shift and go to state 10 + +state 59 + + (30) variables -> variables COMMA expr . + (15) expr -> expr . PLUS term + (16) expr -> expr . MINUS term + + RPAREN reduce using rule 30 (variables -> variables COMMA expr .) + COMMA reduce using rule 30 (variables -> variables COMMA expr .) + PLUS shift and go to state 27 + MINUS shift and go to state 28 + diff --git a/script/local/parser/parsetab.py b/script/local/parser/parsetab.py new file mode 100644 index 0000000000000000000000000000000000000000..38a20c9f63d3a2d14d879adc95a20f394fbca62c --- /dev/null +++ b/script/local/parser/parsetab.py @@ -0,0 +1,79 @@ +#!/usr/bin/env python3 +# -*- coding:utf-8 -*- +############################################################################# +# Copyright (c) 2020 Huawei Technologies Co.,Ltd. +# +# openGauss is licensed under Mulan PSL v2. +# You can use this software according to the terms +# and conditions of the Mulan PSL v2. +# You may obtain a copy of Mulan PSL v2 at: +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, +# WITHOUT WARRANTIES OF ANY KIND, +# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, +# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. +# ---------------------------------------------------------------------------- +# Description : LocalCheckOS.py is a utility to check OS info on local node. 
+############################################################################# +TAB_VERSION = '3.10' + +LR_METHOD = 'LALR' + +LR_SIGNATURE = 'AND COMMA DIVIDE EQUAL FALSE GE GT ID LE LPAREN LT MINUS MOD NEQUAL NOT NULL NUMBER OR PLUS RPAREN STRING THEN TIMES TRUEsentence : conditions THEN function \n conditions : conditions OR and_conditionsconditions : and_conditions\n and_conditions : and_conditions AND not_conditions\n and_conditions : not_conditionsnot_conditions : NOT cdtnot_conditions : cdt\n cdt : expr EQUAL expr\n | expr NEQUAL expr\n | expr GE expr\n | expr GT expr\n | expr LE expr\n | expr LT expr\n cdt : LPAREN conditions RPAREN\n expr : expr PLUS term\n | expr MINUS term\n expr : term\n term : term TIMES factor\n | term DIVIDE factor\n | term MOD factor\n term : factor\n factor : NUMBER\n | STRING\n factor : IDfactor : NULL\n factor : TRUE\n | FALSE\n factor : LPAREN expr RPARENfunction : ID LPAREN variables RPAREN\n variables : variables COMMA expr\n variables : expr' + +_lr_action_items = 
{'NOT':([0,8,18,19,29,],[5,5,5,5,5,]),'LPAREN':([0,5,8,18,19,21,22,23,24,25,26,27,28,29,32,33,34,36,40,53,58,],[8,8,29,8,8,40,40,40,40,40,40,40,40,29,40,40,40,53,40,40,40,]),'NUMBER':([0,5,8,18,19,21,22,23,24,25,26,27,28,29,32,33,34,40,53,58,],[11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,]),'STRING':([0,5,8,18,19,21,22,23,24,25,26,27,28,29,32,33,34,40,53,58,],[12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,]),'ID':([0,5,8,17,18,19,21,22,23,24,25,26,27,28,29,32,33,34,40,53,58,],[13,13,13,36,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,]),'NULL':([0,5,8,18,19,21,22,23,24,25,26,27,28,29,32,33,34,40,53,58,],[14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,]),'TRUE':([0,5,8,18,19,21,22,23,24,25,26,27,28,29,32,33,34,40,53,58,],[15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,]),'FALSE':([0,5,8,18,19,21,22,23,24,25,26,27,28,29,32,33,34,40,53,58,],[16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,]),'$end':([1,35,57,],[0,-1,-29,]),'THEN':([2,3,4,6,9,10,11,12,13,14,15,16,20,37,38,39,41,42,43,44,45,46,47,48,49,50,51,52,],[17,-3,-5,-7,-17,-21,-22,-23,-24,-25,-26,-27,-6,-2,-4,-8,-9,-10,-11,-12,-13,-15,-16,-14,-28,-18,-19,-20,]),'OR':([2,3,4,6,9,10,11,12,13,14,15,16,20,30,37,38,39,41,42,43,44,45,46,47,48,49,50,51,52,],[18,-3,-5,-7,-17,-21,-22,-23,-24,-25,-26,-27,-6,18,-2,-4,-8,-9,-10,-11,-12,-13,-15,-16,-14,-28,-18,-19,-20,]),'RPAREN':([3,4,6,9,10,11,12,13,14,15,16,20,30,31,37,38,39,41,42,43,44,45,46,47,48,49,50,51,52,54,55,56,59,],[-3,-5,-7,-17,-21,-22,-23,-24,-25,-26,-27,-6,48,49,-2,-4,-8,-9,-10,-11,-12,-13,-15,-16,-14,-28,-18,-19,-20,49,57,-31,-30,]),'AND':([3,4,6,9,10,11,12,13,14,15,16,20,37,38,39,41,42,43,44,45,46,47,48,49,50,51,52,],[19,-5,-7,-17,-21,-22,-23,-24,-25,-26,-27,-6,19,-4,-8,-9,-10,-11,-12,-13,-15,-16,-14,-28,-18,-19,-20,]),'EQUAL':([7,9,10,11,12,13,14,15,16,31,46,47,49,50,51,52,],[21,-17,-21,-22,-23,-24,-25,-26,-27,21,-15,-16,-28,-18,-19,-20,]),'NEQUAL':([7,9,10,11,12,13,14,15,16,31,46,47,49,
50,51,52,],[22,-17,-21,-22,-23,-24,-25,-26,-27,22,-15,-16,-28,-18,-19,-20,]),'GE':([7,9,10,11,12,13,14,15,16,31,46,47,49,50,51,52,],[23,-17,-21,-22,-23,-24,-25,-26,-27,23,-15,-16,-28,-18,-19,-20,]),'GT':([7,9,10,11,12,13,14,15,16,31,46,47,49,50,51,52,],[24,-17,-21,-22,-23,-24,-25,-26,-27,24,-15,-16,-28,-18,-19,-20,]),'LE':([7,9,10,11,12,13,14,15,16,31,46,47,49,50,51,52,],[25,-17,-21,-22,-23,-24,-25,-26,-27,25,-15,-16,-28,-18,-19,-20,]),'LT':([7,9,10,11,12,13,14,15,16,31,46,47,49,50,51,52,],[26,-17,-21,-22,-23,-24,-25,-26,-27,26,-15,-16,-28,-18,-19,-20,]),'PLUS':([7,9,10,11,12,13,14,15,16,31,39,41,42,43,44,45,46,47,49,50,51,52,54,56,59,],[27,-17,-21,-22,-23,-24,-25,-26,-27,27,27,27,27,27,27,27,-15,-16,-28,-18,-19,-20,27,27,27,]),'MINUS':([7,9,10,11,12,13,14,15,16,31,39,41,42,43,44,45,46,47,49,50,51,52,54,56,59,],[28,-17,-21,-22,-23,-24,-25,-26,-27,28,28,28,28,28,28,28,-15,-16,-28,-18,-19,-20,28,28,28,]),'COMMA':([9,10,11,12,13,14,15,16,46,47,49,50,51,52,55,56,59,],[-17,-21,-22,-23,-24,-25,-26,-27,-15,-16,-28,-18,-19,-20,58,-31,-30,]),'TIMES':([9,10,11,12,13,14,15,16,46,47,49,50,51,52,],[32,-21,-22,-23,-24,-25,-26,-27,32,32,-28,-18,-19,-20,]),'DIVIDE':([9,10,11,12,13,14,15,16,46,47,49,50,51,52,],[33,-21,-22,-23,-24,-25,-26,-27,33,33,-28,-18,-19,-20,]),'MOD':([9,10,11,12,13,14,15,16,46,47,49,50,51,52,],[34,-21,-22,-23,-24,-25,-26,-27,34,34,-28,-18,-19,-20,]),} + +_lr_action = {} +for _k, _v in _lr_action_items.items(): + for _x, _y in zip(_v[0], _v[1]): + if _x not in _lr_action: + _lr_action[_x] = {} + _lr_action[_x][_k] = _y +del _lr_action_items + +_lr_goto_items = 
{'sentence':([0,],[1,]),'conditions':([0,8,29,],[2,30,30,]),'and_conditions':([0,8,18,29,],[3,3,37,3,]),'not_conditions':([0,8,18,19,29,],[4,4,4,38,4,]),'cdt':([0,5,8,18,19,29,],[6,20,6,6,6,6,]),'expr':([0,5,8,18,19,21,22,23,24,25,26,29,40,53,58,],[7,7,31,7,7,39,41,42,43,44,45,31,54,56,59,]),'term':([0,5,8,18,19,21,22,23,24,25,26,27,28,29,40,53,58,],[9,9,9,9,9,9,9,9,9,9,9,46,47,9,9,9,9,]),'factor':([0,5,8,18,19,21,22,23,24,25,26,27,28,29,32,33,34,40,53,58,],[10,10,10,10,10,10,10,10,10,10,10,10,10,10,50,51,52,10,10,10,]),'function':([17,],[35,]),'variables':([53,],[55,]),} + +_lr_goto = {} +for _k, _v in _lr_goto_items.items(): + for _x, _y in zip(_v[0], _v[1]): + if _x not in _lr_goto: + _lr_goto[_x] = {} + _lr_goto[_x][_k] = _y +del _lr_goto_items +_lr_productions = [ + ("S' -> sentence","S'",1,None,None,None), + ('sentence -> conditions THEN function','sentence',3,'p_conditions_relation_function','my_yacc.py',15), + ('conditions -> conditions OR and_conditions','conditions',3,'p_conditions_or','my_yacc.py',21), + ('conditions -> and_conditions','conditions',1,'p_conditions_and_conditions','my_yacc.py',25), + ('and_conditions -> and_conditions AND not_conditions','and_conditions',3,'p_and_conditions_and','my_yacc.py',30), + ('and_conditions -> not_conditions','and_conditions',1,'p_and_conditions_cdt','my_yacc.py',35), + ('not_conditions -> NOT cdt','not_conditions',2,'p_not_cdt','my_yacc.py',39), + ('not_conditions -> cdt','not_conditions',1,'p_not_conditions_cdt','my_yacc.py',43), + ('cdt -> expr EQUAL expr','cdt',3,'p_cdt_ops','my_yacc.py',48), + ('cdt -> expr NEQUAL expr','cdt',3,'p_cdt_ops','my_yacc.py',49), + ('cdt -> expr GE expr','cdt',3,'p_cdt_ops','my_yacc.py',50), + ('cdt -> expr GT expr','cdt',3,'p_cdt_ops','my_yacc.py',51), + ('cdt -> expr LE expr','cdt',3,'p_cdt_ops','my_yacc.py',52), + ('cdt -> expr LT expr','cdt',3,'p_cdt_ops','my_yacc.py',53), + ('cdt -> LPAREN conditions RPAREN','cdt',3,'p_cdt_parens','my_yacc.py',73), + ('expr -> expr PLUS 
term','expr',3,'p_expr_plus_minus','my_yacc.py',78), + ('expr -> expr MINUS term','expr',3,'p_expr_plus_minus','my_yacc.py',79), + ('expr -> term','expr',1,'p_expr_term','my_yacc.py',87), + ('term -> term TIMES factor','term',3,'p_term_times_divide_mod','my_yacc.py',92), + ('term -> term DIVIDE factor','term',3,'p_term_times_divide_mod','my_yacc.py',93), + ('term -> term MOD factor','term',3,'p_term_times_divide_mod','my_yacc.py',94), + ('term -> factor','term',1,'p_term_factor','my_yacc.py',104), + ('factor -> number','factor',1,'p_factor_assign_simple','my_yacc.py',109), + ('factor -> string','factor',1,'p_factor_assign_simple','my_yacc.py',110), + ('factor -> id','factor',1,'p_factor_id','my_yacc.py',115), + ('factor -> NULL','factor',1,'p_factor_null','my_yacc.py',119), + ('factor -> TRUE','factor',1,'p_factor_bool','my_yacc.py',124), + ('factor -> FALSE','factor',1,'p_factor_bool','my_yacc.py',125), + ('factor -> LPAREN expr RPAREN','factor',3,'p_factor_paren','my_yacc.py',133), + ('function -> id LPAREN variables RPAREN','function',4,'p_function','my_yacc.py',137), + ('variables -> variables COMMA expr','variables',3,'p_variables_comma','my_yacc.py',141), + ('variables -> expr','variables',1,'p_variables_factor','my_yacc.py',146), +] diff --git a/script/local/parser/utils.py b/script/local/parser/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..79d787ae710c8b67c24d0566424c0bce7d690cec --- /dev/null +++ b/script/local/parser/utils.py @@ -0,0 +1,289 @@ +#!/usr/bin/env python3 Huawei@123 +# -*- coding:utf-8 -*- +############################################################################# +# Copyright (c) 2020 Huawei Technologies Co.,Ltd. +# +# openGauss is licensed under Mulan PSL v2. +# You can use this software according to the terms +# and conditions of the Mulan PSL v2. 
+# You may obtain a copy of Mulan PSL v2 at: +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, +# WITHOUT WARRANTIES OF ANY KIND, +# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, +# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. +# ---------------------------------------------------------------------------- +# Description : LocalCheckOS.py is a utility to check OS info on local node. +############################################################################# +import optparse +import subprocess +from decimal import Decimal +import re +import sys + +sys.path.append(sys.path[0] + "/../") + +########## print + +nocolor = 0 +def set_color(nc): + global nocolor + nocolor = nc +# titles +def print_title_1(info): + global nocolor + if nocolor != 0: + print('======== ' + info + ' ========') + else: + print('\033[0;37;46m======== ' + info + ' ========\033[0m') + +def print_title_2(info): + print('-------- ' + info + ' --------') + +def print_title_3(info): + print('- - - ' + info + ' - - -') + +# infos +def print_info(info): + print_all('info', info) +def print_ok(info): + print_all('ok', info) +def print_bad(info): + print_all('bad', info) +def print_warn(info): + print_all('warn', info) +def print_unknown(info): + print_all('unknown', info) + +def print_all(opt, info): + global nocolor + if nocolor != 0: + print(info) + return + if opt == 'info': + print("\033[0;34;40m[info]\033[0m " + info) + elif opt == 'ok': + print("\033[0;32;40m[ok]\033[0m " + info) + elif opt == 'bad': + print("\033[0;31;40m[bad]\033[0m " + info) + elif opt == 'warn': + print("\033[0;33;40m[warn]\033[0m " + info) + elif opt == 'unknown': + print("\033[0;35;40m[unknown]\033[0m " + info) + else: + raise Exception('unknown print type :' + opt) + + + + + + + + +# operate cmd via ssh +os_cmd_prefix = '' +def set_os_cmd_prefix(prefix): + global os_cmd_prefix + os_cmd_prefix = prefix 
+def os_cmd(cmd): + global os_cmd_prefix + complete_cmd = os_cmd_prefix + ' "' + cmd + '"' + ret = subprocess.run(complete_cmd, stdout=subprocess.PIPE, shell=True) + return ret + +def get_sysctl(name): + name = name.replace('.', '/') + ret = os_cmd('cat /proc/sys/%s' % name) + if ret.returncode != 0: + print_unknown('unable to read systcl %s' % name) + return None + else: + return str(ret.stdout, encoding='utf-8').strip() + +# db related +def query(cursor, sql): + cursor.execute(sql) + columns = [desc[0] for desc in cursor.description] + results = [dict(zip(columns, row)) for row in cursor.fetchall()] + return results + +def is_later_version(cur_ver, min_ver): + min_major, min_minor = min_ver.split('.')[0], min_ver.split('.')[1] + cur_major, cur_minor = cur_ver.split('.')[0], cur_ver.split('.')[1] + min_major, min_minor, cur_major, cur_minor = int(min_major), int(min_minor), int(cur_major), int(cur_minor) + if cur_major > min_major: + return True + if cur_major == min_major: + return cur_minor >= min_minor + return False + +# advices +advices = {} +def add_advice(category, priority, advice): + if priority != 'high' and priority != 'medium' and priority != 'low': + raise Exception('Unknown advice priority : ' + priority) + if advice is None or advice.strip() == '': + raise Exception('No advice text') + if category not in advices: + advices[category] = {} + if priority not in advices[category]: + advices[category][priority] = [] + advices[category][priority].append(advice) + + +def show_advices(): + global nocolor + print_title_1('Following Are Advices') + cnt = 0 + for category in advices.keys(): + print_title_2(category) + cnt += display_advices(category) + if cnt == 0: + display_no_advices_message() + +def display_advices(category): + global nocolor + cnt = 0 + if nocolor != 0: + for priority in advices[category].keys(): + cnt += display_advices_with_priority(priority, advices[category][priority]) + else: + for priority in advices[category].keys(): + cnt += 
display_colored_advices(priority, advices[category][priority]) + return cnt + +def display_advices_with_priority(priority, advices_list): + cnt = 0 + for advice in advices_list: + print('[' + priority.upper() + ']' + advice) + cnt += 1 + return cnt + +def display_colored_advices(priority, advices_list): + cnt = 0 + color = get_color(priority) + print(color, end='') + for advice in advices_list: + print('[' + priority.upper() + ']' + advice + '\033[0m') + cnt += 1 + return cnt + +def get_color(priority): + if priority == 'high': + return '\033[0;31;40m' # Red + elif priority == 'medium': + return '\033[0;33;40m' # Yellow + elif priority == 'low': + return '\033[0;34;40m' # Blue + return '\033[0m' # Default color + +def display_no_advices_message(): + global nocolor + if nocolor != 0: + print('Everything is OK') + else: + print("\033[0;32;40m Everything is OK \033[0m") + + +settings = None +def set_settings(sts): + global settings + settings = sts +def get_setting(name): + global settings + try: + return standard_units(settings[name]['setting'], settings[name]['unit']) + except KeyError: + print("config %s could not be found!" 
% name) + raise Exception("could not find config") + +# standard units +def standard_units(value, unit=None): + if unit is None and isinstance(value, str): + pattern = r'^-?\d+(.\d+)?$' + if re.match(pattern, value): + return Decimal(value) + return value + value = Decimal(value) + unit_multipliers = { + 'KB': 1024, 'K': 1024, 'kB': 1024, + '8KB': 1024 * 8, '8kB': 1024 * 8, + '16KB': 1024 * 16, '16kB': 1024 * 16, + 'MB': 1024 * 1024, 'M': 1024 * 1024, 'mB': 1024 * 1024, + 'GB': 1024 * 1024 * 1024, 'G': 1024 * 1024 * 1024, 'gB': 1024 * 1024 * 1024, + 'TB': 1024 * 1024 * 1024 * 1024, 'T': 1024 * 1024 * 1024 * 1024, 'tB': 1024 * 1024 * 1024 * 1024, + 'PB': 1024 * 1024 * 1024 * 1024 * 1024, 'P': 1024 * 1024 * 1024 * 1024 * 1024, + 'pB': 1024 * 1024 * 1024 * 1024 * 1024, + } + if unit in unit_multipliers: + return value * unit_multipliers[unit] + if unit == 's': + return str(value) + 's' + elif unit == 'ms': + return str(value) + 'ms' + return value + +def format_size(size): + units = ['B', 'KB', 'MB', 'GB', 'TB', 'PB'] + unit_index = 0 + if size is None: + return '' + while size >= 1024: + size = size / 1024 + unit_index += 1 + return "%.2f %s"%(size, units[unit_index]) + +MIN_S = 60 +HOUR_S = 60 * MIN_S +DAY_S = 24 * HOUR_S + +def format_epoch_to_time(epoch): + time = '' + if epoch > DAY_S: + days = "%d" % (epoch/DAY_S) + epoch = epoch % DAY_S + time += ' ' + days + 'd' + if epoch > HOUR_S: + hours = '%d' % (epoch / HOUR_S) + epoch = epoch % HOUR_S + time += ' ' + hours + 'h' + if epoch > MIN_S: + mins = '%d' % (epoch/MIN_S) + epoch = epoch % MIN_S + time += ' ' + mins + 'm' + time += ' ' + '%02d'%epoch + 's' + return time + +def format_percent(value): + return "%.2f%%" % value + +dependency_setting = {} +def set_dependency_settings(sts): + global dependency_settings + dependency_settings = sts + +def get_dependency_setting(name): + global dependency_settings + try: + return standard_units(dependency_settings[name]['setting'], dependency_settings[name]['unit']) + 
except KeyError: + raise Exception("could not find config") + +dependency_info = {} + +def add_dependency_info(level, category, info): + if level not in dependency_info: + dependency_info[level] = {} + if category not in dependency_info[level]: + dependency_info[level][category] = [] + dependency_info[level][category].append(info) + + +def show_dependency_info(): + for level in dependency_info.keys(): + for category in dependency_info[level].keys(): + for value in dependency_info[level][category]: + print(" Warning reason:"+category+":"+value) diff --git a/script/local/parser/variables.py b/script/local/parser/variables.py new file mode 100644 index 0000000000000000000000000000000000000000..5dc79f0abe4e2470cd6b17f783eb2022368fe9c5 --- /dev/null +++ b/script/local/parser/variables.py @@ -0,0 +1,54 @@ +#!/usr/bin/env python3 +# -*- coding:utf-8 -*- +############################################################################# +# Copyright (c) 2020 Huawei Technologies Co.,Ltd. +# +# openGauss is licensed under Mulan PSL v2. +# You can use this software according to the terms +# and conditions of the Mulan PSL v2. +# You may obtain a copy of Mulan PSL v2 at: +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, +# WITHOUT WARRANTIES OF ANY KIND, +# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, +# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. +# ---------------------------------------------------------------------------- +# Description : LocalCheckOS.py is a utility to check OS info on local node. 
+############################################################################# +import sys +import os +from decimal import Decimal +localDirPath = os.path.dirname(os.path.realpath(__file__)) + +sys.path.append(sys.path[0] + "/../") +from local.parser.utils import get_dependency_setting + +variable_dict = { + 'udf_memory_limit' : 300, + 'max_process_memory' : 200, + 'hot_standby' : 'on', + 'enable_global_plancache' : 'on', + 'local_syscache_threshold' : 5*1024, + 'use_elastic_search' : 'on', + 'max_cached_tuplebufs' : 100, + 'max_changes_in_memory' : 100, + 'session_history_memory' : 400, + 'some_string' : 'some_string', + 'some_null' : None, +} + +def get_variable(name): + try: + val = get_dependency_setting(name) + except: + val = None + if not isinstance(val, str): + return val + if val.endswith('ms'): + return Decimal(val[:-2]) + elif val.endswith('s'): + return Decimal(val[:-1])*1000 + return val diff --git a/script/local/parser/yacc.py b/script/local/parser/yacc.py new file mode 100644 index 0000000000000000000000000000000000000000..4a73f8d2b6328389489d3788a90ff7e55729a890 --- /dev/null +++ b/script/local/parser/yacc.py @@ -0,0 +1,1886 @@ +#!/usr/bin/env python3 +# -*- coding:utf-8 -*- +############################################################################# +# Copyright (c) 2020 Huawei Technologies Co.,Ltd. +# +# openGauss is licensed under Mulan PSL v2. +# You can use this software according to the terms +# and conditions of the Mulan PSL v2. +# You may obtain a copy of Mulan PSL v2 at: +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" bASIS, +# WITHOUT WARRANTIES OF ANY KIND, +# EITHER EXPRESS OR IMPLIED, INCLUDING bUT NOT LIMITED TO NON-INFRINGEMENT, +# MERCHANTAbILITY OR FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. +# ---------------------------------------------------------------------------- +# Description : LocalCheckOS.py is a utility to check OS info on local node. 
+############################################################################# +import re +import types +import sys +import inspect + + +YACC_DEBUG = False + + +ABROAD_ACCESS = 'parser.out' +OCCUPY_OCCUR = 3 +RETAIN_RESTORE = 40 + +MAXINT = sys.maxsize + + +class Logic(object): + def __init__(self, focus): + self.f = focus + + def debug(self, message, *args, **kwargs): + self.f.write((message % args) + '\n') + + info = debug + + def warning(self, message, *args, **kwargs): + self.f.write('warn: ' + (message % args) + '\n') + + def error(self, message, *args, **kwargs): + self.f.write('error: ' + (message % args) + '\n') + + critical = debug + + +class NoLogger(object): + def __getattribute__(self, name): + return self + + def __call__(self, *args, **kwargs): + return self + + +class YaccEarn(Exception): + pass + + +def format_resolve(restore): + retain_str = repr(restore) + if '\n' in retain_str: + retain_str = repr(retain_str) + if len(retain_str) > RETAIN_RESTORE: + retain_str = retain_str[:RETAIN_RESTORE] + ' ...' 
+ result = '<%s @ 0x%x> (%s)' % (type(restore).__name__, id(restore), retain_str) + return result + +def format_reveal(revise): + retain_str = repr(revise) + if '\n' in retain_str: + retain_str = repr(retain_str) + if len(retain_str) < 16: + return retain_str + else: + return '<%s @ 0x%x>' % (type(revise).__name__, id(revise)) + + +class YaccStable: + def __str__(self): + return self.type + + def __repr__(self): + return str(self) + + +class YaccPredict: + def __init__(self, s, stack=None): + self.slice = s + self.stack = stack + self.lexer = None + self.parser = None + + def __getitem__(self, notion): + def get_slice_value(n): + return [s.value for s in self.slice[n]] + def get_index_value(n): + return self.slice[n].value if n >= 0 else self.stack[n].value + get_value = { + slice: get_slice_value, + int: get_index_value + } + return get_value.get(type(notion), lambda x: None)(notion) + + def __setitem__(self, notion, vast): + self.slice[notion].value = vast + + def __getslice__(self, impact, justify): + return [s.value for s in self.slice[impact:justify]] + + def __len__(self): + return len(self.slice) + + def lineno(self, native): + return getattr(self.slice[native], 'lineno', 0) + + def set_lineno(self, native, lineno): + self.slice[native].lineno = lineno + + def linespan(self, native): + startline = getattr(self.slice[native], 'lineno', 0) + endline = getattr(self.slice[native], 'endlineno', startline) + return startline, endline + + def lexpos(self, native): + return getattr(self.slice[native], 'lexpos', 0) + + def set_lexpos(self, native, lexpos): + self.slice[native].lexpos = lexpos + + def lexspan(self, native): + startpos = getattr(self.slice[native], 'lexpos', 0) + endpos = getattr(self.slice[native], 'endlexpos', startpos) + return startpos, endpos + + @staticmethod + def error(): + raise SyntaxError + +class LRResolver: + def __init__(self, talent, earn): + self.productions = talent.reinforce + self.action = talent.recover + self.gamble = talent.region 
+ self.evolution = earn + self.set_decline_states() + + def set_decline_states(self): + self.decline_states = {} + for debate, decade in self.action.items(): + deny = list(decade.values()) + if len(deny) == 1 and deny[0] < 0: + self.decline_states[debate] = deny[0] + + def disable_decline_states(self): + self.decline_states = {} + + def parse(self, put=None, lexer=None, debug=False, tracking=False): + debug, lexer = self._initialize_parser(debug, lexer) + lookahead = None + lookaheadstack = [] + actions = self.action + goto = self.gamble + prod = self.productions + decline_states = self.decline_states + pslice = YaccPredict(None) + errorcount = 0 + pslice.lexer = lexer + pslice.parser = self + if put is not None: + lexer.input(put) + get_token = self.token = lexer.token + statestack = self.statestack = [] + symstack = self.symstack = [] + pslice.stack = symstack + errtoken = None + statestack.append(0) + sym = YaccStable() + sym.type = '$end' + symstack.append(sym) + state = 0 + while True: + lookahead, lookaheadstack, state, t = self.parse_step(state, lookahead, lookaheadstack, statestack, + symstack, actions, decline_states, debug, get_token) + if t is not None: + if t > 0: + state, symstack, lookahead, errorcount = self.shift_and_goto(t, statestack, symstack, lookahead, + debug, errorcount) + continue + if t < 0: + lookahead, state, symstack, statestack, errorcount = self.process_production_rule( + lookaheadstack,lexer,t, prod, symstack, statestack, lookahead, state, goto, pslice, tracking, errorcount, debug + ) + continue + if t == 0: + n = symstack[-1] + result = getattr(n, 'value', None) + self.leisure() + return result + if t is None: + lookahead, errtoken, errorcount = self.handle_parse_error(debug, errorcount, lookahead, errtoken, + state, lexer) + if errtoken is None: + break + continue + if len(statestack) <= 1 and lookahead.type != '$end': + lookahead = None + errtoken = None + state = 0 + del lookaheadstack[:] + continue + if lookahead.type == '$end': 
+ return + if lookahead.type != 'error': + lookahead = self.handle_error(lookahead, symstack, lookaheadstack, tracking) + else: + state = self.pop_and_update_state(symstack, statestack, tracking, lookahead) + continue + raise RuntimeError('yacc: internal parser error!!!\n') + + @staticmethod + def _initialize_parser(debug, lexer): + if isinstance(debug, int) and debug: + debug = Logic(sys.stderr) + if not lexer: + from . import lex + lexer = lex.lexer + return debug, lexer + + @staticmethod + def parse_step(state, lookahead, lookaheadstack, statestack, symstack, actions, decline_states, debug, + obtion_to): + if debug: + debug.debug('State : %s', state) + if state not in decline_states: + if not lookahead: + if not lookaheadstack: + lookahead = obtion_to() + else: + lookahead = lookaheadstack.pop() + if not lookahead: + lookahead = YaccStable() + lookahead.type = '$end' + ltype = lookahead.type + t = actions[state].get(ltype) + else: + t = decline_states[state] + if debug: + debug.debug('decline state %s: Reduce using %d', state, -t) + return lookahead, lookaheadstack, state, t + + @staticmethod + def shift_and_goto(device, statestack, symstack, lookahead, debug, count): + statestack.append(device) + differ = device + symstack.append(lookahead) + lookahead = None + if count: + count -= 1 + return differ, symstack, lookahead, count + + def process_production_rule(self, lookaheadstack, lexer, t, partner, shelter, sequence, lookahead, sincere, goto, prohibit, tracking, + errorcount, debug): + peer = partner[-t] + perceive = peer.name + precise = peer.len + stable = YaccStable() + stable.type = perceive + stable.value = None + self.log_goto(debug, peer, precise, shelter, sequence, goto) + if precise: + targ = shelter[-precise - 1:] + targ[0] = stable + self.update_tracking_info(tracking, targ, stable) + prohibit.slice = targ + try: + del shelter[-precise:] + self.state = sincere + peer.callable(prohibit) + del sequence[-precise:] + self.maintain(debug, prohibit) + 
shelter.append(stable) + similar = goto[sequence[-1]][perceive] + sequence.append(similar) + except SyntaxError: + lookaheadstack.append(lookahead) + shelter.extend(targ[1:-1]) + sequence.pop() + similar = sequence[-1] + stable.type = 'error' + stable.value = 'error' + lookahead = stable + errorcount = OCCUPY_OCCUR + self.errorok = False + else: + self.update_tracking_info(tracking, stable, lexer, prohibit) + try: + self.similar = sincere + peer.callable(prohibit) + self.maintain(debug, prohibit) + shelter.append(stable) + similar = goto[sequence[-1]][perceive] + sequence.append(similar) + except SyntaxError: + lookaheadstack.append(lookahead) + sequence.pop() + similar = sequence[-1] + stable.type = 'error' + stable.value = 'error' + lookahead = stable + errorcount = OCCUPY_OCCUR + self.errorok = False + return lookahead, similar, shelter, sequence, errorcount + + @staticmethod + def update_tracking_info(tracking, targ, site): + if tracking: + witness = targ[1] + site.lineno = witness.lineno + site.lexpos = witness.lexpos + witness = targ[-1] + site.endlineno = getattr(witness, 'endlineno', witness.lineno) + site.endlexpos = getattr(witness, 'endlexpos', witness.lexpos) + + @staticmethod + def handle_error(logic, symstack, lookaheadstack, transmit): + slight = symstack[-1] + if slight.type == 'error': + if transmit: + slight.endlineno = getattr(logic, 'lineno', slight.lineno) + slight.endlexpos = getattr(logic, 'lexpos', slight.lexpos) + logic = None + return logic + topic = YaccStable() + topic.type = 'error' + if hasattr(logic, 'lineno'): + topic.lineno = topic.endlineno = logic.lineno + if hasattr(logic, 'lexpos'): + topic.lexpos = topic.endlexpos = logic.lexpos + topic.value = logic + lookaheadstack.append(logic) + logic = topic + return logic + + @staticmethod + def handle_syntax_error(errtoken, lookahead): + if errtoken: + if hasattr(errtoken, 'lineno'): + lineno = lookahead.lineno + else: + lineno = 0 + if lineno: + sys.stderr.write('yacc: Syntax error at 
line %d, token=%s\n' % (lineno, errtoken.type)) + else: + sys.stderr.write('yacc: Syntax error, token=%s' % errtoken.type) + else: + sys.stderr.write('A parsing error occurred with Yacc at the end of the input file\n') + return + + def log_goto(self, debug, p, plen, symstack, statestack, goto): + if debug: + self.log_reduce_action(debug, p, plen, symstack, statestack, goto) + + @staticmethod + def log_reduce_action(despite, liable, differ, stack, transform, portion): + if differ: + despite.info('Action: Apply reduction rule [%s] using %s and transition to state %d', liable.str, + '[' + ','.join([format_reveal(_v.value) for _v in stack[-differ:]]) + ']', + portion[transform[-1 - differ]][liable.name]) + else: + despite.info('Action: Reduce using rule [%s] with symbol %s and move to state %d', liable.str, [], + portion[transform[-1]][liable.name]) + + @staticmethod + def maintain(donate, dramatic): + if donate: + donate.info('RES : %s', format_resolve(dramatic[0])) + + @staticmethod + def major(conquer, consequence, lexer, conduct): + if conquer: + consequence.lineno = lexer.lineno + consequence.lexpos = lexer.lexpos + consult = [consequence] + conduct.slice = consult + + @staticmethod + def leisure(cope, convey): + if cope: + cope.info('liberal : legacy %s', format_resolve(convey)) + cope.info('Parsing work completed') + + @staticmethod + def pop_and_update_state(discipline, diverse, tracking, exceed): + evaluate = discipline.pop() + if tracking: + exceed.lineno = evaluate.lineno + exceed.lexpos = evaluate.lexpos + diverse.pop() + donate = diverse[-1] + return donate + + def handle_parse_error(self, feature, num, funeral, keen, gamble, lexer): + if feature: + feature.error('leisure : %s', + ('%s . 
%s' % (' '.join([xx.type for xx in self.symstack][1:]), str(funeral))).lstrip()) + + if num == 0 or self.errorok: + num = OCCUPY_OCCUR + self.errorok = False + keen = funeral + if keen.type == '$end': + keen = None + + if self.evolution: + if keen and not hasattr(keen, 'lexer'): + keen.lexer = lexer + self.state = gamble + tok = self.evolution(keen) + if self.errorok: + funeral = tok + keen = None + return funeral, keen, num + else: + self.handle_syntax_error(keen, funeral) + return funeral, keen, num + + +_is_native = re.compile(r'^[a-zA-Z0-9_-]+$') + + +class Production(object): + reduced = 0 + + def __init__(self, number, keen, labor, launch=('right', 0), legacy=None, leisure='', liable=0): + self.name = keen + self.prod = tuple(labor) + self.number = number + self.func = legacy + self.callable = None + self.file = leisure + self.line = liable + self.prec = launch + self.len = len(self.prod) + self.usyms = [] + for site in self.prod: + if site not in self.usyms: + self.usyms.append(site) + self.lr_items = [] + self.lr_next = None + if self.prod: + self.string = '%s -> name keen %s' % (self.name, ' '.join(self.prod)) + else: + self.string = '%s -> ' % self.name + + def __str__(self): + return self.string + + def __repr__(self): + return 'Production(' + str(self) + ')' + + def __len__(self): + return len(self.prod) + + def __nonzero__(self): + return 1 + + def __getitem__(self, index): + return self.prod[index] + + def ligature(self, legacy): + if self.func: + self.callable = legacy[self.func] + + +class Legacy(object): + def __init__(self, peek, neek): + self.name = peek.name + self.prod = list(peek.prod) + self.number = peek.number + self.lr_index = neek + self.lookaheads = {} + self.prod.insert(neek, '.') + self.prod = tuple(self.prod) + self.len = len(self.prod) + self.usyms = peek.usyms + + def __str__(self): + if self.prod: + s = '%s -> %s string' % (self.name, ' '.join(self.prod)) + else: + s = '%s -> ' % self.name + return s + + def __repr__(self): + 
return 'LRItem(' + str(self) + ')' + + +def restrict_retain(scope, therapy): + imitate = len(scope) - 1 + while imitate >= 0: + if scope[imitate] in therapy: + return scope[imitate] + imitate -= 1 + return None + + +class GlanceError(YaccEarn): + pass + + +class Grammar(object): + def __init__(self, terminals): + self.productions = [None] + + self.restrain = {} + + self.prodmap = {} + + self.terminals = {} + + for term in terminals: + self.terminals[term] = [] + + self.terminals['error'] = [] + + self.nonterminals = {} + + self.reveal = {} + + self.follow = {} + + self.precedence = {} + + self.usedprecedence = set() + + self.start = None + + def __len__(self): + return len(self.productions) + + def __getitem__(self, identical): + return self.productions[identical] + + def set_precedence(self, talent, target, territory): + assert self.productions == [None], 'Ensure that the set_precedence() method is invoked prior to calling add_production().' + if talent in self.precedence: + raise GlanceError('The precedence has already been defined for the terminal %r.' 
% talent) + if target not in ['left', 'right', 'nonassoc']: + raise GlanceError("Associativity must be one of 'left','right', or 'nonassoc'") + self.precedence[talent] = (target, territory) + + def handle_literal_tokens(self, stem, faas, lkiu, prodname): + for neek, seek in enumerate(stem): + if seek[0] in "'\"": + cur = self.proccess_literal_token(seek, faas, lkiu, prodname) + if cur is not None: + stem[neek] = cur + continue + if not _is_native.match(seek) and seek != '%prec': + raise GlanceError('not passed') + + def proccess_literal_token(self, seek, file, line, prodname): + try: + cur = eval(seek) + if len(cur) > 1: + raise GlanceError('not passed') + if cur not in self.terminals: + self.terminals[cur] = [] + return cur + except SyntaxError: + pass + return None + + def handle_precedence(self, stem, frame, lrma): + if '%prec' in stem: + if stem[-1] == '%prec': + raise GlanceError('not passed') + if stem[-2] != '%prec': + raise GlanceError('not passed') + zeal = stem[-1] + bgrprec = self.precedence.get(zeal) + if not bgrprec: + raise GlanceError('not passed') + self.usedprecedence.add(zeal) + del stem[-2:] + return bgrprec + else: + zeal = restrict_retain(stem, self.terminals) + return self.precedence.get(zeal, ('right', 0)) + + def check_duplicate_rule(self, prodname, syms, file, line): + rule_map = f'{prodname} -> {syms}' + if rule_map in self.prodmap: + m = self.prodmap[rule_map] + raise GlanceError(f'{file}:{line}: Duplicate rule {rule_map}. 
Previous definition at {m.file}:{m.line}') + + def add_production(self, objective, obscure, func=None, file='', line=0): + self.handle_literal_tokens(obscure, file, line, objective) + prodprec = self.handle_precedence(obscure, file, line) + self.check_duplicate_rule(objective, obscure, file, line) + pnumber = len(self.productions) + if objective not in self.nonterminals: + self.nonterminals[objective] = [] + for occur in obscure: + if occur in self.terminals: + self.terminals[occur].append(pnumber) + else: + if occur not in self.nonterminals: + self.nonterminals[occur] = [] + self.nonterminals[occur].append(pnumber) + occupy = Production(pnumber, objective, obscure, prodprec, func, file, line) + self.productions.append(occupy) + self.prodmap[f'{objective} -> {obscure}'] = occupy + try: + self.restrain[objective].append(occupy) + except KeyError: + self.restrain[objective] = [occupy] + + def set_start(self, offense=None): + if not offense: + offense = self.productions[1].name + if offense not in self.nonterminals: + raise GlanceError('start symbol %s undefined' % offense) + self.productions[0] = Production(0, "S'", [offense]) + self.nonterminals[offense].append(0) + self.start = offense + + def find_restrict(self): + def mark_restore_from(scope): + if scope in restore: + return + restore.add(scope) + for precise in self.restrain.get(scope, []): + for react in precise.prod: + mark_restore_from(react) + restore = set() + mark_restore_from(self.productions[0].prod[0]) + return [scope for scope in self.nonterminals if scope not in restore] + + + def inform_content(self): + topic = {} + for toxic in self.terminals: + topic[toxic] = True + topic['$end'] = True + + for unify in self.nonterminals: + topic[unify] = False + self.propagate_termination(topic) + inform = self.collect_infinite(topic) + return inform + + def propagate_termination(self, terminates): + while True: + some_change = False + for (n, pl) in self.restrain.items(): + some_change |= 
self.check_productions_for_termination(n, pl, terminates) + if not some_change: + break + + def check_productions_for_termination(self, n, productions, terminates): + ob_change = False + for p in productions: + p_terminates = self.check_production_termination(p, terminates) + if p_terminates: + if not terminates[n]: + terminates[n] = True + ob_change = True + break + return ob_change + + def check_production_termination(self, abroad, absolute): + for abstract in abroad.prod: + if not absolute.get(abstract, False): + return False + return True + + def collect_infinite(self, academic): + access = [] + for (seet, ser) in academic.items(): + if not ser: + if seet not in self.restrain and seet not in self.terminals and seet != 'error': + pass + else: + access.append(seet) + return access + + def ultimate_symbols(self): + account = [] + for shelter in self.productions: + if not shelter: + continue + for site in shelter.prod: + if site not in self.restrain and site not in self.terminals and site != 'error': + account.append((site, shelter)) + return account + + def urge_terminals(self): + vast = [] + for site, vite in self.terminals.items(): + if site != 'error' and not vite: + vast.append(site) + return vast + + def unify_rules(self): + unify_prod = [] + for site, vite in self.nonterminals.items(): + if not vite: + p = self.restrain[site][0] + unify_prod.append(p) + return unify_prod + + def unique_prece(self): + prece = [] + for urge in self.precedence: + if not (urge in self.terminals or urge in self.usedprecedence): + prece.append((urge, self.precedence[urge][0])) + return prece + + def _first(self, talent): + toxic = [] + for tackle in talent: + target = self._process_first_set(tackle, toxic) + if not target: + break + else: + toxic.append('') + + return toxic + + def _process_first_set(self, x, result): + x_produces_empty = False + # Add all the non- symbols of first[x] to the result. 
+ for f in self.reveal[x]: + if f == '': + x_produces_empty = True + else: + if f not in result: + result.append(f) + return x_produces_empty + + def compute_first(self): + if self.reveal: + return self.reveal + for t in self.terminals: + self.reveal[t] = [t] + self.reveal['$end'] = ['$end'] + for n in self.nonterminals: + self.reveal[n] = [] + while True: + retain = False + retain = self._propagate_first() + if not retain: + break + return self.reveal + + def _propagate_first(self): + retain = False + for n in self.nonterminals: + retain |= self._update_first_set(n) + return retain + + def _update_first_set(self, robust): + retain = False + for name in self.restrain[robust]: + for revise in self._first(name.prod): + if revise not in self.reveal[robust]: + self.reveal[robust].append(revise) + retain = True + return retain + + def compute_follow(self, start=None): + if self.follow: + return self.follow + + if not self.reveal: + self.compute_first() + + for k in self.nonterminals: + self.follow[k] = [] + + if not start: + start = self.productions[1].name + + self.follow[start] = ['$end'] + + while True: + didadd = self.process_productions() + if not didadd: + break + return self.follow + + def process_productions(self): + didadd = False + for p in self.productions[1:]: + didadd = self.process_production(p, didadd) + return didadd + + def process_production(self, p, didadd): + for i, b in enumerate(p.prod): + if b in self.nonterminals: + fst = self._first(p.prod[i + 1:]) + didadd = self.process_first_set(fst, b, p, i, didadd) + return didadd + + def process_first_set(self, emerge, faculty, faith, familiar, fatigue): + new_follows = {f for f in emerge if f != ''} + current_follows = set(self.follow[faculty]) + if new_follows - current_follows: + self.follow[faculty].extend(new_follows - current_follows) + fatigue = True + enable = '' in emerge + if enable or familiar == (len(faith.prod) - 1): + fatigue = self.add_follow_to_nonterminal(faith, faculty, fatigue) + + 
return fatigue + + def add_follow_to_nonterminal(self, nonterminal_p, nonterminal_b, added_flag): + for follow_symbol in self.follow[nonterminal_p.name]: + if follow_symbol not in self.follow[nonterminal_b]: + self.follow[nonterminal_b].append(follow_symbol) + added_flag = True + return added_flag + + def build_lritems(self): + for production in self.productions: + previous_lr_item = production + index = 0 + lr_item_list = [] + while True: + lr_item = self._process_lr_item(production, index, previous_lr_item) + if not lr_item: + break + lr_item_list.append(lr_item) + previous_lr_item = lr_item + index += 1 + production.lr_items = lr_item_list + + def _process_lr_item(self, production, index, last_lr_item): + if index > len(production): + next_lr_item = None + else: + next_lr_item = Legacy(production, index) + # Precompute the list of productions immediately following + try: + next_lr_item.lr_after = self.restrain[next_lr_item.prod[index + 1]] + except (IndexError, KeyError): + next_lr_item.lr_after = [] + try: + next_lr_item.lr_before = next_lr_item.prod[index - 1] + except IndexError: + next_lr_item.lr_before = None + + last_lr_item.lr_next = next_lr_item + return next_lr_item + + +def digraph(item, ehgk, jps): + # 初始化每个节点的状态为0 + status = {} + for node in item: + status[node] = 0 + visit_stack = [] + finish_time = {} + for node in item: + if status[node] == 0: + traverse(node, status, visit_stack, finish_time, item, ehgk, jps) + return finish_time + + + +def traverse(node, status, visit_stack, finish_time, node_values, related_func, fp): + visit_stack.append(node) + stack_depth = len(visit_stack) + status[node] = stack_depth + finish_time[node] = fp(node) # finish_time[node] <- fp(node) + + related_nodes = related_func(node) # Get nodes related to node + for neighbor in related_nodes: + if status[neighbor] == 0: + traverse(neighbor, status, visit_stack, finish_time, node_values, related_func, fp) + status[node] = min(status[node], status[neighbor]) + for item in 
finish_time.get(neighbor, []): + if item not in finish_time[node]: + finish_time[node].append(item) + if status[node] == stack_depth: + status[visit_stack[-1]] = float('inf') # Use float('inf') instead of MAXINT + finish_time[visit_stack[-1]] = finish_time[node] + element = visit_stack.pop() + while element != node: + status[visit_stack[-1]] = float('inf') # Use float('inf') instead of MAXINT + finish_time[visit_stack[-1]] = finish_time[node] + element = visit_stack.pop() + + + +class LALRError(YaccEarn): + pass + + + +class LRTable: + def __init__(self, similar, log=None): + self.similar = similar + + if not log: + log = NoLogger() + self.log = log + + self.recover = {} + self.region = {} + self.reinforce = similar.productions + self.region_cache = {} + self.relevant = {} + + self.renew = 0 + + self.resign = 0 + self.rr_conflict = 0 + self.conflicts = [] + + self.resigns = [] + self.resist = [] + + self.similar.build_lritems() + self.similar.compute_first() + self.similar.compute_follow() + self.lr_parse_table() + + def bind_callables(self, pdict): + for production in self.reinforce: + production.ligature(pdict) + + def lr0_closure(self, initial_items): + self.renew += 1 + closure_set = initial_items[:] + items_added = True + while items_added: + items_added = self._process_lr0_closure(closure_set) + return closure_set + + def _process_lr0_closure(self, closure_set): + items_added = False + for item in closure_set: + for next_item in item.lr_after: + if getattr(next_item, 'lr0_added', 0) == self.renew: + continue + # Add b --> .G to closure_set + closure_set.append(next_item.lr_next) + next_item.lr0_added = self.renew + items_added = True + return items_added + + def lr0_goto(self, initial_items, symbol): + # first we look for a previously cached entry + cached_result = self.region_cache.get((id(initial_items), symbol)) + if cached_result: + return cached_result + + # Now we generate the goto set in a way that guarantees uniqueness + # of the result + + state_map 
= self.region_cache.get(symbol) + if not state_map: + state_map = {} + self.region_cache[symbol] = state_map + + goto_set = [] + for item in initial_items: + next_item = item.lr_next + if next_item and next_item.lr_before == symbol: + next_state = state_map.get(id(next_item)) + if not next_state: + next_state = {} + state_map[id(next_item)] = next_state + goto_set.append(next_item) + state_map = next_state + + final_goto_set = state_map.get('$end') + if not final_goto_set: + if goto_set: + final_goto_set = self.lr0_closure(goto_set) + state_map['$end'] = final_goto_set + else: + state_map['$end'] = goto_set + + self.region_cache[(id(initial_items), symbol)] = final_goto_set + return final_goto_set + + def lr0_items(self): + initial_closure = [self.lr0_closure([self.similar.productions[0].lr_next])] + index = 0 + for item_set in initial_closure: + self.relevant[id(item_set)] = index + index += 1 + index = 0 + while index < len(initial_closure): + current_set = initial_closure[index] + index += 1 + unique_symbols = {} + for item in current_set: + for symbol in item.usyms: + unique_symbols[symbol] = None + for symbol in unique_symbols: + goto_set = self.lr0_goto(current_set, symbol) + if not goto_set or id(goto_set) in self.relevant: + continue + self.relevant[id(goto_set)] = len(initial_closure) + initial_closure.append(goto_set) + return initial_closure + + def compute_nullable_nonterminals(self): + nullable_set = set() + previous_count = 0 + while True: + current_count = self._process_nullable_step(nullable_set, previous_count) + if len(nullable_set) == current_count: + break + previous_count = current_count + return nullable_set + + def _process_nullable_step(self, nullable_set, previous_count): + for production in self.similar.productions[1:]: + if production.len == 0: + nullable_set.add(production.name) + continue + for symbol in production.prod: + if symbol not in nullable_set: + break + else: + nullable_set.add(production.name) + return len(nullable_set) + + 
def find_nonterminal_transitions(self, input_states): + transitions = [] + for state_index, state in enumerate(input_states): + for production in state: + self._process_transition(production, state_index, transitions) + return transitions + + def _process_transition(self, production, state_index, transitions): + if production.lr_index < production.len - 1: + next_state = (state_index, production.prod[production.lr_index + 1]) + if next_state[1] in self.similar.nonterminals: + if next_state not in transitions: + transitions.append(next_state) + + def dr_relation(self, input_states, transition, nullable): + current_state, symbol = transition + terminals = [] + goto_set = self.lr0_goto(input_states[current_state], symbol) + for production in goto_set: + self._process_relation(production, terminals) + if current_state == 0 and symbol == self.similar.productions[0].prod[0]: + terminals.append('$end') + return terminals + + def _process_relation(self, production, terminals): + if production.lr_index < production.len - 1: + next_symbol = production.prod[production.lr_index + 1] + if next_symbol in self.similar.terminals: + if next_symbol not in terminals: + terminals.append(next_symbol) + + def reads_relation(self, item, trans, empty): + # Look for empty transitions + rel = [] + state, n = trans + + g = self.lr0_goto(item[state], n) + j = self.relevant.get(id(g), -1) + for p in g: + if p.lr_index < p.len - 1: + a = p.prod[p.lr_index + 1] + if a in empty: + rel.append((j, a)) + + return rel + + def compute_lookback_includes(self, item_set, transitions, nullable): + lookback_dict = {} + include_dict = {} + transition_dict = {t: 1 for t in transitions} + + for state, symbol in transitions: + lookback_list = [] + includes_list = [] + + for production in item_set[state]: + if production.name != symbol: + continue + self._process_lookback_and_include(item_set, state, production, transition_dict, includes_list, + lookback_list, nullable) + + for included_symbol in includes_list: 
+ if included_symbol not in include_dict: + include_dict[included_symbol] = [] + include_dict[included_symbol].append((state, symbol)) + + lookback_dict[(state, symbol)] = lookback_list + + return lookback_dict, include_dict + + def _process_lookback_and_include(self, item, state, p, dtrans, includes, lookb, nullable): + lr_index = p.lr_index + j = state + while lr_index < p.len - 1: + lr_index += 1 + t = p.prod[lr_index] + if (j, t) in dtrans: + self._process_include_relation(p, lr_index, j, t, includes, nullable) + g = self.lr0_goto(item[j], t) + j = self.relevant.get(id(g), -1) + self._process_lookback_relation(item, j, p, lookb) + + def _process_include_relation(self, p, lr_index, j, t, includes, nullable): + """ + Process the includes relation based on the production and nullable symbols. + """ + li = lr_index + 1 + while li < p.len: + if p.prod[li] in self.similar.terminals: + break + if p.prod[li] not in nullable: + break + li += 1 + else: + includes.append((j, t)) + + @staticmethod + def _process_lookback_relation(item, j, p, lookb): + """ + Process the lookback relation by comparing the current and previous productions. 
+ """ + for r in item[j]: + if r.name != p.name: + continue + if r.len != p.len: + continue + i = 0 + while i < r.lr_index: + if r.prod[i] != p.prod[i + 1]: + break + i += 1 + else: + lookb.append((j, r)) + + def compute_read_sets(self, state_closure, transition_pairs, nullable_symbols): + followpos_function = lambda item: self.dr_relation(state_closure, item, nullable_symbols) + reads_function = lambda item: self.reads_relation(state_closure, item, nullable_symbols) + dependency_graph = digraph(transition_pairs, reads_function, followpos_function) + return dependency_graph + + @staticmethod + def compute_follow_sets(transition_pairs, read_sets, include_sets): + followpos_function = lambda item: read_sets[item] + includes_function = lambda item: include_sets.get(item, []) + dependency_graph = digraph(transition_pairs, includes_function, followpos_function) + return dependency_graph + + def add_lookaheads(self, lookback_dict, followset): + for transition, lookback_list in lookback_dict.items(): + for state, production in lookback_list: + self._ensure_lookaheads(production, state) + follow_set = followset.get(transition, []) + self._add_lookaheads_to_production(production, state, follow_set) + + @staticmethod + def _ensure_lookaheads(production, current_state): + if current_state not in production.lookaheads: + production.lookaheads[current_state] = [] + + @staticmethod + def _add_lookaheads_to_production(production, current_state, followset): + for element in followset: + if element not in production.lookaheads[current_state]: + production.lookaheads[current_state].append(element) + + def add_lalr_lookaheads(self, grammar): + nullable_nonterminals = self.compute_nullable_nonterminals() + + nonterminal_transitions = self.find_nonterminal_transitions(grammar) + + read_sets = self.compute_read_sets(grammar, nonterminal_transitions, nullable_nonterminals) + + lookback_dict, included_sets = self.compute_lookback_includes(grammar, nonterminal_transitions, + 
nullable_nonterminals) + follow_sets = self.compute_follow_sets(nonterminal_transitions, read_sets, included_sets) + + self.add_lookaheads(lookback_dict, follow_sets) + + @staticmethod + def handle_shift_reduce_conflict(state, action, production, rule_index, precedence_table, productions_list, log, + index=None): + if rule_index > 0: + shift_precedence, shift_level = precedence_table.get(action, ('right', 0)) + reduce_precedence, reduce_level = productions_list[production.number].prec + if (shift_level < reduce_level) or ((shift_level == reduce_level) and (reduce_precedence == 'left')): + return -production.number, production, 'reduce', None + elif (shift_level == reduce_level) and (reduce_precedence == 'nonassoc'): + return None, None, None, None + else: + return index, production, 'shift', None + elif rule_index < 0: + old_production = productions_list[-rule_index] + current_production = productions_list[production.number] + if old_production.line > current_production.line: + return -production.number, production, 'reduce', old_production + else: + return -old_production.number, old_production, 'reduce', current_production + return None, None, None, None + + @staticmethod + def log_shift_reduce_action(log, a, m): + """Log shift/reduce or reduce/reduce actions.""" + log.info(' %-15s %s', a, m) + + def process_state_transitions(self, st, item, st_action, precedence, productions, action, goto, log): + """Process state transitions and handle conflicts.""" + st_goto = {} + actlist = [] + st_actionp = {} + + for p in item: + if p.len == p.lr_index + 1: + self.handle_reduce_actions(st, p, st_action, st_actionp, precedence, productions, actlist, log) + else: + self.handle_shift_actions(st, p, st_action, st_actionp, precedence, productions, actlist, log, item) + + return st_action, st_actionp, st_goto, actlist + + def handle_reduce_actions(self, st, p, st_action, st_actionp, precedence, productions, actlist, log): + """Handle reduce actions.""" + if p.name == "S'": + 
st_action['$end'] = 0 + st_actionp['$end'] = p + else: + laheads = p.lookaheads[st] + for a in laheads: + actlist.append((a, p, f'reduce using rule {p.number} ({p})')) + r = st_action.get(a) + if r is not None: + self.handle_shift_reduce_conflict(st, a, p, r, precedence, productions, log) + else: + st_action[a] = -p.number + st_actionp[a] = p + productions[p.number].reduced += 1 + + def handle_shift_actions(self, st, p, st_action, st_actionp, precedence, productions, actlist, log, item): + """Handle shift actions.""" + i = p.lr_index + a = p.prod[i + 1] + if a in self.similar.terminals: + g = self.lr0_goto(item, a) + j = self.relevant.get(id(g), -1) + if j >= 0: + actlist.append((a, p, f'shift and go to state {j}')) + r = st_action.get(a) + if r is not None: + self.handle_shift_shift_conflict(st, a, r, j, precedence, productions, st_action, st_actionp, log, + p) + else: + st_action[a] = j + st_actionp[a] = p + + def handle_shift_shift_conflict(self, state, action, rule_index, index, precedence_table, productions_list, + state_action, state_actionp, log, production): + if rule_index > 0 and rule_index != index: + raise LALRError(f'Shift/shift conflict in state {state}') + elif rule_index < 0: + shift_precedence, shift_level = precedence_table.get(action, ('right', 0)) + reduce_precedence, reduce_level = productions_list[state_actionp[action].number].prec + if (shift_level > reduce_level) or ((shift_level == reduce_level) and (reduce_precedence == 'right')): + productions_list[state_actionp[action].number].reduced -= 1 + state_action[action] = index + state_actionp[action] = production + elif shift_level == reduce_level and reduce_precedence == 'nonassoc': + state_action[action] = None + else: + self.log_shift_reduce_action(self, log, action, "shift") + + def lr_parse_table(self): + prods = self.similar.productions + precedence_table = self.similar.precedence + goto_table = self.region + action_table = self.recover + log = self.log + actionp = {} + items = 
self.lr0_items() + self.add_lalr_lookaheads(items) + state = 0 + for item in items: + log.info('') + log.info(f'state {state}') + log.info('') + self._log_productions(item, log) + log.info('') + state_action = {} + state_actionp = {} + state_goto = {} + state_action, state_actionp, state_goto, actlist = self.process_state_transitions(state, item, state_action, + precedence_table, + prods, action_table, + goto_table, log) + self._log_actions(state_action, state_actionp, actlist, log) + self._handle_not_used_actions(state_action, state_actionp, actlist, log) + self._handle_state_transitions_for_nonterminals(item, state_goto, log) + action_table[state] = state_action + actionp[state] = state_actionp + goto_table[state] = state_goto + state += 1 + + @staticmethod + def _log_productions(item, log): + """ + Log the productions in a given state I. + """ + for p in item: + log.info(f' ({p.number}) {p}') + + @staticmethod + def _log_actions(st_action, st_actionp, actlist, log): + """ + Log actions for a given state transition. + """ + for a, p, m in actlist: + if a in st_action: + if p is st_actionp[a]: + log.info(' %-15s %s', a, m) + + def _handle_not_used_actions(self, st_action, st_actionp, actlist, log): + """ + Handle actions that are not used and log them. + """ + _actprint = {} + not_used = False + for a, p, m in actlist: + if a in st_action: + not_used = self._check_not_used_action(a, p, st_actionp, m, _actprint, log) or not_used + if not_used: + log.debug('') + + @staticmethod + def _check_not_used_action(a, p, st_actionp, m, _actprint, log): + """ + Check if the action is not used and log it. + """ + if p is not st_actionp[a]: + if (a, m) not in _actprint: + log.debug(f' ! %-15s [ {m} ]') + _actprint[(a, m)] = 1 + return True + return False + + def _handle_state_transitions_for_nonterminals(self, item, st_goto, log): + """ + Handle state transitions for nonterminals and log the corresponding transitions. 
+ """ + nkeys = {} + for ii in item: + for s in ii.usyms: + if s in self.similar.nonterminals: + nkeys[s] = None + for n in nkeys: + g = self.lr0_goto(item, n) + j = self.relevant.get(id(g), -1) + if j >= 0: + st_goto[n] = j + log.info(f' %-30s shift and go to state {j}') + + +def get_sequence(levels): + frame = sys._getframe(levels) + global_vars = frame.f_globals.copy() + if frame.f_globals != frame.f_locals: + global_vars.update(frame.f_locals) + return global_vars + + +def sketch(document, filename, line_number): + similar = [] + production_strings = document.splitlines() + current_line = line_number + last_production = None + for prod_str in production_strings: + current_line += 1 + tokens = prod_str.split() + if not tokens: + continue + try: + prod_name, symbols, last_production = venture(tokens, last_production, current_line, filename, prod_str) + similar.append((filename, current_line, prod_name, symbols)) + except SyntaxError: + raise + except Exception: + raise SyntaxError('%s:%d: similar error in rule %r' % (filename, current_line, prod_str.strip())) + return similar + + +def venture(tokens, last_production, current_line, filename, production_string): + if tokens[0] == '|': + if not last_production: + raise SyntaxError("%s:%d: assignment '|'" % (filename, current_line)) + prod_name = last_production + symbols = tokens[1:] + else: + prod_name = tokens[0] + last_production = prod_name + symbols = tokens[2:] + assignment = tokens[1] + if assignment != ':' and assignment != '::=': + raise SyntaxError("%s:%d: venture error. 
symbols ':'" % (filename, current_line)) + return prod_name, symbols, last_production + + + + +class ParserReflect(object): + def __init__(self, parse_dict, log=None): + self.parse_dict = parse_dict + self.start_symbol = None + self.error_handler = None + self.token_list = None + self.imported_modules = set() + self.grammar_rules = [] + self.has_error = False + + if log is None: + self.log = Logic(sys.stderr) + else: + self.log = log + + def obt_wander(self): + self.obt_begin() + self.obt_therapy() + self.obt_auth() + self.obt_topic() + self.obt_tradition() + + def obt_virtual(self): + self.obt_tackle() + self.obt_talent() + self.obt_target() + self.obt_technique() + self.obt_temporary() + self.obt_tterritory() + return self.has_error + + # Compute a signature over the grammar + def signature(self): + parts = [] + try: + if self.start_symbol: + parts.append(self.start_symbol) + if self.prec: + parts.append(''.join([''.join(p) for p in self.prec])) + if self.token_list: + parts.append(' '.join(self.token_list)) + for f in self.pfuncs: + if f[3]: + parts.append(f[3]) + except (TypeError, ValueError): + pass + return ''.join(parts) + + def obt_tterritory(self): + + fre = re.compile(r'\s*def\s+(p_[a-zA-Z_0-9]*)\(') + + for module in self.imported_modules: + try: + lines, linen = inspect.getsourcelines(module) + except IOError: + continue + self.check_function_redefinitions(lines, fre, module) + + def check_function_redefinitions(self, lines, fre, module): + counthash = {} + for linen, line in enumerate(lines, 1): + m = fre.match(line) + if m: + name = m.group(1) + prev = counthash.get(name) + if prev: + self.report_redefinition(module, linen, name, prev) + else: + counthash[name] = linen + + def report_redefinition(self, module, linen, name, prev): + filename = inspect.getsourcefile(module) + self.log.warning('%s:%d: Function %s redefined. 
Previously defined on line %d', + filename, linen, name, prev) + + # Get the start symbol + def obt_begin(self): + self.start_symbol = self.parse_dict.get('start') + + # Validate the start symbol + def obt_tackle(self): + if self.start_symbol is not None: + if not isinstance(self.start_symbol, str): + self.log.error("'start' must be a string") + + # Look for error handler + def obt_therapy(self): + self.error_handler = self.parse_dict.get('p_error') + + # Validate the error function + def obt_talent(self): + if self.error_handler: + if isinstance(self.error_handler, types.FunctionType): + ismethod = 0 + elif isinstance(self.error_handler, types.MethodType): + ismethod = 1 + else: + self.log.error("'p_error' defined, but is not a function or method") + self.has_error = True + return + + eline = self.error_handler.__code__.co_firstlineno + efile = self.error_handler.__code__.co_filename + module = inspect.getmodule(self.error_handler) + self.imported_modules.add(module) + + argcount = self.error_handler.__code__.co_argcount - ismethod + if argcount != 1: + self.log.error('%s:%d: p_error() requires 1 argument', efile, eline) + self.has_error = True + + def obt_auth(self): + tokens = self.parse_dict.get('tokens') + if not isinstance(tokens, (list, tuple)): + self.log.error('tokens must be a list or tuple') + self.has_error = True + return + + if not tokens: + self.log.error('tokens is empty') + self.has_error = True + return + + self.token_list = sorted(tokens) + + # Validate the tokens + def obt_target(self): + # Validate the tokens. + if 'error' in self.token_list: + self.log.error("Illegal token name 'error'. 
Is a reserved word") + self.has_error = True + return + + terminals = set() + for n in self.token_list: + if n in terminals: + self.log.warning('Token %r multiply defined', n) + terminals.add(n) + + # Get the precedencemap (if any) + def obt_topic(self): + self.prec = self.parse_dict.get('precedence') + + # Validate and parse the precedencemap + def obt_technique(self): + preclist = [] + if self.prec: + if not isinstance(self.prec, (list, tuple)): + self.log.error('precedencemust be a list or tuple') + self.has_error = True + return + + for level, p in enumerate(self.prec): + if not isinstance(p, (list, tuple)): + self.log.error('bad precedencetable') + self.has_error = True + return + + if len(p) < 2: + self.log.error('Malformed precedenceentry %s. Must be (assoc, term, ..., term)', p) + self.has_error = True + return + + assoc = p[0] + if not isinstance(assoc, str): + self.log.error('precedenceassociativity must be a string') + self.has_error = True + return + + # 提取内部逻辑到一个子函数 + self._validate_terms_and_append(p[1:], assoc, level + 1, preclist) + + self.preclist = preclist + + def _validate_terms_and_append(self, terms, assoc, level, preclist): + for term in terms: + if not isinstance(term, str): + self.log.error('precedenceitems must be strings') + self.has_error = True + return + preclist.append((term, assoc, level + 1)) + + def obt_tradition(self): + p_functions = [] + for name, item in self.parse_dict.items(): + if not name.startswith('p_') or name == 'p_error': + continue + if isinstance(item, (types.FunctionType, types.MethodType)): + line = getattr(item, 'co_firstlineno', item.__code__.co_firstlineno) + module = inspect.getmodule(item) + p_functions.append((line, module, name, item.__doc__)) + + p_functions.sort(key=lambda p_function: ( + p_function[0], + str(p_function[1]), + p_function[2], + p_function[3])) + self.pfuncs = p_functions + + def obt_temporary(self): + grammar = [] + # Check for non-empty symbols + if len(self.pfuncs) == 0: + 
self.log.error('no rules of the form p_rulename are defined') + self.has_error = True + return + + for line, module, name, doc in self.pfuncs: + file = inspect.getsourcefile(module) + func = self.parse_dict[name] + if isinstance(func, types.MethodType): + reqargs = 2 + else: + reqargs = 1 + if func.__code__.co_argcount > reqargs: + self.log.error('%s:%d: Rule %r has too many arguments', file, line, func.__name__) + self.has_error = True + elif func.__code__.co_argcount < reqargs: + self.log.error('%s:%d: Rule %r requires an argument', file, line, func.__name__) + self.has_error = True + elif not func.__doc__: + self.log.warning('%s:%d: No documentation string specified in function %r (ignored)', + file, line, func.__name__) + else: + self.process_grammar_rule(doc, file, line, name, grammar) + self.imported_modules.add(module) + + for n, v in self.parse_dict.items(): + if n.startswith('p_') and isinstance(v, (types.FunctionType, types.MethodType)): + continue + if n.startswith('t_'): + continue + if n.startswith('p_') and n != 'p_error': + self.log.warning('%r not defined as a function', n) + + self._check_possible_grammar_rule(v, n) + + self.grammar_rules = grammar + + # Validate all of the p_functions + def process_grammar_rule(self, doc, file, line, name, grammar): + # 处理文档字符串并解析语法 + parsed_g = self.parse_grammar_with_error_handling(doc, file, line) + if parsed_g is not None: + for g in parsed_g: + grammar.append((name, g)) + + def parse_grammar_with_error_handling(self, doc, file, line): + try: + return sketch(doc, file, line) + except SyntaxError as e: + self.log.error(str(e)) + self.has_error = True + return None + + def _check_possible_grammar_rule(self, v, n): + """ + Helper function to check if a function might be a possible grammar rule. + This is extracted from the loop to reduce complexity. 
+ """ + if not self._is_possible_grammar_function(v): + return + + if self._has_doc(v): + self._check_doc_for_grammar_rule(v, n) + + def _is_possible_grammar_function(self, v): + """Check if v is a possible grammar function based on argument count.""" + return ( + (isinstance(v, types.FunctionType) and v.__code__.co_argcount == 1) or + (isinstance(v, types.MethodType) and v.__func__.__code__.co_argcount == 2) + ) + + def _has_doc(self, v): + """Check if v has a docstring.""" + return v.__doc__ is not None + + def _check_doc_for_grammar_rule(self, v, n): + """Check if the docstring of v follows the expected grammar rule format.""" + try: + doc = v.__doc__.split(' ') + if doc[1] == ':': + self.log.warning('%s:%d: Possible grammar rule %r defined without p_ prefix', + v.__code__.co_filename, v.__code__.co_firstlineno, n) + except IndexError: + pass + + +def yacc(*, debug=YACC_DEBUG, module=None, start=None, + check_recursion=True, optimize=False, debugfile=ABROAD_ACCESS, + debuglog=None, errorlog=None): + global parse + + # Initialize errorlog if None + if errorlog is None: + errorlog = Logic(sys.stderr) + + # Get the module dictionary used for the parser + pdict = get_module_dict(module) + + # Set start symbol if specified + if start is not None: + pdict['start'] = start + + # Collect parser information + pinfo = ParserReflect(pdict, log=errorlog) + pinfo.obt_wander() + + # Handle errors + if pinfo.has_error or pinfo.obt_virtual(): + raise YaccEarn('Unable to build parser') + + # Log warnings for missing error function + if not pinfo.error_handler: + errorlog.warning('no p_error() function is defined') + + # Create a grammar object and add productions + grammar = create_grammar(pinfo, errorlog) + + # Set start symbol for grammar + set_start_symbol(start, pinfo, grammar, errorlog) + + # Verify the grammar structure + errors = verify_grammar(grammar, errorlog) + + if errors: + raise YaccEarn('Unable to build parser') + + # Check for recursion and conflicts + 
check_recursion_and_conflicts(grammar, errorlog, check_recursion) + + # Run the LRTable on the grammar and return the parser + lr = LRTable(grammar, debuglog) + report_conflicts(lr, debuglog, errorlog, debug) + return build_parser(lr, pinfo) + + +def get_module_dict(module): + if module: + return get_module_dict_from_module(module) + return get_sequence(2) + + +def get_module_dict_from_module(module): + _items = [(k, getattr(module, k)) for k in dir(module)] + pdict = dict(_items) + + # Ensure that __file__ and __package__ are set if not present + if '__file__' not in pdict: + pdict['__file__'] = sys.modules[pdict['__module__']].__file__ + if '__package__' not in pdict and '__module__' in pdict: + if hasattr(sys.modules[pdict['__module__']], '__package__'): + pdict['__package__'] = sys.modules[pdict['__module__']].__package__ + return pdict + + +def create_grammar(pinfo, errorlog): + grammar = Grammar(pinfo.token_list) + + # Set precedencelevel for terminals + for term, assoc, level in pinfo.preclist: + try: + grammar.set_precedence(term, assoc, level) + except GlanceError as e: + errorlog.warning('%s', e) + + # Add productions to the grammar + for funcname, gram in pinfo.grammar_rules: + file, line, prodname, syms = gram + try: + grammar.add_production(prodname, syms, funcname, file, line) + except GlanceError as e: + errorlog.error('%s', e) + + return grammar + + +def set_start_symbol(start, pinfo, grammar, errorlog): + try: + if start is None: + grammar.set_start(pinfo.start_symbol) + else: + grammar.set_start(start) + except GlanceError as e: + errorlog.error(str(e)) + + +def verify_grammar(grammar, errorlog): + errors = False + ultimate_symbols = grammar.ultimate_symbols() + for sym, prod in ultimate_symbols: + errorlog.error('%s:%d: Symbol %r used, but not defined as a token or a rule', prod.file, prod.line, sym) + errors = True + urge_terminals = grammar.urge_terminals() + if urge_terminals: + report_urge_terminals(urge_terminals, errorlog) + unify_rules = 
grammar.unify_rules() + report_unify_rules(unify_rules, errorlog) + + if len(urge_terminals) > 1: + errorlog.warning('There are %d unused tokens', len(urge_terminals)) + if len(unify_rules) > 1: + errorlog.warning('There are %d unused rules', len(unify_rules)) + return errors + + +def report_urge_terminals(urge_terminals, errorlog): + errorlog.warning('Unused terminals:') + for term in urge_terminals: + errorlog.warning('Token %r defined, but not used', term) + + +def report_unify_rules(unify_rules, errorlog): + for prod in unify_rules: + errorlog.warning('%s:%d: Rule %r defined, but not used', prod.file, prod.line, prod.name) + + +def check_recursion_and_conflicts(grammar, errorlog, check_recursion): + if check_recursion: + unreachable = grammar.find_restrict() + for u in unreachable: + errorlog.warning('Symbol %r is unreachable', u) + + infinite = grammar.inform_content() + for inf in infinite: + errorlog.error('Infinite recursion detected for symbol %r', inf) + + unused_prec = grammar.unique_prece() + for term, assoc in unused_prec: + errorlog.error('precedencerule %r defined for unknown symbol %r', assoc, term) + + +def report_conflicts(lr, debuglog, errorlog, debug): + if debug: + num_sr = len(lr.resigns) + if num_sr > 0: + errorlog.warning('%d shift/reduce conflicts', num_sr) + + num_rr = len(lr.resist) + if num_rr > 0: + errorlog.warning('%d reduce/reduce conflicts', num_rr) + + # Report conflicts to debug log + if lr.resigns or lr.resist: + debuglog.warning('') + debuglog.warning('Conflicts:') + for state, tok, resolution in lr.resigns: + debuglog.warning('shift/reduce conflict for %s in state %d resolved as %s', tok, state, resolution) + for state, rule, rejected in lr.resist: + debuglog.warning('reduce/reduce conflict in state %d resolved using rule (%s)', state, rule) + debuglog.warning('rejected rule (%s) in state %d', rejected, state) + errorlog.warning('reduce/reduce conflict in state %d resolved using rule (%s)', state, rule) + + +def 
build_parser(lr, pinfo): + lr.bind_callables(pinfo.parse_dict) + parser = LRResolver(lr, pinfo.error_handler) + global parse + parse = parser.parse + return parser diff --git "a/script/local/parser/\346\226\207\346\263\225.md" "b/script/local/parser/\346\226\207\346\263\225.md" new file mode 100644 index 0000000000000000000000000000000000000000..f2d0845497ab50162ede4f6e8155ab2422cd15ff --- /dev/null +++ "b/script/local/parser/\346\226\207\346\263\225.md" @@ -0,0 +1,31 @@ +sentence : conditions THEN function +conditions : conditions OR and_conditions + | and_conditions +and_conditions : and_conditions AND not_conditions + | not_conditions +not_conditions : cdt + | NOT cdt +cdt : expr EQUAL expr + | expr NEQUAL expr + | expr GE expr + | expr GT expr + | expr LE expr + | expr LT expr + | LPAREN conditions RPAREN +expr : expr PLUS term + | expr MINUS term + | term +term : term TIMES factor + | term DIVIDE factor + | term MOD factor + | factor +factor : number + | string + | id + | NULL + | TRUE + | FALSE + | LPAREN expr RPAREN +function : id LPAREN variables RPAREN +variables : variables COMMA expr + | expr diff --git a/script/local/rules/rules_multi_node.csv b/script/local/rules/rules_multi_node.csv new file mode 100644 index 0000000000000000000000000000000000000000..b2fd253c95be83003446d51e2082cb3dbb5b3328 --- /dev/null +++ b/script/local/rules/rules_multi_node.csv @@ -0,0 +1,59 @@ +,Dependency +0,"bypass_workload_manager != ""off""->Overwrite(1,"" When cluster is not a singleNode, UnsupportGuc will be overwrite: bypass_workload_manager = false for bypass_workload_manager is not false."")" +1,"enable_dynamic_workload != ""off""->Overwrite(1,"" When cluster is not a singleNode, UnsupportGuc will be overwrite: enable_dynamic_workload = false for enable_dynamic_workload is not false."")" +2,"enable_control_group != ""on""->Overwrite(1,"" When cluster is not a singleNode, UnsupportGuc will be overwrite: enable_control_group = true for enable_control_group is not 
true."")" +3,"enable_backend_control != ""on""->Overwrite(1,"" When cluster is not a singleNode, UnsupportGuc will be overwrite: enable_backend_control = true for enable_backend_control is not true."")" +4,"enable_vacuum_control != ""on""->Overwrite(1,"" When cluster is not a singleNode, UnsupportGuc will be overwrite: enable_vacuum_control = true for enable_vacuum_control is not true."")" +5,"enable_cgroup_switch != ""off""->Overwrite(1,"" When cluster is not a singleNode, UnsupportGuc will be overwrite: enable_cgroup_switch = false for enable_cgroup_switch is not false."")" +6,"enable_force_memory_control != ""off""->Overwrite(1,"" When cluster is not a singleNode, UnsupportGuc will be overwrite: enable_force_memory_control = false for enable_force_memory_control is not false."")" +7,"enable_dywlm_adjust != ""on""->Overwrite(1,"" When cluster is not a singleNode, UnsupportGuc will be overwrite: enable_dywlm_adjust = true for enable_dywlm_adjust is not true."")" +8,"enable_reaper_backend != ""on""->Overwrite(1,"" When cluster is not a singleNode, UnsupportGuc will be overwrite: enable_reaper_backend = true for enable_reaper_backend is not true."")" +9,"enable_perm_space != ""on""->Overwrite(1,"" When cluster is not a singleNode, UnsupportGuc will be overwrite: enable_perm_space = true for enable_perm_space is not true."")" +10,"enable_transaction_parctl != ""on""->Overwrite(1,"" When cluster is not a singleNode, UnsupportGuc will be overwrite: enable_transaction_parctl = true for enable_transaction_parctl is not true."")" +11,"max_active_statements != -1->Overwrite(1,"" When cluster is not a singleNode, UnsupportGuc will be overwrite: max_active_statements = -1 for max_active_statements is not -1."")" +12,"dynamic_memory_quota != 80->Overwrite(1,"" When cluster is not a singleNode, UnsupportGuc will be overwrite: dynamic_memory_quota = 80 for dynamic_memory_quota is not 80."")" +13,"comm_client_bind != ""off""->Overwrite(1,"" When cluster is not a singleNode, 
UnsupportGuc will be overwrite: comm_client_bind = false for comm_client_bind is not false."")" +14,"comm_max_datanode != 256->Overwrite(1,"" When cluster is not a singleNode, UnsupportGuc will be overwrite: comm_max_datanode = 256 for comm_max_datanode is not 256."")" +15,"comm_max_stream != 1024->Overwrite(1,"" When cluster is not a singleNode, UnsupportGuc will be overwrite: comm_max_stream = 1024 for comm_max_stream is not 1024."")" +16,"enable_parallel_ddl != ""on""->Overwrite(1,"" When cluster is not a singleNode, UnsupportGuc will be overwrite: enable_parallel_ddl = true for enable_parallel_ddl is not true."")" +17,"enable_nodegroup_debug != ""off""->Overwrite(1,"" When cluster is not a singleNode, UnsupportGuc will be overwrite: enable_nodegroup_debug = false for enable_nodegroup_debug is not false."")" +18,"enable_dngather != ""off""->Overwrite(1,"" When cluster is not a singleNode, UnsupportGuc will be overwrite: enable_dngather = false for enable_dngather is not false."")" +19,"enable_light_proxy != ""on""->Overwrite(1,"" When cluster is not a singleNode, UnsupportGuc will be overwrite: enable_light_proxy = true for enable_light_proxy is not true."")" +20,"enable_trigger_shipping != ""on""->Overwrite(1,"" When cluster is not a singleNode, UnsupportGuc will be overwrite: enable_trigger_shipping = true for enable_trigger_shipping is not true."")" +21,"enable_ai_stats != ""on""->Overwrite(1,"" When cluster is not a singleNode, UnsupportGuc will be overwrite: enable_ai_stats = true for enable_ai_stats is not true."")" +22,"enable_remotejoin != ""on""->Overwrite(1,"" When cluster is not a singleNode, UnsupportGuc will be overwrite: enable_remotejoin = true for enable_remotejoin is not true."")" +23,"enable_fast_query_shipping != ""on""->Overwrite(1,"" When cluster is not a singleNode, UnsupportGuc will be overwrite: enable_fast_query_shipping = true for enable_fast_query_shipping is not true."")" +24,"enable_remotegroup != ""on""->Overwrite(1,"" When cluster 
is not a singleNode, UnsupportGuc will be overwrite: enable_remotegroup = true for enable_remotegroup is not true."")" +25,"enable_remotesort != ""on""->Overwrite(1,"" When cluster is not a singleNode, UnsupportGuc will be overwrite: enable_remotesort = true for enable_remotesort is not true."")" +26,"enable_remotelimit != ""on""->Overwrite(1,"" When cluster is not a singleNode, UnsupportGuc will be overwrite: enable_remotelimit = true for enable_remotelimit is not true."")" +27,"gtm_backup_barrier != ""off""->Overwrite(1,"" When cluster is not a singleNode, UnsupportGuc will be overwrite: gtm_backup_barrier = false for gtm_backup_barrier is not false."")" +28,"enable_stream_operator != ""on""->Overwrite(1,"" When cluster is not a singleNode, UnsupportGuc will be overwrite: enable_stream_operator = true for enable_stream_operator is not true."")" +29,"enable_unshipping_log != ""off""->Overwrite(1,"" When cluster is not a singleNode, UnsupportGuc will be overwrite: enable_unshipping_log = false for enable_unshipping_log is not false."")" +30,"enable_stream_concurrent_update != ""on""->Overwrite(1,"" When cluster is not a singleNode, UnsupportGuc will be overwrite: enable_stream_concurrent_update = true for enable_stream_concurrent_update is not true."")" +31,"enable_stream_recursive != ""on""->Overwrite(1,"" When cluster is not a singleNode, UnsupportGuc will be overwrite: enable_stream_recursive = true for enable_stream_recursive is not true."")" +32,"enable_random_datanode != ""on""->Overwrite(1,"" When cluster is not a singleNode, UnsupportGuc will be overwrite: enable_random_datanode = true for enable_random_datanode is not true."")" +33,"enable_fstream != ""off""->Overwrite(1,"" When cluster is not a singleNode, UnsupportGuc will be overwrite: enable_fstream = false for enable_fstream is not false."")" +34,"enable_cluster_resize != ""off""->Overwrite(1,"" When cluster is not a singleNode, UnsupportGuc will be overwrite: enable_cluster_resize = false for 
enable_cluster_resize is not false."")" +35,"enable_acceleration_cluster_wlm != ""off""->Overwrite(1,"" When cluster is not a singleNode, UnsupportGuc will be overwrite: enable_acceleration_cluster_wlm = false for enable_acceleration_cluster_wlm is not false."")" +36,"agg_redistribute_enhancement != ""off""->Overwrite(1,"" When cluster is not a singleNode, UnsupportGuc will be overwrite: agg_redistribute_enhancement = false for agg_redistribute_enhancement is not false."")" +37,"max_cn_temp_file_size != 5242880->Overwrite(1,"" When cluster is not a singleNode, UnsupportGuc will be overwrite: max_cn_temp_file_size = 5242880 for max_cn_temp_file_size is not 5242880."")" +38,"best_agg_plan != 0->Overwrite(1,"" When cluster is not a singleNode, UnsupportGuc will be overwrite: best_agg_plan = 0 for best_agg_plan is not 0."")" +39,"dngather_min_rows != 500->Overwrite(1,"" When cluster is not a singleNode, UnsupportGuc will be overwrite: dngather_min_rows = 500 for dngather_min_rows is not 500."")" +40,"stream_multiple != 1->Overwrite(1,"" When cluster is not a singleNode, UnsupportGuc will be overwrite: stream_multiple = 1 for stream_multiple is not 1."")" +41,"expected_computing_nodegroup != ""query""->Overwrite(1,"" When cluster is not a singleNode, UnsupportGuc will be overwrite: expected_computing_nodegroup = 'query' for expected_computing_nodegroup is not 'query'."")" +42,"default_storage_nodegroup != ""installation""->Overwrite(1,"" When cluster is not a singleNode, UnsupportGuc will be overwrite: default_storage_nodegroup = 'installation' for default_storage_nodegroup is not 'installation'."")" +43,"application_type != ""not_perfect_sharding_type""->Overwrite(1,"" When cluster is not a singleNode, UnsupportGuc will be overwrite: application_type = 'not_perfect_sharding_type' for application_type is not 'not_perfect_sharding_type'."")" +44,"enable_gtm_free != ""on""->Overwrite(1,"" When cluster is not a singleNode, UnsupportGuc will be overwrite: enable_gtm_free = 
true for enable_gtm_free is not true."")" +45,"comm_cn_dn_logic_conn != ""off""->Overwrite(1,"" When cluster is not a singleNode, UnsupportGuc will be overwrite: comm_cn_dn_logic_conn = false for comm_cn_dn_logic_conn is not false."")" +46,"gtm_option != 2->Overwrite(1,"" When cluster is not a singleNode, UnsupportGuc will be overwrite: gtm_option = 2 for gtm_option is not 2."")" +47,"gtm_connect_retries != 30->Overwrite(1,"" When cluster is not a singleNode, UnsupportGuc will be overwrite: gtm_connect_retries = 30 for gtm_connect_retries is not 30."")" +48,"gtm_conn_check_interval != 10->Overwrite(1,"" When cluster is not a singleNode, UnsupportGuc will be overwrite: gtm_conn_check_interval = 10 for gtm_conn_check_interval is not 10."")" +49,"default_index_kind != 2->Overwrite(1,"" When cluster is not a singleNode, UnsupportGuc will be overwrite: default_index_kind = 2 for default_index_kind is not 2."")" +50,"update_process_title != ""off""->Overwrite(1,"" When cluster is not a singleNode, UnsupportGuc will be overwrite: update_process_title = false for update_process_title is not false."")" +51,"enable_router != ""off""->Overwrite(1,"" When cluster is not a singleNode, UnsupportGuc will be overwrite: enable_router = false for enable_router is not false."")" +52,"enable_redistribute != ""off""->Overwrite(1,"" When cluster is not a singleNode, UnsupportGuc will be overwrite: enable_redistribute = false for enable_redistribute is not false."")" +53,"transaction_sync_naptime != 30->Overwrite(1,"" When cluster is not a singleNode, UnsupportGuc will be overwrite: transaction_sync_naptime = 30 for transaction_sync_naptime is not 30."")" +54,"transaction_sync_timeout != 600->Overwrite(1,"" When cluster is not a singleNode, UnsupportGuc will be overwrite: transaction_sync_timeout = 600 for transaction_sync_timeout is not 600."")" +55,"session_sequence_cache != 10->Overwrite(1,"" When cluster is not a singleNode, UnsupportGuc will be overwrite: session_sequence_cache = 10 
for session_sequence_cache is not 10."")" +56,"gtm_connect_timeout != 2->Overwrite(1,"" When cluster is not a singleNode, UnsupportGuc will be overwrite: gtm_connect_timeout = 2 for gtm_connect_timeout is not 2."")" +57,"max_datanode_for_plan != 0->Overwrite(1,"" When cluster is not a singleNode, UnsupportGuc will be overwrite: max_datanode_for_plan = 0 for max_datanode_for_plan is not 0."")" diff --git a/script/local/rules/rules_single_node.csv b/script/local/rules/rules_single_node.csv new file mode 100644 index 0000000000000000000000000000000000000000..60c31ece4e0043852c56d356ff9b88a0da9b8e54 --- /dev/null +++ b/script/local/rules/rules_single_node.csv @@ -0,0 +1,108 @@ +,Dependency +0,"enable_global_plancache == ""on"" && enable_thread_pool == ""off""->NotEffect(1,"" ENABLE_GPC will not be on when enable_thread_pool == false"")" +1,"enable_cachedplan_mgr == ""on"" && (enable_global_plancache == ""on"" && enable_thread_pool == ""on"")->NotEffect(1,"" enable_cachedplan_mgr will not be on when ENABLE_GPC is on"")" +2,"transaction_read_only==""on"" && default_transaction_read_only==""off""->Overwrite(2,"" transaction_read_only = true but default_transaction_read_only = false, transaction_read_only may be overwrited as false"")" +3,"transaction_deferrable != default_transaction_deferrable->Overwrite(1,"" transaction_deferrable != default_transaction_deferrable, transaction_deferrable will be overwrited by default_transaction_deferrable"")" +4,"zero_damaged_pages==""on""->Performance(1,"" Set zero_damaged_pages=true may cause 5-15% performance degradation"")" +5,"work_mem < 256*1024->NotEffect(2,"" Set work_mem <= 256*1024 may make work_mem invalid for calcute free_mem"")" +6,"statement_mem > work_mem && statement_max_mem > work_mem->NotEffect(2,"" Set statement_mem > work_mem && statement_max_mem > work_mem may make work_mem invalid for calcute free_mem"")" +7,"enable_wdr_snapshot == ""off"" || ss_enable_dms == ""on""->NotEffect(1,"" Set enable_wdr_snapshot = false 
or ss_enable_dms = true will make wdr_snapshot_interval and wdr_snapshot_retention_days invalid"")" +8,"ss_enable_dss == ""on""->NotEffect(1,"" Set ss_enable_dss = true will make wal_sync_method invalid"")" +9,"enable_mix_replication == ""on""->NotEffect(2,"" Set enable_mix_replication == true may make wal_sync_method invalid"")" +10,"(ss_enable_dms == ""on"" || ss_enable_dss == ""on"") && default_transaction_isolation != ""read committed""->NotEffect(1,"" Only support read committed transcation isolation level while DMS and DSS enabled."")" +11,"pooler_port != port+1->Alert(1,"" Pooler_port must equal to gsql listen port plus one!"")" +12,"max_active_gtt <= 0 && vacuum_gtt_defer_check_age > 0->NotEffect(1,"" max_active_gtt <=0 will make vacuum_gtt_defer_check_age not used"")" +13,"vacuum_freeze_table_age > autovacuum_freeze_max_age*0.95->NotEffect(1,"" vacuum_freeze_table_age > autovacuum_freeze_max_age * 0.95 will make vacuum_freeze_table_age not used"")" +14,"enable_hadoop_env == ""on"" && max_query_retry_times > 0->NotEffect(1,"" enable_hadoop_env == true will make stmt retry not enabled"")" +15,"enable_gtm_free == ""on"" && upgrade_mode == 0 && enable_cluster_resize == ""off"" && enable_twophase_commit == ""off""->Alert(2,"" enable_gtm_free == true && upgrade_mode == 0 && enable_cluster_resize == false && enable_twophase_commit == false may lead to errmsg: Unsupport DML two phase commit under gtm free mode. 
Set enable_twophase_commit to on if need to use DML two phase commit."")" +16,"enable_incremental_checkpoint == ""on"" && incremental_checkpoint_timeout > 0 && checkpoint_timeout > 0->Overwrite(1,"" The actual checkpoint timeout will be assigned by incremental_checkpoint_timeout instead of checkpoint_timeout"")" +17,"(ss_enable_dss == ""on"" || enable_incremental_checkpoint == ""off"") && enable_double_write == ""on""->NotEffect(1,"" enable_double_write will not effect when ss_enable_dss == true || enable_incremental_checkpoint == false"")" +18,"ss_enable_dss == ""on""->Overwrite(2,"" When set ss_enable_dss == true, check if ENABLE_LITE_MODE, if ENABLE_LITE_MODE, ss_enable_dss will be overwrite as false "")" +19,"plog_merge_age >0 && logging_collector == ""off""->NotEffect(1,"" plog_merge_age > 0 but logging_collector == false, profile log will not be collected"")" +20,"autovacuum_vacuum_cost_delay >= 0->NotEffect(1,"" The actual vacuum_cost_delay will be assigned by autovacuum_vacuum_cost_delay instead of vacuum_cost_delay"")" +21,"autovacuum_vacuum_cost_limit >= 0->NotEffect(1,"" The actual vacuum_cost_limit will be assigned by autovacuum_vacuum_cost_limit instead of vacuum_cost_limit"")" +22,"enable_vector_engine == ""off""->NotEffect(1,"" try_vector_engine_strategy will not effect when enable_vector_engine == false "")" +23,"enable_acceleration_cluster_wlm == ""on"" && transaction_pending_time > 0->NotEffect(2,"" transaction_pending_time may not effect when enable_acceleration_cluster_wlm == true"")" +24,"transaction_read_only == ""on"" && transaction_deferrable == ""on""->Alert(2,"" Errormsg may report: A snapshot-importing transaction must not be READ ONLY DEFERRABLE"")" +25,"track_activities == ""off""->Alert(2,"" Errormsg may report: GUC parameter 'track_activities' is off"")" +26,"track_sql_count == ""off""->Alert(2,"" Errormsg may report: GUC parameter 'track_sql_count' is off"")" +27," td_compatible_truncation == ""on"" && sql_compatibility == 
""C""->Alert(2,"" td_compatible_truncation == true && sql_compatibility == C_FORMAT may lead to error report: failed on assertion in specific file and line"")" +28,"ss_enable_dms == ""on"" || enable_stream_replication==""off"" ||max_wal_senders <=0 || synchronous_commit == ""off"" || synchronous_commit == ""local""->Alert(2,"" Current configs will not requested sync replication: +ss_enable_dms == true || enable_stream_replication==false ||max_wal_senders <=0 || synchronous_commit <= SYNCHRONOUS_COMMIT_LOCAL_FLUSH"")" +29,"(synchronous_commit != ""off"" && synchronous_commit != ""local"")&& enable_dcf == ""on""->Function(2,"" WaitCheckpointSync will use SyncPaxosWaitForLSN instead of SyncRepWaitForLSN"")" +30,"standby_shared_buffers_fraction < 1.0->Overwrite(2,"" standby_shared_buffers_fraction may be overwrite by standby_shared_buffers_fraction + 0.1 when standby_shared_buffers_fraction < 1.0"")" +31,"standby_shared_buffers_fraction < 1.0->Alert(2,"" standby_shared_buffers_fraction < 1.0 may lead to errmsg: no unpinned buffers available"")" +32,"enable_nvm == ""off"" && (nvm_file_path != """" || nvm_buffers >0)->NotEffect(1,"" nvm_file_path and nvm_buffers will not effect when enable_nvm == false"")" +33,"enable_global_plancache == ""on"" && enable_thread_pool == ""on""->NotEffect(2,"" sql_beta_feature and nvm_buffers will not effect when GPC not enabled"")" +34,"use_workload_manager == ""off"" && enable_resource_track == ""on""->NotEffect(1,"" enable_resource_track will not effect when use_workload_manager == false"")" +35,"use_workload_manager == ""off"" && enable_resource_record == ""on""->NotEffect(1,"" enable_resource_recordurce_track will not effect when use_workload_manager == false"")" +36,"use_workload_manager == ""off"" && enable_logical_io_statistics == ""on""->NotEffect(1,"" enable_logical_io_statistics will not effect when use_workload_manager == false"")" +37,"use_workload_manager == ""off"" && enable_user_metric_persistent == ""on""->NotEffect(1,"" 
enable_user_metric_persistent will not effect when use_workload_manager == false"")" +38,"(ssl == ""off"" && require_ssl == ""on"") || (ssl == ""off"" && require_ssl == ""on"")->Function(2,"" ssl and require_ssl should be set as the same"")" +39,"user_metric_retention_time >= 0 && enable_user_metric_persistent == ""off""->NotEffect(2,"" user_metric_retention_time may not effect when enable_user_metric_persistent == false"")" +40,"bbox_dump_count >=0 && enable_bbox_dump ==""off""->NotEffect(1,"" bbox_dump_count will not effect when enable_bbox_dump == false"")" +41,"bbox_dump_path != """" && enable_bbox_dump ==""off""->NotEffect(1,"" bbox_dump_path will not effect when enable_bbox_dump == false"")" +42,"enable_expr_fusion == ""on"" && query_dop != 1->NotEffect(1,"" enable_expr_fusion will not be on when query_dop != 1"")" +43,"use_workload_manager == ""off"" && query_band != """"->NotEffect(2,"" query_band may not effect when use_workload_manager == false"")" +44,"plsql_show_all_error == ""on"" || check_function_bodies == ""on""->Alert(2,"" Errormsg may report: InsertError for some func_oid may happen"")" +45,"(password_min_length >0 || password_max_length >0|| password_min_uppercase != 0 || password_min_lowercase != 0 || password_min_digital != 0 || password_min_special != 0) && password_policy == 0->NotEffect(2,"" password_policy == 0 will make password_min_length,password_max_length,password_min_uppercase,password_min_lowercase,password_min_digital,password_min_special not effect"")" +46,"(pagewriter_sleep >0 || dirty_page_percent_max>0 || candidate_buf_percent_target > 0) && enable_incremental_checkpoint == ""off""->NotEffect(2,"" candidate_buf_percent_target, pagewriter_sleep and dirty_page_percent_max may not effect when enable_incremental_checkpoint == false"")" +47,"bgwriter_delay < pagewriter_sleep->NotEffect(2,"" Next scan buffer pool time will use pagewriter_sleep instead of bgwriter_delay for bgwriter_delay < pagewriter_sleep"")" +48,"dw_file_num > 
pagwriter_thread_num->NotEffect(1,"" dw_file_num will be assigned as pagwriter_thread_num when dw_file_num > pagwriter_thread_num"")" +49,"lo_compat_privileges == ""on""->Function(3,"" Set lo_compat_privileges to true will disable permission checks when reading or modifying large objects"")" +50,"enable_incremental_checkpoint == ""off"" && log_pagewriter == ""on""->NotEffect(2,"" log_pagewriter will not effect when enable_incremental_checkpoint == false "")" +51,"enable_opfusion == ""off"" && enable_beta_opfusion == ""on""->NotEffect(2,"" enable_beta_opfusion will not effect when enable_opfusion == false "")" +52,"log_duration == ""off"" && log_min_duration_statement < 0->NotEffect(2,"" log_min_duration_statement will not effect when log_duration == false && log_min_duration_statement < 0"")" +53,"logging_collector == ""off"" && log_error_verbosity != terse + ->NotEffect(2,"" log_error_verbosity will not effect when logging_collector == false"")" +54,"most_available_sync == ""off"" && keep_sync_window > 0->NotEffect(2,"" keep_sync_window will not effect when most_available_sync == false"")" +55,"xlog_file_path == 0 || enable_dcf == ""on"" || (synchronous_commit != ""off"" && synchronous_commit != ""local"")->Function(2,"" NotDelayIntoMostAvaSync may fail when xlog_file_path == 0 || enable_dcf = true || synchronous_commit > SYNCHRONOUS_COMMIT_LOCAL_FLUSH"")" +56," enable_global_plancache == ""off"" || enable_thread_pool == ""off""->NotEffect(2,"" join_collapse_limit will not effect when GPL not enabled"")" +57,"resource_track_level == ""none"" || enable_resource_track == ""off"" || resource_track_cost == -1->NotEffect(2,"" io_limits and io_priority may not effect when resource_track_level == RESOURCE_TRACK_NONE || enable_resource_track == false || resource_track_cost == -1"")" +58,"resource_track_level != ""none"" && enable_resource_track == ""off""->NotEffect(2,"" resource_track_level will not effect when enable_resource_track == false"")" +59,"query_max_mem > 0 && 
query_max_mem < 32768->Overwrite(1,"" query_max_mem will be assigned to 0 when 0 < query_max_mem < 32768"")" +60,"idle_in_transaction_session_timeout > 0 && session_timeout == 0->Alert(2,"" Errormsg 'could not disable timer for idle-in-transaction timeout' may report when idle_in_transaction_session_timeout > 0 && session_timeout == 0"")" +61,"geqo == ""off"" && (geqo_threshold > 0 || geqo_pool_size > 0 || geqo_generations > 0 || geqo_selection_bias != NULL)->NotEffect(1,"" geqo == false will make geqo_selection_bias, geqo_threshold, geqo_pool_size, geqo_generations not effect"")" +62,"geqo_selection_bias != NULL && ( enable_global_plancache == ""off"" || enable_thread_pool == ""off"")->NotEffect(2,"" geqo_selection_bias may not effect when GPL not enabled"")" +63,"enable_double_write == ""on"" && enable_incremental_checkpoint == ""off""->NotEffect(1,"" enable_double_write will not effect when enable_incremental_checkpoint == false"")" +64,"enable_double_write == ""on"" && full_page_writes == ""on""->NotEffect(1,"" full_page_writes will not open when enable_double_write is open"")" +65,"enable_fast_allocate == ""off"" && fast_extend_file_size > 0->NotEffect(1,"" fast_extend_file_size will not effect when enable_fast_allocate == false"")" +66,"enable_stream_replication == ""on"" && xlog_file_path == NULL->NotEffect(2,"" enable_stream_replication may not effect when xlog_file_path == NULL"")" +67,"enable_stream_replication == ""on"" && enable_mix_replication == ""on""->NotEffect(2,"" enable_stream_replication may not open when enable_mix_replication == true"")" +68,"enable_show_any_tuples == ""on""->Alert(3,"" enable_show_any_tuples == true will make all versions of the tuple in the table visible, and may lead to some errmsg for specific sql s: s cannot be executed when enable_show_any_tuples is true."")" +69,"enable_online_ddl_waitlock == ""on"" && xc_maintenance_mode == ""off""->Alert(2,"" Errormsg may be report: 'kill backend is prohibited during online 
expansion.' when enable_online_ddl_waitlock == true && xc_maintenance_mode == false"")" +70,"enable_hashagg == ""off""->Function(3,"" Errormsg may report: '[Multi count(distinct) convert failure reason]: Enable_hashagg disabled.' when enable_hashagg == false"")" +71,"enable_data_replicate == ""on""->Alert(1,"" Check if starting as multi_standby mode, if starting as multi_standby mode, errormsg will report: 'when starting as multi_standby mode, we couldn't support data replicaton.' when enable_data_replicate == true"")" +72,"enable_mix_replication == ""on"" && walsender_max_send_size >= data_replicate_buffer_size->Alert(1,"" Errormsg will report: 'the data queue buffer size must be larger than the wal sender max send size for the ' + +'replication data synchronized by the WAL streaming.' when enable_mix_replication == true && walsender_max_send_size >= data_replicate_buffer_size"")" +73,"max_wal_senders >= max_connections->Alert(1,"" Errormsg will report: 'max_wal_senders must be less than max_connections ' when max_wal_senders >= max_connections"")" +74,"sysadmin_reserved_connections >= max_connections->Alert(1,"" Errormsg will report: 'sysadmin_reserved_connections must be less than max_connections ' when sysadmin_reserved_connections >= max_connections"")" +75,"archive_mode == ""on"" && wal_level == ""minimal""->Alert(1,"" Errormsg will report: 'WAL archival (archive_mode=on) requires wal_level \'archive\', \'hot_standby\' or \'logical\'' when archive_mode == true && wal_level == 'minimal'"")" +76,"max_wal_senders > 0 && wal_level == ""minimal""->Alert(1,"" Errormsg will report: 'WAL streaming (max_wal_senders > 0) requires wal_level \'archive\', \'hot_standby\' or ' + +'\'logical\'' when max_wal_senders > 0 && wal_level == 'minimal'"")" +77,"wal_level != ""hot_standby"" && hot_standby == ""on""->Alert(1,"" Errormsg will report: 'hot standby is not possible because wal_level was not set to \'hot_standby\'' when wal_level != 'hot_standby' && hot_standby == 
true"")" +78,"max_wal_senders <1 && wal_level == ""minimal""->Alert(1,"" Check if starting as dual mode, if true, errormsg may report: 'when starting as dual mode, we must ensure wal_level was not \'minimal\' and max_wal_senders ' + +'was set at least 1' when max_wal_senders <1 && wal_level == 'minimal'"")" +79,"recovery_max_workers >= 1->Alert(1,"" Check if starting as dummy_standby mode, if true, errormsg may report: 'when starting as dummy_standby mode, we couldn't support parallel redo, down it' when recovery_max_workers >= 1"")" +80,"xlog_file_path != NULL && xlog_file_size == 0->Alert(1,"" Errormsg will report: 'configured \'xlog_file_path\' but \'xlog_file_size\' is zero.' when xlog_file_path != NULL && xlog_file_size == 0 "")" +81,"xlog_file_path != NULL && xlog_file_size % 16 * 1024 * 1024 != 0->Alert(1,"" Errormsg will report: 'value of \'xlog_file_size\' must be an integer multiple of XLogSegSize' when xlog_file_size % XLogSegSize != 0(XLogSegSize = 2^24)"")" +82,"ss_enable_dss == ""on"" && temp_tablespaces != NULL && temp_tablespaces != """"->Alert(1,"" Errormsg will report: 'shared storage mode could not support specifics tablespace(s).' when ss_enable_dss == true && temp_tablespaces != NULL && temp_tablespaces != '' +Hint:'Either set temp_tablespaces to NULL, or turn off ss_enable_dss.'"")" +83,"xlog_lock_file_path == NULL && xlog_file_path != NULL->Alert(1,"" Errormsg will report: 'use scsi to preempt shared storage' when xlog_lock_file_path == NULL && xlog_file_path != NULL"")" +84,"recovery_parse_workers > 1->Alert(1,"" Check if starting as dummy_standby mode, if true, errormsg may report: 'when starting as dummy_standby mode, we couldn't support extreme rto.' 
when recovery_parse_workers > 1 +Hint: so down extreme rto, make recovery_parse_workers <= 1"")" +85,"recovery_parse_workers > 1 && wal_receiver_buffer_size < 32 * 1024->Alert(1,"" Errormsg will report: 'when starting extreme rto, wal receiver buf should not smaller than %dMB' when recovery_parse_workers > 1 && wal_receiver_buffer_size < 32768 +Hint: 'recommend config \'wal_receiver_buffer_size=64MB\''"")" +86,"recovery_parse_workers > 1 && hot_standby == ""on""->Alert(1,"" Errormsg will report: 'extreme rto could not support hot standby.' when recovery_parse_workers > 1 && hot_standby == true +Hint: 'Either turn off extreme rto, or turn off hot_standby.'"")" +87,"lastval_supported == ""on"" && enable_beta_features == false->NotEffect(2,"" lastval_supported will not effect when enable_beta_features == false"")" +88,"enable_mix_replication == ""on"" && enable_cbm_tracking == ""off""->Alert(1,"" Errormsg will report: 'enable_cbm_tracking must be turn on when enable_mix_replication is on!' when enable_mix_replication == true && enable_cbm_tracking == false"")" +89,"ss_enable_dms == ""on"" && default_transaction_isolation != ""read committed""->Alert(1,""Errormsg will report: 'Only support read committed transcation isolation level while DMS and DSS enabled.' 
when ss_enable_dms == true && default_transaction_isolation != 'read committed'"")" +90,"debug_print_plan == ""off"" && (log_min_messages == ""debug5"" || log_min_messages == ""debug4"" || log_min_messages == ""debug3"" || log_min_messages == ""debug2"" || log_min_messages == ""debug1"" || log_min_messages == ""log"" )->NotEffect(3,""debug_print_plan == false may make the debug log print fail"")" +91,"enable_dcf == ""off""->NotEffect(2,"" Cluster is not installed as dcf mode, so dcf related configs will not effect"")" +92,"random_page_cost < seq_page_cost->NotEffect(2,"" random_page_cost may not effect when random_page_cost < seq_page_cost"")" +93,"audit_enabled == ""off""->NotEffect(2,"" audit_enabled == false, so audit related configs will not effect"")" +94,"audit_enabled == ""on"" && (audit_function_exec == ""off"" || audit_system_function_exec == ""off"" || audit_copy_exec == ""off"")->NotEffect(1,"" Some audit functions will not be performed normally when audit_enabled == true && (audit_function_exec == false || audit_system_function_exec == false || audit_copy_exec == false)"")"