Best Python code snippet using autotest_python
cpuset.py
Source: cpuset.py
# --- Container discovery & memory-accounting helpers -------------------------
# Module state lazily filled in by discover_container_style().  Two container
# styles are supported: fake-numa cpusets (memory granted as whole numa mem
# nodes) and memcg (memory granted as byte limits via memory.limit_in_bytes).
fake_numa_containers = False # container mem via numa=fake mem nodes, else pages
mem_isolation_on = False
node_mbytes = 0 # mbytes in one typical mem node
root_container_bytes = 0 # squishy limit on effective size of root container


def discover_container_style():
    """Probe the running kernel for its container support style.

    Determines where the cpuset/cgroup filesystem is mounted, which
    attribute-name prefix it uses ('cpuset.' on cgroup kernels, '' on
    older /dev/cpuset kernels), and whether memory isolation is active
    via fake-numa nodes or via memcg byte limits.  Results are cached in
    module globals; calling this again after the first lookup is a no-op.
    """
    global super_root_path, cpuset_prefix
    global mem_isolation_on, fake_numa_containers
    global node_mbytes, root_container_bytes
    if super_root_path != '':
        return # already looked up
    if os.path.exists('/dev/cgroup/tasks'):
        # running on 2.6.26 or later kernel with containers on:
        super_root_path = '/dev/cgroup'
        cpuset_prefix = 'cpuset.'
        if get_boot_numa():
            mem_isolation_on = fake_numa_containers = True
        else: # memcg containers IFF compiled-in & mounted & non-fakenuma boot
            fake_numa_containers = False
            mem_isolation_on = os.path.exists(
                    '/dev/cgroup/memory.limit_in_bytes')
            # TODO: handle possibility of where memcg is mounted as its own
            # cgroup hierarchy, separate from cpuset??
    elif os.path.exists('/dev/cpuset/tasks'):
        # running on 2.6.18 kernel with containers on:
        super_root_path = '/dev/cpuset'
        cpuset_prefix = ''
        mem_isolation_on = fake_numa_containers = get_boot_numa() != ''
    else:
        # neither cpuset nor cgroup filesystem active:
        super_root_path = None
        cpuset_prefix = 'no_cpusets_or_cgroups_exist'
        mem_isolation_on = fake_numa_containers = False
    logging.debug('mem_isolation: %s', mem_isolation_on)
    logging.debug('fake_numa_containers: %s', fake_numa_containers)
    if fake_numa_containers:
        node_mbytes = int(mbytes_per_mem_node())
    elif mem_isolation_on: # memcg-style containers
        # For now, limit total of all containers to using just 98% of system's
        # visible total ram, to avoid oom events at system level, and avoid
        # page reclaim overhead from going above kswapd highwater mark.
        # memtotal() is in Kbytes; >> 2 converts to 4K pages, << 12 to bytes.
        system_visible_pages = utils.memtotal() >> 2
        usable_pages = int(system_visible_pages * 0.98)
        root_container_bytes = usable_pages << 12
        logging.debug('root_container_bytes: %s',
                      utils.human_format(root_container_bytes))


def need_mem_containers():
    """Raise AutotestError unless memory-isolating containers are enabled."""
    discover_container_style()
    if not mem_isolation_on:
        raise error.AutotestError('Mem-isolation containers not enabled '
                                  'by latest reboot')


def need_fake_numa():
    """Raise AutotestError unless the kernel booted with numa=fake."""
    discover_container_style()
    if not fake_numa_containers:
        raise error.AutotestError('fake=numa not enabled by latest reboot')


def full_path(container_name):
    """Absolute filesystem path of a container under the cpuset/cgroup root."""
    discover_container_style()
    return os.path.join(super_root_path, container_name)


def unpath(container_path):
    """Inverse of full_path(): strip the mount root, yielding the name."""
    return container_path[len(super_root_path)+1:]


def cpuset_attr(container_name, attr):
    """Path of a cpuset attribute file, honoring the 'cpuset.' prefix."""
    discover_container_style()
    return os.path.join(super_root_path, container_name, cpuset_prefix+attr)


def io_attr(container_name, attr):
    """Path of an io.* attribute file for a container."""
    discover_container_style()
    # current version assumes shared cgroup hierarchy
    return os.path.join(super_root_path, container_name, 'io.'+attr)


def tasks_path(container_name):
    """Path of the 'tasks' file listing the container's member pids."""
    return os.path.join(full_path(container_name), 'tasks')


def mems_path(container_name):
    """Path of the container's memory-nodes attribute file."""
    return cpuset_attr(container_name, 'mems')


def memory_path(container_name):
    """Path of the container's 'memory' file."""
    return os.path.join(super_root_path, container_name, 'memory')


def cpus_path(container_name):
    """Path of the container's cpus attribute file."""
    return cpuset_attr(container_name, 'cpus')


def container_exists(name):
    """True if a container of the given name currently exists."""
    return name is not None and os.path.exists(tasks_path(name))


def move_tasks_into_container(name, tasks):
    """Move each pid in tasks into the named container.

    A move failure is ignored when the task has already exited or become
    a zombie; failures for live tasks are re-raised.
    """
    task_file = tasks_path(name)
    for task in tasks:
        try:
            logging.debug('moving task %s into container "%s"', task, name)
            utils.write_one_line(task_file, task)
        except Exception:
            if utils.pid_is_alive(task):
                raise # task exists but couldn't move it
            # task is gone or zombie so ignore this exception


def move_self_into_container(name):
    """Move the current process into the named container."""
    me = str(os.getpid())
    move_tasks_into_container(name, [me])
    logging.debug('running self (pid %s) in container "%s"', me, name)


def _avail_mbytes_via_nodes(parent):
    """Total mbytes of mem nodes available for new containers in parent."""
    free_nodes = available_exclusive_mem_nodes(parent)
    mbytes = nodes_avail_mbytes(free_nodes)
    # don't have exact model for how container mgr measures mem space
    # better here to underestimate than overestimate
    mbytes = max(mbytes - node_mbytes//2, 0)
    return mbytes


def _avail_bytes_via_pages(parent):
    """Memory bytes available to parent which could be allocated
    exclusively to new child containers.  This excludes mem previously
    allocated to existing children.
    """
    available = container_bytes(parent)
    mem_files_pattern = os.path.join(full_path(parent),
                                     '*', 'memory.limit_in_bytes')
    for mem_file in glob.glob(mem_files_pattern):
        child_container = unpath(os.path.dirname(mem_file))
        available -= container_bytes(child_container)
    return available


def avail_mbytes(parent=SUPER_ROOT):
    """Total mbytes available in parent, for exclusive use in new containers."""
    if fake_numa_containers:
        return _avail_mbytes_via_nodes(parent)
    else:
        return _avail_bytes_via_pages(parent) >> 20


def delete_leftover_test_containers():
    """Recover mems and cores tied up by containers of prior failed tests."""
    for child in inner_containers_of(SUPER_ROOT):
        _release_container_nest(child)


def my_lock(lockname):
    """Take an exclusive flock on $AUTODIR/.cpuset.lock.<lockname>.

    lockname is 'inner'.  Returns the open lockfile; release with my_unlock().
    """
    lockdir = os.environ['AUTODIR']
    lockname = os.path.join(lockdir, '.cpuset.lock.'+lockname)
    lockfile = open(lockname, 'w')
    fcntl.flock(lockfile, fcntl.LOCK_EX)
    return lockfile


def my_unlock(lockfile):
    """Release and close a lockfile obtained from my_lock()."""
    fcntl.flock(lockfile, fcntl.LOCK_UN)
    lockfile.close()


def rangelist_to_set(rangelist):
    """Convert '1-3,7,9-12' to set(1,2,3,7,9,10,11,12).

    Raises ValueError on any comma-separated piece that is neither a
    bare integer nor an 'a-b' range.  Empty/None input yields empty set.
    """
    result = set()
    if not rangelist:
        return result
    for x in rangelist.split(','):
        if re.match(r'^(\d+)$', x):
            result.add(int(x))
            continue
        m = re.match(r'^(\d+)-(\d+)$', x)
        if m:
            start = int(m.group(1))
            end = int(m.group(2))
            result.update(set(range(start, end+1)))
            continue
        msg = 'Cannot understand data input: %s %s' % (x, rangelist)
        raise ValueError(msg)
    return result


def my_container_name():
    """Get current process's inherited or self-built container name
    within /dev/cpuset or /dev/cgroup.  Is '' for root container.
    """
    name = utils.read_one_line('/proc/%i/cpuset' % os.getpid())
    return name[1:] # strip leading /


def get_mem_nodes(container_name):
    """All mem nodes now available to a container, both exclusive & shared."""
    file_name = mems_path(container_name)
    if os.path.exists(file_name):
        return rangelist_to_set(utils.read_one_line(file_name))
    else:
        return set()


def _busy_mem_nodes(parent_container):
    """Set of numa memory nodes now used (exclusively or shared)
    by existing children of parent container.
    """
    busy = set()
    mem_files_pattern = os.path.join(full_path(parent_container),
                                     '*', cpuset_prefix+'mems')
    for mem_file in glob.glob(mem_files_pattern):
        child_container = os.path.dirname(mem_file)
        busy |= get_mem_nodes(child_container)
    return busy


def available_exclusive_mem_nodes(parent_container):
    """Subset of numa memory nodes of parent container which could
    be allocated exclusively to new child containers.
    This excludes nodes now allocated to existing children.
    """
    need_fake_numa()
    available = get_mem_nodes(parent_container)
    available -= _busy_mem_nodes(parent_container)
    return available


def my_mem_nodes():
    """Set of numa memory nodes owned by current process's container."""
    discover_container_style()
    if not mem_isolation_on:
        return set() # as expected by vmstress
    return get_mem_nodes(my_container_name())


def my_available_exclusive_mem_nodes():
    """Subset of numa memory nodes owned by current process's container,
    which could be allocated exclusively to new child containers.
    This excludes any nodes now allocated to existing children.
    """
    return available_exclusive_mem_nodes(my_container_name())


def node_avail_kbytes(node):
    return node_mbytes << 10 # crude; fixed numa node size


def nodes_avail_mbytes(nodes):
    """Nodes' combined user+avail size, in Mbytes."""
    return sum(node_avail_kbytes(n) for n in nodes) // 1024
Learn to execute automation testing from scratch with the LambdaTest Learning Hub — right from setting up the prerequisites to run your first automation test, through following best practices and diving deeper into advanced test scenarios. The LambdaTest Learning Hub compiles a list of step-by-step guides to help you become proficient with different test automation frameworks, i.e., Selenium, Cypress, TestNG, etc.
You can also refer to video tutorials on the LambdaTest YouTube channel to get step-by-step demonstrations from industry experts.
Get 100 automation test minutes FREE!