Source Code Analysis of the OpenStack Nova-scheduler Component (2)

Date: 2021-05-27 17:18:11

Thanks to my friends for supporting this blog; you are welcome to discuss and exchange ideas with me. Given my limited ability and time, mistakes are unavoidable, and corrections are welcome!
If you repost this article, please keep the author information.
Blog: http://blog.csdn.net/gaoxingnengjisuan
Email: dong.liu@siat.ac.cn


In this post, I will walk through how the Nova scheduler picks the best host node for a request to launch a virtual machine instance.

Let's start with the method def run_instance in /nova/scheduler/manager.py:

def run_instance(self, context, request_spec, admin_password,
                 injected_files, requested_networks, is_first_time,
                 filter_properties):
    """
    Try to call schedule_run_instance on the driver;
    set the instance's vm_state to ERROR when an exception is raised.
    context: request context;
    request_spec: request specification;
    admin_password: admin user password;
    injected_files: files to inject;
    requested_networks: requested network information;
    is_first_time: flag marking whether this is the first attempt;
    filter_properties: filter properties.
    """

    # Get the UUIDs of the instances to run.
    instance_uuids = request_spec['instance_uuids']
    # EventReporter: a context-manager class;
    # an EventReporter object is created here.
    with compute_utils.EventReporter(context, conductor_api.LocalAPI(),
                                     'schedule', *instance_uuids):

        # schedule_run_instance is defined both in /nova/scheduler/chance.py
        # and in /nova/scheduler/filter_scheduler.py;
        # which one is actually called depends on how driver is defined.
        # The __init__ of this class contains
        # self.driver = importutils.import_object(scheduler_driver),
        # so the method that runs is determined by scheduler_driver.
        # __init__ also specifies that when scheduler_driver is not given,
        # it is taken from the configuration, i.e.
        # scheduler_driver = CONF.scheduler_driver,
        # and the default value of CONF.scheduler_driver is
        # nova.scheduler.filter_scheduler.FilterScheduler.
        # So when scheduler_driver is not specified, the
        # schedule_run_instance method in
        # /nova/scheduler/filter_scheduler.py is called by default.
        # I will trace and analyze both implementations.

        # schedule_run_instance in /nova/scheduler/filter_scheduler.py does
        # the following:
        # get the number of instances to build;
        # loop over every instance;
        # get the host assigned to this instance;
        # provision the resources requested for the instance;
        # cast a run-instance message to the queue;
        # publish the notification that scheduling the instances has ended.

        # schedule_run_instance in /nova/scheduler/chance.py does the
        # following:
        # loop over every instance;
        # pick a random host for each instance;
        # set the given properties on the instance and update the related
        # instance data;
        # cast a message to the queue to build the instance.

        # NOTE: it is worth comparing how these two implementations differ;
        # studying both of them makes a lot of things much clearer.
        try:
            # driver = nova.scheduler.filter_scheduler.FilterScheduler
            return self.driver.schedule_run_instance(context,
                    request_spec, admin_password, injected_files,
                    requested_networks, is_first_time, filter_properties)

        except exception.NoValidHost as ex:
            # don't re-raise
            self._set_vm_state_and_notify('run_instance',
                                          {'vm_state': vm_states.ERROR,
                                           'task_state': None},
                                          context, ex, request_spec)
        except Exception as ex:
            with excutils.save_and_reraise_exception():
                self._set_vm_state_and_notify('run_instance',
                                              {'vm_state': vm_states.ERROR,
                                               'task_state': None},
                                              context, ex, request_spec)
Let's look at the statement return self.driver.schedule_run_instance(context, request_spec, admin_password, injected_files, requested_networks, is_first_time, filter_properties).

First, the __init__ method of the class SchedulerManager that this method belongs to:

class SchedulerManager(manager.Manager):
    """
    Chooses a host to run instances on.
    """

    RPC_API_VERSION = '2.6'

    def __init__(self, scheduler_driver=None, *args, **kwargs):

        # If scheduler_driver is not given, fall back to CONF.scheduler_driver;
        # the default of this config option is
        # nova.scheduler.filter_scheduler.FilterScheduler.
        if not scheduler_driver:
            scheduler_driver = CONF.scheduler_driver

        # import_object: import a class and return an instance of it;
        # here it imports the class named by scheduler_driver, e.g. when
        # scheduler_driver is nova.scheduler.filter_scheduler.FilterScheduler,
        # that class is imported; this is how the class is loaded dynamically.
        self.driver = importutils.import_object(scheduler_driver)
        super(SchedulerManager, self).__init__(*args, **kwargs)
Now look at how scheduler_driver is defined:

scheduler_driver_opt = cfg.StrOpt('scheduler_driver',
        default='nova.scheduler.filter_scheduler.FilterScheduler',
        help='Default driver to use for the scheduler')

So the host-selection strategy used by the scheduler is loaded dynamically; by default it is the filter-based scheduler implemented by the class nova.scheduler.filter_scheduler.FilterScheduler.
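To see what that dynamic loading amounts to, here is a minimal stand-in for importutils.import_object (a sketch, not the real oslo importutils code): split the dotted path, import the module, look up the class, and instantiate it.

import importlib


def import_object(import_str, *args, **kwargs):
    # Import the class named by a dotted path and return an instance of it
    # (a minimal stand-in for importutils.import_object).
    module_name, _, class_name = import_str.rpartition('.')
    module = importlib.import_module(module_name)
    cls = getattr(module, class_name)
    return cls(*args, **kwargs)


# Works with any importable class:
obj = import_object('collections.OrderedDict')
print(type(obj))   # <class 'collections.OrderedDict'>

# What SchedulerManager.__init__ effectively does with the default option
# (commented out because it needs a Nova installation):
# driver = import_object('nova.scheduler.filter_scheduler.FilterScheduler')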

Next, the implementation of schedule_run_instance:

def schedule_run_instance(self, context, request_spec,
                          admin_password, injected_files,
                          requested_networks, is_first_time,
                          filter_properties):
    """
    This method is called from nova.compute.api to provision instances;
    it returns a list of the created instances.

    Get the number of instances to build;
    loop over every instance and get the host assigned to it;
    provision the resources requested for the instance;
    cast a run-instance message to the queue;
    publish the notification that scheduling the instances has ended.
    """
    # request_spec: request specification;
    # wrap it in a dict.
    payload = dict(request_spec=request_spec)

    # notifier.publisher_id("scheduler"): get the publisher ID of the
    # notification; the return value is "scheduler.<host>";
    # notify: publish a notification using the configured driver
    # (the implementation of this method deserves further analysis).
    notifier.notify(context, notifier.publisher_id("scheduler"),
                    'scheduler.run_instance.start', notifier.INFO, payload)

    # Pop instance_uuids out of request_spec.
    instance_uuids = request_spec.pop('instance_uuids')
    # The number of instances to build.
    num_instances = len(instance_uuids)
    LOG.debug(_("Attempting to build %(num_instances)d instance(s)") % locals())

    # Return the list of hosts that satisfy the requested specification;
    # after choosing a suitable host for every instance, return the list
    # of selected hosts.
    weighed_hosts = self._schedule(context, request_spec,
                                   filter_properties, instance_uuids)

    # NOTE: I have not fully figured out this statement yet; revisit it later.
    filter_properties.pop('context', None)

    # Loop over instance_uuids.
    for num, instance_uuid in enumerate(instance_uuids):
        request_spec['instance_properties']['launch_index'] = num

        try:
            # Get the host assigned to this instance.
            try:
                weighed_host = weighed_hosts.pop(0)
            except IndexError:
                raise exception.NoValidHost(reason="")

            # context: request context;
            # weighed_host: the host assigned to this instance;
            # request_spec: request specification;
            # filter_properties: filter properties;
            # requested_networks: requested network information;
            # injected_files: files to inject;
            # admin_password: admin password;
            # is_first_time: flag marking whether this is the first attempt;
            # instance_uuid: the UUID of each instance;
            # _provision_resource: provision the requested resources in this
            # zone and cast a message to the queue to build the instance.
            self._provision_resource(context, weighed_host,
                                     request_spec,
                                     filter_properties,
                                     requested_networks,
                                     injected_files, admin_password,
                                     is_first_time,
                                     instance_uuid=instance_uuid)
        except Exception as ex:
            driver.handle_schedule_error(context, ex, instance_uuid,
                                         request_spec)
        # Scrub the retry host list in case several instances are scheduled.
        retry = filter_properties.get('retry', {})
        retry['hosts'] = []

    # notify: publish a notification using the configured driver;
    # this publishes the event that scheduling of the instances has ended.
    notifier.notify(context, notifier.publisher_id("scheduler"),
                    'scheduler.run_instance.end', notifier.INFO, payload)
The statement we care most about is weighed_hosts = self._schedule(context, request_spec, filter_properties, instance_uuids). By calling _schedule, it picks a suitable host for each instance in turn and returns the list of usable hosts. Here is the implementation of _schedule:
def _schedule(self, context, request_spec, filter_properties,
              instance_uuids=None):
    """
    Return the list of hosts that satisfy the requested specification;
    after choosing a suitable host for every instance, return the list
    of selected hosts.

    Host filtering:
    FilterScheduler inherits from the Scheduler class;
    the Scheduler __init__ loads all available filter classes;
    the scheduler_default_filters option in the configuration file decides
    which filter(s) are used by default;
    host_passes() of every filter class is called for each host in turn,
    and a host passes filtering only if all of them return True.

    All hosts that pass filtering are then weighed.
    """
    # elevated: return a version of the context with the admin flag set.
    elevated = context.elevated()
    # Instance properties (instance_properties).
    instance_properties = request_spec['instance_properties']
    # Instance type (instance_type).
    instance_type = request_spec.get("instance_type", None)

    update_group_hosts = False
    # Get scheduler_hints.
    scheduler_hints = filter_properties.get('scheduler_hints') or {}
    # Get the group information.
    group = scheduler_hints.get('group', None)
    if group:
        # group_hosts: all hosts in the group that satisfy the filtering
        # conditions.
        group_hosts = self.group_hosts(elevated, group)
        # Mark that group_hosts has been updated.
        update_group_hosts = True
        # If filter_properties does not contain 'group_hosts' yet, add it.
        if 'group_hosts' not in filter_properties:
            filter_properties.update({'group_hosts': []})

        # Get the 'group_hosts' already present in filter_properties and
        # merge the two lists (as I understand it, both parts already
        # satisfy the conditions).
        configured_hosts = filter_properties['group_hosts']
        filter_properties['group_hosts'] = configured_hosts + group_hosts

    # Get the configuration options.
    config_options = self._get_configuration_options()

    # Copy of the properties of the instances to build.
    properties = instance_properties.copy()

    # Take properties['uuid'] from instance_uuids.
    if instance_uuids:
        properties['uuid'] = instance_uuids[0]

    # Record the number of scheduling attempts.
    self._populate_retry(filter_properties, properties)

    # Update the filter properties.
    filter_properties.update({'context': context,
                              'request_spec': request_spec,
                              'config_options': config_options,
                              'instance_type': instance_type})

    # Take useful information from request_spec and fill it into the filter
    # properties, namely filter_properties['os_type'] and
    # filter_properties['project_id'].
    self.populate_filter_properties(request_spec, filter_properties)

    # Find acceptable local hosts by repeatedly filtering and weighing our
    # options; iterators are used here so the list is only traversed once.
    # get_all_host_states: get and return the list of HostState objects,
    # dropping hosts that do not satisfy the requirements.
    hosts = self.host_manager.get_all_host_states(elevated)

    selected_hosts = []

    # The number of instances to build.
    if instance_uuids:
        num_instances = len(instance_uuids)
    else:
        num_instances = request_spec.get('num_instances', 1)

    # Loop over the num_instances instances and pick a suitable host for
    # each of them.
    for num in xrange(num_instances):
        # Filter local hosts based on requirements ...
        # get_filtered_hosts: filter the hosts and return those that pass
        # all filters.
        hosts = self.host_manager.get_filtered_hosts(hosts,
                                                     filter_properties)
        if not hosts:
            break
        LOG.debug(_("Filtered %(hosts)s") % locals())

        # Weigh the hosts;
        # get and return a sorted list of WeighedObjects (highest score
        # first).
        weighed_hosts = self.host_manager.get_weighed_hosts(hosts,
                                                            filter_properties)

        # scheduler_host_subset_size: a new instance is scheduled onto a
        # host chosen at random from a subset made of the best (highest
        # scoring) N hosts; this option defines the size N of that subset.
        # With a value of 1 the first host returned by the weighing
        # functions is used. The value must be at least 1; anything smaller
        # is ignored and replaced by 1. The default is 1.
        scheduler_host_subset_size = CONF.scheduler_host_subset_size
        if scheduler_host_subset_size > len(weighed_hosts):
            scheduler_host_subset_size = len(weighed_hosts)
        if scheduler_host_subset_size < 1:
            scheduler_host_subset_size = 1

        # As explained above, pick one host at random from the subset of
        # the highest-scoring hosts; the new instance will be scheduled on
        # this host.
        chosen_host = random.choice(
            weighed_hosts[0:scheduler_host_subset_size])
        LOG.debug(_("Choosing host %(chosen_host)s") % locals())

        # Append the chosen host to selected_hosts.
        selected_hosts.append(chosen_host)

        # A host has been chosen, so update its resource information before
        # choosing a host for the next instance.
        chosen_host.obj.consume_from_instance(instance_properties)
        if update_group_hosts is True:
            filter_properties['group_hosts'].append(chosen_host.obj.host)

    # After choosing a suitable host for every instance, return the list of
    # selected hosts.
    return selected_hosts
In the implementation of this method there are essentially three key steps:

1. The statement hosts = self.host_manager.get_all_host_states(elevated)

drops the host nodes that are not usable and obtains the list of available host nodes;

2. The statement hosts = self.host_manager.get_filtered_hosts(hosts, filter_properties)

runs the filters configured in the system against that list of available hosts, based on the requirements of the instance to be built, narrowing it down to the hosts that satisfy all the filters;

3. The statement weighed_hosts = self.host_manager.get_weighed_hosts(hosts, filter_properties)

weighs every host node in the filtered list and selects the host node that is optimal under the chosen criteria as the target host on which to build the virtual machine instance (see the short sketch right after this list).
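As _schedule showed above, this last step does not simply take the first entry of the weighed list: the target is drawn at random from the scheduler_host_subset_size best hosts. Here is a standalone sketch of just that selection logic (the host names are made up):

import random


def choose_host(weighed_hosts, subset_size):
    # Pick one entry at random from the first `subset_size` items of a
    # best-first sorted list, clamping subset_size to [1, len(weighed_hosts)]
    # the same way _schedule does.
    subset_size = min(subset_size, len(weighed_hosts))
    subset_size = max(subset_size, 1)
    return random.choice(weighed_hosts[:subset_size])


# Hosts already sorted by weight, highest score first.
ranked = ['node-a', 'node-b', 'node-c', 'node-d']
print(choose_host(ranked, subset_size=1))   # always 'node-a'
print(choose_host(ranked, subset_size=3))   # one of the first three

With the default subset size of 1 the choice is deterministic; raising it trades a little optimality for spreading concurrent requests across several good hosts.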

Alright, let's now analyze these statements one by one.

1. hosts = self.host_manager.get_all_host_states(elevated)

Here is the implementation of get_all_host_states:

def get_all_host_states(self, context):
    """
    Get and return a list of HostState objects;
    the HostStates represent all hosts the HostManager knows about.
    In addition, every consumable resource in a HostState is pre-populated
    and adjusted based on data from the database;
    hosts that do not satisfy the requirements are filtered out.
    """

    # Get the resource usage of the available compute nodes:
    # fetch all compute nodes.
    compute_nodes = db.compute_node_get_all(context)
    # Set of (host, node) keys seen in this pass.
    seen_nodes = set()
    for compute in compute_nodes:
        service = compute['service']
        if not service:
            LOG.warn(_("No service for compute ID %s") % compute['id'])
            continue
        # The host name.
        host = service['host']
        # hypervisor_hostname is used as the node name.
        node = compute.get('hypervisor_hostname')
        # Build the state_key.
        state_key = (host, node)
        # Get the capabilities.
        capabilities = self.service_states.get(state_key, None)
        # Get the host_state.
        host_state = self.host_state_map.get(state_key)

        # Update the read-only capabilities dict.
        if host_state:
            host_state.update_capabilities(capabilities,
                                           dict(service.iteritems()))
        # host_state_cls tracks the mutable and immutable information of a
        # host; it also tries to remove previously used data structures and
        # locks access to them.
        else:
            host_state = self.host_state_cls(host, node,
                                             capabilities=capabilities,
                                             service=dict(service.iteritems()))
            self.host_state_map[state_key] = host_state

        # update_from_compute_node: update the host information from the
        # compute node record.
        host_state.update_from_compute_node(compute)
        # Update the seen_nodes set.
        seen_nodes.add(state_key)

    # Remove compute nodes that are no longer active from host_state_map:
    # compute and delete the keys of the dead nodes.
    dead_nodes = set(self.host_state_map.keys()) - seen_nodes
    for state_key in dead_nodes:
        host, node = state_key
        LOG.info(_("Removing dead compute node %(host)s:%(node)s "
                   "from scheduler") % locals())
        del self.host_state_map[state_key]

    return self.host_state_map.itervalues()
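The pruning at the end of the method is nothing more than set arithmetic over the (host, node) keys. A tiny standalone illustration with made-up keys:

# host_state_map is keyed by (host, node); seen_nodes collects the keys
# refreshed in this pass, and whatever is left over is treated as dead.
host_state_map = {('compute1', 'compute1'): 'HostState-1',
                  ('compute2', 'compute2'): 'HostState-2'}
seen_nodes = {('compute1', 'compute1')}

dead_nodes = set(host_state_map.keys()) - seen_nodes
for state_key in dead_nodes:
    del host_state_map[state_key]

print(host_state_map)   # only ('compute1', 'compute1') is left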
1.1 compute_nodes = db.compute_node_get_all(context)

This statement fetches all compute nodes from the database. Here is the implementation of compute_node_get_all:

def compute_node_get_all(context):
    """
    Query the database for all ComputeNode records.
    """
    return model_query(context, models.ComputeNode).\
                   options(joinedload('service')).\
                   options(joinedload('stats')).\
                   all()
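The two joinedload() options tell SQLAlchemy to eager-load the related service and stats rows in the same query, so the loop in get_all_host_states does not issue an extra query per compute node when it reads compute['service']. A generic sketch of the same pattern (it assumes a ComputeNode mapped class with service and stats relationships and an open session; it is not Nova's model_query helper):

from sqlalchemy.orm import joinedload


def load_compute_nodes(session, ComputeNode):
    # Eagerly join the 'service' and 'stats' relationships so that later
    # attribute access does not trigger one extra query per node.
    return (session.query(ComputeNode)
                   .options(joinedload(ComputeNode.service),
                            joinedload(ComputeNode.stats))
                   .all())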

1.2 host_state = self.host_state_cls(host, node, capabilities=capabilities, service=dict(service.iteritems()))

This statement initializes a number of host attributes. Here is the code:

host_state_cls = HostState


class HostState(object):
    def __init__(self, host, node, capabilities=None, service=None):
        self.host = host
        self.nodename = node
        self.update_capabilities(capabilities, service)

        # Mutable available resources.
        # These will change as resources are virtually "consumed".
        self.total_usable_disk_gb = 0
        self.disk_mb_used = 0
        self.free_ram_mb = 0
        self.free_disk_mb = 0
        self.vcpus_total = 0
        self.vcpus_used = 0
        # Valid vm types on this host: 'pv', 'hvm' or 'all'
        if 'allowed_vm_type' in self.capabilities:
            self.allowed_vm_type = self.capabilities['allowed_vm_type']
        else:
            self.allowed_vm_type = 'all'

        # Additional host information from the compute node stats:
        self.vm_states = {}
        self.task_states = {}
        self.num_instances = 0
        self.num_instances_by_project = {}
        self.num_instances_by_os_type = {}
        self.num_io_ops = 0

        # Resource oversubscription values for the compute host:
        self.limits = {}

        self.updated = None

1.3 host_state.update_from_compute_node(compute)

This statement updates the host information from the compute node record. Here is the code:

def update_from_compute_node(self, compute):
    """
    Update host information from a compute_node record.
    """

    # If this state is already newer than the record, return directly.
    if (self.updated and compute['updated_at']
            and self.updated > compute['updated_at']):
        return
    # Get all_ram_mb.
    all_ram_mb = compute['memory_mb']

    # Assume the virtual disk size is all consumed by instances when qcow2
    # disks are used.
    least = compute.get('disk_available_least')
    # Get the remaining disk space, free_disk_mb.
    free_disk_mb = least if least is not None else compute['free_disk_gb']
    free_disk_mb *= 1024

    # Get the used disk space.
    self.disk_mb_used = compute['local_gb_used'] * 1024

    # free_ram_mb can be negative.
    self.free_ram_mb = compute['free_ram_mb']
    # Get total_usable_ram_mb.
    self.total_usable_ram_mb = all_ram_mb
    # Get total_usable_disk_gb.
    self.total_usable_disk_gb = compute['local_gb']
    self.free_disk_mb = free_disk_mb
    self.vcpus_total = compute['vcpus']
    self.vcpus_used = compute['vcpus_used']
    self.updated = compute['updated_at']

    stats = compute.get('stats', [])
    statmap = self._statmap(stats)

    # Track the number of instances on the host.
    self.num_instances = int(statmap.get('num_instances', 0))

    # Track the number of instances per project_id.
    project_id_keys = [k for k in statmap.keys() if
                       k.startswith("num_proj_")]
    for key in project_id_keys:
        project_id = key[9:]
        self.num_instances_by_project[project_id] = int(statmap[key])

    # Track the number of instances in a given vm_state.
    vm_state_keys = [k for k in statmap.keys() if k.startswith("num_vm_")]
    for key in vm_state_keys:
        vm_state = key[7:]
        self.vm_states[vm_state] = int(statmap[key])

    # Track the number of instances in a given task_state.
    task_state_keys = [k for k in statmap.keys() if
                       k.startswith("num_task_")]
    for key in task_state_keys:
        task_state = key[9:]
        self.task_states[task_state] = int(statmap[key])

    # Track the number of instances per OS type.
    os_keys = [k for k in statmap.keys() if k.startswith("num_os_type_")]
    for key in os_keys:
        os = key[12:]
        self.num_instances_by_os_type[os] = int(statmap[key])

    # Get num_io_ops.
    self.num_io_ops = int(statmap.get('io_workload', 0))
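All the per-project and per-state counters above are rebuilt purely from key prefixes of the flattened stats map; key[9:], key[7:] and key[12:] are just prefix strips. A toy reproduction with a made-up stats map:

# A made-up flattened stats map, in the shape _statmap() produces.
statmap = {
    'num_instances': '4',
    'num_proj_admin': '3',
    'num_proj_demo': '1',
    'num_vm_active': '3',
    'num_vm_building': '1',
    'num_task_spawning': '1',
    'num_os_type_linux': '4',
    'io_workload': '2',
}

num_instances_by_project = {k[len('num_proj_'):]: int(v)
                            for k, v in statmap.items()
                            if k.startswith('num_proj_')}
vm_states = {k[len('num_vm_'):]: int(v)
             for k, v in statmap.items()
             if k.startswith('num_vm_')}

print(num_instances_by_project)   # {'admin': 3, 'demo': 1}
print(vm_states)                  # {'active': 3, 'building': 1}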
2. hosts = self.host_manager.get_filtered_hosts(hosts, filter_properties)

Here is the implementation of get_filtered_hosts:

def get_filtered_hosts(self, hosts, filter_properties,
                       filter_class_names=None):
    """
    Filter hosts and return only the ones passing all filters.
    """

    def _strip_ignore_hosts(host_map, hosts_to_ignore):
        ignored_hosts = []
        for host in hosts_to_ignore:
            if host in host_map:
                del host_map[host]
                ignored_hosts.append(host)
        ignored_hosts_str = ', '.join(ignored_hosts)
        msg = _('Host filter ignoring hosts: %(ignored_hosts_str)s')
        LOG.debug(msg, locals())

    def _match_forced_hosts(host_map, hosts_to_force):
        for host in host_map.keys():
            if host not in hosts_to_force:
                del host_map[host]
        if not host_map:
            forced_hosts_str = ', '.join(hosts_to_force)
            msg = _("No hosts matched due to not matching 'force_hosts'"
                    "value of '%(forced_hosts_str)s'")
            LOG.debug(msg, locals())
            return
        forced_hosts_str = ', '.join(host_map.iterkeys())
        msg = _('Host filter forcing available hosts to '
                '%(forced_hosts_str)s')
        LOG.debug(msg, locals())

    # Return the validated list of usable filter classes.
    filter_classes = self._choose_host_filters(filter_class_names)
    ignore_hosts = filter_properties.get('ignore_hosts', [])
    force_hosts = filter_properties.get('force_hosts', [])
    if ignore_hosts or force_hosts:
        name_to_cls_map = dict([(x.host, x) for x in hosts])
        if ignore_hosts:
            _strip_ignore_hosts(name_to_cls_map, ignore_hosts)
            if not name_to_cls_map:
                return []
        if force_hosts:
            _match_forced_hosts(name_to_cls_map, force_hosts)
            # NOTE(vish): Skip filters on forced hosts.
            if name_to_cls_map:
                return name_to_cls_map.values()
        hosts = name_to_cls_map.itervalues()
    return self.filter_handler.get_filtered_objects(filter_classes,
                                                    hosts, filter_properties)
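Before any filter class runs, ignore_hosts and force_hosts are handled as plain dictionary surgery on a name-to-HostState map, and forced hosts skip the filters entirely. A standalone toy with hypothetical host names:

def apply_ignore_and_force(host_map, ignore_hosts=(), force_hosts=()):
    # Drop ignored hosts, then (when force_hosts is given) keep only the
    # forced ones: the same effect as _strip_ignore_hosts and
    # _match_forced_hosts above.
    for host in ignore_hosts:
        host_map.pop(host, None)
    if force_hosts:
        for host in list(host_map):
            if host not in force_hosts:
                del host_map[host]
    return host_map


hosts = {'compute1': 'state-1', 'compute2': 'state-2', 'compute3': 'state-3'}
print(apply_ignore_and_force(hosts,
                             ignore_hosts=['compute2'],
                             force_hosts=['compute1']))
# {'compute1': 'state-1'}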
2.1 filter_classes = self._choose_host_filters(filter_class_names)

This statement returns the validated list of usable filters. Here is the implementation of _choose_host_filters:

def _choose_host_filters(self, filter_cls_names):
    """
    Return the validated list of usable filter classes.
    """
    # Use the default filters.
    if filter_cls_names is None:
        # CONF.scheduler_default_filters: the list of filter class names
        # applied to hosts when the request does not specify particular
        # filter classes; the default value is:
        # ['RetryFilter', 'AvailabilityZoneFilter', 'RamFilter',
        #  'ComputeFilter', 'ComputeCapabilitiesFilter',
        #  'ImagePropertiesFilter']
        filter_cls_names = CONF.scheduler_default_filters
    if not isinstance(filter_cls_names, (list, tuple)):
        filter_cls_names = [filter_cls_names]
    good_filters = []
    bad_filters = []

    # Loop over the configured filters (here, the default ones).
    for filter_name in filter_cls_names:
        found_class = False
        # Loop over all available filter classes.
        for cls in self.filter_classes:
            # A configured filter found among the available classes is
            # considered usable.
            if cls.__name__ == filter_name:
                good_filters.append(cls)
                found_class = True
                break
        if not found_class:
            bad_filters.append(filter_name)
    if bad_filters:
        msg = ", ".join(bad_filters)
        raise exception.SchedulerHostFilterNotFound(filter_name=msg)
    return good_filters

2.2 return self.filter_handler.get_filtered_objects(filter_classes, hosts, filter_properties)

def get_filtered_objects(self, filter_classes, objs, filter_properties):
    for filter_cls in filter_classes:
        objs = filter_cls().filter_all(objs, filter_properties)
    return list(objs)


def filter_all(self, filter_obj_list, filter_properties):
    for obj in filter_obj_list:
        if self._filter_one(obj, filter_properties):
            yield obj


def _filter_one(self, obj, filter_properties):
    """
    Return True if the object passes the filter, otherwise False.
    """
    return self.host_passes(obj, filter_properties)
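Putting filter_all, _filter_one and host_passes together, a filter class only needs to implement host_passes. The sketch below is a hypothetical free-RAM filter written against that protocol; it is not one of Nova's real filter classes (the real RamFilter also takes the RAM allocation ratio into account):

class BaseHostFilter(object):
    # Minimal restatement of the protocol shown above.

    def filter_all(self, filter_obj_list, filter_properties):
        for obj in filter_obj_list:
            if self._filter_one(obj, filter_properties):
                yield obj

    def _filter_one(self, obj, filter_properties):
        return self.host_passes(obj, filter_properties)

    def host_passes(self, host_state, filter_properties):
        raise NotImplementedError()


class ToyRamFilter(BaseHostFilter):
    # Pass only hosts whose free RAM covers the requested memory_mb.

    def host_passes(self, host_state, filter_properties):
        requested_mb = filter_properties['instance_type']['memory_mb']
        return host_state.free_ram_mb >= requested_mb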
3. weighed_hosts = self.host_manager.get_weighed_hosts(hosts, filter_properties)

def get_weighed_hosts(self, hosts, weight_properties):
    """
    Weigh the hosts;
    return a sorted list of WeighedObjects (highest score first).
    """
    # get_weighed_objects: return a sorted list of WeighedObjects
    # (highest score first).
    return self.weight_handler.get_weighed_objects(self.weight_classes,
                                                   hosts, weight_properties)
# scheduler_weight_classes: this option defines which weigher classes are
# used to weigh the hosts; the default value is
# nova.scheduler.weights.all_weighers.
# get_matching_classes: get the loadable classes from a list of class or
# module names and return the list of classes;
# so this statement loads all the classes referred to by
# nova.scheduler.weights.all_weighers.
self.weight_classes = self.weight_handler.get_matching_classes(
    CONF.scheduler_weight_classes)
cfg.ListOpt('scheduler_weight_classes',
            default=['nova.scheduler.weights.all_weighers'],
            help='Which weight class names to use for weighing hosts'),
# This option defines which weigher classes are used to weigh the hosts;
# the default value is nova.scheduler.weights.all_weighers.
def all_weighers():
    """
    Return a list of weight plugin classes found in this directory.
    """
    # least_cost_functions: this option decides whether the deprecated
    # least-cost weighing functions are applied; its default value is None.
    # compute_fill_first_cost_fn_weight: its default value is also None.
    if (CONF.least_cost_functions is not None or
            CONF.compute_fill_first_cost_fn_weight is not None):
        LOG.deprecated(('least_cost has been deprecated in favor of '
                        'the RAM Weigher.'))
        return least_cost.get_least_cost_weighers()
    return HostWeightHandler().get_all_classes()
def get_weighed_objects(self, weigher_classes, obj_list,
                        weighing_properties):
    """
    Return a sorted list of WeighedObjects (highest score first).
    """
    if not obj_list:
        return []
    # object_class = WeighedObject
    weighed_objs = [self.object_class(obj, 0.0) for obj in obj_list]
    for weigher_cls in weigher_classes:
        weigher = weigher_cls()
        weigher.weigh_objects(weighed_objs, weighing_properties)
    return sorted(weighed_objs, key=lambda x: x.weight, reverse=True)

object_class = WeighedObject
class WeighedObject(object):
    """
    Object with weight information.
    """
    def __init__(self, obj, weight):
        self.obj = obj
        self.weight = weight
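The weighing side has the same shape as filtering: each weigher writes a score into WeighedObject.weight and the handler sorts descending. As a sketch (not Nova's real RAM weigher), a hypothetical weigher that prefers hosts with more free RAM could look like this:

class ToyFreeRamWeigher(object):
    # Score each wrapped host by its free RAM; a larger score is better.

    def weigh_objects(self, weighed_obj_list, weight_properties):
        for weighed_obj in weighed_obj_list:
            weighed_obj.weight += weighed_obj.obj.free_ram_mb


# Usage against the WeighedObject wrapper above, assuming host_states is a
# list of HostState objects:
#   weighed = [WeighedObject(h, 0.0) for h in host_states]
#   ToyFreeRamWeigher().weigh_objects(weighed, {})
#   best_first = sorted(weighed, key=lambda x: x.weight, reverse=True)

The deprecated least-cost path shown next follows the same pattern; it just computes the score as a weighted sum of cost functions.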
def get_least_cost_weighers():
    cost_functions = _get_cost_functions()

    # Unfortunately we need to import this late so we don't have an
    # import loop.
    from nova.scheduler import weights

    class _LeastCostWeigher(weights.BaseHostWeigher):
        def weigh_objects(self, weighted_hosts, weight_properties):
            for host in weighted_hosts:
                host.weight = sum(weight * fn(host.obj, weight_properties)
                            for weight, fn in cost_functions)

    return [_LeastCostWeigher]
def _get_cost_functions():
    """
    Returns a list of tuples containing weights and cost functions to
    use for weighing hosts
    """
    cost_fns_conf = CONF.least_cost_functions
    if cost_fns_conf is None:
        # The old default.  This will get fixed up below.
        fn_str = 'nova.scheduler.least_cost.compute_fill_first_cost_fn'
        cost_fns_conf = [fn_str]

    cost_fns = []
    for cost_fn_str in cost_fns_conf:
        short_name = cost_fn_str.split('.')[-1]
        if not (short_name.startswith('compute_') or
                short_name.startswith('noop')):
            continue
        # Fix up any old paths to the new paths
        if cost_fn_str.startswith('nova.scheduler.least_cost.'):
            cost_fn_str = ('nova.scheduler.weights.least_cost' +
                           cost_fn_str[25:])
        try:
            # NOTE: import_class is somewhat misnamed since
            # the weighing function can be any non-class callable
            # (i.e., no 'self')
            cost_fn = importutils.import_class(cost_fn_str)
        except ImportError:
            raise exception.SchedulerCostFunctionNotFound(
                    cost_fn_str=cost_fn_str)

        try:
            flag_name = "%s_weight" % cost_fn.__name__
            weight = getattr(CONF, flag_name)
        except AttributeError:
            raise exception.SchedulerWeightFlagNotFound(
                    flag_name=flag_name)

        # Set the original default.
        if (flag_name == 'compute_fill_first_cost_fn_weight' and
                weight is None):
            weight = -1.0

        cost_fns.append((weight, cost_fn))

    return cost_fns
In the end, the flow is fairly simple: it is just a process of filtering and then weighing.
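As a recap, the whole decision can be compressed into a few lines: build the host states, run them through the filters, weigh the survivors, and pick from the top of the list. The sketch below only illustrates that control flow with made-up classes and data; it is not Nova's actual code.

import random
from collections import namedtuple

HostState = namedtuple('HostState', ['host', 'free_ram_mb'])


class WeighedObject(object):
    def __init__(self, obj, weight):
        self.obj, self.weight = obj, weight


class EnoughRamFilter(object):
    def host_passes(self, host, props):
        return host.free_ram_mb >= props['memory_mb']


class FreeRamWeigher(object):
    def weigh_objects(self, weighed, props):
        for w in weighed:
            w.weight += w.obj.free_ram_mb


def schedule(hosts, props, filters, weighers, subset_size=1):
    # 1. Filter: keep only the hosts that pass every filter.
    for f in filters:
        hosts = [h for h in hosts if f.host_passes(h, props)]
    if not hosts:
        raise RuntimeError('NoValidHost')
    # 2. Weigh: score the survivors and sort them best first.
    weighed = [WeighedObject(h, 0.0) for h in hosts]
    for w in weighers:
        w.weigh_objects(weighed, props)
    weighed.sort(key=lambda x: x.weight, reverse=True)
    # 3. Pick: choose at random from the top-N subset.
    subset_size = max(1, min(subset_size, len(weighed)))
    return random.choice(weighed[:subset_size]).obj


hosts = [HostState('compute1', 2048),
         HostState('compute2', 8192),
         HostState('compute3', 512)]
print(schedule(hosts, {'memory_mb': 1024},
               [EnoughRamFilter()], [FreeRamWeigher()]).host)   # compute2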