# your code
def function(n):
    # Recursive factorial: n! = n * (n-1)!
    if n == 1:
        return 1
    else:
        return n * function(n - 1)

Sum = 0
for i in range(1, 21):
    Sum += function(i)   # accumulate 1! + 2! + ... + 20!
print(Sum)
2561327494111820313
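As a quick sanity check (my addition, not part of the required answer), the same sum can be computed with the standard library's math.factorial:

import math

# Should print the same value as the recursive version above: 2561327494111820313
print(sum(math.factorial(i) for i in range(1, 21)))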
# your code
s1 = [9, 7, 8, 3, 2, 1, 55, 6]
x = len(s1)
y = max(s1)
z = min(s1)
print("Number of elements:", x, "Max:", y, "Min:", z)
s1.append(10)     # append 10 to the end of the list
print(s1)
s1.remove(55)     # remove the first occurrence of 55
print(s1)
Number of elements: 8 Max: 55 Min: 1
[9, 7, 8, 3, 2, 1, 55, 6, 10]
[9, 7, 8, 3, 2, 1, 6, 10]
TTTTTx
TTTTxx
TTTxxx
TTxxxx
Txxxxx
# your code
for i in range(1, 6):
    # row i: (6 - i) copies of "T" followed by i copies of "x"
    for j in range(6 - i):
        print("T", end="")
    for k in range(i):
        print("x", end="")
    print()
TTTTTx
TTTTxx
TTTxxx
TTxxxx
Txxxxx
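A more compact way to produce the same pattern (a sketch of an alternative, not the original answer) is to build each row with string repetition instead of nested loops:

for i in range(1, 6):
    print("T" * (6 - i) + "x" * i)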
# your code
def add(x, y):
    return x + y

def subtract(x, y):
    return x - y

def multiply(x, y):
    return x * y

def divide(x, y):
    return x / y

print("Select an operation")
print("1. Add")
print("2. Subtract")
print("3. Multiply")
print("4. Divide")
choice = input("Choice (1/2/3/4): ")
num1 = float(input("Enter the first number: "))
num2 = float(input("Enter the second number: "))
if choice == '1':
    print(num1, "+", num2, "=", add(num1, num2))
elif choice == '2':
    print(num1, "-", num2, "=", subtract(num1, num2))
elif choice == '3':
    print(num1, "*", num2, "=", multiply(num1, num2))
elif choice == '4':
    if num2 == 0:
        print("Cannot divide by zero")   # guard against ZeroDivisionError
    else:
        print(num1, "/", num2, "=", divide(num1, num2))
else:
    print("Invalid input")
Select an operation
1. Add
2. Subtract
3. Multiply
4. Divide
56.0 + 32.0 = 88.0
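As a variant (a sketch of my own, not the structure the assignment asks for), the if/elif chain can be replaced with dispatch tables that map the menu choice to the corresponding function and symbol, reusing add/subtract/multiply/divide and choice/num1/num2 from the code above:

# Hypothetical refactor: dispatch tables instead of the if/elif chain
operations = {'1': add, '2': subtract, '3': multiply, '4': divide}
symbols = {'1': '+', '2': '-', '3': '*', '4': '/'}

op = operations.get(choice)
if op is None:
    print("Invalid input")
elif op is divide and num2 == 0:
    print("Cannot divide by zero")
else:
    print(num1, symbols[choice], num2, "=", op(num1, num2))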
# your code
class Student:
    def __init__(self, name, age, *cou):
        self.name = name
        self.age = age
        self.course = cou          # tuple holding the score list(s) passed in
    def get_name(self):
        return self.name
    def get_age(self):
        return self.age
    def get_course(self):
        # max(self.course) selects a score list from the tuple,
        # the outer max returns the largest score in that list
        return max(max(self.course))

zm = Student('zhangming', 20, [69, 88, 100])
print('Student name:', zm.get_name(), 'Age:', zm.get_age(), 'Highest score:', zm.get_course())
Student name: zhangming Age: 20 Highest score: 100
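One caveat (my note, not in the original): max(max(self.course)) only returns the overall highest score because a single score list is passed. With several lists, the inner max would compare the lists lexicographically instead of scanning every score. A sketch of a more general get_course, using a hypothetical subclass Student2 so the class above stays unchanged:

from itertools import chain

class Student2(Student):
    def get_course(self):
        # Flatten every score list passed via *cou, then take the overall maximum
        return max(chain.from_iterable(self.course))

zm2 = Student2('zhangming', 20, [69, 88], [100, 95])
print(zm2.get_course())   # 100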
X | Y | X | Y |
---|---|---|---|
-3.00 | 4 | 0.15 | 255 |
-2.50 | 12 | 0.75 | 170 |
-1.75 | 50 | 1.25 | 100 |
-1.15 | 120 | 1.85 | 20 |
-0.50 | 205 | 2.45 | 14 |
# your code
import matplotlib.pyplot as plt

# Prepare the data
x_data = [-3.00, -2.50, -1.75, -1.15, -0.50, 0.15, 0.75, 1.25, 1.85, 2.45]
y_data = [4, 12, 50, 120, 205, 255, 170, 100, 20, 14]

# Render CJK text and the minus sign correctly
plt.rcParams["font.sans-serif"] = ["SimHei"]
plt.rcParams["axes.unicode_minus"] = False

# plt.bar() draws a bar chart; the ggplot style adds grid lines
plt.style.use('ggplot')
for i in range(len(x_data)):
    plt.bar(x_data[i], y_data[i])   # one bar per call, so each call takes the next color in the style's cycle
plt.show()
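The same chart can be drawn with a single plt.bar call (a sketch of an alternative, not the original answer); all bars then share one color, and an explicit width is needed because the x positions are unevenly spaced floats (width=0.4 is my assumption, chosen to stay below the smallest gap of 0.5):

import matplotlib.pyplot as plt

x_data = [-3.00, -2.50, -1.75, -1.15, -0.50, 0.15, 0.75, 1.25, 1.85, 2.45]
y_data = [4, 12, 50, 120, 205, 255, 170, 100, 20, 14]

plt.style.use('ggplot')
plt.bar(x_data, y_data, width=0.4)   # single call, one bar per (x, y) pair
plt.xlabel("X")
plt.ylabel("Y")
plt.show()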
Note: split the data into training and test sets at a ratio of 8:2, and use the last two digits of your student ID as the random seed (e.g., if the last two digits are 01, set random_state=1; if they are 34, set random_state=34). For the final result, it is enough to print the w and b coefficients of each regression.
No. | X1 | X2 | X3 | X4 | Y |
---|---|---|---|---|---|
1 | 7 | 26 | 6 | 60 | 78.5 |
2 | 1 | 29 | 15 | 52 | 74.3 |
3 | 11 | 56 | 8 | 20 | 104.3 |
4 | 11 | 31 | 8 | 47 | 87.6 |
5 | 7 | 52 | 6 | 33 | 95.9 |
6 | 11 | 55 | 9 | 22 | 109.2 |
7 | 3 | 71 | 17 | 6 | 102.7 |
8 | 1 | 31 | 22 | 44 | 72.5 |
9 | 2 | 54 | 18 | 22 | 93.1 |
10 | 21 | 47 | 4 | 26 | 115.9 |
11 | 1 | 40 | 23 | 34 | 83.8 |
12 | 11 | 66 | 9 | 12 | 113.3 |
13 | 10 | 68 | 8 | 12 | 109.4 |
# your code
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression, Ridge, Lasso

# Load the data
data = pd.DataFrame({
    'X1': [7, 1, 11, 11, 7, 11, 3, 1, 2, 21, 1, 11, 10],
    'X2': [26, 29, 56, 31, 52, 55, 71, 31, 54, 47, 40, 66, 68],
    'X3': [6, 15, 8, 8, 6, 9, 17, 22, 18, 4, 23, 9, 8],
    'X4': [60, 52, 20, 47, 33, 22, 6, 44, 22, 26, 34, 12, 12],
    'Y': [78.5, 74.3, 104.3, 87.6, 95.9, 109.2, 102.7, 72.5, 93.1, 115.9, 83.8, 113.3, 109.4]
})

# Separate features and target
X = data.iloc[:, :-1]
Y = data.iloc[:, -1]

# Split into training and test sets (8:2, seed = last two digits of student ID)
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.2, random_state=7)

# Linear regression
lr = LinearRegression()
lr.fit(X_train, Y_train)
print("Linear regression:")
print("Coefficients:", lr.coef_)
print("Intercept:", lr.intercept_)
print("Training score:", lr.score(X_train, Y_train))
print("Test score:", lr.score(X_test, Y_test))

# Ridge regression
ridge = Ridge(alpha=1.0)
ridge.fit(X_train, Y_train)
print("\nRidge regression:")
print("Coefficients:", ridge.coef_)
print("Intercept:", ridge.intercept_)
print("Training score:", ridge.score(X_train, Y_train))
print("Test score:", ridge.score(X_test, Y_test))

# Lasso regression
lasso = Lasso(alpha=0.1)
lasso.fit(X_train, Y_train)
print("\nLasso regression:")
print("Coefficients:", lasso.coef_)
print("Intercept:", lasso.intercept_)
print("Training score:", lasso.score(X_train, Y_train))
print("Test score:", lasso.score(X_test, Y_test))
Linear regression:
Coefficients: [ 0.1854168 -0.78663994 -1.43799533 -1.43759232]
Intercept: 191.93065178571953
Training score: 0.98890902348995
Test score: 0.7370799141278985

Ridge regression:
Coefficients: [ 0.66584554 -0.3142857 -0.93664176 -0.96849268]
Intercept: 145.5152261708593
Training score: 0.9886072989605169
Test score: 0.8088243850078585

Lasso regression:
Coefficients: [ 0.91920689 -0.06201951 -0.67298391 -0.71837576]
Intercept: 120.80874307100622
Training score: 0.9882008133967428
Test score: 0.8370130460379182
d:\Anaconda3\lib\site-packages\sklearn\linear_model\_coordinate_descent.py:647: ConvergenceWarning: Objective did not converge. You might want to increase the number of iterations, check the scale of the features or consider increasing regularisation. Duality gap: 1.692e+00, tolerance: 2.494e-01 model = cd_fast.enet_coordinate_descent(
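The ConvergenceWarning above means the Lasso coordinate-descent solver stopped before reaching its tolerance. Two common remedies are raising max_iter and standardizing the features; a hedged sketch reusing X_train/Y_train from the code above (the specific values are my assumptions, and the refit coefficients would differ slightly from those reported):

from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import Lasso

# Remedy 1: give coordinate descent more iterations to converge
lasso_more_iter = Lasso(alpha=0.1, max_iter=100_000)
lasso_more_iter.fit(X_train, Y_train)
print("Coefficients:", lasso_more_iter.coef_, "Intercept:", lasso_more_iter.intercept_)

# Remedy 2: standardize the features first (coefficients are then in scaled units)
lasso_scaled = make_pipeline(StandardScaler(), Lasso(alpha=0.1))
lasso_scaled.fit(X_train, Y_train)
print("Scaled-space coefficients:", lasso_scaled[-1].coef_)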
Note: split the data into training and test sets at a ratio of 1:1, and use the last two digits of your student ID as the random seed (e.g., if the last two digits are 01, set random_state=1; if they are 34, set random_state=34). For the final result, output three items: your predictions, the actual labels, and the model score.
No. | Age | Income | Student | Credit rating | Buys computer |
---|---|---|---|---|---|
1 | <=30 | High | No | Fair | No |
2 | <=30 | High | No | Excellent | No |
3 | 31-40 | High | No | Fair | Yes |
4 | >40 | Medium | No | Fair | Yes |
5 | >40 | Low | Yes | Fair | Yes |
6 | >40 | Low | Yes | Excellent | No |
7 | 31-40 | Low | Yes | Excellent | Yes |
8 | <=30 | Medium | No | Fair | No |
9 | <=30 | Low | Yes | Fair | Yes |
10 | >40 | Medium | Yes | Fair | Yes |
11 | <=30 | Medium | Yes | Excellent | Yes |
12 | 31-40 | Medium | No | Excellent | Yes |
13 | 31-40 | High | Yes | Fair | Yes |
14 | >40 | Medium | No | Excellent | No |
# your code
import pandas as pd
from sklearn.naive_bayes import GaussianNB
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score

# Load the data
data = pd.DataFrame({
    'Age': ['<=30', '<=30', '31-40', '>40', '>40', '>40', '31-40', '<=30', '<=30', '>40', '<=30', '31-40', '31-40', '>40'],
    'Income': ['High', 'High', 'High', 'Medium', 'Low', 'Low', 'Low', 'Medium', 'Low', 'Medium', 'Medium', 'Medium', 'High', 'Medium'],
    'Student': ['No', 'No', 'No', 'No', 'Yes', 'Yes', 'Yes', 'No', 'Yes', 'Yes', 'Yes', 'No', 'Yes', 'No'],
    'Credit rating': ['Fair', 'Excellent', 'Fair', 'Fair', 'Fair', 'Excellent', 'Excellent', 'Fair', 'Fair', 'Fair', 'Excellent', 'Excellent', 'Fair', 'Excellent'],
    'Buys computer': ['No', 'No', 'Yes', 'Yes', 'Yes', 'No', 'Yes', 'No', 'Yes', 'Yes', 'Yes', 'Yes', 'Yes', 'No']
})

# Encode the categorical features as numbers
data.replace({'Age': {'<=30': 1, '31-40': 2, '>40': 3},
              'Income': {'Low': 1, 'Medium': 2, 'High': 3},
              'Student': {'No': 0, 'Yes': 1},
              'Credit rating': {'Fair': 1, 'Excellent': 2}}, inplace=True)

# Separate features and labels
X = data.iloc[:, :-1]
y = data.iloc[:, -1]

# Split into training and test sets (1:1, seed = last two digits of student ID)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.5, random_state=7)

# Create the naive Bayes classifier
clf = GaussianNB()

# Train the model
clf.fit(X_train, y_train)

# Predict on the test set
y_pred = clf.predict(X_test)

# Output the predictions, the actual labels, and the model score
print("Predicted:", y_pred)
print("Actual:", y_test.values)
print("Model score:", accuracy_score(y_test, y_pred))
Predicted: ['Yes' 'Yes' 'Yes' 'Yes' 'Yes' 'Yes' 'Yes']
Actual: ['Yes' 'Yes' 'Yes' 'No' 'No' 'No' 'No']
Model score: 0.42857142857142855
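Since all four features are integer-coded categories rather than continuous values, CategoricalNB is arguably a better fit than GaussianNB here, though with only 7 training rows any score will be unstable. A hedged sketch reusing X_train/X_test/y_train/y_test from above (my variation, not the model the assignment specifies):

from sklearn.naive_bayes import CategoricalNB

# CategoricalNB expects non-negative integer category codes, which the encoding
# above already provides; min_categories=4 guards against codes that appear
# only in the test split
cnb = CategoricalNB(alpha=1.0, min_categories=4)
cnb.fit(X_train, y_train)
print("Predicted:", cnb.predict(X_test))
print("Model score:", cnb.score(X_test, y_test))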